From bc9c6c22dad7f17590d476fe9f7a25e7e7a167ad Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 24 Nov 2020 15:05:47 -0600 Subject: Collect logs from integration test runs (#675) During teardown of every cloud instance, run 'cloud-init collect-logs', then transfer and unpack locally. Two new integration settings have been added to specify when to perform this action (ALWAYS, ON_ERROR, NEVER), and where to store these logs. --- tests/integration_tests/conftest.py | 46 +++++++++++++++++++++++++ tests/integration_tests/instances.py | 8 ++--- tests/integration_tests/integration_settings.py | 10 ++++++ 3 files changed, 60 insertions(+), 4 deletions(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index 73b44bfc..6e1465be 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -1,9 +1,12 @@ # This file is part of cloud-init. See LICENSE file for license information. +import datetime import logging import os import pytest import sys +from tarfile import TarFile from contextlib import contextmanager +from pathlib import Path from tests.integration_tests import integration_settings from tests.integration_tests.clouds import ( @@ -14,6 +17,7 @@ from tests.integration_tests.clouds import ( LxdContainerCloud, LxdVmCloud, ) +from tests.integration_tests.instances import IntegrationInstance log = logging.getLogger('integration_testing') @@ -29,6 +33,8 @@ platforms = { 'lxd_vm': LxdVmCloud, } +session_start_time = datetime.datetime.now().strftime('%y%m%d%H%M%S') + def pytest_runtest_setup(item): """Skip tests on unsupported clouds. @@ -114,6 +120,43 @@ def setup_image(session_cloud): log.info('Done with environment setup') +def _collect_logs(instance: IntegrationInstance, node_id: str, + test_failed: bool): + """Collect logs from remote instance. + + Args: + instance: The current IntegrationInstance to collect logs from + node_id: The pytest representation of this test, E.g.: + tests/integration_tests/test_example.py::TestExample.test_example + test_failed: If test failed or not + """ + if any([ + integration_settings.COLLECT_LOGS == 'NEVER', + integration_settings.COLLECT_LOGS == 'ON_ERROR' and not test_failed + ]): + return + instance.execute( + 'cloud-init collect-logs -u -t /var/tmp/cloud-init.tar.gz') + node_id_path = Path( + node_id + .replace('.py', '') # Having a directory with '.py' would be weird + .replace('::', os.path.sep) # Turn classes/tests into paths + .replace('[', '-') # For parametrized names + .replace(']', '') # For parameterized names + ) + log_dir = Path( + integration_settings.LOCAL_LOG_PATH + ) / session_start_time / node_id_path + if not log_dir.exists(): + log_dir.mkdir(parents=True) + tarball_path = log_dir / 'cloud-init.tar.gz' + instance.pull_file('/var/tmp/cloud-init.tar.gz', tarball_path) + + tarball = TarFile.open(str(tarball_path)) + tarball.extractall(path=str(log_dir)) + tarball_path.unlink() + + @contextmanager def _client(request, fixture_utils, session_cloud): """Fixture implementation for the client fixtures. 
@@ -132,7 +175,10 @@ def _client(request, fixture_utils, session_cloud): with session_cloud.launch( user_data=user_data, launch_kwargs=launch_kwargs ) as instance: + previous_failures = request.session.testsfailed yield instance + test_failed = request.session.testsfailed - previous_failures > 0 + _collect_logs(instance, request.node.nodeid, test_failed) @pytest.yield_fixture diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py index 9b13288c..a0a5fb6b 100644 --- a/tests/integration_tests/instances.py +++ b/tests/integration_tests/instances.py @@ -47,14 +47,14 @@ class IntegrationInstance: def pull_file(self, remote_path, local_path): # First copy to a temporary directory because of permissions issues tmp_path = _get_tmp_path() - self.instance.execute('cp {} {}'.format(remote_path, tmp_path)) - self.instance.pull_file(tmp_path, local_path) + self.instance.execute('cp {} {}'.format(str(remote_path), tmp_path)) + self.instance.pull_file(tmp_path, str(local_path)) def push_file(self, local_path, remote_path): # First push to a temporary directory because of permissions issues tmp_path = _get_tmp_path() - self.instance.push_file(local_path, tmp_path) - self.execute('mv {} {}'.format(tmp_path, remote_path)) + self.instance.push_file(str(local_path), tmp_path) + self.execute('mv {} {}'.format(tmp_path, str(remote_path))) def read_from_file(self, remote_path) -> str: result = self.execute('cat {}'.format(remote_path)) diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py index a0609f7e..9be9a94f 100644 --- a/tests/integration_tests/integration_settings.py +++ b/tests/integration_tests/integration_settings.py @@ -55,6 +55,16 @@ EXISTING_INSTANCE_ID = None # A path to a valid package to be uploaded and installed CLOUD_INIT_SOURCE = 'NONE' +# Before an instance is torn down, we run `cloud-init collect-logs` +# and transfer them locally. These settings specify when to collect these +# logs and where to put them on the local filesystem +# One of: +# 'ALWAYS' +# 'ON_ERROR' +# 'NEVER' +COLLECT_LOGS = 'ON_ERROR' +LOCAL_LOG_PATH = '/tmp/cloud_init_test_logs' + ################################################################## # GCE SPECIFIC SETTINGS ################################################################## -- cgit v1.2.3 From 53f2bfbb92c8281fd610250cb1a6107bd4eeb50a Mon Sep 17 00:00:00 2001 From: lucasmoura Date: Thu, 26 Nov 2020 18:53:57 -0300 Subject: Drop use_sudo attribute on IntegrationInstance (#694) pycloudlib will stop running commands as root by default on LXD. To align with that change and make the behavior consistent with other clouds we support, our LXD instances will now run the commands with sudo by default. 
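To make the new default concrete, here is a minimal sketch (an illustration only, not part of the patch: `client` stands for any already-launched IntegrationInstance, and the semantics are those of the diff below):

    # Commands now run privileged by default on every cloud, LXD included.
    result = client.execute('whoami')
    assert result.stdout.strip() == 'root'

    # Opting out is still possible, except on images whose default user is
    # root, where execute() raises
    # Exception('Root user cannot run unprivileged').
    result = client.execute('whoami', use_sudo=False)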
--- tests/integration_tests/instances.py | 29 ++++++++++++----------------- 1 file changed, 12 insertions(+), 17 deletions(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py index a0a5fb6b..033847b8 100644 --- a/tests/integration_tests/instances.py +++ b/tests/integration_tests/instances.py @@ -26,8 +26,6 @@ def _get_tmp_path(): class IntegrationInstance: - use_sudo = True - def __init__(self, cloud: 'IntegrationCloud', instance: BaseInstance, settings=integration_settings): self.cloud = cloud @@ -37,11 +35,9 @@ class IntegrationInstance: def destroy(self): self.instance.delete() - def execute(self, command, *, use_sudo=None) -> Result: + def execute(self, command, *, use_sudo=True) -> Result: if self.instance.username == 'root' and use_sudo is False: raise Exception('Root user cannot run unprivileged') - if use_sudo is None: - use_sudo = self.use_sudo return self.instance.execute(command, use_sudo=use_sudo) def pull_file(self, remote_path, local_path): @@ -97,21 +93,21 @@ class IntegrationInstance: def install_proposed_image(self): log.info('Installing proposed image') remote_script = ( - '{sudo} echo deb "http://archive.ubuntu.com/ubuntu ' + 'echo deb "http://archive.ubuntu.com/ubuntu ' '$(lsb_release -sc)-proposed main" | ' - '{sudo} tee /etc/apt/sources.list.d/proposed.list\n' - '{sudo} apt-get update -q\n' - '{sudo} apt-get install -qy cloud-init' - ).format(sudo='sudo' if self.use_sudo else '') + 'tee /etc/apt/sources.list.d/proposed.list\n' + 'apt-get update -q\n' + 'apt-get install -qy cloud-init' + ) self._install_new_cloud_init(remote_script) def install_ppa(self, repo): log.info('Installing PPA') remote_script = ( - '{sudo} add-apt-repository {repo} -y && ' - '{sudo} apt-get update -q && ' - '{sudo} apt-get install -qy cloud-init' - ).format(sudo='sudo' if self.use_sudo else '', repo=repo) + 'add-apt-repository {repo} -y && ' + 'apt-get update -q && ' + 'apt-get install -qy cloud-init' + ).format(repo=repo) self._install_new_cloud_init(remote_script) def install_deb(self): @@ -122,8 +118,7 @@ class IntegrationInstance: self.push_file( local_path=integration_settings.CLOUD_INIT_SOURCE, remote_path=remote_path) - remote_script = '{sudo} dpkg -i {path}'.format( - sudo='sudo' if self.use_sudo else '', path=remote_path) + remote_script = 'dpkg -i {path}'.format(path=remote_path) self._install_new_cloud_init(remote_script) def __enter__(self): @@ -151,4 +146,4 @@ class IntegrationOciInstance(IntegrationInstance): class IntegrationLxdInstance(IntegrationInstance): - use_sudo = False + pass -- cgit v1.2.3 From de3183c1ff4660dda23f2624c1cc24bb76de5bf5 Mon Sep 17 00:00:00 2001 From: lucasmoura Date: Thu, 26 Nov 2020 19:03:53 -0300 Subject: Parametrize ssh_keys_provided integration test (#700) --- .../modules/test_ssh_keys_provided.py | 138 +++++++++++---------- 1 file changed, 75 insertions(+), 63 deletions(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/modules/test_ssh_keys_provided.py b/tests/integration_tests/modules/test_ssh_keys_provided.py index 27d193c1..6aae96ae 100644 --- a/tests/integration_tests/modules/test_ssh_keys_provided.py +++ b/tests/integration_tests/modules/test_ssh_keys_provided.py @@ -83,66 +83,78 @@ ssh_keys: @pytest.mark.user_data(USER_DATA) class TestSshKeysProvided: - def test_ssh_dsa_keys_provided(self, class_client): - """Test dsa public key was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_dsa_key.pub") - assert ( - 
"AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4R" - "ZS8cNM4ZpeuE5UB/Nnr6OSU/nmbO8LuM") in out - - """Test dsa private key was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_dsa_key") - assert ( - "MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXr" - "hOVAfzZ6+jklP") in out - - def test_ssh_rsa_keys_provided(self, class_client): - """Test rsa public key was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_rsa_key.pub") - assert ( - "AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgT" - "LnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4") in out - - """Test rsa private key was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_rsa_key") - assert ( - "4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2un" - "RQvLZpMRdywBm") in out - - def test_ssh_rsa_certificate_provided(self, class_client): - """Test rsa certificate was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_rsa_key-cert.pub") - assert ( - "AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgMpg" - "BP4Phn3L8I7Vqh7lmHKcOfIokEvSEbHDw83Y3JloAAAAD") in out - - def test_ssh_certificate_updated_sshd_config(self, class_client): - """Test ssh certificate was added to /etc/ssh/sshd_config.""" - out = class_client.read_from_file("/etc/ssh/sshd_config").strip() - assert "HostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub" in out - - def test_ssh_ecdsa_keys_provided(self, class_client): - """Test ecdsa public key was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_ecdsa_key.pub") - assert ( - "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAAB" - "BBFsS5Tvky/IC/dXhE/afxxU") in out - - """Test ecdsa private key generated.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_ecdsa_key") - assert ( - "AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY" - "5mpZqxgX4vcgb") in out - - def test_ssh_ed25519_keys_provided(self, class_client): - """Test ed25519 public key was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_ed25519_key.pub") - assert ( - "AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6" - "G15dqjQ2XkNVOEnb5") in out - - """Test ed25519 private key was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_ed25519_key") - assert ( - "XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNT" - "OhteXao0Nl5DVThJ2+Q") in out + @pytest.mark.parametrize( + "config_path,expected_out", + ( + ( + "/etc/ssh/ssh_host_dsa_key.pub", + ( + "AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4R" + "ZS8cNM4ZpeuE5UB/Nnr6OSU/nmbO8LuM" + ), + ), + ( + "/etc/ssh/ssh_host_dsa_key", + ( + "MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXr" + "hOVAfzZ6+jklP" + ), + ), + ( + "/etc/ssh/ssh_host_rsa_key.pub", + ( + "AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgT" + "LnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4" + ), + ), + ( + "/etc/ssh/ssh_host_rsa_key", + ( + "4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2un" + "RQvLZpMRdywBm" + ), + ), + ( + "/etc/ssh/ssh_host_rsa_key-cert.pub", + ( + "AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgMpg" + "BP4Phn3L8I7Vqh7lmHKcOfIokEvSEbHDw83Y3JloAAAAD" + ), + ), + ( + "/etc/ssh/sshd_config", + "HostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub", + ), + ( + "/etc/ssh/ssh_host_ecdsa_key.pub", + ( + "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAAB" + "BBFsS5Tvky/IC/dXhE/afxxU" + ), + ), + ( + "/etc/ssh/ssh_host_ecdsa_key", + ( + "AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY" + "5mpZqxgX4vcgb" + ), + ), + ( + "/etc/ssh/ssh_host_ed25519_key.pub", + ( + 
"AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6" + "G15dqjQ2XkNVOEnb5" + ), + ), + ( + "/etc/ssh/ssh_host_ed25519_key", + ( + "XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNT" + "OhteXao0Nl5DVThJ2+Q" + ), + ), + ) + ) + def test_ssh_provided_keys(self, config_path, expected_out, class_client): + out = class_client.read_from_file(config_path).strip() + assert expected_out in out -- cgit v1.2.3 From 2bd34bda9543c1b66f53eaf26706f0de887db187 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 30 Nov 2020 10:20:15 -0600 Subject: Delete image snapshots created for integration tests (#682) Integration tests have been leaving behind snapshot images, so now we clean them up. Also, in testing, found that in Azure, deleting a resource group will automatically delete the instance, so if KEEP_INSTANCE is True, we no longer delete the resource group. Co-authored-by: Daniel Watkins --- tests/integration_tests/clouds.py | 32 +++++++++++++++++++++++++++----- tests/integration_tests/conftest.py | 5 ++++- tests/integration_tests/instances.py | 6 +++--- 3 files changed, 34 insertions(+), 9 deletions(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index 88ac4408..4d5c2c2a 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -32,7 +32,14 @@ class IntegrationCloud(ABC): def __init__(self, settings=integration_settings): self.settings = settings self.cloud_instance = self._get_cloud_instance() - self.image_id = self._get_initial_image() + self._released_image_id = self._get_initial_image() + self.snapshot_id = None + + @property + def image_id(self): + if self.snapshot_id: + return self.snapshot_id + return self._released_image_id def emit_settings_to_log(self) -> None: log.info( @@ -50,13 +57,13 @@ class IntegrationCloud(ABC): raise NotImplementedError def _get_initial_image(self): - image_id = self.settings.OS_IMAGE + _released_image_id = self.settings.OS_IMAGE try: - image_id = self.cloud_instance.released_image( + _released_image_id = self.cloud_instance.released_image( self.settings.OS_IMAGE) except (ValueError, IndexError): pass - return image_id + return _released_image_id def _perform_launch(self, launch_kwargs): pycloudlib_instance = self.cloud_instance.launch(**launch_kwargs) @@ -100,6 +107,14 @@ class IntegrationCloud(ABC): def snapshot(self, instance): return self.cloud_instance.snapshot(instance, clean=True) + def delete_snapshot(self): + if self.snapshot_id: + log.info( + 'Deleting snapshot image created for this testrun: %s', + self.snapshot_id + ) + self.cloud_instance.delete_image(self.snapshot_id) + class Ec2Cloud(IntegrationCloud): datasource = 'ec2' @@ -130,7 +145,14 @@ class AzureCloud(IntegrationCloud): return Azure(tag='azure-integration-test') def destroy(self): - self.cloud_instance.delete_resource_group() + if self.settings.KEEP_INSTANCE: + log.info( + 'NOT deleting resource group because KEEP_INSTANCE is true ' + 'and deleting resource group would also delete instance. ' + 'Instance and resource group must both be manually deleted.' 
+ ) + else: + self.cloud_instance.delete_resource_group() class OciCloud(IntegrationCloud): diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index 6e1465be..d7e0fca2 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -81,7 +81,10 @@ def session_cloud(): cloud = platforms[integration_settings.PLATFORM]() cloud.emit_settings_to_log() yield cloud - cloud.destroy() + try: + cloud.delete_snapshot() + finally: + cloud.destroy() @pytest.fixture(scope='session', autouse=True) diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py index 033847b8..8f6573cd 100644 --- a/tests/integration_tests/instances.py +++ b/tests/integration_tests/instances.py @@ -86,9 +86,9 @@ class IntegrationInstance: version = self.execute('cloud-init -v').split()[-1] log.info('Installed cloud-init version: %s', version) self.instance.clean() - image_id = self.snapshot() - log.info('Created new image: %s', image_id) - self.cloud.image_id = image_id + snapshot_id = self.snapshot() + log.info('Created new image: %s', snapshot_id) + self.cloud.snapshot_id = snapshot_id def install_proposed_image(self): log.info('Installing proposed image') -- cgit v1.2.3 From bd76d5cfbde9c0801cefa851db82887e0bab34c1 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 2 Dec 2020 10:11:58 -0600 Subject: Add lxd-vm to list of valid integration test platforms (#705) --- tests/integration_tests/integration_settings.py | 1 + 1 file changed, 1 insertion(+) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py index 9be9a94f..94d54f74 100644 --- a/tests/integration_tests/integration_settings.py +++ b/tests/integration_tests/integration_settings.py @@ -10,6 +10,7 @@ KEEP_INSTANCE = False # One of: # lxd_container +# lxd_vm # azure # ec2 # gce -- cgit v1.2.3 From ed9bd19ca88e4c6458c95d26151c734112615e9a Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Thu, 3 Dec 2020 12:46:24 -0500 Subject: integration_tests: introduce IntegrationInstance.restart (#708) This wraps pycloudlib's `BaseInstance.restart` and `BaseInstance.wait` to pass the same parameters as on launch, to avoid cloud-init failures on the _reboot_ raising an exception. --- tests/integration_tests/bugs/test_lp1900837.py | 2 +- tests/integration_tests/instances.py | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/bugs/test_lp1900837.py b/tests/integration_tests/bugs/test_lp1900837.py index 3fe7d0d0..18b00475 100644 --- a/tests/integration_tests/bugs/test_lp1900837.py +++ b/tests/integration_tests/bugs/test_lp1900837.py @@ -22,7 +22,7 @@ class TestLogPermissionsNotResetOnReboot: assert "600" == _get_log_perms(client) # Reboot - client.instance.restart() + client.restart() # Check that permissions are not reset on reboot assert "600" == _get_log_perms(client) diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py index 8f6573cd..c68ee753 100644 --- a/tests/integration_tests/instances.py +++ b/tests/integration_tests/instances.py @@ -35,6 +35,17 @@ class IntegrationInstance: def destroy(self): self.instance.delete() + def restart(self): + """Restart this instance (via cloud mechanism) and wait for boot. + + This wraps pycloudlib's `BaseInstance.restart` to pass + `raise_on_cloudinit_failure=False` to `BaseInstance.wait`, mirroring + our launch behaviour. 
+ """ + self.instance.restart(wait=False) + log.info("Instance restarted; waiting for boot") + self.instance.wait(raise_on_cloudinit_failure=False) + def execute(self, command, *, use_sudo=True) -> Result: if self.instance.username == 'root' and use_sudo is False: raise Exception('Root user cannot run unprivileged') -- cgit v1.2.3 From 6c4e87bf336073183f8ae8964366d574c7ee4823 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Thu, 3 Dec 2020 13:17:55 -0500 Subject: integration_tests: introduce skipping of tests by OS (#702) This introduces an optional, more complex OS_IMAGE format (`::::`) which allows the specification of the OS/OS release which the given image ID corresponds to. This information is used to skip tests which do not apply to the image. This commit is comprised of the following discrete changes: * introduce the IntegrationImage class, to handle parsing and storing the new OS_IMAGE format * support inferring the OS and OS release of Ubuntu series, so that we can continue to set OS_IMAGE to just a series name and have test skipping work * add documentation on Image Selection to integration_tests.rst * introduce the actual skipping behaviour based on OS marks * apply the `ubuntu` mark to all tests that should be skipped on non-Ubuntu operating systems --- doc/rtd/topics/integration_tests.rst | 30 ++++++++++ tests/integration_tests/clouds.py | 69 ++++++++++++++++++++-- tests/integration_tests/conftest.py | 12 +++- tests/integration_tests/integration_settings.py | 7 ++- .../modules/test_apt_configure_sources_list.py | 1 + .../modules/test_package_update_upgrade_install.py | 1 + tests/integration_tests/modules/test_snap.py | 1 + .../modules/test_ssh_import_id.py | 5 ++ .../integration_tests/modules/test_users_groups.py | 5 ++ tox.ini | 1 + 10 files changed, 122 insertions(+), 10 deletions(-) (limited to 'tests/integration_tests') diff --git a/doc/rtd/topics/integration_tests.rst b/doc/rtd/topics/integration_tests.rst index aeda326c..3cfca31e 100644 --- a/doc/rtd/topics/integration_tests.rst +++ b/doc/rtd/topics/integration_tests.rst @@ -14,6 +14,36 @@ laid out in :ref:`unit_testing` should be followed for integration tests. Setup is accomplished via a set of fixtures located in ``tests/integration_tests/conftest.py``. +Image Selection +=============== + +Each integration testing run uses a single image as its basis. This +image is configured using the ``OS_IMAGE`` variable; see +:ref:`Configuration` for details of how configuration works. + +``OS_IMAGE`` can take two types of value: an Ubuntu series name (e.g. +"focal"), or an image specification. If an Ubuntu series name is +given, then the most recent image for that series on the target cloud +will be used. For other use cases, an image specification is used. + +In its simplest form, an image specification can simply be a cloud's +image ID (e.g. "ami-deadbeef", "ubuntu:focal"). In this case, the +image so-identified will be used as the basis for this testing run. + +This has a drawback, however: as we do not know what OS or release is +within the image, the integration testing framework will run *all* +tests against the image in question. If it's a RHEL8 image, then we +would expect Ubuntu-specific tests to fail (and vice versa). + +To address this, a full image specification can be given. 
This is of + the form: ``<image_id>[::<os>[::<release>]]``. If given, ``os`` and + ``release`` should describe the image specified by ``image_id``; when + they are known, only tests which are intended for that OS and release + will be executed. diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index 4d5c2c2a..8cdb3fcc 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py +def _get_ubuntu_series() -> list: + """Use distro-info-data's ubuntu.csv to get a list of Ubuntu series""" + out = "" + try: + out, _err = subp(["ubuntu-distro-info", "-a"]) + except ProcessExecutionError: + log.info( + "ubuntu-distro-info (from the distro-info package) must be" + " installed to guess Ubuntu os/release" + ) + return out.splitlines() + + +class ImageSpecification: + """A specification of an image to launch for testing. + + If either of ``os`` and ``release`` are not specified, an attempt will be + made to infer the correct values for these on instantiation. + + :param image_id: + The image identifier used by the rest of the codebase to launch this + image. + :param os: + An optional string describing the operating system this image is for + (e.g. "ubuntu", "rhel", "freebsd"). + :param release: + An optional string describing the operating system release (e.g. + "focal", "8"; the exact values here will depend on the OS). + """ + + def __init__( + self, + image_id: str, + os: "Optional[str]" = None, + release: "Optional[str]" = None, + ): + if image_id in _get_ubuntu_series(): + if os is None: + os = "ubuntu" + if release is None: + release = image_id + + self.image_id = image_id + self.os = os + self.release = release + log.info( + "Detected image: image_id=%s os=%s release=%s", + self.image_id, + self.os, + self.release, + ) + + @classmethod + def from_os_image(cls): + """Return an ImageSpecification for integration_settings.OS_IMAGE.""" + parts = integration_settings.OS_IMAGE.split("::", 2) + return cls(*parts) + + class IntegrationCloud(ABC): datasource = None # type: Optional[str] integration_instance_cls = IntegrationInstance @@ -57,13 +116,11 @@ class IntegrationCloud(ABC): raise NotImplementedError def _get_initial_image(self): - _released_image_id = self.settings.OS_IMAGE + image = ImageSpecification.from_os_image() try: - _released_image_id = self.cloud_instance.released_image( - self.settings.OS_IMAGE) + return self.cloud_instance.released_image(image.image_id) except (ValueError, IndexError): - pass - return _released_image_id + return image.image_id def _perform_launch(self, launch_kwargs): pycloudlib_instance = self.cloud_instance.launch(**launch_kwargs) diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index d7e0fca2..cc545b0f 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -10,12 +10,13 @@ from pathlib import Path from tests.integration_tests import integration_settings from tests.integration_tests.clouds import ( + AzureCloud, Ec2Cloud, GceCloud, - AzureCloud, - OciCloud, + ImageSpecification, LxdContainerCloud, LxdVmCloud, + OciCloud, ) from tests.integration_tests.instances import IntegrationInstance @@ -32,6 +33,7 @@ platforms = { 'lxd_container': LxdContainerCloud, 'lxd_vm': LxdVmCloud, } +os_list = ["ubuntu"] session_start_time = datetime.datetime.now().strftime('%y%m%d%H%M%S') @@ -60,6 +62,12 @@ def pytest_runtest_setup(item): if supported_platforms and current_platform not in supported_platforms: pytest.skip(unsupported_message) + image = ImageSpecification.from_os_image() + current_os = image.os + supported_os_set = set(os_list).intersection(test_marks) + if current_os and supported_os_set and current_os not in supported_os_set: + pytest.skip("Cannot run on OS {}".format(current_os)) + # disable_subp_usage is defined at a higher level, but we don't # want it applied here diff --git a/tests/integration_tests/integration_settings.py 
b/tests/integration_tests/integration_settings.py index 94d54f74..07a6d541 100644 --- a/tests/integration_tests/integration_settings.py +++ b/tests/integration_tests/integration_settings.py @@ -22,8 +22,11 @@ PLATFORM = 'lxd_container' INSTANCE_TYPE = None # Determines the base image to use or generate new images from. -# Can be the name of the OS if running a stock image, -# otherwise the id of the image being used if using a custom image +# +# This can be the name of an Ubuntu release, or in the format +# <image_id>[::<os>[::<release>]]. If given, os and release should describe +# the image specified by image_id. (Ubuntu releases are converted to this +# format internally; in this case, to "focal::ubuntu::focal".) OS_IMAGE = 'focal' # Populate if you want to use a pre-launched instance instead of diff --git a/tests/integration_tests/modules/test_apt_configure_sources_list.py b/tests/integration_tests/modules/test_apt_configure_sources_list.py index d2bcc61a..28cbe19f 100644 --- a/tests/integration_tests/modules/test_apt_configure_sources_list.py +++ b/tests/integration_tests/modules/test_apt_configure_sources_list.py @@ -40,6 +40,7 @@ EXPECTED_REGEXES = [ @pytest.mark.ci +@pytest.mark.ubuntu class TestAptConfigureSourcesList: @pytest.mark.user_data(USER_DATA) diff --git a/tests/integration_tests/modules/test_package_update_upgrade_install.py b/tests/integration_tests/modules/test_package_update_upgrade_install.py index 8a38ad84..28d741bc 100644 --- a/tests/integration_tests/modules/test_package_update_upgrade_install.py +++ b/tests/integration_tests/modules/test_package_update_upgrade_install.py @@ -26,6 +26,7 @@ package_upgrade: true """ +@pytest.mark.ubuntu @pytest.mark.user_data(USER_DATA) class TestPackageUpdateUpgradeInstall: diff --git a/tests/integration_tests/modules/test_snap.py b/tests/integration_tests/modules/test_snap.py index b626f6b0..481edbaa 100644 --- a/tests/integration_tests/modules/test_snap.py +++ b/tests/integration_tests/modules/test_snap.py @@ -20,6 +20,7 @@ snap: @pytest.mark.ci +@pytest.mark.ubuntu class TestSnap: @pytest.mark.user_data(USER_DATA) diff --git a/tests/integration_tests/modules/test_ssh_import_id.py b/tests/integration_tests/modules/test_ssh_import_id.py index 45d37d6c..3db573b5 100644 --- a/tests/integration_tests/modules/test_ssh_import_id.py +++ b/tests/integration_tests/modules/test_ssh_import_id.py @@ -3,6 +3,10 @@ This test specifies ssh keys to be imported by the ``ssh_import_id`` module and then checks that if the ssh keys were successfully imported. +TODO: +* This test assumes that SSH keys will be imported into the /home/ubuntu; this + will need modification to run on other OSes. + (This is ported from ``tests/cloud_tests/testcases/modules/ssh_import_id.yaml``.)""" @@ -18,6 +22,7 @@ ssh_import_id: @pytest.mark.ci +@pytest.mark.ubuntu class TestSshImportId: @pytest.mark.user_data(USER_DATA) diff --git a/tests/integration_tests/modules/test_users_groups.py b/tests/integration_tests/modules/test_users_groups.py index 6a51f5a6..ee08d87b 100644 --- a/tests/integration_tests/modules/test_users_groups.py +++ b/tests/integration_tests/modules/test_users_groups.py @@ -2,6 +2,10 @@ This test specifies a number of users and groups via user-data, and confirms that they have been configured correctly in the system under test. + +TODO: +* This test assumes that the "ubuntu" user will be created when "default" is + specified; this will need modification to run on other OSes. 
""" import re @@ -41,6 +45,7 @@ AHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/ @pytest.mark.ci @pytest.mark.user_data(USER_DATA) class TestUsersGroups: + @pytest.mark.ubuntu @pytest.mark.parametrize( "getent_args,regex", [ diff --git a/tox.ini b/tox.ini index 022b918d..df1deb6f 100644 --- a/tox.ini +++ b/tox.ini @@ -179,3 +179,4 @@ markers = user_data: the user data to be passed to the test instance instance_name: the name to be used for the test instance sru_2020_11: test is part of the 2020/11 SRU verification + ubuntu: this test should run on Ubuntu -- cgit v1.2.3 From 06f7b4522aaa2f5c7f773f42f6c88aed50bb00d5 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 3 Dec 2020 16:41:46 -0600 Subject: Integration test for pull #586 (#706) If a non-default AuthorizedKeysFile is specified in /etc/ssh/sshd_config, ensure we can still ssh as expected --- tests/integration_tests/assets/__init__.py | 16 ++++++++++ tests/integration_tests/assets/test_id_rsa | 38 +++++++++++++++++++++++ tests/integration_tests/assets/test_id_rsa.pub | 1 + tests/integration_tests/bugs/test_gh586.py | 42 ++++++++++++++++++++++++++ 4 files changed, 97 insertions(+) create mode 100644 tests/integration_tests/assets/__init__.py create mode 100755 tests/integration_tests/assets/test_id_rsa create mode 100644 tests/integration_tests/assets/test_id_rsa.pub create mode 100644 tests/integration_tests/bugs/test_gh586.py (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/assets/__init__.py b/tests/integration_tests/assets/__init__.py new file mode 100644 index 00000000..0cf27982 --- /dev/null +++ b/tests/integration_tests/assets/__init__.py @@ -0,0 +1,16 @@ +import os +from collections import namedtuple +from pathlib import Path + +ASSET_DIR = Path(os.path.dirname(os.path.realpath(__file__))) +PRIVATE_RSA_KEY_PATH = ASSET_DIR / 'test_id_rsa' +PUBLIC_RSA_KEY_PATH = ASSET_DIR / 'test_id_rsa.pub' + + +def get_test_rsa_keypair(): + with PUBLIC_RSA_KEY_PATH.open() as public_file: + public_key = public_file.read() + with PRIVATE_RSA_KEY_PATH.open() as private_file: + private_key = private_file.read() + KeyPair = namedtuple('KeyPair', 'public_rsa_key private_rsa_key') + return KeyPair(public_key, private_key) diff --git a/tests/integration_tests/assets/test_id_rsa b/tests/integration_tests/assets/test_id_rsa new file mode 100755 index 00000000..1ef67135 --- /dev/null +++ b/tests/integration_tests/assets/test_id_rsa @@ -0,0 +1,38 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn +NhAAAAAwEAAQAAAYEAnY0A/9s891CKmS3CBVGgLjdEifgYf8y3Ht+TC3+ajvEIcEoxevgK +t2mXoPw4P/5QrT4QmgG5N2Jr4Q5eNcgZ0zytoAlNxcM/R/soKi46hiLSArMNEiOlVi/vbf +q3Du5XIiX8fZD+4xgq3+jQ3PMxzqBf3AU6RAC9Jj94pBHCbaKPSFfWU7fq/JIvukKIWckv +UCkzNiaAWop3jNSEwFZbqKe2A1yesfgSpCmycggOVN/tMpFeVn8rzzG+LJ47TSoBav1P0F +CegVKq5iyGhLdM4TnS3ajbSIf3+SSDMImyzzmXqPyG5zwcH8zuEI3aR9DTMmi49HnjVX0A +N+61iQf8MEz9nnpUCnaeogiI4zfJQIMKHijGcFYR092BqJUmy50s239nFFBzCFRcmERdyh +fxrrG5kpEPllPGO4AuCtR4aW9yVTqfDNO2dfX5xvF8ZakUCMdR7JomuzW097U8zHE6EOTI +XVXatlI1cHt+KeugVrjzK2YNE1HEeGCt4l7qmpAXAAAFgL+LUj+/i1I/AAAAB3NzaC1yc2 +EAAAGBAJ2NAP/bPPdQipktwgVRoC43RIn4GH/Mtx7fkwt/mo7xCHBKMXr4Crdpl6D8OD/+ +UK0+EJoBuTdia+EOXjXIGdM8raAJTcXDP0f7KCouOoYi0gKzDRIjpVYv7236tw7uVyIl/H +2Q/uMYKt/o0NzzMc6gX9wFOkQAvSY/eKQRwm2ij0hX1lO36vySL7pCiFnJL1ApMzYmgFqK +d4zUhMBWW6intgNcnrH4EqQpsnIIDlTf7TKRXlZ/K88xviyeO00qAWr9T9BQnoFSquYsho +S3TOE50t2o20iH9/kkgzCJss85l6j8huc8HB/M7hCN2kfQ0zJouPR541V9ADfutYkH/DBM 
+/Z56VAp2nqIIiOM3yUCDCh4oxnBWEdPdgaiVJsudLNt/ZxRQcwhUXJhEXcoX8a6xuZKRD5 +ZTxjuALgrUeGlvclU6nwzTtnX1+cbxfGWpFAjHUeyaJrs1tPe1PMxxOhDkyF1V2rZSNXB7 +finroFa48ytmDRNRxHhgreJe6pqQFwAAAAMBAAEAAAGALgAgfZPGnjMu9ICOuLzXdwb+BQ +aiKJZeFS6UIXRVbUzk+NxAzDWl811qPz/FMLIRXjPT5xN/v7MF6oUmbq+JEssRqrtssMRM +MrkbRg2PWuDJzq32sAgmWx7N2p+sWTivyjGrIgJ22VmSEyRH72s2bK0YsAX6uCY7E/LOR6 +FD0nz3Ntkmo/T8MFiChPCuHQEHxnDxGett6IGrXDwksn/EbV7iXuLpFu9mifX+uxqtDI0B +FZWqJLkm0m0kqKReji4oHIUJXw6yQrVYTy71WZ/VXzJ+B7w/WvFGJjAvZQSrwzElRfH8Th +3knN6csSvC2sGCx5Dqi7fxpL4nZsYdGyB8siIxbvye4DUO8RXMjfJP9R+HMsBd4tSxjzLE +lVNBrClJt2yt1JbuAwzsZpbk5jvJ2PqupqQX+WLrJ+i66nByUDCxtXuHG+e3FnATdcLtSv +QvpuzSN3pfurK/C/J3kf6BTF9avK+bgw+MhbD3Ct4Lqi9s3VSgBQQCp7Hr67JEv4jZAAAA +wGTnfuIb3F3p/NhEudAucWkXEbV9zsnIAtSoguSh7fPC21MGyfx6chsf4mXKHUNfuw8m5W +2+tYa18af7Tp2nHWHPmWbY1z+g/DFCMCPaEbi1zffMlcTw7OZ8eV6D4isD30ul+eVguSYE +Hw+ILgWQXVen0oFtM1FOHiIPtB8CWyaX6njyLRQU9dt3qcWBm/tocy4hTSfheeU21pupEw +6/kfvqkq8J5MYTWF9yE1dNGEuoV2XpidX4ell/oH0qEPy4vQAAAMEAzZlbUVJ8OimOLivJ +J0KYJD/QDPMk1bDsHi3MhBnWYEJ4t78L63aG2CAIy9DazueJH7S7h1N0Wbx5v0gSGA1gb2 +8YaEeUJDrrV7I1IbG9Gu4/Swfm4YAE+1mwK54CE5Nkefd+nz7jxLQSRUNO+AB3ldRpgqXV +CGT+rkhF3wY9D4dpzRl9KQiVKIQLfqyUlO5PHWEGKh3gPRbHUv8GWSA463L3aSl26cnAmv +CE++Ssusa9cIczTO7H+BsFc7MpWJKrAAAAwQDELFCX4ES/0HcVlTH3IjoRtIvPMb6lq545 +hPXwOb9ZpBmrgd5byZ0cEkjgEklNUZ9A9QIxrPNlVsB+z8QU+sV1qsExrPA9Fo8t9vFApi +u6WFR5QbIl9A2fh+2wZR2i0ftsndGdy9zA4LBiBj4EzQYYG7PEzSHIvyGGc0iaG4YCP+D0 +ljbVv47DruKa3RYv8Feuevo2hhRBRLnwah8cqXyehprqwA96WOoRXZVPgz7LQHKKAVYKWY +THmWZfry/xGEUAAAAKamFtZXNAbmV3dAE= +-----END OPENSSH PRIVATE KEY----- diff --git a/tests/integration_tests/assets/test_id_rsa.pub b/tests/integration_tests/assets/test_id_rsa.pub new file mode 100644 index 00000000..c14c3ea5 --- /dev/null +++ b/tests/integration_tests/assets/test_id_rsa.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCdjQD/2zz3UIqZLcIFUaAuN0SJ+Bh/zLce35MLf5qO8QhwSjF6+Aq3aZeg/Dg//lCtPhCaAbk3YmvhDl41yBnTPK2gCU3Fwz9H+ygqLjqGItICsw0SI6VWL+9t+rcO7lciJfx9kP7jGCrf6NDc8zHOoF/cBTpEAL0mP3ikEcJtoo9IV9ZTt+r8ki+6QohZyS9QKTM2JoBaineM1ITAVluop7YDXJ6x+BKkKbJyCA5U3+0ykV5WfyvPMb4snjtNKgFq/U/QUJ6BUqrmLIaEt0zhOdLdqNtIh/f5JIMwibLPOZeo/IbnPBwfzO4QjdpH0NMyaLj0eeNVfQA37rWJB/wwTP2eelQKdp6iCIjjN8lAgwoeKMZwVhHT3YGolSbLnSzbf2cUUHMIVFyYRF3KF/GusbmSkQ+WU8Y7gC4K1Hhpb3JVOp8M07Z19fnG8XxlqRQIx1Hsmia7NbT3tTzMcToQ5MhdVdq2UjVwe34p66BWuPMrZg0TUcR4YK3iXuqakBc= test@example diff --git a/tests/integration_tests/bugs/test_gh586.py b/tests/integration_tests/bugs/test_gh586.py new file mode 100644 index 00000000..44b643f1 --- /dev/null +++ b/tests/integration_tests/bugs/test_gh586.py @@ -0,0 +1,42 @@ +"""Integration test for pull #586 + +If a non-default AuthorizedKeysFile is specified in /etc/ssh/sshd_config, +ensure we can still ssh as expected. 
+""" +import paramiko +import pytest +from io import StringIO +from tests.integration_tests.assets import get_test_rsa_keypair + + +public_rsa_key, private_rsa_key = get_test_rsa_keypair() +USER_DATA = """\ +#cloud-config +bootcmd: + - sed -i 's/#AuthorizedKeysFile.*/AuthorizedKeysFile\\ .ssh\\/authorized_keys2/' /etc/ssh/sshd_config +ssh_authorized_keys: + - {public_key} +""".format(public_key=public_rsa_key) # noqa: E501 + + +@pytest.mark.sru_2020_11 +@pytest.mark.user_data(USER_DATA) +def test_non_default_authorized_keys(client): + sshd = client.read_from_file('/etc/ssh/sshd_config') + assert 'AuthorizedKeysFile .ssh/authorized_keys2' in sshd + assert sshd.count('AuthorizedKeysFile') == 1 + + ssh_dir = client.execute('ls /home/ubuntu/.ssh').stdout + assert 'authorized_keys2' in ssh_dir + + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + paramiko_key = paramiko.RSAKey.from_private_key(StringIO(private_rsa_key)) + + # Will fail with paramiko.ssh_exception.AuthenticationException + # if this bug isn't fixed + ssh.connect( + client.instance.ip, + username=client.instance.username, + pkey=paramiko_key, + ) -- cgit v1.2.3 From 974145d063afac3934a9b7c506bebf4318f9424d Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 4 Dec 2020 10:56:29 -0600 Subject: Add ability to keep snapshotted images in integration tests (#711) For SRU test development, every single time we start a new test run, we need to first install the PROPOSED version and create an image snapshot. Instead of automatically deleting a snapshot, add an integration setting to allow us to keep the snapshot. The end of the test run will log the image name which can then be used as the OS_IMAGE in subsequent test runs. --- tests/integration_tests/clouds.py | 15 ++++++++++----- tests/integration_tests/integration_settings.py | 2 ++ 2 files changed, 12 insertions(+), 5 deletions(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index 8cdb3fcc..ea42b6d5 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -166,11 +166,16 @@ class IntegrationCloud(ABC): def delete_snapshot(self): if self.snapshot_id: - log.info( - 'Deleting snapshot image created for this testrun: %s', - self.snapshot_id - ) - self.cloud_instance.delete_image(self.snapshot_id) + if self.settings.KEEP_IMAGE: + log.info( + 'NOT deleting snapshot image created for this testrun ' + 'because KEEP_IMAGE is True: %s', self.snapshot_id) + else: + log.info( + 'Deleting snapshot image created for this testrun: %s', + self.snapshot_id + ) + self.cloud_instance.delete_image(self.snapshot_id) class Ec2Cloud(IntegrationCloud): diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py index 07a6d541..ad6d453a 100644 --- a/tests/integration_tests/integration_settings.py +++ b/tests/integration_tests/integration_settings.py @@ -7,6 +7,8 @@ import os # Keep instance (mostly for debugging) when test is finished KEEP_INSTANCE = False +# Keep snapshot image (mostly for debugging) when test is finished +KEEP_IMAGE = False # One of: # lxd_container -- cgit v1.2.3 From aa6350f61d05bd18ecc1d92dd08090c43e4ef927 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 4 Dec 2020 14:55:46 -0600 Subject: Integration test for 570 (#712) Test that we can add optional vendor-data to the seedfrom file in a NoCloud environment. 
Also added the option to pass raise_on_cloudinit_failure through an instance restart so we get automatic failure checking when we need to manually reboot. --- tests/integration_tests/bugs/test_gh570.py | 38 ++++++++++++++++++++++++++ tests/integration_tests/bugs/test_lp1900837.py | 2 +- tests/integration_tests/instances.py | 6 ++-- 3 files changed, 43 insertions(+), 3 deletions(-) create mode 100644 tests/integration_tests/bugs/test_gh570.py (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/bugs/test_gh570.py b/tests/integration_tests/bugs/test_gh570.py new file mode 100644 index 00000000..b8866edd --- /dev/null +++ b/tests/integration_tests/bugs/test_gh570.py @@ -0,0 +1,38 @@ +"""Integration test for #570. + +Test that we can add optional vendor-data to the seedfrom file in a +NoCloud environment +""" + +from tests.integration_tests.instances import IntegrationInstance +import pytest + +VENDOR_DATA = """\ +#cloud-config +runcmd: + - touch /var/tmp/seeded_vendordata_test_file +""" + + +# Only running on LXD because we need NoCloud for this test +@pytest.mark.sru_2020_11 +@pytest.mark.lxd_container +@pytest.mark.lxd_vm +def test_nocloud_seedfrom_vendordata(client: IntegrationInstance): + seed_dir = '/var/tmp/test_seed_dir' + result = client.execute( + "mkdir {seed_dir} && " + "touch {seed_dir}/user-data && " + "touch {seed_dir}/meta-data && " + "echo 'seedfrom: {seed_dir}/' > " + "/var/lib/cloud/seed/nocloud-net/meta-data".format(seed_dir=seed_dir) + ) + assert result.return_code == 0 + + client.write_to_file( + '{}/vendor-data'.format(seed_dir), + VENDOR_DATA, + ) + client.execute('cloud-init clean --logs') + client.restart(raise_on_cloudinit_failure=True) + assert 'seeded_vendordata_test_file' in client.execute('ls /var/tmp') diff --git a/tests/integration_tests/bugs/test_lp1900837.py b/tests/integration_tests/bugs/test_lp1900837.py index 18b00475..395cace0 100644 --- a/tests/integration_tests/bugs/test_lp1900837.py +++ b/tests/integration_tests/bugs/test_lp1900837.py @@ -22,7 +22,7 @@ class TestLogPermissionsNotResetOnReboot: assert "600" == _get_log_perms(client) # Reboot - client.restart() + client.restart(raise_on_cloudinit_failure=True) # Check that permissions are not reset on reboot assert "600" == _get_log_perms(client) diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py index c68ee753..f8f98e42 100644 --- a/tests/integration_tests/instances.py +++ b/tests/integration_tests/instances.py @@ -35,7 +35,7 @@ class IntegrationInstance: def destroy(self): self.instance.delete() - def restart(self): + def restart(self, raise_on_cloudinit_failure=False): """Restart this instance (via cloud mechanism) and wait for boot. This wraps pycloudlib's `BaseInstance.restart` to pass @@ -44,7 +44,9 @@ class IntegrationInstance: """ self.instance.restart(wait=False) log.info("Instance restarted; waiting for boot") - self.instance.wait(raise_on_cloudinit_failure=False) + self.instance.wait( + raise_on_cloudinit_failure=raise_on_cloudinit_failure + ) def execute(self, command, *, use_sudo=True) -> Result: if self.instance.username == 'root' and use_sudo is False: -- cgit v1.2.3 From 54e202a6480e48dbb8a72004f7a5003f7c4edfae Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 7 Dec 2020 12:02:08 -0600 Subject: Add upgrade integration test (#693) Add an integration test that roughly mimics many of the manual cloud SRU tests. Also refactored some of the image setup code to make it easier to use in non-fixture code. 
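As a sketch of the flow this refactor enables (hedged: `session_cloud` stands for an already-constructed IntegrationCloud; the names match the diff below):

    from tests.integration_tests.conftest import get_validated_source

    source = get_validated_source()    # validates the CLOUD_INIT_SOURCE setting
    if source.installs_new_version():  # NONE and IN_PLACE install nothing
        client = session_cloud.launch()
        # Installs cloud-init, cleans the instance, and (by default)
        # snapshots it so later launches reuse the prepared image.
        client.install_new_cloud_init(source, take_snapshot=True)
        client.destroy()

The upgrade test added here reuses the same entry points but passes take_snapshot=False, since it upgrades a live instance rather than building an image.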
--- tests/integration_tests/conftest.py | 73 +++++++++++++++------------- tests/integration_tests/instances.py | 61 ++++++++++++++++++----- tests/integration_tests/test_upgrade.py | 86 +++++++++++++++++++++++++++++++++ 3 files changed, 175 insertions(+), 45 deletions(-) create mode 100644 tests/integration_tests/test_upgrade.py (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index cc545b0f..160fc085 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -1,8 +1,8 @@ # This file is part of cloud-init. See LICENSE file for license information. import datetime import logging -import os import pytest +import os import sys from tarfile import TarFile from contextlib import contextmanager @@ -14,11 +14,15 @@ from tests.integration_tests.clouds import ( Ec2Cloud, GceCloud, ImageSpecification, + IntegrationCloud, LxdContainerCloud, LxdVmCloud, OciCloud, ) -from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.instances import ( + CloudInitSource, + IntegrationInstance, +) log = logging.getLogger('integration_testing') @@ -95,39 +99,42 @@ def session_cloud(): cloud.destroy() -@pytest.fixture(scope='session', autouse=True) -def setup_image(session_cloud): +def get_validated_source( + source=integration_settings.CLOUD_INIT_SOURCE +) -> CloudInitSource: + if source == 'NONE': + return CloudInitSource.NONE + elif source == 'IN_PLACE': + if session_cloud.datasource not in ['lxd_container', 'lxd_vm']: + raise ValueError( + 'IN_PLACE as CLOUD_INIT_SOURCE only works for LXD') + return CloudInitSource.IN_PLACE + elif source == 'PROPOSED': + return CloudInitSource.PROPOSED + elif source.startswith('ppa:'): + return CloudInitSource.PPA + elif os.path.isfile(str(source)): + return CloudInitSource.DEB_PACKAGE + raise ValueError( + 'Invalid value for CLOUD_INIT_SOURCE setting: {}'.format(source)) + + +@pytest.fixture(scope='session') +def setup_image(session_cloud: IntegrationCloud): """Setup the target environment with the correct version of cloud-init. 
So we can launch instances / run tests with the correct image """ - client = None + + source = get_validated_source() + if not source.installs_new_version(): + return log.info('Setting up environment for %s', session_cloud.datasource) - if integration_settings.CLOUD_INIT_SOURCE == 'NONE': - pass # that was easy - elif integration_settings.CLOUD_INIT_SOURCE == 'IN_PLACE': - if session_cloud.datasource not in ['lxd_container', 'lxd_vm']: - raise ValueError( - 'IN_PLACE as CLOUD_INIT_SOURCE only works for LXD') - # The mount needs to happen after the instance is created, so - # no further action needed here - elif integration_settings.CLOUD_INIT_SOURCE == 'PROPOSED': - client = session_cloud.launch() - client.install_proposed_image() - elif integration_settings.CLOUD_INIT_SOURCE.startswith('ppa:'): - client = session_cloud.launch() - client.install_ppa(integration_settings.CLOUD_INIT_SOURCE) - elif os.path.isfile(str(integration_settings.CLOUD_INIT_SOURCE)): - client = session_cloud.launch() - client.install_deb() - else: - raise ValueError( - 'Invalid value for CLOUD_INIT_SOURCE setting: {}'.format( - integration_settings.CLOUD_INIT_SOURCE)) - if client: - # Even if we're keeping instances, we don't want to keep this - # one around as it was just for image creation - client.destroy() + client = session_cloud.launch() + client.install_new_cloud_init(source) + # Even if we're keeping instances, we don't want to keep this + # one around as it was just for image creation + client.destroy() log.info('Done with environment setup') @@ -193,21 +200,21 @@ def _client(request, fixture_utils, session_cloud): @pytest.yield_fixture -def client(request, fixture_utils, session_cloud): +def client(request, fixture_utils, session_cloud, setup_image): """Provide a client that runs for every test.""" with _client(request, fixture_utils, session_cloud) as client: yield client @pytest.yield_fixture(scope='module') -def module_client(request, fixture_utils, session_cloud): +def module_client(request, fixture_utils, session_cloud, setup_image): """Provide a client that runs once per module.""" with _client(request, fixture_utils, session_cloud) as client: yield client @pytest.yield_fixture(scope='class') -def class_client(request, fixture_utils, session_cloud): +def class_client(request, fixture_utils, session_cloud, setup_image): """Provide a client that runs once per class.""" with _client(request, fixture_utils, session_cloud) as client: yield client diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py index f8f98e42..4321ce07 100644 --- a/tests/integration_tests/instances.py +++ b/tests/integration_tests/instances.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. +from enum import Enum import logging import os import uuid @@ -25,6 +26,26 @@ def _get_tmp_path(): return '/var/tmp/{}.tmp'.format(tmp_filename) +class CloudInitSource(Enum): + """Represents the cloud-init image source setting as a defined value. + + Values here represent all possible values for CLOUD_INIT_SOURCE in + tests/integration_tests/integration_settings.py. See that file for an + explanation of these values. 
If the value set there can't be parsed into + one of these values, an exception will be raised + """ + NONE = 1 + IN_PLACE = 2 + PROPOSED = 3 + PPA = 4 + DEB_PACKAGE = 5 + + def installs_new_version(self): + if self.name in [self.NONE.name, self.IN_PLACE.name]: + return False + return True + + class IntegrationInstance: def __init__(self, cloud: 'IntegrationCloud', instance: BaseInstance, settings=integration_settings): @@ -92,16 +113,32 @@ class IntegrationInstance: os.unlink(tmp_file.name) def snapshot(self): - return self.cloud.snapshot(self.instance) - - def _install_new_cloud_init(self, remote_script): - self.execute(remote_script) + image_id = self.cloud.snapshot(self.instance) + log.info('Created new image: %s', image_id) + return image_id + + def install_new_cloud_init( + self, + source: CloudInitSource, + take_snapshot=True + ): + if source == CloudInitSource.DEB_PACKAGE: + self.install_deb() + elif source == CloudInitSource.PPA: + self.install_ppa() + elif source == CloudInitSource.PROPOSED: + self.install_proposed_image() + else: + raise Exception( + "Specified to install {} which isn't supported here".format( + source) + ) version = self.execute('cloud-init -v').split()[-1] log.info('Installed cloud-init version: %s', version) self.instance.clean() - snapshot_id = self.snapshot() - log.info('Created new image: %s', snapshot_id) - self.cloud.snapshot_id = snapshot_id + if take_snapshot: + snapshot_id = self.snapshot() + self.cloud.snapshot_id = snapshot_id def install_proposed_image(self): log.info('Installing proposed image') @@ -112,16 +149,16 @@ class IntegrationInstance: 'apt-get update -q\n' 'apt-get install -qy cloud-init' ) - self._install_new_cloud_init(remote_script) + self.execute(remote_script) - def install_ppa(self, repo): + def install_ppa(self): log.info('Installing PPA') remote_script = ( 'add-apt-repository {repo} -y && ' 'apt-get update -q && ' 'apt-get install -qy cloud-init' - ).format(repo=repo) - self._install_new_cloud_init(remote_script) + ).format(repo=self.settings.CLOUD_INIT_SOURCE) + self.execute(remote_script) def install_deb(self): log.info('Installing deb package') @@ -132,7 +169,7 @@ class IntegrationInstance: local_path=integration_settings.CLOUD_INIT_SOURCE, remote_path=remote_path) remote_script = 'dpkg -i {path}'.format(path=remote_path) - self._install_new_cloud_init(remote_script) + self.execute(remote_script) def __enter__(self): return self diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py new file mode 100644 index 00000000..78fbc992 --- /dev/null +++ b/tests/integration_tests/test_upgrade.py @@ -0,0 +1,86 @@ +import logging +import pytest +from pathlib import Path + +from tests.integration_tests.clouds import ImageSpecification, IntegrationCloud +from tests.integration_tests.conftest import ( + get_validated_source, + session_start_time, +) + +log = logging.getLogger('integration_testing') + +USER_DATA = """\ +#cloud-config +hostname: SRU-worked +""" + + +def _output_to_compare(instance, file_path, netcfg_path): + commands = [ + 'hostname', + 'dpkg-query --show cloud-init', + 'cat /run/cloud-init/result.json', + # 'cloud-init init' helps us understand if our pickling upgrade paths + # have broken across re-constitution of a cached datasource. Some + # platforms invalidate their datasource cache on reboot, so we run + # it here to ensure we get a dirty run. 
+ 'cloud-init init', + 'grep Trace /var/log/cloud-init.log', + 'cloud-id', + 'cat {}'.format(netcfg_path), + 'systemd-analyze', + 'systemd-analyze blame', + 'cloud-init analyze show', + 'cloud-init analyze blame', + ] + with file_path.open('w') as f: + for command in commands: + f.write('===== {} ====='.format(command) + '\n') + f.write(instance.execute(command) + '\n') + + +@pytest.mark.sru_2020_11 +def test_upgrade(session_cloud: IntegrationCloud): + source = get_validated_source() + if not source.installs_new_version(): + pytest.skip("Install method '{}' not supported for this test".format( + source + )) + return  # type checking doesn't understand that skip raises + + launch_kwargs = { + 'name': 'integration-upgrade-test', + 'image_id': session_cloud._get_initial_image(), + 'wait': True, + } + + image = ImageSpecification.from_os_image() + + # Get the paths to write test logs + output_dir = Path(session_cloud.settings.LOCAL_LOG_PATH) + output_dir.mkdir(parents=True, exist_ok=True) + base_filename = 'test_upgrade_{os}_{{stage}}_{time}.log'.format( + os=image.release, + time=session_start_time, + ) + before_path = output_dir / base_filename.format(stage='before') + after_path = output_dir / base_filename.format(stage='after') + + # Get the network cfg file + netcfg_path = '/dev/null' + if image.os == 'ubuntu': + netcfg_path = '/etc/netplan/50-cloud-init.yaml' + if image.release == 'xenial': + netcfg_path = '/etc/network/interfaces.d/50-cloud-init.cfg' + + with session_cloud.launch( + launch_kwargs=launch_kwargs, user_data=USER_DATA + ) as instance: + _output_to_compare(instance, before_path, netcfg_path) + instance.install_new_cloud_init(source, take_snapshot=False) + instance.execute('hostname something-else') + instance.restart(raise_on_cloudinit_failure=True) + _output_to_compare(instance, after_path, netcfg_path) + + log.info('Wrote upgrade test logs to %s and %s', before_path, after_path) -- cgit v1.2.3 From 17ca02e10623b12065532b26de9cefcccee0062c Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 9 Dec 2020 15:49:11 -0600 Subject: Add integration test for power_state_change module (#717) Also introduce the `unstable` mark, to allow us to land tests which run inconsistently (such as this one). --- tests/integration_tests/clouds.py | 15 ++-- tests/integration_tests/conftest.py | 4 +- tests/integration_tests/integration_settings.py | 2 + tests/integration_tests/log_utils.py | 13 ++++ .../modules/test_power_state_change.py | 91 ++++++++++++++++++++++ tox.ini | 1 + 6 files changed, 119 insertions(+), 7 deletions(-) create mode 100644 tests/integration_tests/log_utils.py create mode 100644 tests/integration_tests/modules/test_power_state_change.py (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index ea42b6d5..9f6a6380 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -124,11 +124,12 @@ class IntegrationCloud(ABC): def _perform_launch(self, launch_kwargs): pycloudlib_instance = self.cloud_instance.launch(**launch_kwargs) - pycloudlib_instance.wait(raise_on_cloudinit_failure=False) return pycloudlib_instance - def launch(self, user_data=None, launch_kwargs=None, + def launch(self, user_data=None, launch_kwargs=None, wait=True, settings=integration_settings): + if launch_kwargs is None: + launch_kwargs = {} if self.settings.EXISTING_INSTANCE_ID: log.info( 'Not launching instance due to EXISTING_INSTANCE_ID. 
' @@ -137,13 +138,15 @@ class IntegrationCloud(ABC): self.settings.EXISTING_INSTANCE_ID ) return + if 'wait' in launch_kwargs: + raise Exception("Specify 'wait' directly to launch, " + "not in 'launch_kwargs'") kwargs = { 'image_id': self.image_id, 'user_data': user_data, 'wait': False, } - if launch_kwargs: - kwargs.update(launch_kwargs) + kwargs.update(launch_kwargs) log.info( "Launching instance with launch_kwargs:\n{}".format( "\n".join("{}={}".format(*item) for item in kwargs.items()) @@ -151,7 +154,8 @@ class IntegrationCloud(ABC): ) pycloudlib_instance = self._perform_launch(kwargs) - + if wait: + pycloudlib_instance.wait(raise_on_cloudinit_failure=False) log.info('Launched instance: %s', pycloudlib_instance) return self.get_instance(pycloudlib_instance, settings) @@ -275,7 +279,6 @@ class _LxdIntegrationCloud(IntegrationCloud): if self.settings.CLOUD_INIT_SOURCE == 'IN_PLACE': self._mount_source(pycloudlib_instance) pycloudlib_instance.start(wait=False) - pycloudlib_instance.wait(raise_on_cloudinit_failure=False) return pycloudlib_instance diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index 160fc085..53ca5fb5 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -71,6 +71,8 @@ def pytest_runtest_setup(item): supported_os_set = set(os_list).intersection(test_marks) if current_os and supported_os_set and current_os not in supported_os_set: pytest.skip("Cannot run on OS {}".format(current_os)) + if 'unstable' in test_marks and not integration_settings.RUN_UNSTABLE: + pytest.skip('Test marked unstable. Manually remove mark to run it') # disable_subp_usage is defined at a higher level, but we don't @@ -176,7 +178,7 @@ def _collect_logs(instance: IntegrationInstance, node_id: str, @contextmanager -def _client(request, fixture_utils, session_cloud): +def _client(request, fixture_utils, session_cloud: IntegrationCloud): """Fixture implementation for the client fixtures. Launch the dynamic IntegrationClient instance using any provided diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py index ad6d453a..9948d479 100644 --- a/tests/integration_tests/integration_settings.py +++ b/tests/integration_tests/integration_settings.py @@ -9,6 +9,8 @@ import os KEEP_INSTANCE = False # Keep snapshot image (mostly for debugging) when test is finished KEEP_IMAGE = False +# Run tests marked as unstable. Expect failures and dragons. +RUN_UNSTABLE = False # One of: # lxd_container diff --git a/tests/integration_tests/log_utils.py b/tests/integration_tests/log_utils.py new file mode 100644 index 00000000..fa807389 --- /dev/null +++ b/tests/integration_tests/log_utils.py @@ -0,0 +1,13 @@ +def ordered_items_in_text(to_verify: list, text: str) -> bool: + """Return if all items in list appear in order in text. + + Examples: + ordered_items_in_text(['a', '1'], 'ab1') # Returns True + ordered_items_in_text(['1', 'a'], 'ab1') # Returns False + """ + index = 0 + for item in to_verify: + index = text[index:].find(item) + if index < 0: + return False + return True diff --git a/tests/integration_tests/modules/test_power_state_change.py b/tests/integration_tests/modules/test_power_state_change.py new file mode 100644 index 00000000..60e0e583 --- /dev/null +++ b/tests/integration_tests/modules/test_power_state_change.py @@ -0,0 +1,91 @@ +"""Integration test of the cc_power_state_change module. + +Test that the power state config options work as expected. 
+""" + +import time + +import pytest + +from tests.integration_tests.clouds import IntegrationCloud +from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.log_utils import ordered_items_in_text + +USER_DATA = """\ +#cloud-config +power_state: + delay: {delay} + mode: {mode} + message: msg + timeout: {timeout} + condition: {condition} +""" + + +def _detect_reboot(instance: IntegrationInstance): + # We'll wait for instance up here, but we don't know if we're + # detecting the first boot or second boot, so we also check + # the logs to ensure we've booted twice. If the logs show we've + # only booted once, wait until we've booted twice + instance.instance.wait(raise_on_cloudinit_failure=False) + for _ in range(600): + try: + log = instance.read_from_file('/var/log/cloud-init.log') + boot_count = log.count("running 'init-local'") + if boot_count == 1: + instance.instance.wait(raise_on_cloudinit_failure=False) + elif boot_count > 1: + break + except Exception: + pass + time.sleep(1) + else: + raise Exception('Could not detect reboot') + + +def _can_connect(instance): + return instance.execute('true').ok + + +# This test is marked unstable because even though it should be able to +# run anywhere, I can only get it to run in an lxd container, and even then +# occasionally some timing issues will crop up. +@pytest.mark.unstable +@pytest.mark.sru_2020_11 +@pytest.mark.ubuntu +@pytest.mark.lxd_container +class TestPowerChange: + @pytest.mark.parametrize('mode,delay,timeout,expected', [ + ('poweroff', 'now', '10', 'will execute: shutdown -P now msg'), + ('reboot', 'now', '0', 'will execute: shutdown -r now msg'), + ('halt', '+1', '0', 'will execute: shutdown -H +1 msg'), + ]) + def test_poweroff(self, session_cloud: IntegrationCloud, + mode, delay, timeout, expected): + with session_cloud.launch( + user_data=USER_DATA.format( + delay=delay, mode=mode, timeout=timeout, condition='true'), + wait=False + ) as instance: + if mode == 'reboot': + _detect_reboot(instance) + else: + instance.instance.wait_for_stop() + instance.instance.start(wait=True) + log = instance.read_from_file('/var/log/cloud-init.log') + assert _can_connect(instance) + lines_to_check = [ + 'Running module power-state-change', + expected, + "running 'init-local'", + 'config-power-state-change already ran', + ] + assert ordered_items_in_text(lines_to_check, log), ( + 'Expected data not in logs') + + @pytest.mark.user_data(USER_DATA.format(delay='0', mode='poweroff', + timeout='0', condition='false')) + def test_poweroff_false_condition(self, client: IntegrationInstance): + log = client.read_from_file('/var/log/cloud-init.log') + assert _can_connect(client) + assert 'Condition was false. 
Will not perform state change' in log diff --git a/tox.ini b/tox.ini index df1deb6f..1841247b 100644 --- a/tox.ini +++ b/tox.ini @@ -180,3 +180,4 @@ markers = instance_name: the name to be used for the test instance sru_2020_11: test is part of the 2020/11 SRU verification ubuntu: this test should run on Ubuntu + unstable: skip this test because it is flakey -- cgit v1.2.3 From 05216aa37e1fad32433dbea102dc3ceae7d8565d Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 9 Dec 2020 18:11:18 -0500 Subject: integration_tests: add test for LP: #1898997 (#713) integration_tests: add test for LP: #1898997 This introduces the `lxd_config_dict` mark, used to specify a free-form configuration dict to LXD for tests which only run there; and the `not_xenial` and `not_bionic` marks, used to skip tests on xenial/bionic via a basic release skipping mechanism. This also bumps the pycloudlib commit we depend upon, as the latest commit includes the changes required for LXD network config to work. (The `lxd_config_dict` change further complicated `_client`, so a minor refactoring is applied.) --- integration-requirements.txt | 2 +- tests/integration_tests/bugs/test_lp1898997.py | 71 ++++++++++++++++++++++++++ tests/integration_tests/conftest.py | 23 +++++++-- tox.ini | 3 ++ 4 files changed, 93 insertions(+), 6 deletions(-) create mode 100644 tests/integration_tests/bugs/test_lp1898997.py (limited to 'tests/integration_tests') diff --git a/integration-requirements.txt b/integration-requirements.txt index 3648a0f1..b7e22ce7 100644 --- a/integration-requirements.txt +++ b/integration-requirements.txt @@ -1,5 +1,5 @@ # PyPI requirements for cloud-init integration testing # https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html # -pycloudlib @ git+https://github.com/canonical/pycloudlib.git@4b8d2cd5ac6316810ce16d081842da575625ca4f +pycloudlib @ git+https://github.com/canonical/pycloudlib.git@324763289c9a38fc9ac7ec524acb9eb11c7d6c13 pytest diff --git a/tests/integration_tests/bugs/test_lp1898997.py b/tests/integration_tests/bugs/test_lp1898997.py new file mode 100644 index 00000000..54c88d82 --- /dev/null +++ b/tests/integration_tests/bugs/test_lp1898997.py @@ -0,0 +1,71 @@ +"""Integration test for LP: #1898997 + +cloud-init was incorrectly excluding Open vSwitch bridge members from its list +of interfaces. This meant that instances which had only one interface which +was in an Open vSwitch bridge would not boot correctly: cloud-init would not +find the expected physical interfaces, so would not apply network config. + +This test checks that cloud-init believes it has successfully applied the +network configuration, and confirms that the bridge can be used to ping the +default gateway. 
+""" +import pytest + +MAC_ADDRESS = "de:ad:be:ef:12:34" + + +NETWORK_CONFIG = """\ +bridges: + ovs-br: + dhcp4: true + interfaces: + - enp5s0 + macaddress: 52:54:00:d9:08:1c + mtu: 1500 + openvswitch: {{}} +ethernets: + enp5s0: + mtu: 1500 + set-name: enp5s0 + match: + macaddress: {} +version: 2 +""".format(MAC_ADDRESS) + + +@pytest.mark.lxd_config_dict({ + "user.network-config": NETWORK_CONFIG, + "volatile.eth0.hwaddr": MAC_ADDRESS, +}) +@pytest.mark.lxd_vm +@pytest.mark.not_bionic +@pytest.mark.not_xenial +@pytest.mark.sru_2020_11 +@pytest.mark.ubuntu +class TestInterfaceListingWithOpenvSwitch: + def test_ovs_member_interfaces_not_excluded(self, client): + # We need to install openvswitch for our provided network configuration + # to apply (on next boot), so DHCP on our default interface to fetch it + client.execute("dhclient enp5s0") + client.execute("apt update -qqy") + client.execute("apt-get install -qqy openvswitch-switch") + + # Now our networking config should successfully apply on a clean reboot + client.execute("cloud-init clean --logs") + client.restart() + + cloudinit_output = client.read_from_file("/var/log/cloud-init.log") + + # Confirm that the network configuration was applied successfully + assert "WARN" not in cloudinit_output + # Confirm that the applied network config created the OVS bridge + assert "ovs-br" in client.execute("ip addr") + + # Test that we can ping our gateway using our bridge + gateway = client.execute( + "ip -4 route show default | awk '{ print $3 }'" + ) + ping_result = client.execute( + "ping -c 1 -W 1 -I ovs-br {}".format(gateway) + ) + assert ping_result.ok diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index 53ca5fb5..50b300c4 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -1,5 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. import datetime +import functools import logging import pytest import os @@ -18,6 +19,7 @@ from tests.integration_tests.clouds import ( LxdContainerCloud, LxdVmCloud, OciCloud, + _LxdIntegrationCloud, ) from tests.integration_tests.instances import ( CloudInitSource, @@ -74,6 +76,10 @@ def pytest_runtest_setup(item): if 'unstable' in test_marks and not integration_settings.RUN_UNSTABLE: pytest.skip('Test marked unstable. 
Manually remove mark to run it') + current_release = image.release + if "not_{}".format(current_release) in test_marks: + pytest.skip("Cannot run on release {}".format(current_release)) + # disable_subp_usage is defined at a higher level, but we don't # want it applied here @@ -184,14 +190,21 @@ def _client(request, fixture_utils, session_cloud: IntegrationCloud): Launch the dynamic IntegrationClient instance using any provided userdata, yield to the test, then cleanup """ - user_data = fixture_utils.closest_marker_first_arg_or( - request, 'user_data', None) - name = fixture_utils.closest_marker_first_arg_or( - request, 'instance_name', None + getter = functools.partial( + fixture_utils.closest_marker_first_arg_or, request, default=None ) + user_data = getter('user_data') + name = getter('instance_name') + lxd_config_dict = getter('lxd_config_dict') + launch_kwargs = {} if name is not None: - launch_kwargs = {"name": name} + launch_kwargs["name"] = name + if lxd_config_dict is not None: + if not isinstance(session_cloud, _LxdIntegrationCloud): + pytest.skip("lxd_config_dict requires LXD") + launch_kwargs["config_dict"] = lxd_config_dict + with session_cloud.launch( user_data=user_data, launch_kwargs=launch_kwargs ) as instance: diff --git a/tox.ini b/tox.ini index 1841247b..2e242551 100644 --- a/tox.ini +++ b/tox.ini @@ -173,8 +173,11 @@ markers = gce: test will only run on GCE platform azure: test will only run on Azure platform oci: test will only run on OCI platform + lxd_config_dict: set the config_dict passed on LXD instance creation lxd_container: test will only run in LXD container lxd_vm: test will only run in LXD VM + not_xenial: test cannot run on the xenial release + not_bionic: test cannot run on the bionic release no_container: test cannot run in a container user_data: the user data to be passed to the test instance instance_name: the name to be used for the test instance -- cgit v1.2.3 From 97bfd21d2dca978a662c87f0255a2a2bb97fc5d6 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 10 Dec 2020 10:58:12 -0600 Subject: Integration test for LP: #1813396 and #669 (#719) Ensure gpg is called with --no-tty flag. Also, refactored the "ordered_items_in_text" to assert if the line is missing and provide a more useful error message. --- tests/integration_tests/bugs/test_lp1813396.py | 34 ++++++++++++++++++++++ tests/integration_tests/log_utils.py | 12 ++++---- .../modules/test_power_state_change.py | 5 ++-- 3 files changed, 41 insertions(+), 10 deletions(-) create mode 100644 tests/integration_tests/bugs/test_lp1813396.py (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/bugs/test_lp1813396.py b/tests/integration_tests/bugs/test_lp1813396.py new file mode 100644 index 00000000..7ad0e809 --- /dev/null +++ b/tests/integration_tests/bugs/test_lp1813396.py @@ -0,0 +1,34 @@ +"""Integration test for lp-1813396 + +Ensure gpg is called with no tty flag. 
+""" + +import pytest + +from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.log_utils import verify_ordered_items_in_text + + +USER_DATA = """\ +#cloud-config +apt: + sources: + cloudinit: + source: 'deb [arch=amd64] http://ppa.launchpad.net/cloud-init-dev/daily/ubuntu focal main' + keyserver: keyserver.ubuntu.com + keyid: E4D304DF +""" # noqa: E501 + + +@pytest.mark.sru_2020_11 +@pytest.mark.user_data(USER_DATA) +def test_gpg_no_tty(client: IntegrationInstance): + log = client.read_from_file('/var/log/cloud-init.log') + to_verify = [ + "Running command ['gpg', '--no-tty', " + "'--keyserver=keyserver.ubuntu.com', '--recv-keys', 'E4D304DF'] " + "with allowed return codes [0] (shell=False, capture=True)", + "Imported key 'E4D304DF' from keyserver 'keyserver.ubuntu.com'", + "finish: modules-config/config-apt-configure: SUCCESS", + ] + verify_ordered_items_in_text(to_verify, log) diff --git a/tests/integration_tests/log_utils.py b/tests/integration_tests/log_utils.py index fa807389..40baae7b 100644 --- a/tests/integration_tests/log_utils.py +++ b/tests/integration_tests/log_utils.py @@ -1,13 +1,11 @@ -def ordered_items_in_text(to_verify: list, text: str) -> bool: - """Return if all items in list appear in order in text. +def verify_ordered_items_in_text(to_verify: list, text: str): + """Assert all items in list appear in order in text. Examples: - ordered_items_in_text(['a', '1'], 'ab1') # Returns True - ordered_items_in_text(['1', 'a'], 'ab1') # Returns False + verify_ordered_items_in_text(['a', '1'], 'ab1') # passes + verify_ordered_items_in_text(['1', 'a'], 'ab1') # raises AssertionError """ index = 0 for item in to_verify: index = text[index:].find(item) - if index < 0: - return False - return True + assert index > -1, "Expected item not found: '{}'".format(item) diff --git a/tests/integration_tests/modules/test_power_state_change.py b/tests/integration_tests/modules/test_power_state_change.py index 60e0e583..844dccfa 100644 --- a/tests/integration_tests/modules/test_power_state_change.py +++ b/tests/integration_tests/modules/test_power_state_change.py @@ -9,7 +9,7 @@ import pytest from tests.integration_tests.clouds import IntegrationCloud from tests.integration_tests.instances import IntegrationInstance -from tests.integration_tests.log_utils import ordered_items_in_text +from tests.integration_tests.log_utils import verify_ordered_items_in_text USER_DATA = """\ #cloud-config @@ -80,8 +80,7 @@ class TestPowerChange: "running 'init-local'", 'config-power-state-change already ran', ] - assert ordered_items_in_text(lines_to_check, log), ( - 'Expected data not in logs') + verify_ordered_items_in_text(lines_to_check, log) @pytest.mark.user_data(USER_DATA.format(delay='0', mode='poweroff', timeout='0', condition='false')) -- cgit v1.2.3 From 8321f0c00dc72cbe0ea40cda7b2843b9d2270b11 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 11 Dec 2020 11:58:48 -0600 Subject: Integration test for gh-671 (#724) Verify that on Azure that if a default user and password are specified through the Azure API that a change in the default password overwrites the old password --- integration-requirements.txt | 2 +- tests/integration_tests/bugs/test_gh671.py | 55 ++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 1 deletion(-) create mode 100644 tests/integration_tests/bugs/test_gh671.py (limited to 'tests/integration_tests') diff --git a/integration-requirements.txt b/integration-requirements.txt index 82c77111..ec765763 100644 --- 
a/integration-requirements.txt +++ b/integration-requirements.txt @@ -1,5 +1,5 @@ # PyPI requirements for cloud-init integration testing # https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html # -pycloudlib @ git+https://github.com/canonical/pycloudlib.git@d1c63026c76ed3ef02dc21982f9f7eeea2606f2a +pycloudlib @ git+https://github.com/canonical/pycloudlib.git@72e800b8e99c5b735348c4778f19f92cf6c63de0 pytest diff --git a/tests/integration_tests/bugs/test_gh671.py b/tests/integration_tests/bugs/test_gh671.py new file mode 100644 index 00000000..5e90cdda --- /dev/null +++ b/tests/integration_tests/bugs/test_gh671.py @@ -0,0 +1,55 @@ +"""Integration test for gh-671. + +Verify that on Azure that if a default user and password are specified +through the Azure API that a change in the default password overwrites +the old password +""" + +import crypt + +import pytest + +from tests.integration_tests.clouds import IntegrationCloud + +OLD_PASSWORD = 'DoIM33tTheComplexityRequirements!??' +NEW_PASSWORD = 'DoIM33tTheComplexityRequirementsNow!??' + + +def _check_password(instance, unhashed_password): + shadow_password = instance.execute('getent shadow ubuntu').split(':')[1] + salt = shadow_password.rsplit('$', 1)[0] + hashed_password = crypt.crypt(unhashed_password, salt) + assert shadow_password == hashed_password + + +@pytest.mark.azure +@pytest.mark.sru_2020_11 +def test_update_default_password(setup_image, session_cloud: IntegrationCloud): + os_profile = { + 'os_profile': { + 'admin_password': '', + 'linux_configuration': { + 'disable_password_authentication': False + } + } + } + os_profile['os_profile']['admin_password'] = OLD_PASSWORD + instance1 = session_cloud.launch(launch_kwargs={'vm_params': os_profile}) + + _check_password(instance1, OLD_PASSWORD) + + snapshot_id = instance1.cloud.cloud_instance.snapshot( + instance1.instance, + delete_provisioned_user=False + ) + + os_profile['os_profile']['admin_password'] = NEW_PASSWORD + try: + with session_cloud.launch(launch_kwargs={ + 'image_id': snapshot_id, + 'vm_params': os_profile, + }) as instance2: + _check_password(instance2, NEW_PASSWORD) + finally: + session_cloud.cloud_instance.delete_image(snapshot_id) + instance1.destroy() -- cgit v1.2.3 From 9ea845dbdde81d773cea76bd47ff566470366f38 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 11 Dec 2020 13:51:48 -0600 Subject: Integration test for gh-632. (#725) Verify that if cloud-init is using DataSourceRbxCloud, there is no traceback if the metadata disk cannot be found. --- tests/integration_tests/bugs/test_gh632.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 tests/integration_tests/bugs/test_gh632.py (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/bugs/test_gh632.py b/tests/integration_tests/bugs/test_gh632.py new file mode 100644 index 00000000..cf166356 --- /dev/null +++ b/tests/integration_tests/bugs/test_gh632.py @@ -0,0 +1,30 @@ +"""Integration test for gh-632. + +Verify that if cloud-init is using DataSourceRbxCloud, there is +no traceback if the metadata disk cannot be found. 
+""" + +import pytest + +from tests.integration_tests.instances import IntegrationInstance + + +@pytest.mark.sru_2020_11 +def test_datasource_rbx_no_stacktrace(client: IntegrationInstance): + client.write_to_file( + '/etc/cloud/cloud.cfg.d/90_dpkg.cfg', + 'datasource_list: [ RbxCloud, NoCloud ]\n', + ) + client.write_to_file( + '/etc/cloud/ds-identify.cfg', + 'policy: enabled\n', + ) + client.execute('cloud-init clean --logs') + client.restart() + + log = client.read_from_file('/var/log/cloud-init.log') + assert 'WARNING' not in log + assert 'Traceback' not in log + assert 'Failed to load metadata and userdata' not in log + assert ("Getting data from failed") not in log -- cgit v1.2.3 From 3339a5a4680e79fecb09907089239e6749e08a54 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 15 Dec 2020 11:21:31 -0500 Subject: integration_tests: port lxd_bridge test from cloud_tests (#718) --- tests/integration_tests/modules/test_lxd_bridge.py | 46 ++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 tests/integration_tests/modules/test_lxd_bridge.py (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/modules/test_lxd_bridge.py b/tests/integration_tests/modules/test_lxd_bridge.py new file mode 100644 index 00000000..f68c6f2d --- /dev/null +++ b/tests/integration_tests/modules/test_lxd_bridge.py @@ -0,0 +1,46 @@ +"""Integration tests for LXD bridge creation. + +(This is ported from +``tests/cloud_tests/testcases/modules/lxd_bridge.yaml``.) +""" +import pytest +import yaml + + +USER_DATA = """\ +#cloud-config +lxd: + init: + storage_backend: dir + bridge: + mode: new + name: lxdbr0 + ipv4_address: 10.100.100.1 + ipv4_netmask: 24 + ipv4_dhcp_first: 10.100.100.100 + ipv4_dhcp_last: 10.100.100.200 + ipv4_nat: true + domain: lxd +""" + + +@pytest.mark.user_data(USER_DATA) +class TestLxdBridge: + + @pytest.mark.parametrize("binary_name", ["lxc", "lxd"]) + def test_binaries_installed(self, class_client, binary_name): + """Check that the expected LXD binaries are installed""" + assert class_client.execute(["which", binary_name]).ok + + @pytest.mark.sru_2020_11 + def test_bridge(self, class_client): + """Check that the given bridge is configured""" + cloud_init_log = class_client.read_from_file("/var/log/cloud-init.log") + assert "WARN" not in cloud_init_log + + # The bridge should exist + assert class_client.execute("ip addr show lxdbr0") + + raw_network_config = class_client.execute("lxc network show lxdbr0") + network_config = yaml.safe_load(raw_network_config) + assert "10.100.100.1/24" == network_config["config"]["ipv4.address"] -- cgit v1.2.3 From 2af3f6d7247d0ef0dd3d4c287c8d8e71fda5173a Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 15 Dec 2020 10:31:27 -0600 Subject: Ensure overriding test vars with env vars works for booleans (#727) --- tests/integration_tests/integration_settings.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py index 9948d479..d7e02f04 100644 --- a/tests/integration_tests/integration_settings.py +++ b/tests/integration_tests/integration_settings.py @@ -1,6 +1,8 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import os +from distutils.util import strtobool + ################################################################## # LAUNCH SETTINGS ################################################################## @@ -109,6 +111,12 @@ except ImportError: # Perhaps a bit too hacky, but it works :) current_settings = [var for var in locals() if var.isupper()] for setting in current_settings: - globals()[setting] = os.getenv( + env_setting = os.getenv( 'CLOUD_INIT_{}'.format(setting), globals()[setting] ) + if isinstance(env_setting, str): + try: + env_setting = bool(strtobool(env_setting.strip())) + except ValueError: + pass + globals()[setting] = env_setting -- cgit v1.2.3 From ca49e27b21ccc65ec20a10fd8e085bec36d5bf3d Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 15 Dec 2020 13:11:48 -0600 Subject: Some test_upgrade fixes (#726) - workaround pad.lv/1908287 for restarting instances - move wait param from launch_kwargs to launch call - remove name param as it's not universally supported - add platform to log names --- tests/integration_tests/test_upgrade.py | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py index 78fbc992..76ad055b 100644 --- a/tests/integration_tests/test_upgrade.py +++ b/tests/integration_tests/test_upgrade.py @@ -1,5 +1,6 @@ import logging import pytest +import time from pathlib import Path from tests.integration_tests.clouds import ImageSpecification, IntegrationCloud @@ -40,6 +41,19 @@ def _output_to_compare(instance, file_path, netcfg_path): f.write(instance.execute(command) + '\n') +def _restart(instance): + # work around pad.lv/1908287 + try: + instance.restart(raise_on_cloudinit_failure=True) + except OSError as e: + for _ in range(10): + time.sleep(5) + result = instance.execute('cloud-init status --wait --long') + if result.ok: + return + raise e + + @pytest.mark.sru_2020_11 def test_upgrade(session_cloud: IntegrationCloud): source = get_validated_source() @@ -50,9 +64,7 @@ def test_upgrade(session_cloud: IntegrationCloud): return # type checking doesn't understand that skip raises launch_kwargs = { - 'name': 'integration-upgrade-test', 'image_id': session_cloud._get_initial_image(), - 'wait': True, } image = ImageSpecification.from_os_image() @@ -60,7 +72,8 @@ def test_upgrade(session_cloud: IntegrationCloud): # Get the paths to write test logs output_dir = Path(session_cloud.settings.LOCAL_LOG_PATH) output_dir.mkdir(parents=True, exist_ok=True) - base_filename = 'test_upgrade_{os}_{{stage}}_{time}.log'.format( + base_filename = 'test_upgrade_{platform}_{os}_{{stage}}_{time}.log'.format( + platform=session_cloud.settings.PLATFORM, os=image.release, time=session_start_time, ) @@ -75,12 +88,12 @@ def test_upgrade(session_cloud: IntegrationCloud): netcfg_path = '/etc/network/interfaces.d/50-cloud-init.cfg' with session_cloud.launch( - launch_kwargs=launch_kwargs, user_data=USER_DATA + launch_kwargs=launch_kwargs, user_data=USER_DATA, wait=True, ) as instance: _output_to_compare(instance, before_path, netcfg_path) instance.install_new_cloud_init(source, take_snapshot=False) instance.execute('hostname something-else') - instance.restart(raise_on_cloudinit_failure=True) + _restart(instance) _output_to_compare(instance, after_path, netcfg_path) log.info('Wrote upgrade test logs to %s and %s', before_path, after_path) -- cgit v1.2.3 From 2022bc72829869939fb0b75dea5e96da81f29bfc Mon Sep 17 00:00:00 2001 From: James 
Falcon Date: Tue, 15 Dec 2020 13:40:25 -0600 Subject: Integration test for gh-626 (#728) Ensure if wakeonlan is specified in the network config that it is rendered in the /etc/network/interfaces or netplan config. --- tests/integration_tests/bugs/test_gh626.py | 42 ++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 tests/integration_tests/bugs/test_gh626.py (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/bugs/test_gh626.py b/tests/integration_tests/bugs/test_gh626.py new file mode 100644 index 00000000..2d336462 --- /dev/null +++ b/tests/integration_tests/bugs/test_gh626.py @@ -0,0 +1,42 @@ +"""Integration test for gh-626. + +Ensure if wakeonlan is specified in the network config that it is rendered +in the /etc/network/interfaces or netplan config. +""" + +import pytest +import yaml + +from tests.integration_tests.clouds import ImageSpecification +from tests.integration_tests.instances import IntegrationInstance + + +NETWORK_CONFIG = """\ +version: 2 +ethernets: + eth0: + dhcp4: true + wakeonlan: true +""" + +EXPECTED_ENI_END = """\ +iface eth0 inet dhcp + ethernet-wol g""" + + +@pytest.mark.sru_2020_11 +@pytest.mark.lxd_container +@pytest.mark.lxd_vm +@pytest.mark.lxd_config_dict({ + 'user.network-config': NETWORK_CONFIG +}) +def test_wakeonlan(client: IntegrationInstance): + if ImageSpecification.from_os_image().release == 'xenial': + eni = client.execute('cat /etc/network/interfaces.d/50-cloud-init.cfg') + assert eni.endswith(EXPECTED_ENI_END) + return + + netplan_cfg = client.execute('cat /etc/netplan/50-cloud-init.yaml') + netplan_yaml = yaml.safe_load(netplan_cfg) + assert 'wakeonlan' in netplan_yaml['network']['ethernets']['eth0'] + assert netplan_yaml['network']['ethernets']['eth0']['wakeonlan'] is True -- cgit v1.2.3 From 9e89ca7063e121065d9af5d0d6dbb42e8cccf2e3 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 15 Dec 2020 14:06:20 -0600 Subject: Add integration tests for CLI functionality (#729) This currently covers functionality added in #575 --- tests/integration_tests/modules/test_cli.py | 45 +++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 tests/integration_tests/modules/test_cli.py (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/modules/test_cli.py b/tests/integration_tests/modules/test_cli.py new file mode 100644 index 00000000..3f41b34d --- /dev/null +++ b/tests/integration_tests/modules/test_cli.py @@ -0,0 +1,45 @@ +"""Integration tests for CLI functionality + +These would be for behavior manually invoked by user from the command line +""" + +import pytest + +from tests.integration_tests.instances import IntegrationInstance + + +VALID_USER_DATA = """\ +#cloud-config +runcmd: + - echo 'hi' > /var/tmp/test +""" + +INVALID_USER_DATA = """\ +runcmd: + - echo 'hi' > /var/tmp/test +""" + + +@pytest.mark.sru_2020_11 +@pytest.mark.user_data(VALID_USER_DATA) +def test_valid_userdata(client: IntegrationInstance): + """Test `cloud-init devel schema` with valid userdata. + + PR #575 + """ + result = client.execute('cloud-init devel schema --system') + assert result.ok + assert 'Valid cloud-config: system userdata' == result.stdout.strip() + + +@pytest.mark.sru_2020_11 +@pytest.mark.user_data(INVALID_USER_DATA) +def test_invalid_userdata(client: IntegrationInstance): + """Test `cloud-init devel schema` with invalid userdata. 
+ + PR #575 + """ + result = client.execute('cloud-init devel schema --system') + assert not result.ok + assert 'Cloud config schema errors' in result.stderr + assert 'needs to begin with "#cloud-config"' in result.stderr -- cgit v1.2.3 From eadadaa17b1c4484cce1392fee20134c2b4d1d1a Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 15 Dec 2020 16:42:03 -0500 Subject: integration_tests: restrict test_lxd_bridge appropriately (#730) On xenial, the bridge test fails because xenial's LXD doesn't include the `network` subcommand. On bionic, the bridge test fails within containers, because LXD isn't able to manipulate the host kernel as it expects. (focal and later do run successfully in containers, but we don't have a good way of expressing that presently.) --- tests/integration_tests/modules/test_lxd_bridge.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/modules/test_lxd_bridge.py b/tests/integration_tests/modules/test_lxd_bridge.py index f68c6f2d..cbf11179 100644 --- a/tests/integration_tests/modules/test_lxd_bridge.py +++ b/tests/integration_tests/modules/test_lxd_bridge.py @@ -24,6 +24,7 @@ lxd: """ +@pytest.mark.no_container @pytest.mark.user_data(USER_DATA) class TestLxdBridge: @@ -32,6 +33,7 @@ class TestLxdBridge: """Check that the expected LXD binaries are installed""" assert class_client.execute(["which", binary_name]).ok + @pytest.mark.not_xenial @pytest.mark.sru_2020_11 def test_bridge(self, class_client): """Check that the given bridge is configured""" -- cgit v1.2.3 From 0b932e55aa32e417b3ad5f2101acf6964108f0e0 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Thu, 17 Dec 2020 12:32:06 -0500 Subject: integration_tests: fix IN_PLACE CLOUD_INIT_SOURCE (#731) This fixes up an issue introduced in 54e202a6480e48dbb8a72004f7a5003f7c4edfae. 
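Illustrative usage, not part of this patch: with the helper's new signature, callers thread the session cloud through explicitly rather than relying on module-level settings alone. A minimal, hypothetical sketch (the test name is invented; the imports mirror how test_upgrade.py consumes the helper in the diff below):

import pytest

from tests.integration_tests.clouds import IntegrationCloud
from tests.integration_tests.conftest import get_validated_source


def test_example(session_cloud: IntegrationCloud):
    # Passing session_cloud lets the helper validate the configured
    # CLOUD_INIT_SOURCE against the cloud the suite is actually using.
    source = get_validated_source(session_cloud)
    if not source.installs_new_version():
        pytest.skip('Source does not install a new cloud-init version')
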
--- tests/integration_tests/conftest.py | 3 ++- tests/integration_tests/test_upgrade.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index 50b300c4..2d12045c 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -108,6 +108,7 @@ def session_cloud(): def get_validated_source( + session_cloud: IntegrationCloud, source=integration_settings.CLOUD_INIT_SOURCE ) -> CloudInitSource: if source == 'NONE': @@ -134,7 +135,7 @@ def setup_image(session_cloud: IntegrationCloud): So we can launch instances / run tests with the correct image """ - source = get_validated_source() + source = get_validated_source(session_cloud) if not source.installs_new_version(): return log.info('Setting up environment for %s', session_cloud.datasource) diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py index 76ad055b..a115bea7 100644 --- a/tests/integration_tests/test_upgrade.py +++ b/tests/integration_tests/test_upgrade.py @@ -56,7 +56,7 @@ def _restart(instance): @pytest.mark.sru_2020_11 def test_upgrade(session_cloud: IntegrationCloud): - source = get_validated_source() + source = get_validated_source(session_cloud) if not source.installs_new_version(): pytest.skip("Install method '{}' not supported for this test".format( source -- cgit v1.2.3 From f38ba3cafcc838b81d45238603089340e016d353 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 18 Dec 2020 12:13:25 -0500 Subject: integration_tests: log the path we collect logs into (#733) This makes it easier to find the failure logs when you're running a bunch of similar tests in parallel. --- tests/integration_tests/conftest.py | 1 + 1 file changed, 1 insertion(+) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index 2d12045c..68d506bd 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -174,6 +174,7 @@ def _collect_logs(instance: IntegrationInstance, node_id: str, log_dir = Path( integration_settings.LOCAL_LOG_PATH ) / session_start_time / node_id_path + log.info("Writing logs to %s", log_dir) if not log_dir.exists(): log_dir.mkdir(parents=True) tarball_path = log_dir / 'cloud-init.tar.gz' -- cgit v1.2.3 From b944cbe069703dffbe7b554473a0d8e0adf20eac Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 18 Dec 2020 12:51:22 -0500 Subject: integration_tests: set log-cli-level to INFO by default (#737) This gives us more detailed integration testing output by default. This will make debugging failures reported by users/developers easier to debug, as it removes the need for an initial round-trip to get the output we need for debugging. It will also make debugging intermittent failures easier: there will definitely be log output from runs which exhibit the intermittent failure. 
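Illustrative usage, not part of this patch: the pytest_configure hook added below only supplies a default, so pytest's standard --log-cli-level option still takes precedence when passed explicitly, e.g.:

pytest --log-cli-level=WARNING tests/integration_tests/
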
--- tests/integration_tests/conftest.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index 68d506bd..99dd8d9e 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -261,3 +261,20 @@ def pytest_assertrepr_compare(op, left, right): '"{}" not in cloud-init.log string; unexpectedly found on' " these lines:".format(left) ] + found_lines + + +def pytest_configure(config): + """Perform initial configuration, before the test runs start. + + This hook is only called if integration tests are being executed, so we can + use it to configure defaults for integration testing that differ from the + rest of the tests in the codebase. + + See + https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_configure + for pytest's documentation. + """ + if "log_cli_level" in config.option and not config.option.log_cli_level: + # If log_cli_level is available in this version of pytest and not set + # to anything, set it to INFO. + config.option.log_cli_level = "INFO" -- cgit v1.2.3 From aa72426b6d7d2d81b4b609ebdb21284cc7f3d650 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 4 Jan 2021 17:02:30 -0500 Subject: integration_tests: port ca_certs tests from cloud_tests (#732) --- tests/integration_tests/modules/test_ca_certs.py | 91 ++++++++++++++++++++++++ 1 file changed, 91 insertions(+) create mode 100644 tests/integration_tests/modules/test_ca_certs.py (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/modules/test_ca_certs.py b/tests/integration_tests/modules/test_ca_certs.py new file mode 100644 index 00000000..89c01a9c --- /dev/null +++ b/tests/integration_tests/modules/test_ca_certs.py @@ -0,0 +1,91 @@ +"""Integration tests for cc_ca_certs. + +(This is ported from ``tests/cloud_tests//testcases/modules/ca_certs.yaml``.) 
+ +TODO: +* Mark this as running on Debian and Alpine (once we have marks for that) +* Implement testing for the RHEL-specific paths +""" +import os.path + +import pytest + + +USER_DATA = """\ +#cloud-config +ca-certs: + remove-defaults: true + trusted: + - | + -----BEGIN CERTIFICATE----- + MIIGJzCCBA+gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBsjELMAkGA1UEBhMCRlIx + DzANBgNVBAgMBkFsc2FjZTETMBEGA1UEBwwKU3RyYXNib3VyZzEYMBYGA1UECgwP + d3d3LmZyZWVsYW4ub3JnMRAwDgYDVQQLDAdmcmVlbGFuMS0wKwYDVQQDDCRGcmVl + bGFuIFNhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxIjAgBgkqhkiG9w0BCQEW + E2NvbnRhY3RAZnJlZWxhbi5vcmcwHhcNMTIwNDI3MTAzMTE4WhcNMjIwNDI1MTAz + MTE4WjB+MQswCQYDVQQGEwJGUjEPMA0GA1UECAwGQWxzYWNlMRgwFgYDVQQKDA93 + d3cuZnJlZWxhbi5vcmcxEDAOBgNVBAsMB2ZyZWVsYW4xDjAMBgNVBAMMBWFsaWNl + MSIwIAYJKoZIhvcNAQkBFhNjb250YWN0QGZyZWVsYW4ub3JnMIICIjANBgkqhkiG + 9w0BAQEFAAOCAg8AMIICCgKCAgEA3W29+ID6194bH6ejLrIC4hb2Ugo8v6ZC+Mrc + k2dNYMNPjcOKABvxxEtBamnSaeU/IY7FC/giN622LEtV/3oDcrua0+yWuVafyxmZ + yTKUb4/GUgafRQPf/eiX9urWurtIK7XgNGFNUjYPq4dSJQPPhwCHE/LKAykWnZBX + RrX0Dq4XyApNku0IpjIjEXH+8ixE12wH8wt7DEvdO7T3N3CfUbaITl1qBX+Nm2Z6 + q4Ag/u5rl8NJfXg71ZmXA3XOj7zFvpyapRIZcPmkvZYn7SMCp8dXyXHPdpSiIWL2 + uB3KiO4JrUYvt2GzLBUThp+lNSZaZ/Q3yOaAAUkOx+1h08285Pi+P8lO+H2Xic4S + vMq1xtLg2bNoPC5KnbRfuFPuUD2/3dSiiragJ6uYDLOyWJDivKGt/72OVTEPAL9o + 6T2pGZrwbQuiFGrGTMZOvWMSpQtNl+tCCXlT4mWqJDRwuMGrI4DnnGzt3IKqNwS4 + Qyo9KqjMIPwnXZAmWPm3FOKe4sFwc5fpawKO01JZewDsYTDxVj+cwXwFxbE2yBiF + z2FAHwfopwaH35p3C6lkcgP2k/zgAlnBluzACUI+MKJ/G0gv/uAhj1OHJQ3L6kn1 + SpvQ41/ueBjlunExqQSYD7GtZ1Kg8uOcq2r+WISE3Qc9MpQFFkUVllmgWGwYDuN3 + Zsez95kCAwEAAaN7MHkwCQYDVR0TBAIwADAsBglghkgBhvhCAQ0EHxYdT3BlblNT + TCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFFlfyRO6G8y5qEFKikl5 + ajb2fT7XMB8GA1UdIwQYMBaAFCNsLT0+KV14uGw+quK7Lh5sh/JTMA0GCSqGSIb3 + DQEBBQUAA4ICAQAT5wJFPqervbja5+90iKxi1d0QVtVGB+z6aoAMuWK+qgi0vgvr + mu9ot2lvTSCSnRhjeiP0SIdqFMORmBtOCFk/kYDp9M/91b+vS+S9eAlxrNCB5VOf + PqxEPp/wv1rBcE4GBO/c6HcFon3F+oBYCsUQbZDKSSZxhDm3mj7pb67FNbZbJIzJ + 70HDsRe2O04oiTx+h6g6pW3cOQMgIAvFgKN5Ex727K4230B0NIdGkzuj4KSML0NM + slSAcXZ41OoSKNjy44BVEZv0ZdxTDrRM4EwJtNyggFzmtTuV02nkUj1bYYYC5f0L + ADr6s0XMyaNk8twlWYlYDZ5uKDpVRVBfiGcq0uJIzIvemhuTrofh8pBQQNkPRDFT + Rq1iTo1Ihhl3/Fl1kXk1WR3jTjNb4jHX7lIoXwpwp767HAPKGhjQ9cFbnHMEtkro + RlJYdtRq5mccDtwT0GFyoJLLBZdHHMHJz0F9H7FNk2tTQQMhK5MVYwg+LIaee586 + CQVqfbscp7evlgjLW98H+5zylRHAgoH2G79aHljNKMp9BOuq6SnEglEsiWGVtu2l + hnx8SB3sVJZHeer8f/UQQwqbAO+Kdy70NmbSaqaVtp8jOxLiidWkwSyRTsuU6D8i + DiH5uEqBXExjrj0FslxcVKdVj5glVcSmkLwZKbEU1OKwleT/iXFhvooWhQ== + -----END CERTIFICATE----- +""" + + +@pytest.mark.ubuntu +@pytest.mark.user_data(USER_DATA) +class TestCaCerts: + def test_certs_updated(self, class_client): + """Test that /etc/ssl/certs is updated as we expect.""" + root = "/etc/ssl/certs" + filenames = class_client.execute(["ls", "-1", root]).splitlines() + unlinked_files = [] + links = {} + for filename in filenames: + full_path = os.path.join(root, filename) + symlink_target = class_client.execute(["readlink", full_path]) + is_symlink = symlink_target.ok + if is_symlink: + links[filename] = symlink_target + else: + unlinked_files.append(filename) + + assert ["ca-certificates.crt"] == unlinked_files + assert "cloud-init-ca-certs.pem" == links["a535c1f3.0"] + assert ( + "/usr/share/ca-certificates/cloud-init-ca-certs.crt" + == links["cloud-init-ca-certs.pem"] + ) + + def test_cert_installed(self, class_client): + """Test that our specified cert has been installed""" + checksum = class_client.execute( + "sha256sum /etc/ssl/certs/ca-certificates.crt" + ) + assert ( + 
"78e875f18c73c1aab9167ae0bd323391e52222cc2dbcda42d129537219300062" + in checksum + ) -- cgit v1.2.3 From e5c67478136e1942a981ee18785ceb26579d0041 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 6 Jan 2021 16:19:15 -0500 Subject: integration_tests: add SSH key selection settings (#754) This introduces PUBLIC_SSH_KEY, to configure what public SSH key should be used to access systems under test, and KEYPAIR_NAME, to configure the name used in clouds for that SSH key (or the default SSH key, in PUBLIC_SSH_KEY's absence). --- tests/integration_tests/clouds.py | 9 +++++++++ tests/integration_tests/integration_settings.py | 12 ++++++++++++ 2 files changed, 21 insertions(+) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index 9f6a6380..8ae20442 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -91,6 +91,15 @@ class IntegrationCloud(ABC): def __init__(self, settings=integration_settings): self.settings = settings self.cloud_instance = self._get_cloud_instance() + if settings.PUBLIC_SSH_KEY is not None: + # If we have a non-default key, use it. + self.cloud_instance.use_key( + settings.PUBLIC_SSH_KEY, name=settings.KEYPAIR_NAME + ) + elif settings.KEYPAIR_NAME is not None: + # Even if we're using the default key, it may still have a + # different name in the clouds, so we need to set it separately. + self.cloud_instance.key_pair.name = settings.KEYPAIR_NAME self._released_image_id = self._get_initial_image() self.snapshot_id = None diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py index d7e02f04..6cabf3d8 100644 --- a/tests/integration_tests/integration_settings.py +++ b/tests/integration_tests/integration_settings.py @@ -75,6 +75,18 @@ CLOUD_INIT_SOURCE = 'NONE' COLLECT_LOGS = 'ON_ERROR' LOCAL_LOG_PATH = '/tmp/cloud_init_test_logs' +################################################################## +# SSH KEY SETTINGS +################################################################## + +# A path to the public SSH key to use for test runs. (Defaults to pycloudlib's +# default behaviour, using ~/.ssh/id_rsa.pub.) +PUBLIC_SSH_KEY = None + +# For clouds which use named keypairs for SSH connection, the name that is used +# for the keypair. (Defaults to pycloudlib's default behaviour.) 
+KEYPAIR_NAME = None + ################################################################## # GCE SPECIFIC SETTINGS ################################################################## -- cgit v1.2.3 From 88581e540728035c74af6173824e2cc517d13391 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 8 Jan 2021 14:29:08 -0500 Subject: integration_tests: log cloud-init version in SUT (#758) --- tests/integration_tests/clouds.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index 8ae20442..72d77058 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -166,7 +166,14 @@ class IntegrationCloud(ABC): if wait: pycloudlib_instance.wait(raise_on_cloudinit_failure=False) log.info('Launched instance: %s', pycloudlib_instance) - return self.get_instance(pycloudlib_instance, settings) + instance = self.get_instance(pycloudlib_instance, settings) + if wait: + # If we aren't waiting, we can't rely on command execution here + log.info( + 'cloud-init version: %s', + instance.execute("cloud-init --version") + ) + return instance def get_instance(self, cloud_instance, settings=integration_settings): return self.integration_instance_cls(self, cloud_instance, settings) -- cgit v1.2.3 From 2bfa461b01b14fb9fac1a3c6bee400bb4e549f81 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 12 Jan 2021 09:42:01 -0500 Subject: integration_tests: add integration test for LP: #1910835 (#761) --- tests/integration_tests/bugs/test_lp1910835.py | 66 ++++++++++++++++++++++++++ tox.ini | 1 + 2 files changed, 67 insertions(+) create mode 100644 tests/integration_tests/bugs/test_lp1910835.py (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/bugs/test_lp1910835.py b/tests/integration_tests/bugs/test_lp1910835.py new file mode 100644 index 00000000..87f92d5e --- /dev/null +++ b/tests/integration_tests/bugs/test_lp1910835.py @@ -0,0 +1,66 @@ +"""Integration test for LP: #1910835. + +If users do not provide an SSH key and instead ask Azure to generate a key for +them, the key material available in the IMDS may include CRLF sequences. Prior +to e56b55452549cb037da0a4165154ffa494e9678a, the Azure datasource handled keys +via a certificate, the tooling for which removed these sequences. This test +ensures that cloud-init does not regress support for this Azure behaviour. + +This test provides the SSH key configured for tests to the instance in two +ways: firstly, with CRLFs to mimic the generated keys, via the Azure API; +secondly, as user-data in unmodified form. This means that even on systems +which exhibit the bug fetching the platform's metadata, we can SSH into the SUT +to confirm this (instead of having to assert SSH failure; there are lots of +reasons SSH might fail). + +Once SSH'd in, we check that the two keys in .ssh/authorized_keys have the same +material: if the Azure datasource has removed the CRLFs correctly, then they +will match. 
+""" +import pytest + + +USER_DATA_TMPL = """\ +#cloud-config +ssh_authorized_keys: + - {}""" + + +@pytest.mark.sru_2021_01 +@pytest.mark.azure +def test_crlf_in_azure_metadata_ssh_keys(session_cloud, setup_image): + authorized_keys_path = "/home/{}/.ssh/authorized_keys".format( + session_cloud.cloud_instance.username + ) + # Pass in user-data to allow us to access the instance when the normal + # path fails + key_data = session_cloud.cloud_instance.key_pair.public_key_content + user_data = USER_DATA_TMPL.format(key_data) + # Throw a CRLF into the otherwise good key data, to emulate Azure's + # behaviour for generated keys + key_data = key_data[:20] + "\r\n" + key_data[20:] + vm_params = { + "os_profile": { + "linux_configuration": { + "ssh": { + "public_keys": [ + {"path": authorized_keys_path, "key_data": key_data} + ] + } + } + } + } + with session_cloud.launch( + launch_kwargs={"vm_params": vm_params, "user_data": user_data} + ) as client: + authorized_keys = ( + client.read_from_file(authorized_keys_path).strip().splitlines() + ) + # We expect one key from the cloud, one from user-data + assert 2 == len(authorized_keys) + # And those two keys should be the same, except for a possible key + # comment, which Azure strips out + assert ( + authorized_keys[0].rsplit(" ")[:2] + == authorized_keys[1].split(" ")[:2] + ) diff --git a/tox.ini b/tox.ini index 7c71f0f6..9444e66c 100644 --- a/tox.ini +++ b/tox.ini @@ -184,5 +184,6 @@ markers = user_data: the user data to be passed to the test instance instance_name: the name to be used for the test instance sru_2020_11: test is part of the 2020/11 SRU verification + sru_2021_01: test is part of the 2021/01 SRU verification ubuntu: this test should run on Ubuntu unstable: skip this test because it is flakey -- cgit v1.2.3 From 503435d1b2deb648c3cfc436f1e40b8ad86260fc Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 12 Jan 2021 11:32:27 -0600 Subject: Fix test gh-632 test to only run on NoCloud (#770) LP: #1911230 --- tests/integration_tests/bugs/test_gh632.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/bugs/test_gh632.py b/tests/integration_tests/bugs/test_gh632.py index cf166356..3c1f9347 100644 --- a/tests/integration_tests/bugs/test_gh632.py +++ b/tests/integration_tests/bugs/test_gh632.py @@ -9,6 +9,9 @@ import pytest from tests.integration_tests.instances import IntegrationInstance +# With some datasource hacking, we can run this on a NoCloud instance +@pytest.mark.lxd_container +@pytest.mark.lxd_vm @pytest.mark.sru_2020_11 def test_datasource_rbx_no_stacktrace(client: IntegrationInstance): client.write_to_file( -- cgit v1.2.3 From 37abbc43334d522cfbda595fcee2e52592b4d354 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 13 Jan 2021 10:26:32 -0500 Subject: cc_seed_random: update documentation and fix integration test (#771) The documentation did not mention that the given data may not be the exact string written: the cloud's random data may be added to it. Additionally, the documentation of the command key was incorrect. test_seed_random_data was updated to check that the given data is a prefix of the written data, to match cloud-init's expected (and, now, documented) behaviour. 
LP: #1911227 --- cloudinit/config/cc_seed_random.py | 12 ++++++++---- tests/integration_tests/modules/test_seed_random_data.py | 2 +- 2 files changed, 9 insertions(+), 5 deletions(-) (limited to 'tests/integration_tests') diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py index 4fb9b44e..911789c7 100644 --- a/cloudinit/config/cc_seed_random.py +++ b/cloudinit/config/cc_seed_random.py @@ -24,15 +24,19 @@ Configuration for this module is under the ``random_seed`` config key. The optionally be specified in encoded form, with the encoding specified in ``encoding``. +If the cloud provides its own random seed data, it will be appended to ``data`` +before it is written to ``file``. + .. note:: when using a multiline value for ``data`` or specifying binary data, be sure to follow yaml syntax and use the ``|`` and ``!binary`` yaml format specifiers when appropriate -Instead of specifying a data string, a command can be run to generate/collect -the data to be written. The command should be specified as a list of args in -the ``command`` key. If a command is specified that cannot be run, no error -will be reported unless ``command_required`` is set to true. +If the ``command`` key is specified, the given command will be executed. This +will happen after ``file`` has been populated. That command's environment will +contain the value of the ``file`` key as ``RANDOM_SEED_FILE``. If a command is +specified that cannot be run, no error will be reported unless +``command_required`` is set to true. For example, to use ``pollinate`` to gather data from a remote entropy server and write it to ``/dev/urandom``, the following could be diff --git a/tests/integration_tests/modules/test_seed_random_data.py b/tests/integration_tests/modules/test_seed_random_data.py index b365fa98..f6a67c19 100644 --- a/tests/integration_tests/modules/test_seed_random_data.py +++ b/tests/integration_tests/modules/test_seed_random_data.py @@ -25,4 +25,4 @@ class TestSeedRandomData: @pytest.mark.user_data(USER_DATA) def test_seed_random_data(self, client): seed_output = client.read_from_file("/root/seed") - assert seed_output.strip() == "MYUb34023nD:LFDK10913jk;dfnk:Df" + assert seed_output.startswith("MYUb34023nD:LFDK10913jk;dfnk:Df") -- cgit v1.2.3 From 7b58982abdf58b948ee7c5f91a6c6fdfb99b59f1 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 13 Jan 2021 10:46:06 -0500 Subject: test_upgrade: add some missing commas (#769) --- tests/integration_tests/test_upgrade.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py index a115bea7..660d363f 100644 --- a/tests/integration_tests/test_upgrade.py +++ b/tests/integration_tests/test_upgrade.py @@ -26,9 +26,9 @@ def _output_to_compare(instance, file_path, netcfg_path): # have broken across re-constitution of a cached datasource. Some # platforms invalidate their datasource cache on reboot, so we run # it here to ensure we get a dirty run. 
- 'cloud-init init' + 'cloud-init init', 'grep Trace /var/log/cloud-init.log', - 'cloud-id' + 'cloud-id', 'cat {}'.format(netcfg_path), 'systemd-analyze', 'systemd-analyze blame', -- cgit v1.2.3 From 9a258eebd96aa5ad4486dba1fe86bea5bcf00c2f Mon Sep 17 00:00:00 2001 From: Pavel Abalikhin Date: Thu, 14 Jan 2021 01:19:17 +0300 Subject: net: Fix static routes to host in eni renderer (#668) Route '-net' parameter is incompatible with /32 IPv4 addresses so we have to use '-host' in that case. --- cloudinit/net/eni.py | 2 ++ tests/integration_tests/bugs/test_gh668.py | 37 ++++++++++++++++++++++++++++++ tests/unittests/test_net.py | 7 ++++++ tools/.github-cla-signers | 1 + 4 files changed, 47 insertions(+) create mode 100644 tests/integration_tests/bugs/test_gh668.py (limited to 'tests/integration_tests') diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index 0074691b..a89e5ad2 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -387,6 +387,8 @@ class Renderer(renderer.Renderer): if k == 'network': if ':' in route[k]: route_line += ' -A inet6' + elif route.get('prefix') == 32: + route_line += ' -host' else: route_line += ' -net' if 'prefix' in route: diff --git a/tests/integration_tests/bugs/test_gh668.py b/tests/integration_tests/bugs/test_gh668.py new file mode 100644 index 00000000..a3a0c374 --- /dev/null +++ b/tests/integration_tests/bugs/test_gh668.py @@ -0,0 +1,37 @@ +"""Integration test for gh-668. + +Ensure that static route to host is working correctly. +The original problem is specific to the ENI renderer but that test is suitable +for all network configuration outputs. +""" + +import pytest + +from tests.integration_tests.instances import IntegrationInstance + + +DESTINATION_IP = "172.16.0.10" +GATEWAY_IP = "10.0.0.100" + +NETWORK_CONFIG = """\ +version: 2 +ethernets: + eth0: + addresses: [10.0.0.10/8] + dhcp4: false + routes: + - to: {}/32 + via: {} +""".format(DESTINATION_IP, GATEWAY_IP) + +EXPECTED_ROUTE = "{} via {}".format(DESTINATION_IP, GATEWAY_IP) + + +@pytest.mark.lxd_container +@pytest.mark.lxd_vm +@pytest.mark.lxd_config_dict({ + "user.network-config": NETWORK_CONFIG, +}) +def test_static_route_to_host(client: IntegrationInstance): + route = client.execute("ip route | grep {}".format(DESTINATION_IP)) + assert route.startswith(EXPECTED_ROUTE) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index bf0cdabb..38d934d4 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -4820,6 +4820,9 @@ class TestEniRoundTrip(CiTestCase): {'type': 'route', 'id': 6, 'metric': 1, 'destination': '10.0.200.0/16', 'gateway': '172.23.31.1'}, + {'type': 'route', 'id': 7, + 'metric': 1, 'destination': '10.0.0.100/32', + 'gateway': '172.23.31.1'}, ] files = self._render_and_read( @@ -4843,6 +4846,10 @@ class TestEniRoundTrip(CiTestCase): '172.23.31.1 metric 1 || true'), ('pre-down route del -net 10.0.200.0/16 gw ' '172.23.31.1 metric 1 || true'), + ('post-up route add -host 10.0.0.100/32 gw ' + '172.23.31.1 metric 1 || true'), + ('pre-down route del -host 10.0.0.100/32 gw ' + '172.23.31.1 metric 1 || true'), ] found = files['/etc/network/interfaces'].splitlines() diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index e6e3bdd1..ac95b422 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -32,6 +32,7 @@ slyon smoser sshedi TheRealFalcon +tnt-dev tomponline tsanghan WebSpider -- cgit v1.2.3 From c25118acc7542e93ccc56a0cc34f32c24b0e1044 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 19 Jan 
2021 12:40:16 -0500 Subject: integration_tests: log image serial if available (#772) Ubuntu cloud images ship /etc/cloud/build.info which includes a line with the build serial used to identify the image: serial: 20210108 This is valuable information when verifying Ubuntu issues (to confirm that testing is happening against the expected image), but is also useful when debugging test failures: manifests of all packages in (the base) images can be found at http://cloud-images.ubuntu.com/ --- tests/integration_tests/clouds.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index 72d77058..fc0a61d5 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -173,6 +173,9 @@ class IntegrationCloud(ABC): 'cloud-init version: %s', instance.execute("cloud-init --version") ) + serial = instance.execute("grep serial /etc/cloud/build.info") + if serial: + log.info('image serial: %s', serial.split()[1]) return instance def get_instance(self, cloud_instance, settings=integration_settings): -- cgit v1.2.3 From 1527efa740ce7e66d9506ea62b0b8010d71a4104 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 22 Jan 2021 09:25:38 -0600 Subject: Use more cloud defaults in integration tests (#757) Stop requiring compartment_id for OCI and project_id for GCE since they can now be inferred in pycloudlib. --- integration-requirements.txt | 2 +- tests/integration_tests/clouds.py | 6 +----- tests/integration_tests/integration_settings.py | 18 ------------------ 3 files changed, 2 insertions(+), 24 deletions(-) (limited to 'tests/integration_tests') diff --git a/integration-requirements.txt b/integration-requirements.txt index ec765763..4682caa0 100644 --- a/integration-requirements.txt +++ b/integration-requirements.txt @@ -1,5 +1,5 @@ # PyPI requirements for cloud-init integration testing # https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html # -pycloudlib @ git+https://github.com/canonical/pycloudlib.git@72e800b8e99c5b735348c4778f19f92cf6c63de0 +pycloudlib @ git+https://github.com/canonical/pycloudlib.git@39805087affaed07b266d64cf0d883be775b5c0f pytest diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index fc0a61d5..63240d17 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -216,9 +216,6 @@ class GceCloud(IntegrationCloud): def _get_cloud_instance(self): return GCE( tag='gce-integration-test', - project=self.settings.GCE_PROJECT, - region=self.settings.GCE_REGION, - zone=self.settings.GCE_ZONE, ) @@ -246,8 +243,7 @@ class OciCloud(IntegrationCloud): def _get_cloud_instance(self): return OCI( - tag='oci-integration-test', - compartment_id=self.settings.OCI_COMPARTMENT_ID + tag='oci-integration-test' ) diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py index 6cabf3d8..22b4fdda 100644 --- a/tests/integration_tests/integration_settings.py +++ b/tests/integration_tests/integration_settings.py @@ -87,24 +87,6 @@ PUBLIC_SSH_KEY = None # for the keypair. (Defaults to pycloudlib's default behaviour.) 
KEYPAIR_NAME = None -################################################################## -# GCE SPECIFIC SETTINGS -################################################################## -# Required for GCE -GCE_PROJECT = None - -# You probably want to override these -GCE_REGION = 'us-central1' -GCE_ZONE = 'a' - -################################################################## -# OCI SPECIFIC SETTINGS -################################################################## -# Compartment-id found at -# https://console.us-phoenix-1.oraclecloud.com/a/identity/compartments -# Required for Oracle -OCI_COMPARTMENT_ID = None - ################################################################## # USER SETTINGS OVERRIDES ################################################################## -- cgit v1.2.3 From a9c904dc6438c908cd5341312311dfbbb18c81d2 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 26 Jan 2021 11:25:04 -0600 Subject: Remove 'remove-raise-on-failure' calls from integration_tests (#788) pycloudlib no longer raises exceptions when cloud-init fails to start, and the API has been updated accordingly. Changes have been made to integration tests accordingly --- integration-requirements.txt | 2 +- tests/integration_tests/bugs/test_gh570.py | 3 ++- tests/integration_tests/bugs/test_lp1900837.py | 3 ++- tests/integration_tests/clouds.py | 14 ++++---------- tests/integration_tests/instances.py | 13 ++++--------- tests/integration_tests/modules/test_power_state_change.py | 4 ++-- tests/integration_tests/test_upgrade.py | 7 +++---- 7 files changed, 18 insertions(+), 28 deletions(-) (limited to 'tests/integration_tests') diff --git a/integration-requirements.txt b/integration-requirements.txt index 4682caa0..c959001e 100644 --- a/integration-requirements.txt +++ b/integration-requirements.txt @@ -1,5 +1,5 @@ # PyPI requirements for cloud-init integration testing # https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html # -pycloudlib @ git+https://github.com/canonical/pycloudlib.git@39805087affaed07b266d64cf0d883be775b5c0f +pycloudlib @ git+https://github.com/canonical/pycloudlib.git@878981e3c7caaf583a8c7c5494dba9d9447acee8 pytest diff --git a/tests/integration_tests/bugs/test_gh570.py b/tests/integration_tests/bugs/test_gh570.py index b8866edd..534cfb9a 100644 --- a/tests/integration_tests/bugs/test_gh570.py +++ b/tests/integration_tests/bugs/test_gh570.py @@ -34,5 +34,6 @@ def test_nocloud_seedfrom_vendordata(client: IntegrationInstance): VENDOR_DATA, ) client.execute('cloud-init clean --logs') - client.restart(raise_on_cloudinit_failure=True) + client.restart() + assert client.execute('cloud-init status').ok assert 'seeded_vendordata_test_file' in client.execute('ls /var/tmp') diff --git a/tests/integration_tests/bugs/test_lp1900837.py b/tests/integration_tests/bugs/test_lp1900837.py index 395cace0..fcc2b751 100644 --- a/tests/integration_tests/bugs/test_lp1900837.py +++ b/tests/integration_tests/bugs/test_lp1900837.py @@ -22,7 +22,8 @@ class TestLogPermissionsNotResetOnReboot: assert "600" == _get_log_perms(client) # Reboot - client.restart(raise_on_cloudinit_failure=True) + client.restart() + assert client.execute('cloud-init status').ok # Check that permissions are not reset on reboot assert "600" == _get_log_perms(client) diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index 63240d17..9eebb10a 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -135,7 +135,7 @@ class IntegrationCloud(ABC): 
pycloudlib_instance = self.cloud_instance.launch(**launch_kwargs) return pycloudlib_instance - def launch(self, user_data=None, launch_kwargs=None, wait=True, + def launch(self, user_data=None, launch_kwargs=None, settings=integration_settings): if launch_kwargs is None: launch_kwargs = {} @@ -147,13 +147,9 @@ class IntegrationCloud(ABC): self.settings.EXISTING_INSTANCE_ID ) return - if 'wait' in launch_kwargs: - raise Exception("Specify 'wait' directly to launch, " - "not in 'launch_kwargs'") kwargs = { 'image_id': self.image_id, 'user_data': user_data, - 'wait': False, } kwargs.update(launch_kwargs) log.info( @@ -163,11 +159,9 @@ class IntegrationCloud(ABC): ) pycloudlib_instance = self._perform_launch(kwargs) - if wait: - pycloudlib_instance.wait(raise_on_cloudinit_failure=False) log.info('Launched instance: %s', pycloudlib_instance) instance = self.get_instance(pycloudlib_instance, settings) - if wait: + if kwargs.get('wait', True): # If we aren't waiting, we can't rely on command execution here log.info( 'cloud-init version: %s', @@ -277,7 +271,7 @@ class _LxdIntegrationCloud(IntegrationCloud): def _perform_launch(self, launch_kwargs): launch_kwargs['inst_type'] = launch_kwargs.pop('instance_type', None) - launch_kwargs.pop('wait') + wait = launch_kwargs.pop('wait', True) release = launch_kwargs.pop('image_id') try: @@ -293,7 +287,7 @@ class _LxdIntegrationCloud(IntegrationCloud): ) if self.settings.CLOUD_INIT_SOURCE == 'IN_PLACE': self._mount_source(pycloudlib_instance) - pycloudlib_instance.start(wait=False) + pycloudlib_instance.start(wait=wait) return pycloudlib_instance diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py index 4321ce07..0d1e1aef 100644 --- a/tests/integration_tests/instances.py +++ b/tests/integration_tests/instances.py @@ -56,18 +56,13 @@ class IntegrationInstance: def destroy(self): self.instance.delete() - def restart(self, raise_on_cloudinit_failure=False): + def restart(self): """Restart this instance (via cloud mechanism) and wait for boot. - This wraps pycloudlib's `BaseInstance.restart` to pass - `raise_on_cloudinit_failure=False` to `BaseInstance.wait`, mirroring - our launch behaviour. + This wraps pycloudlib's `BaseInstance.restart` """ - self.instance.restart(wait=False) - log.info("Instance restarted; waiting for boot") - self.instance.wait( - raise_on_cloudinit_failure=raise_on_cloudinit_failure - ) + log.info("Restarting instance and waiting for boot") + self.instance.restart() def execute(self, command, *, use_sudo=True) -> Result: if self.instance.username == 'root' and use_sudo is False: diff --git a/tests/integration_tests/modules/test_power_state_change.py b/tests/integration_tests/modules/test_power_state_change.py index 844dccfa..32dfc86d 100644 --- a/tests/integration_tests/modules/test_power_state_change.py +++ b/tests/integration_tests/modules/test_power_state_change.py @@ -27,13 +27,13 @@ def _detect_reboot(instance: IntegrationInstance): # detecting the first boot or second boot, so we also check # the logs to ensure we've booted twice. 
If the logs show we've # only booted once, wait until we've booted twice - instance.instance.wait(raise_on_cloudinit_failure=False) + instance.instance.wait() for _ in range(600): try: log = instance.read_from_file('/var/log/cloud-init.log') boot_count = log.count("running 'init-local'") if boot_count == 1: - instance.instance.wait(raise_on_cloudinit_failure=False) + instance.instance.wait() elif boot_count > 1: break except Exception: diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py index 660d363f..233a574b 100644 --- a/tests/integration_tests/test_upgrade.py +++ b/tests/integration_tests/test_upgrade.py @@ -43,15 +43,14 @@ def _output_to_compare(instance, file_path, netcfg_path): def _restart(instance): # work around pad.lv/1908287 - try: - instance.restart(raise_on_cloudinit_failure=True) - except OSError as e: + instance.restart() + if not instance.execute('cloud-init status --wait --long').ok: for _ in range(10): time.sleep(5) result = instance.execute('cloud-init status --wait --long') if result.ok: return - raise e + raise Exception("Cloud-init didn't finish starting up") @pytest.mark.sru_2020_11 -- cgit v1.2.3 From e7e7b42900d83d540d69adf37962864b53dab602 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 26 Jan 2021 15:51:35 -0600 Subject: Revert integration test associated with reverted #586 (#784) --- tests/integration_tests/assets/__init__.py | 16 ---------- tests/integration_tests/assets/test_id_rsa | 38 ----------------------- tests/integration_tests/assets/test_id_rsa.pub | 1 - tests/integration_tests/bugs/test_gh586.py | 42 -------------------------- 4 files changed, 97 deletions(-) delete mode 100644 tests/integration_tests/assets/__init__.py delete mode 100755 tests/integration_tests/assets/test_id_rsa delete mode 100644 tests/integration_tests/assets/test_id_rsa.pub delete mode 100644 tests/integration_tests/bugs/test_gh586.py (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/assets/__init__.py b/tests/integration_tests/assets/__init__.py deleted file mode 100644 index 0cf27982..00000000 --- a/tests/integration_tests/assets/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -import os -from collections import namedtuple -from pathlib import Path - -ASSET_DIR = Path(os.path.dirname(os.path.realpath(__file__))) -PRIVATE_RSA_KEY_PATH = ASSET_DIR / 'test_id_rsa' -PUBLIC_RSA_KEY_PATH = ASSET_DIR / 'test_id_rsa.pub' - - -def get_test_rsa_keypair(): - with PUBLIC_RSA_KEY_PATH.open() as public_file: - public_key = public_file.read() - with PRIVATE_RSA_KEY_PATH.open() as private_file: - private_key = private_file.read() - KeyPair = namedtuple('KeyPair', 'public_rsa_key private_rsa_key') - return KeyPair(public_key, private_key) diff --git a/tests/integration_tests/assets/test_id_rsa b/tests/integration_tests/assets/test_id_rsa deleted file mode 100755 index 1ef67135..00000000 --- a/tests/integration_tests/assets/test_id_rsa +++ /dev/null @@ -1,38 +0,0 @@ ------BEGIN OPENSSH PRIVATE KEY----- -b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn -NhAAAAAwEAAQAAAYEAnY0A/9s891CKmS3CBVGgLjdEifgYf8y3Ht+TC3+ajvEIcEoxevgK -t2mXoPw4P/5QrT4QmgG5N2Jr4Q5eNcgZ0zytoAlNxcM/R/soKi46hiLSArMNEiOlVi/vbf -q3Du5XIiX8fZD+4xgq3+jQ3PMxzqBf3AU6RAC9Jj94pBHCbaKPSFfWU7fq/JIvukKIWckv -UCkzNiaAWop3jNSEwFZbqKe2A1yesfgSpCmycggOVN/tMpFeVn8rzzG+LJ47TSoBav1P0F -CegVKq5iyGhLdM4TnS3ajbSIf3+SSDMImyzzmXqPyG5zwcH8zuEI3aR9DTMmi49HnjVX0A -N+61iQf8MEz9nnpUCnaeogiI4zfJQIMKHijGcFYR092BqJUmy50s239nFFBzCFRcmERdyh 
-fxrrG5kpEPllPGO4AuCtR4aW9yVTqfDNO2dfX5xvF8ZakUCMdR7JomuzW097U8zHE6EOTI -XVXatlI1cHt+KeugVrjzK2YNE1HEeGCt4l7qmpAXAAAFgL+LUj+/i1I/AAAAB3NzaC1yc2 -EAAAGBAJ2NAP/bPPdQipktwgVRoC43RIn4GH/Mtx7fkwt/mo7xCHBKMXr4Crdpl6D8OD/+ -UK0+EJoBuTdia+EOXjXIGdM8raAJTcXDP0f7KCouOoYi0gKzDRIjpVYv7236tw7uVyIl/H -2Q/uMYKt/o0NzzMc6gX9wFOkQAvSY/eKQRwm2ij0hX1lO36vySL7pCiFnJL1ApMzYmgFqK -d4zUhMBWW6intgNcnrH4EqQpsnIIDlTf7TKRXlZ/K88xviyeO00qAWr9T9BQnoFSquYsho -S3TOE50t2o20iH9/kkgzCJss85l6j8huc8HB/M7hCN2kfQ0zJouPR541V9ADfutYkH/DBM -/Z56VAp2nqIIiOM3yUCDCh4oxnBWEdPdgaiVJsudLNt/ZxRQcwhUXJhEXcoX8a6xuZKRD5 -ZTxjuALgrUeGlvclU6nwzTtnX1+cbxfGWpFAjHUeyaJrs1tPe1PMxxOhDkyF1V2rZSNXB7 -finroFa48ytmDRNRxHhgreJe6pqQFwAAAAMBAAEAAAGALgAgfZPGnjMu9ICOuLzXdwb+BQ -aiKJZeFS6UIXRVbUzk+NxAzDWl811qPz/FMLIRXjPT5xN/v7MF6oUmbq+JEssRqrtssMRM -MrkbRg2PWuDJzq32sAgmWx7N2p+sWTivyjGrIgJ22VmSEyRH72s2bK0YsAX6uCY7E/LOR6 -FD0nz3Ntkmo/T8MFiChPCuHQEHxnDxGett6IGrXDwksn/EbV7iXuLpFu9mifX+uxqtDI0B -FZWqJLkm0m0kqKReji4oHIUJXw6yQrVYTy71WZ/VXzJ+B7w/WvFGJjAvZQSrwzElRfH8Th -3knN6csSvC2sGCx5Dqi7fxpL4nZsYdGyB8siIxbvye4DUO8RXMjfJP9R+HMsBd4tSxjzLE -lVNBrClJt2yt1JbuAwzsZpbk5jvJ2PqupqQX+WLrJ+i66nByUDCxtXuHG+e3FnATdcLtSv -QvpuzSN3pfurK/C/J3kf6BTF9avK+bgw+MhbD3Ct4Lqi9s3VSgBQQCp7Hr67JEv4jZAAAA -wGTnfuIb3F3p/NhEudAucWkXEbV9zsnIAtSoguSh7fPC21MGyfx6chsf4mXKHUNfuw8m5W -2+tYa18af7Tp2nHWHPmWbY1z+g/DFCMCPaEbi1zffMlcTw7OZ8eV6D4isD30ul+eVguSYE -Hw+ILgWQXVen0oFtM1FOHiIPtB8CWyaX6njyLRQU9dt3qcWBm/tocy4hTSfheeU21pupEw -6/kfvqkq8J5MYTWF9yE1dNGEuoV2XpidX4ell/oH0qEPy4vQAAAMEAzZlbUVJ8OimOLivJ -J0KYJD/QDPMk1bDsHi3MhBnWYEJ4t78L63aG2CAIy9DazueJH7S7h1N0Wbx5v0gSGA1gb2 -8YaEeUJDrrV7I1IbG9Gu4/Swfm4YAE+1mwK54CE5Nkefd+nz7jxLQSRUNO+AB3ldRpgqXV -CGT+rkhF3wY9D4dpzRl9KQiVKIQLfqyUlO5PHWEGKh3gPRbHUv8GWSA463L3aSl26cnAmv -CE++Ssusa9cIczTO7H+BsFc7MpWJKrAAAAwQDELFCX4ES/0HcVlTH3IjoRtIvPMb6lq545 -hPXwOb9ZpBmrgd5byZ0cEkjgEklNUZ9A9QIxrPNlVsB+z8QU+sV1qsExrPA9Fo8t9vFApi -u6WFR5QbIl9A2fh+2wZR2i0ftsndGdy9zA4LBiBj4EzQYYG7PEzSHIvyGGc0iaG4YCP+D0 -ljbVv47DruKa3RYv8Feuevo2hhRBRLnwah8cqXyehprqwA96WOoRXZVPgz7LQHKKAVYKWY -THmWZfry/xGEUAAAAKamFtZXNAbmV3dAE= ------END OPENSSH PRIVATE KEY----- diff --git a/tests/integration_tests/assets/test_id_rsa.pub b/tests/integration_tests/assets/test_id_rsa.pub deleted file mode 100644 index c14c3ea5..00000000 --- a/tests/integration_tests/assets/test_id_rsa.pub +++ /dev/null @@ -1 +0,0 @@ -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCdjQD/2zz3UIqZLcIFUaAuN0SJ+Bh/zLce35MLf5qO8QhwSjF6+Aq3aZeg/Dg//lCtPhCaAbk3YmvhDl41yBnTPK2gCU3Fwz9H+ygqLjqGItICsw0SI6VWL+9t+rcO7lciJfx9kP7jGCrf6NDc8zHOoF/cBTpEAL0mP3ikEcJtoo9IV9ZTt+r8ki+6QohZyS9QKTM2JoBaineM1ITAVluop7YDXJ6x+BKkKbJyCA5U3+0ykV5WfyvPMb4snjtNKgFq/U/QUJ6BUqrmLIaEt0zhOdLdqNtIh/f5JIMwibLPOZeo/IbnPBwfzO4QjdpH0NMyaLj0eeNVfQA37rWJB/wwTP2eelQKdp6iCIjjN8lAgwoeKMZwVhHT3YGolSbLnSzbf2cUUHMIVFyYRF3KF/GusbmSkQ+WU8Y7gC4K1Hhpb3JVOp8M07Z19fnG8XxlqRQIx1Hsmia7NbT3tTzMcToQ5MhdVdq2UjVwe34p66BWuPMrZg0TUcR4YK3iXuqakBc= test@example diff --git a/tests/integration_tests/bugs/test_gh586.py b/tests/integration_tests/bugs/test_gh586.py deleted file mode 100644 index 44b643f1..00000000 --- a/tests/integration_tests/bugs/test_gh586.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Integration test for pull #586 - -If a non-default AuthorizedKeysFile is specified in /etc/ssh/sshd_config, -ensure we can still ssh as expected. 
-""" -import paramiko -import pytest -from io import StringIO -from tests.integration_tests.assets import get_test_rsa_keypair - - -public_rsa_key, private_rsa_key = get_test_rsa_keypair() -USER_DATA = """\ -#cloud-config -bootcmd: - - sed -i 's/#AuthorizedKeysFile.*/AuthorizedKeysFile\\ .ssh\\/authorized_keys2/' /etc/ssh/sshd_config -ssh_authorized_keys: - - {public_key} -""".format(public_key=public_rsa_key) # noqa: E501 - - -@pytest.mark.sru_2020_11 -@pytest.mark.user_data(USER_DATA) -def test_non_default_authorized_keys(client): - sshd = client.read_from_file('/etc/ssh/sshd_config') - assert 'AuthorizedKeysFile .ssh/authorized_keys2' in sshd - assert sshd.count('AuthorizedKeysFile') == 1 - - ssh_dir = client.execute('ls /home/ubuntu/.ssh').stdout - assert 'authorized_keys2' in ssh_dir - - ssh = paramiko.SSHClient() - ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - paramiko_key = paramiko.RSAKey.from_private_key(StringIO(private_rsa_key)) - - # Will fail with paramiko.ssh_exception.AuthenticationException - # if this bug isn't fixed - ssh.connect( - client.instance.ip, - username=client.instance.username, - pkey=paramiko_key, - ) -- cgit v1.2.3 From 3a0a5894d112d667f313d7fb3ab0850a39bc3020 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 5 Feb 2021 17:11:14 -0500 Subject: test_gh668: fix failure on LXD VMs (#801) In LXD containers, the default interface is named eth0. In VMs, it isn't; it's renamed by systemd (likely to enp5s0, but we can't rely on that). This means that, on VMs, the network configuration we specify for "eth0" doesn't match an interface in the system and so is not applied. This modifies the test to set a MAC address in a match clause in the network configuration and on the eth0 interface (which is the LXD name in both containers and VMs pre-rename): this ensures that the specified configuration applies in both cases. 
--- tests/integration_tests/bugs/test_gh668.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
(limited to 'tests/integration_tests')

diff --git a/tests/integration_tests/bugs/test_gh668.py b/tests/integration_tests/bugs/test_gh668.py
index a3a0c374..66ee302e 100644
--- a/tests/integration_tests/bugs/test_gh668.py
+++ b/tests/integration_tests/bugs/test_gh668.py
@@ -12,6 +12,7 @@ from tests.integration_tests.instances import IntegrationInstance
 DESTINATION_IP = "172.16.0.10"
 GATEWAY_IP = "10.0.0.100"
+MAC_ADDRESS = "de:ad:be:ef:12:34"
 
 NETWORK_CONFIG = """\
 version: 2
@@ -22,7 +23,9 @@ ethernets:
     routes:
     - to: {}/32
       via: {}
-""".format(DESTINATION_IP, GATEWAY_IP)
+    match:
+      macaddress: {}
+""".format(DESTINATION_IP, GATEWAY_IP, MAC_ADDRESS)
 
 EXPECTED_ROUTE = "{} via {}".format(DESTINATION_IP, GATEWAY_IP)
 
@@ -31,6 +34,7 @@ EXPECTED_ROUTE = "{} via {}".format(DESTINATION_IP, GATEWAY_IP)
 @pytest.mark.lxd_vm
 @pytest.mark.lxd_config_dict({
     "user.network-config": NETWORK_CONFIG,
+    "volatile.eth0.hwaddr": MAC_ADDRESS,
 })
 def test_static_route_to_host(client: IntegrationInstance):
     route = client.execute("ip route | grep {}".format(DESTINATION_IP))
-- 
cgit v1.2.3

From 09193e5141ca45b822617399047204abd701047e Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 9 Feb 2021 12:03:23 -0500 Subject: Remove wait argument from tests with session_cloud calls (#805)

--- tests/integration_tests/modules/test_power_state_change.py | 2 +-
 tests/integration_tests/test_upgrade.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
(limited to 'tests/integration_tests')

diff --git a/tests/integration_tests/modules/test_power_state_change.py b/tests/integration_tests/modules/test_power_state_change.py
index 32dfc86d..eebe6608 100644
--- a/tests/integration_tests/modules/test_power_state_change.py
+++ b/tests/integration_tests/modules/test_power_state_change.py
@@ -65,7 +65,7 @@ class TestPowerChange:
         with session_cloud.launch(
             user_data=USER_DATA.format(
                 delay=delay, mode=mode, timeout=timeout, condition='true'),
-            wait=False
+            launch_kwargs={'wait': False},
         ) as instance:
             if mode == 'reboot':
                 _detect_reboot(instance)
diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py
index 233a574b..c20cb3c1 100644
--- a/tests/integration_tests/test_upgrade.py
+++ b/tests/integration_tests/test_upgrade.py
@@ -87,7 +87,7 @@ def test_upgrade(session_cloud: IntegrationCloud):
         netcfg_path = '/etc/network/interfaces.d/50-cloud-init.cfg'
 
     with session_cloud.launch(
-        launch_kwargs=launch_kwargs, user_data=USER_DATA, wait=True,
+        launch_kwargs=launch_kwargs, user_data=USER_DATA,
     ) as instance:
         _output_to_compare(instance, before_path, netcfg_path)
         instance.install_new_cloud_init(source, take_snapshot=False)
-- 
cgit v1.2.3

From 84e56f84d24e8f18b73c559e219b70527b9347af Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 16 Feb 2021 13:15:16 -0500 Subject: Fix attempting to decode binary data in test_seed_random_data test (#806)

`test_seed_random_data.py` was failing on OpenStack because OpenStack appends
additional binary seed data to the end of the specified file. The test has
been changed to read only the ASCII portion of the seed file.
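For illustration (editorial, not part of the original patch): the first 31
bytes of /root/seed are the ASCII string the test seeds, and anything after
them may be arbitrary binary. A minimal Python sketch of the failure mode,
using an invented binary tail:

    # 31-byte ASCII prefix seeded by the test, plus a made-up binary tail
    seed = b"MYUb34023nD:LFDK10913jk;dfnk:Df" + b"\x9f\x00\x8b"
    print(seed[:31].decode("utf-8"))  # fine: the ASCII prefix decodes
    try:
        seed.decode("utf-8")  # fails: 0x9f is not valid UTF-8 here
    except UnicodeDecodeError as err:
        print(err)

Reading with `head -c 31`, as the diff below does, sidesteps the decode
entirely by never transferring the binary tail.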
--- tests/integration_tests/modules/test_seed_random_data.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/modules/test_seed_random_data.py b/tests/integration_tests/modules/test_seed_random_data.py index f6a67c19..94e982e0 100644 --- a/tests/integration_tests/modules/test_seed_random_data.py +++ b/tests/integration_tests/modules/test_seed_random_data.py @@ -24,5 +24,7 @@ class TestSeedRandomData: @pytest.mark.user_data(USER_DATA) def test_seed_random_data(self, client): - seed_output = client.read_from_file("/root/seed") - assert seed_output.startswith("MYUb34023nD:LFDK10913jk;dfnk:Df") + # Only read the first 31 characters, because the rest could be + # binary data + result = client.execute("head -c 31 < /root/seed") + assert result.startswith("MYUb34023nD:LFDK10913jk;dfnk:Df") -- cgit v1.2.3 From 85e88af06aee0315c48664d968cef5a4b40e7846 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 16 Feb 2021 17:24:46 -0500 Subject: integration_tests: fix test_gh626 on LXD VMs (#809) Without a MAC address match clause, the test network configuration is not applied to the primary interface in LXD VMs (which is named enp*s* rather than eth0). --- tests/integration_tests/bugs/test_gh626.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/bugs/test_gh626.py b/tests/integration_tests/bugs/test_gh626.py index 2d336462..7b0df6eb 100644 --- a/tests/integration_tests/bugs/test_gh626.py +++ b/tests/integration_tests/bugs/test_gh626.py @@ -11,13 +11,16 @@ from tests.integration_tests.clouds import ImageSpecification from tests.integration_tests.instances import IntegrationInstance +MAC_ADDRESS = "de:ad:be:ef:12:34" NETWORK_CONFIG = """\ version: 2 ethernets: eth0: dhcp4: true wakeonlan: true -""" + match: + macaddress: {} +""".format(MAC_ADDRESS) EXPECTED_ENI_END = """\ iface eth0 inet dhcp @@ -28,7 +31,8 @@ iface eth0 inet dhcp @pytest.mark.lxd_container @pytest.mark.lxd_vm @pytest.mark.lxd_config_dict({ - 'user.network-config': NETWORK_CONFIG + 'user.network-config': NETWORK_CONFIG, + "volatile.eth0.hwaddr": MAC_ADDRESS, }) def test_wakeonlan(client: IntegrationInstance): if ImageSpecification.from_os_image().release == 'xenial': -- cgit v1.2.3 From 7f1cefe90ba4253c9d67b75a4f82c9f30d322c48 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 16 Feb 2021 21:37:39 -0500 Subject: Port apt cloud_tests to integration tests (#808) --- tests/integration_tests/modules/test_apt.py | 291 +++++++++++++++++++++ .../modules/test_apt_configure_sources_list.py | 52 ---- 2 files changed, 291 insertions(+), 52 deletions(-) create mode 100644 tests/integration_tests/modules/test_apt.py delete mode 100644 tests/integration_tests/modules/test_apt_configure_sources_list.py (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/modules/test_apt.py b/tests/integration_tests/modules/test_apt.py new file mode 100644 index 00000000..5e3d474c --- /dev/null +++ b/tests/integration_tests/modules/test_apt.py @@ -0,0 +1,291 @@ +"""Series of integration tests covering apt functionality.""" +import re +from tests.integration_tests.clouds import ImageSpecification + +import pytest + +from tests.integration_tests.instances import IntegrationInstance + + +USER_DATA = """\ +#cloud-config +apt: + conf: | + APT { + Get { + Assume-Yes "true"; + Fix-Broken "true"; + } + } + proxy: "http://proxy.internal:3128" + http_proxy: 
"http://squid.internal:3128" + ftp_proxy: "ftp://squid.internal:3128" + https_proxy: "https://squid.internal:3128" + primary: + - arches: [default] + uri: http://badarchive.ubuntu.com/ubuntu + security: + - arches: [default] + uri: http://badsecurity.ubuntu.com/ubuntu + sources_list: | + deb $MIRROR $RELEASE main restricted + deb-src $MIRROR $RELEASE main restricted + deb $PRIMARY $RELEASE universe restricted + deb-src $PRIMARY $RELEASE universe restricted + deb $SECURITY $RELEASE-security multiverse + deb-src $SECURITY $RELEASE-security multiverse + sources: + test_keyserver: + keyid: 72600DB15B8E4C8B1964B868038ACC97C660A937 + keyserver: keyserver.ubuntu.com + source: "deb http://ppa.launchpad.net/cloud-init-raharper/curtin-dev/ubuntu $RELEASE main" + test_ppa: + keyid: 441614D8 + keyserver: keyserver.ubuntu.com + source: "ppa:simplestreams-dev/trunk" + test_key: + source: "deb http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu $RELEASE main" + key: | + -----BEGIN PGP PUBLIC KEY BLOCK----- + Version: SKS 1.1.6 + Comment: Hostname: keyserver.ubuntu.com + + mQINBFbZRUIBEAC+A0PIKYBP9kLC4hQtRrffRS11uLo8/BdtmOdrlW0hpPHzCfKnjR3tvSEI + lqPHG1QrrjAXKZDnZMRz+h/px7lUztvytGzHPSJd5ARUzAyjyRezUhoJ3VSCxrPqx62avuWf + RfoJaIeHfDehL5/dTVkyiWxfVZ369ZX6JN2AgLsQTeybTQ75+2z0xPrrhnGmgh6g0qTYcAaq + M5ONOGiqeSBX/Smjh6ALy5XkhUiFGLsI7Yluf6XSICY/x7gd6RAfgSIQrUTNMoS1sqhT4aot + +xvOfQy8ySkfAK4NddXql6E/+ZqTmBY/Lr0YklFBy8jGT+UysfiIznPMIwbmgq5Li7BtDDtX + b8Uyi4edPpjtextezfXYn4NVIpPL5dPZS/FXh4HpzyH0pYCfrH4QDGA7i52AGmhpiOFjJMo6 + N33sdjZHOH/2Vyp+QZaQnsdUAi1N4M6c33tQbpIScn1SY+El8z5JDA4PBzkw8HpLCi1gGoa6 + V4kfbWqXXbGAJFkLkP/vc4+pY9axOlmCkJg7xCPwhI75y1cONgovhz+BEXOzolh5KZuGbGbj + xe0wva5DLBeIg7EQFf+99pOS7Syby3Xpm6ZbswEFV0cllK4jf/QMjtfInxobuMoI0GV0bE5l + WlRtPCK5FnbHwxi0wPNzB/5fwzJ77r6HgPrR0OkT0lWmbUyoOQARAQABtC1MYXVuY2hwYWQg + UFBBIGZvciBjbG91ZCBpbml0IGRldmVsb3BtZW50IHRlYW2JAjgEEwECACIFAlbZRUICGwMG + CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEAg9Bvvk0wTfHfcP/REK5N2s1JYc69qEa9ZN + o6oi+A7l6AYw+ZY88O5TJe7F9otv5VXCIKSUT0Vsepjgf0mtXAgf/sb2lsJn/jp7tzgov3YH + vSrkTkRydz8xcA87gwQKePuvTLxQpftF4flrBxgSueIn5O/tPrBOxLz7EVYBc78SKg9aj9L2 + yUp+YuNevlwfZCTYeBb9r3FHaab2HcgkwqYch66+nKYfwiLuQ9NzXXm0Wn0JcEQ6pWvJscbj + C9BdawWovfvMK5/YLfI6Btm7F4mIpQBdhSOUp/YXKmdvHpmwxMCN2QhqYK49SM7qE9aUDbJL + arppSEBtlCLWhRBZYLTUna+BkuQ1bHz4St++XTR49Qd7vDERALpApDjB2dxPfMiBzCMwQQyq + uy13exU8o2ETLg+dZSLfDTzrBNsBFmXlw8WW17nTISYdKeGKL+QdlUjpzdwUMMzHhAO8SmMH + zjeSlDSRMXBJFAFSbCl7EwmMKa3yVX0zInT91fNllZ3iatAmtVdqVH/BFQfTIMH2ET7A8WzJ + ZzVSuMRhqoKdr5AMcHuJGPUoVkVJHQA+NNvEiXSysF3faL7jmKapmUwrhpYYX2H8pf+VMu2e + cLflKTI28dl+ZQ4Pl/aVsxrti/pzhdYy05Sn5ddtySyIkvo8L1cU5MWpbvSlFPkTstBUDLBf + pb0uBy+g0oxJQg15 + =uy53 + -----END PGP PUBLIC KEY BLOCK----- +apt_pipelining: os +""" # noqa: E501 + +EXPECTED_REGEXES = [ + r"deb http://badarchive.ubuntu.com/ubuntu [a-z]+ main restricted", + r"deb-src http://badarchive.ubuntu.com/ubuntu [a-z]+ main restricted", + r"deb http://badarchive.ubuntu.com/ubuntu [a-z]+ universe restricted", + r"deb-src http://badarchive.ubuntu.com/ubuntu [a-z]+ universe restricted", + r"deb http://badsecurity.ubuntu.com/ubuntu [a-z]+-security multiverse", + r"deb-src http://badsecurity.ubuntu.com/ubuntu [a-z]+-security multiverse", +] + +TEST_KEYSERVER_KEY = """\ +pub rsa1024 2013-12-09 [SC] + 7260 0DB1 5B8E 4C8B 1964 B868 038A CC97 C660 A937 +uid [ unknown] Launchpad PPA for Ryan Harper +""" + +TEST_PPA_KEY = """\ +/etc/apt/trusted.gpg.d/simplestreams-dev_ubuntu_trunk.gpg +--------------------------------------------------------- +pub rsa4096 2016-05-04 
[SC] + 3552 C902 B4DD F7BD 3842 1821 015D 28D7 4416 14D8 +uid [ unknown] Launchpad PPA for simplestreams-dev +""" + +TEST_KEY = """\ +pub rsa4096 2016-03-04 [SC] + 1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF +uid [ unknown] Launchpad PPA for cloud init development team +""" + + +@pytest.mark.ci +@pytest.mark.ubuntu +@pytest.mark.user_data(USER_DATA) +class TestApt: + def test_sources_list(self, class_client: IntegrationInstance): + """Integration test for the apt module's `sources_list` functionality. + + This test specifies a ``sources_list`` and then checks that (a) the + expected number of sources.list entries is present, and (b) that each + expected line appears in the file. + + (This is ported from + `tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml`.) + """ + sources_list = class_client.read_from_file('/etc/apt/sources.list') + assert 6 == len(sources_list.rstrip().split('\n')) + + for expected_re in EXPECTED_REGEXES: + assert re.search(expected_re, sources_list) is not None + + def test_apt_conf(self, class_client: IntegrationInstance): + """Test the apt conf functionality. + + Ported from tests/cloud_tests/testcases/modules/apt_configure_conf.py + """ + apt_config = class_client.read_from_file( + '/etc/apt/apt.conf.d/94cloud-init-config' + ) + assert 'Assume-Yes "true";' in apt_config + assert 'Fix-Broken "true";' in apt_config + + def test_apt_proxy(self, class_client: IntegrationInstance): + """Test the apt proxy functionality. + + Ported from tests/cloud_tests/testcases/modules/apt_configure_proxy.py + """ + out = class_client.read_from_file( + '/etc/apt/apt.conf.d/90cloud-init-aptproxy') + assert 'Acquire::http::Proxy "http://proxy.internal:3128";' in out + assert 'Acquire::http::Proxy "http://squid.internal:3128";' in out + assert 'Acquire::ftp::Proxy "ftp://squid.internal:3128";' in out + assert 'Acquire::https::Proxy "https://squid.internal:3128";' in out + + def test_ppa_source(self, class_client: IntegrationInstance): + """Test the apt ppa functionality. + + Ported from + tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py + """ + release = ImageSpecification.from_os_image().release + ppa_path_contents = class_client.read_from_file( + '/etc/apt/sources.list.d/' + 'simplestreams-dev-ubuntu-trunk-{}.list'.format(release) + ) + + assert ( + 'http://ppa.launchpad.net/simplestreams-dev/trunk/ubuntu' + ) in ppa_path_contents + + keys = class_client.execute('apt-key finger') + assert TEST_PPA_KEY in keys + + def test_key(self, class_client: IntegrationInstance): + """Test the apt key functionality. + + Ported from + tests/cloud_tests/testcases/modules/apt_configure_sources_key.py + """ + test_archive_contents = class_client.read_from_file( + '/etc/apt/sources.list.d/test_key.list' + ) + + assert ( + 'http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu' + ) in test_archive_contents + + keys = class_client.execute('apt-key finger') + assert TEST_KEY in keys + + def test_keyserver(self, class_client: IntegrationInstance): + """Test the apt keyserver functionality. 
+ + Ported from + tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py + """ + test_keyserver_contents = class_client.read_from_file( + '/etc/apt/sources.list.d/test_keyserver.list' + ) + + assert ( + 'http://ppa.launchpad.net/cloud-init-raharper/curtin-dev/ubuntu' + ) in test_keyserver_contents + + keys = class_client.execute('apt-key finger') + assert TEST_KEYSERVER_KEY in keys + + def test_os_pipelining(self, class_client: IntegrationInstance): + """Test 'os' settings does not write apt config file. + + Ported from tests/cloud_tests/testcases/modules/apt_pipelining_os.py + """ + conf_exists = class_client.execute( + 'test -f /etc/apt/apt.conf.d/90cloud-init-pipelining' + ).ok + assert conf_exists is False + + +DEFAULT_DATA = """\ +#cloud-config +apt: + primary: + - arches: + - default + security: + - arches: + - default +""" + + +@pytest.mark.ubuntu +@pytest.mark.user_data(DEFAULT_DATA) +class TestDefaults: + def test_primary(self, class_client: IntegrationInstance): + """Test apt default primary sources. + + Ported from + tests/cloud_tests/testcases/modules/apt_configure_primary.py + """ + sources_list = class_client.read_from_file('/etc/apt/sources.list') + assert 'deb http://archive.ubuntu.com/ubuntu' in sources_list + + def test_security(self, class_client: IntegrationInstance): + """Test apt default security sources. + + Ported from + tests/cloud_tests/testcases/modules/apt_configure_security.py + """ + sources_list = class_client.read_from_file('/etc/apt/sources.list') + + # 3 lines from main, universe, and multiverse + assert 3 == sources_list.count('deb http://security.ubuntu.com/ubuntu') + assert 3 == sources_list.count( + '# deb-src http://security.ubuntu.com/ubuntu' + ) + + +DISABLED_DATA = """\ +#cloud-config +apt: + disable_suites: + - $RELEASE + - $RELEASE-updates + - $RELEASE-backports + - $RELEASE-security +apt_pipelining: false +""" + + +@pytest.mark.ubuntu +@pytest.mark.user_data(DISABLED_DATA) +class TestDisabled: + def test_disable_suites(self, class_client: IntegrationInstance): + """Test disabling of apt suites. + + Ported from + tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py + """ + sources_list = class_client.execute( + "cat /etc/apt/sources.list | grep -v '^#'" + ).strip() + assert '' == sources_list + + def test_disable_apt_pipelining(self, class_client: IntegrationInstance): + """Test disabling of apt pipelining. + + Ported from + tests/cloud_tests/testcases/modules/apt_pipelining_disable.py + """ + conf = class_client.read_from_file( + '/etc/apt/apt.conf.d/90cloud-init-pipelining' + ) + assert 'Acquire::http::Pipeline-Depth "0";' in conf diff --git a/tests/integration_tests/modules/test_apt_configure_sources_list.py b/tests/integration_tests/modules/test_apt_configure_sources_list.py deleted file mode 100644 index 28cbe19f..00000000 --- a/tests/integration_tests/modules/test_apt_configure_sources_list.py +++ /dev/null @@ -1,52 +0,0 @@ -"""Integration test for the apt module's ``sources_list`` functionality. - -This test specifies a ``sources_list`` and then checks that (a) the expected -number of sources.list entries is present, and (b) that each expected line -appears in the file. 
- -(This is ported from -``tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml``.)""" -import re - -import pytest - - -USER_DATA = """\ -#cloud-config -apt: - primary: - - arches: [default] - uri: http://archive.ubuntu.com/ubuntu - security: - - arches: [default] - uri: http://security.ubuntu.com/ubuntu - sources_list: | - deb $MIRROR $RELEASE main restricted - deb-src $MIRROR $RELEASE main restricted - deb $PRIMARY $RELEASE universe restricted - deb-src $PRIMARY $RELEASE universe restricted - deb $SECURITY $RELEASE-security multiverse - deb-src $SECURITY $RELEASE-security multiverse -""" - -EXPECTED_REGEXES = [ - r"deb http://archive.ubuntu.com/ubuntu [a-z].* main restricted", - r"deb-src http://archive.ubuntu.com/ubuntu [a-z].* main restricted", - r"deb http://archive.ubuntu.com/ubuntu [a-z].* universe restricted", - r"deb-src http://archive.ubuntu.com/ubuntu [a-z].* universe restricted", - r"deb http://security.ubuntu.com/ubuntu [a-z].*security multiverse", - r"deb-src http://security.ubuntu.com/ubuntu [a-z].*security multiverse", -] - - -@pytest.mark.ci -@pytest.mark.ubuntu -class TestAptConfigureSourcesList: - - @pytest.mark.user_data(USER_DATA) - def test_sources_list(self, client): - sources_list = client.read_from_file("/etc/apt/sources.list") - assert 6 == len(sources_list.rstrip().split('\n')) - - for expected_re in EXPECTED_REGEXES: - assert re.search(expected_re, sources_list) is not None -- cgit v1.2.3 From 5a9008e53d7cf987b5cfb78964d2bd987d180993 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Thu, 18 Feb 2021 11:17:26 -0500 Subject: integration_tests: use unique MAC addresses for tests (#813) Using the same MAC address results in strange test behaviour if more than one such instance is up: traffic gets routed to an arbitrary interface with the given MAC address. This can happen if running tests in parallel, or on a system which retains test instances from previous runs. The introduction of tests/integration_tests/__init__.py means that pylint now checks the integration tests: this commit also addresses those failures. --- tests/integration_tests/__init__.py | 12 ++++++++++++ tests/integration_tests/bugs/test_gh626.py | 3 ++- tests/integration_tests/bugs/test_gh668.py | 3 ++- tests/integration_tests/bugs/test_lp1898997.py | 3 ++- tests/integration_tests/clouds.py | 12 +++++++----- tests/integration_tests/integration_settings.py | 1 + 6 files changed, 26 insertions(+), 8 deletions(-) create mode 100644 tests/integration_tests/__init__.py (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/__init__.py b/tests/integration_tests/__init__.py new file mode 100644 index 00000000..e1d4cd28 --- /dev/null +++ b/tests/integration_tests/__init__.py @@ -0,0 +1,12 @@ +import random + + +def random_mac_address() -> str: + """Generate a random MAC address. + + The MAC address will have a 1 in its least significant bit, indicating it + to be a locally administered address. + """ + return "02:00:00:%02x:%02x:%02x" % (random.randint(0, 255), + random.randint(0, 255), + random.randint(0, 255)) diff --git a/tests/integration_tests/bugs/test_gh626.py b/tests/integration_tests/bugs/test_gh626.py index 7b0df6eb..dba01b34 100644 --- a/tests/integration_tests/bugs/test_gh626.py +++ b/tests/integration_tests/bugs/test_gh626.py @@ -7,11 +7,12 @@ in the /etc/network/interfaces or netplan config. 
import pytest import yaml +from tests.integration_tests import random_mac_address from tests.integration_tests.clouds import ImageSpecification from tests.integration_tests.instances import IntegrationInstance -MAC_ADDRESS = "de:ad:be:ef:12:34" +MAC_ADDRESS = random_mac_address() NETWORK_CONFIG = """\ version: 2 ethernets: diff --git a/tests/integration_tests/bugs/test_gh668.py b/tests/integration_tests/bugs/test_gh668.py index 66ee302e..72fe0afc 100644 --- a/tests/integration_tests/bugs/test_gh668.py +++ b/tests/integration_tests/bugs/test_gh668.py @@ -7,12 +7,13 @@ for all network configuration outputs. import pytest +from tests.integration_tests import random_mac_address from tests.integration_tests.instances import IntegrationInstance DESTINATION_IP = "172.16.0.10" GATEWAY_IP = "10.0.0.100" -MAC_ADDRESS = "de:ad:be:ef:12:34" +MAC_ADDRESS = random_mac_address() NETWORK_CONFIG = """\ version: 2 diff --git a/tests/integration_tests/bugs/test_lp1898997.py b/tests/integration_tests/bugs/test_lp1898997.py index 54c88d82..90dc17da 100644 --- a/tests/integration_tests/bugs/test_lp1898997.py +++ b/tests/integration_tests/bugs/test_lp1898997.py @@ -10,8 +10,9 @@ network configuration, and confirms that the bridge can be used to ping the default gateway. """ import pytest +from tests.integration_tests import random_mac_address -MAC_ADDRESS = "de:ad:be:ef:12:34" +MAC_ADDRESS = random_mac_address() NETWORK_CONFIG = """\ diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index 9eebb10a..9527a413 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -153,9 +153,8 @@ class IntegrationCloud(ABC): } kwargs.update(launch_kwargs) log.info( - "Launching instance with launch_kwargs:\n{}".format( - "\n".join("{}={}".format(*item) for item in kwargs.items()) - ) + "Launching instance with launch_kwargs:\n%s", + "\n".join("{}={}".format(*item) for item in kwargs.items()) ) pycloudlib_instance = self._perform_launch(kwargs) @@ -245,6 +244,7 @@ class _LxdIntegrationCloud(IntegrationCloud): integration_instance_cls = IntegrationLxdInstance def _get_cloud_instance(self): + # pylint: disable=no-member return self.pycloudlib_instance_cls(tag=self.instance_tag) @staticmethod @@ -260,8 +260,10 @@ class _LxdIntegrationCloud(IntegrationCloud): 'container_path': target_path, } log.info( - 'Mounting source {source_path} directly onto LXD container/vm ' - 'named {name} at {container_path}'.format(**format_variables)) + 'Mounting source %(source_path)s directly onto LXD container/vm ' + 'named %(name)s at %(container_path)s', + format_variables + ) command = ( 'lxc config device add {name} host-cloud-init disk ' 'source={source_path} ' diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py index 22b4fdda..54d09d9b 100644 --- a/tests/integration_tests/integration_settings.py +++ b/tests/integration_tests/integration_settings.py @@ -92,6 +92,7 @@ KEYPAIR_NAME = None ################################################################## # Bring in any user-file defined settings try: + # pylint: disable=wildcard-import,unused-wildcard-import from tests.integration_tests.user_settings import * # noqa except ImportError: pass -- cgit v1.2.3 From 28d2d4b86ebde5261fbe3a3ffb28292b2bbdbd76 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Thu, 18 Feb 2021 15:36:31 -0500 Subject: integration_tests: add UPGRADE CloudInitSource (#812) This allows out-of-date images to be brought up-to-date with the archive, so that 
tests written against the latest cloud-init release will pass.
--- tests/integration_tests/conftest.py | 2 ++
 tests/integration_tests/instances.py | 8 ++++++++
 tests/integration_tests/integration_settings.py | 2 ++
 3 files changed, 12 insertions(+)
(limited to 'tests/integration_tests')

diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py
index 99dd8d9e..3933d647 100644
--- a/tests/integration_tests/conftest.py
+++ b/tests/integration_tests/conftest.py
@@ -124,6 +124,8 @@ def get_validated_source(
         return CloudInitSource.PPA
     elif os.path.isfile(str(source)):
         return CloudInitSource.DEB_PACKAGE
+    elif source == "UPGRADE":
+        return CloudInitSource.UPGRADE
     raise ValueError(
         'Invalid value for CLOUD_INIT_SOURCE setting: {}'.format(source))
 
diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py
index 0d1e1aef..0d9852c3 100644
--- a/tests/integration_tests/instances.py
+++ b/tests/integration_tests/instances.py
@@ -39,6 +39,7 @@ class CloudInitSource(Enum):
     PROPOSED = 3
     PPA = 4
     DEB_PACKAGE = 5
+    UPGRADE = 6
 
     def installs_new_version(self):
         if self.name in [self.NONE.name, self.IN_PLACE.name]:
@@ -123,6 +124,8 @@ class IntegrationInstance:
             self.install_ppa()
         elif source == CloudInitSource.PROPOSED:
             self.install_proposed_image()
+        elif source == CloudInitSource.UPGRADE:
+            self.upgrade_cloud_init()
         else:
             raise Exception(
                 "Specified to install {} which isn't supported here".format(
@@ -166,6 +169,11 @@ class IntegrationInstance:
         remote_script = 'dpkg -i {path}'.format(path=remote_path)
         self.execute(remote_script)
 
+    def upgrade_cloud_init(self):
+        log.info('Upgrading cloud-init to latest version in archive')
+        self.execute("apt-get update -q")
+        self.execute("apt-get install -qy cloud-init")
+
     def __enter__(self):
         return self
 
diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py
index 54d09d9b..157d34ad 100644
--- a/tests/integration_tests/integration_settings.py
+++ b/tests/integration_tests/integration_settings.py
@@ -59,6 +59,8 @@ EXISTING_INSTANCE_ID = None
 # code.
 # PROPOSED
 # Install from the Ubuntu proposed repo
+# UPGRADE
+# Upgrade cloud-init to the version in the Ubuntu archive
 # , e.g., ppa:cloud-init-dev/proposed
 # Install from a PPA. It MUST start with 'ppa:'
 #
-- 
cgit v1.2.3

From 66e2d42dd1b722dc8e59f4e5990cea54f81ccd2a Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Fri, 19 Feb 2021 15:37:57 -0700 Subject: azure: case-insensitive UUID to avoid new IID during kernel upgrade (#798)

Kernels newer than 4.15 present /sys/class/dmi/id/product_uuid as a lowercase
value. Previously, the UUID was uppercase. The Azure datasource reads the
product_uuid directly as the platform's instance-id. This presents a problem
if a kernel is either upgraded or downgraded across the 4.15 kernel version
boundary, because the case of the UUID will change, resulting in cloud-init
seeing a "new" instance id and re-running all modules.

Re-running cc_ssh in cloud-init deletes and regenerates SSH host keys on a
system, which can cause concern on long-running instances that something
nefarious has happened.
Also add:
 - An integration test for this for Azure Bionic Ubuntu FIPS upgrading from
   a FIPS kernel with uppercase UUID to a lowercase UUID in linux-azure
 - A new pytest.mark.sru_next to collect all integration tests related to
   our next SRU

LP: #1835584
--- cloudinit/sources/DataSourceAzure.py | 12 ++-
 tests/integration_tests/bugs/test_lp1835584.py | 104 +++++++++++++++++++++++++
 tests/integration_tests/instances.py | 6 +-
 tests/unittests/test_datasource/test_azure.py | 39 ++++++++--
 tox.ini | 1 +
 5 files changed, 153 insertions(+), 9 deletions(-)
 create mode 100644 tests/integration_tests/bugs/test_lp1835584.py
(limited to 'tests/integration_tests')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 090dd66b..748a9716 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -683,10 +683,18 @@ class DataSourceAzure(sources.DataSource):
     def _iid(self, previous=None):
         prev_iid_path = os.path.join(
             self.paths.get_cpath('data'), 'instance-id')
-        iid = dmi.read_dmi_data('system-uuid')
+        # Older kernels than 4.15 will have UPPERCASE product_uuid.
+        # We don't want Azure to react to an UPPER/lower difference as a new
+        # instance id as it rewrites SSH host keys.
+        # LP: #1835584
+        iid = dmi.read_dmi_data('system-uuid').lower()
         if os.path.exists(prev_iid_path):
             previous = util.load_file(prev_iid_path).strip()
-            if is_byte_swapped(previous, iid):
+            if previous.lower() == iid:
+                # If uppercase/lowercase equivalent, return the previous value
+                # to avoid new instance id.
+                return previous
+            if is_byte_swapped(previous.lower(), iid):
                 return previous
         return iid
 
diff --git a/tests/integration_tests/bugs/test_lp1835584.py b/tests/integration_tests/bugs/test_lp1835584.py
new file mode 100644
index 00000000..660d2a2a
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1835584.py
@@ -0,0 +1,104 @@
+""" Integration test for LP #1835584
+
+Upstream linux kernels prior to 4.15 provide DMI product_uuid in uppercase.
+More recent kernels switched to lowercase for DMI product_uuid. Azure
+datasource uses this product_uuid as the instance-id for cloud-init.
+
+With the linux-azure-fips kernel installed in PRO FIPS images, the product
+UUID is uppercase, whereas the linux-azure cloud-optimized kernel reports
+the UUID as lowercase.
+
+In cases where product_uuid changes case, ensure cloud-init doesn't
+recreate ssh hostkeys across reboot (due to detecting an instance_id change).
+
+This currently only affects linux-azure-fips -> linux-azure on Bionic.
+This test won't run on Xenial because both linux-azure-fips and linux-azure
+report uppercase product_uuids.
+
+The test will launch a specific Bionic Ubuntu PRO FIPS image which has a
+linux-azure-fips kernel known to report product_uuid as uppercase. Then
+upgrade and reboot into the linux-azure kernel, which is known to report
+product_uuid as lowercase.
+ +Across the reboot, assert that we didn't re-run config_ssh by virtue of +seeing only one semaphore creation log entry of type: + + Writing to /var/lib/cloud/instances//sem/config_ssh - + +https://bugs.launchpad.net/cloud-init/+bug/1835584 +""" +import re + +import pytest + +from tests.integration_tests.instances import IntegrationAzureInstance +from tests.integration_tests.clouds import ( + ImageSpecification, IntegrationCloud +) +from tests.integration_tests.conftest import get_validated_source + + +IMG_AZURE_UBUNTU_PRO_FIPS_BIONIC = ( + "Canonical:0001-com-ubuntu-pro-bionic-fips:pro-fips-18_04:18.04.202010201" +) + + +def _check_iid_insensitive_across_kernel_upgrade( + instance: IntegrationAzureInstance +): + uuid = instance.read_from_file("/sys/class/dmi/id/product_uuid") + assert uuid.isupper(), ( + "Expected uppercase UUID on Ubuntu FIPS image {}".format( + uuid + ) + ) + orig_kernel = instance.execute("uname -r").strip() + assert "azure-fips" in orig_kernel + result = instance.execute("apt-get update") + # Install a 5.4+ kernel which provides lowercase product_uuid + result = instance.execute("apt-get install linux-azure --assume-yes") + if not result.ok: + pytest.fail("Unable to install linux-azure kernel: {}".format(result)) + instance.restart() + new_kernel = instance.execute("uname -r").strip() + assert orig_kernel != new_kernel + assert "azure-fips" not in new_kernel + assert "azure" in new_kernel + new_uuid = instance.read_from_file("/sys/class/dmi/id/product_uuid") + assert ( + uuid.lower() == new_uuid + ), "Expected UUID on linux-azure to be lowercase of FIPS: {}".format(uuid) + log = instance.read_from_file("/var/log/cloud-init.log") + RE_CONFIG_SSH_SEMAPHORE = r"Writing.*sem/config_ssh " + ssh_runs = len(re.findall(RE_CONFIG_SSH_SEMAPHORE, log)) + assert 1 == ssh_runs, "config_ssh ran too many times {}".format(ssh_runs) + + +@pytest.mark.azure +@pytest.mark.sru_next +def test_azure_kernel_upgrade_case_insensitive_uuid( + session_cloud: IntegrationCloud +): + cfg_image_spec = ImageSpecification.from_os_image() + if (cfg_image_spec.os, cfg_image_spec.release) != ("ubuntu", "bionic"): + pytest.skip( + "Test only supports ubuntu:bionic not {0.os}:{0.release}".format( + cfg_image_spec + ) + ) + source = get_validated_source(session_cloud) + if not source.installs_new_version(): + pytest.skip( + "Provide CLOUD_INIT_SOURCE to install expected working cloud-init" + ) + image_id = IMG_AZURE_UBUNTU_PRO_FIPS_BIONIC + with session_cloud.launch( + launch_kwargs={"image_id": image_id} + ) as instance: + # We can't use setup_image fixture here because we want to avoid + # taking a snapshot or cleaning the booted machine after cloud-init + # upgrade. 
+ instance.install_new_cloud_init( + source, take_snapshot=False, clean=False + ) + _check_iid_insensitive_across_kernel_upgrade(instance) diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py index 0d9852c3..055ec758 100644 --- a/tests/integration_tests/instances.py +++ b/tests/integration_tests/instances.py @@ -116,7 +116,8 @@ class IntegrationInstance: def install_new_cloud_init( self, source: CloudInitSource, - take_snapshot=True + take_snapshot=True, + clean=True, ): if source == CloudInitSource.DEB_PACKAGE: self.install_deb() @@ -133,7 +134,8 @@ class IntegrationInstance: ) version = self.execute('cloud-init -v').split()[-1] log.info('Installed cloud-init version: %s', version) - self.instance.clean() + if clean: + self.instance.clean() if take_snapshot: snapshot_id = self.snapshot() self.cloud.snapshot_id = snapshot_id diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index dc615309..152a2e1a 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -201,6 +201,7 @@ IMDS_NETWORK_METADATA = { } MOCKPATH = 'cloudinit.sources.DataSourceAzure.' +EXAMPLE_UUID = 'd0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8' class TestParseNetworkConfig(CiTestCase): @@ -630,7 +631,7 @@ scbus-1 on xpt0 bus 0 return dsaz def _get_ds(self, data, agent_command=None, distro='ubuntu', - apply_network=None): + apply_network=None, instance_id=None): def dsdevs(): return data.get('dsdevs', []) @@ -659,7 +660,10 @@ scbus-1 on xpt0 bus 0 self.m_ephemeral_dhcpv4 = mock.MagicMock() self.m_ephemeral_dhcpv4_with_reporting = mock.MagicMock() - self.instance_id = 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' + if instance_id: + self.instance_id = instance_id + else: + self.instance_id = EXAMPLE_UUID def _dmi_mocks(key): if key == 'system-uuid': @@ -910,7 +914,7 @@ scbus-1 on xpt0 bus 0 'azure_data': { 'configurationsettype': 'LinuxProvisioningConfiguration'}, 'imds': NETWORK_METADATA, - 'instance-id': 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', + 'instance-id': EXAMPLE_UUID, 'local-hostname': u'myhost', 'random_seed': 'wild'} @@ -1613,6 +1617,32 @@ scbus-1 on xpt0 bus 0 self.assertTrue(ret) self.assertEqual('value', dsrc.metadata['test']) + def test_instance_id_case_insensitive(self): + """Return the previous iid when current is a case-insensitive match.""" + lower_iid = EXAMPLE_UUID.lower() + upper_iid = EXAMPLE_UUID.upper() + # lowercase current UUID + ds = self._get_ds( + {'ovfcontent': construct_valid_ovf_env()}, instance_id=lower_iid + ) + # UPPERCASE previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + upper_iid) + ds.get_data() + self.assertEqual(upper_iid, ds.metadata['instance-id']) + + # UPPERCASE current UUID + ds = self._get_ds( + {'ovfcontent': construct_valid_ovf_env()}, instance_id=upper_iid + ) + # lowercase previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + lower_iid) + ds.get_data() + self.assertEqual(lower_iid, ds.metadata['instance-id']) + def test_instance_id_endianness(self): """Return the previous iid when dmi uuid is the byteswapped iid.""" ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) @@ -1628,8 +1658,7 @@ scbus-1 on xpt0 bus 0 os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), '644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') ds.get_data() - self.assertEqual( - 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', ds.metadata['instance-id']) + self.assertEqual(self.instance_id, 
ds.metadata['instance-id'])
 
     def test_instance_id_from_dmidecode_used(self):
         ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
diff --git a/tox.ini b/tox.ini
index b359bb98..1887e582 100644
--- a/tox.ini
+++ b/tox.ini
@@ -184,5 +184,6 @@ markers =
     instance_name: the name to be used for the test instance
     sru_2020_11: test is part of the 2020/11 SRU verification
     sru_2021_01: test is part of the 2021/01 SRU verification
+    sru_next: test is part of the next SRU verification
     ubuntu: this test should run on Ubuntu
     unstable: skip this test because it is flakey
-- 
cgit v1.2.3

From 38aee6eebb160d46287c63a979bb897b15bb2f96 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 22 Feb 2021 12:03:53 -0500 Subject: integration_tests: introduce lxd_use_exec mark (#802)

pycloudlib has modified the way LXD executes tests
(https://github.com/canonical/pycloudlib/pull/114): it will always use SSH to
access them by default, instead of using `lxc exec`.

This behaviour is transparent for the majority of cloud-init's integration
tests, but some currently depend on using `lxc exec` to access instances with
(intentionally) broken networking: obviously these are not accessible via SSH.

pycloudlib retains support for switching an instance to use `lxc exec`. This
commit introduces the `lxd_use_exec` mark, which tests can use to indicate to
the integration testing framework that they should be so switched, and
applies it to all applicable tests.
--- integration-requirements.txt | 2 +-
 tests/integration_tests/bugs/test_gh668.py | 1 +
 tests/integration_tests/bugs/test_lp1898997.py | 1 +
 tests/integration_tests/conftest.py | 11 +++++++++++
 tox.ini | 1 +
 5 files changed, 15 insertions(+), 1 deletion(-)
(limited to 'tests/integration_tests')

diff --git a/integration-requirements.txt b/integration-requirements.txt
index c959001e..c64b3b26 100644
--- a/integration-requirements.txt
+++ b/integration-requirements.txt
@@ -1,5 +1,5 @@
 # PyPI requirements for cloud-init integration testing
 # https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html
 #
-pycloudlib @ git+https://github.com/canonical/pycloudlib.git@878981e3c7caaf583a8c7c5494dba9d9447acee8
+pycloudlib @ git+https://github.com/canonical/pycloudlib.git@3a6c668fed769f00d83d1e6bea7d68953787cc38
 pytest
diff --git a/tests/integration_tests/bugs/test_gh668.py b/tests/integration_tests/bugs/test_gh668.py
index 72fe0afc..ce57052e 100644
--- a/tests/integration_tests/bugs/test_gh668.py
+++ b/tests/integration_tests/bugs/test_gh668.py
@@ -37,6 +37,7 @@ EXPECTED_ROUTE = "{} via {}".format(DESTINATION_IP, GATEWAY_IP)
     "user.network-config": NETWORK_CONFIG,
     "volatile.eth0.hwaddr": MAC_ADDRESS,
 })
+@pytest.mark.lxd_use_exec
 def test_static_route_to_host(client: IntegrationInstance):
     route = client.execute("ip route | grep {}".format(DESTINATION_IP))
     assert route.startswith(EXPECTED_ROUTE)
diff --git a/tests/integration_tests/bugs/test_lp1898997.py b/tests/integration_tests/bugs/test_lp1898997.py
index 90dc17da..bde93d06 100644
--- a/tests/integration_tests/bugs/test_lp1898997.py
+++ b/tests/integration_tests/bugs/test_lp1898997.py
@@ -39,6 +39,7 @@ version: 2
     "volatile.eth0.hwaddr": MAC_ADDRESS,
 })
 @pytest.mark.lxd_vm
+@pytest.mark.lxd_use_exec
 @pytest.mark.not_bionic
 @pytest.mark.not_xenial
 @pytest.mark.sru_2020_11
diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py
index 3933d647..61ad8a71 100644
--- a/tests/integration_tests/conftest.py
+++ b/tests/integration_tests/conftest.py
@@ -200,6 +200,9 @@ def _client(request,
fixture_utils, session_cloud: IntegrationCloud): user_data = getter('user_data') name = getter('instance_name') lxd_config_dict = getter('lxd_config_dict') + lxd_use_exec = fixture_utils.closest_marker_args_or( + request, 'lxd_use_exec', None + ) launch_kwargs = {} if name is not None: @@ -208,10 +211,18 @@ def _client(request, fixture_utils, session_cloud: IntegrationCloud): if not isinstance(session_cloud, _LxdIntegrationCloud): pytest.skip("lxd_config_dict requires LXD") launch_kwargs["config_dict"] = lxd_config_dict + if lxd_use_exec is not None: + if not isinstance(session_cloud, _LxdIntegrationCloud): + pytest.skip("lxd_use_exec requires LXD") + launch_kwargs["execute_via_ssh"] = False with session_cloud.launch( user_data=user_data, launch_kwargs=launch_kwargs ) as instance: + if lxd_use_exec is not None: + # Existing instances are not affected by the launch kwargs, so + # ensure it here; we still need the launch kwarg so waiting works + instance.execute_via_ssh = False previous_failures = request.session.testsfailed yield instance test_failed = request.session.testsfailed - previous_failures > 0 diff --git a/tox.ini b/tox.ini index 1887e582..0e2eae46 100644 --- a/tox.ini +++ b/tox.ini @@ -176,6 +176,7 @@ markers = oci: test will only run on OCI platform lxd_config_dict: set the config_dict passed on LXD instance creation lxd_container: test will only run in LXD container + lxd_use_exec: `execute` will use `lxc exec` instead of SSH lxd_vm: test will only run in LXD VM not_xenial: test cannot run on the xenial release not_bionic: test cannot run on the bionic release -- cgit v1.2.3 From e384a5436560c9494118f0999c314982d4912d27 Mon Sep 17 00:00:00 2001 From: Michael Hudson-Doyle Date: Tue, 23 Feb 2021 08:20:46 +1300 Subject: cc_keys_to_console: add option to disable key emission (#811) Specifically: ssh: emit_keys_to_console: false We also port the cc_keys_to_console cloud tests to the new integration testing framework, and add a test for this new option. LP: #1915460 --- cloudinit/config/cc_keys_to_console.py | 5 +++ cloudinit/config/tests/test_keys_to_console.py | 34 +++++++++++++++ doc/examples/cloud-config-ssh-keys.txt | 10 +++++ .../modules/test_keys_to_console.py | 48 ++++++++++++++++++++++ 4 files changed, 97 insertions(+) create mode 100644 cloudinit/config/tests/test_keys_to_console.py create mode 100644 tests/integration_tests/modules/test_keys_to_console.py (limited to 'tests/integration_tests') diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py index 0f2be52b..646d1f67 100644 --- a/cloudinit/config/cc_keys_to_console.py +++ b/cloudinit/config/cc_keys_to_console.py @@ -51,6 +51,11 @@ def _get_helper_tool_path(distro): def handle(name, cfg, cloud, log, _args): + if util.is_false(cfg.get("ssh", {}).get("emit_keys_to_console", True)): + log.debug(("Skipping module named %s, " + "logging of SSH host keys disabled"), name) + return + helper_path = _get_helper_tool_path(cloud.distro) if not os.path.exists(helper_path): log.warning(("Unable to activate module %s," diff --git a/cloudinit/config/tests/test_keys_to_console.py b/cloudinit/config/tests/test_keys_to_console.py new file mode 100644 index 00000000..4083fc54 --- /dev/null +++ b/cloudinit/config/tests/test_keys_to_console.py @@ -0,0 +1,34 @@ +"""Tests for cc_keys_to_console.""" +from unittest import mock + +import pytest + +from cloudinit.config import cc_keys_to_console + + +class TestHandle: + """Tests for cloudinit.config.cc_keys_to_console.handle. 
+ + TODO: These tests only cover the emit_keys_to_console config option, they + should be expanded to cover the full functionality. + """ + + @mock.patch("cloudinit.config.cc_keys_to_console.util.multi_log") + @mock.patch("cloudinit.config.cc_keys_to_console.os.path.exists") + @mock.patch("cloudinit.config.cc_keys_to_console.subp.subp") + @pytest.mark.parametrize("cfg,subp_called", [ + ({}, True), # Default to emitting keys + ({"ssh": {}}, True), # Default even if we have the parent key + ({"ssh": {"emit_keys_to_console": True}}, True), # Explicitly enabled + ({"ssh": {"emit_keys_to_console": False}}, False), # Disabled + ]) + def test_emit_keys_to_console_config( + self, m_subp, m_path_exists, _m_multi_log, cfg, subp_called + ): + # Ensure we always find the helper + m_path_exists.return_value = True + m_subp.return_value = ("", "") + + cc_keys_to_console.handle("name", cfg, mock.Mock(), mock.Mock(), ()) + + assert subp_called == (m_subp.call_count == 1) diff --git a/doc/examples/cloud-config-ssh-keys.txt b/doc/examples/cloud-config-ssh-keys.txt index aad8b683..bfe5ab44 100644 --- a/doc/examples/cloud-config-ssh-keys.txt +++ b/doc/examples/cloud-config-ssh-keys.txt @@ -42,3 +42,13 @@ ssh_keys: -----END DSA PRIVATE KEY----- dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAM/Ycu7ulMTEvz1RLIzTbrhELJZf8Iwua6TFfQl1ubb1rHwUElOkus7xMhdVjms8AmbV1Meem7ImE69T0bszy09QAG3NImHgZVIeXBoJ/JzByku/1NcOBYilKP7oSIcLJpGUHX8IGn1GJoH7XRBwVub6Vqm4RP78C7q9IOn0hG2VAAAAFQCDEfCrnL1GGzhCPsr/uS1vbt8/wQAAAIEAjSrok/4m8mbBkVp4IwxXFdRuqJKSj8/WWxos00Ednn/ww5QibysHYULrOKJ1+54mmpMyp5CZICUQELCfCt5ScZ9GsqgmnI80Q1h3Xkwbo3kn7PzWwRwcV6muvJn4PcZ71WM+rdN/c2EorAINDTbjRo97NueM94WbiYdtjHFxn0YAAACAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI38UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC/QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost + +# By default, the fingerprints of the authorized keys for the users +# cloud-init adds are printed to the console. Setting +# no_ssh_fingerprints to true suppresses this output. +no_ssh_fingerprints: false + +# By default, (most) ssh host keys are printed to the console. Setting +# emit_keys_to_console to false suppresses this output. +ssh: + emit_keys_to_console: false diff --git a/tests/integration_tests/modules/test_keys_to_console.py b/tests/integration_tests/modules/test_keys_to_console.py new file mode 100644 index 00000000..298c9e6d --- /dev/null +++ b/tests/integration_tests/modules/test_keys_to_console.py @@ -0,0 +1,48 @@ +"""Integration tests for the cc_keys_to_console module. 
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/keys_to_console.yaml``.)"""
+import pytest
+
+BLACKLIST_USER_DATA = """\
+#cloud-config
+ssh_fp_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
+ssh_key_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
+"""
+
+DISABLED_USER_DATA = """\
+#cloud-config
+ssh:
+  emit_keys_to_console: false
+"""
+
+
+@pytest.mark.user_data(BLACKLIST_USER_DATA)
+class TestKeysToConsoleBlacklist:
+    """Test that the blacklist options work as expected."""
+    @pytest.mark.parametrize("key_type", ["DSA", "ECDSA"])
+    def test_excluded_keys(self, class_client, key_type):
+        syslog = class_client.read_from_file("/var/log/syslog")
+        assert "({})".format(key_type) not in syslog
+
+    @pytest.mark.parametrize("key_type", ["ED25519", "RSA"])
+    def test_included_keys(self, class_client, key_type):
+        syslog = class_client.read_from_file("/var/log/syslog")
+        assert "({})".format(key_type) in syslog
+
+
+@pytest.mark.user_data(DISABLED_USER_DATA)
+class TestKeysToConsoleDisabled:
+    """Test that output can be fully disabled."""
+    @pytest.mark.parametrize("key_type", ["DSA", "ECDSA", "ED25519", "RSA"])
+    def test_keys_excluded(self, class_client, key_type):
+        syslog = class_client.read_from_file("/var/log/syslog")
+        assert "({})".format(key_type) not in syslog
+
+    def test_header_excluded(self, class_client):
+        syslog = class_client.read_from_file("/var/log/syslog")
+        assert "BEGIN SSH HOST KEY FINGERPRINTS" not in syslog
+
+    def test_footer_excluded(self, class_client):
+        syslog = class_client.read_from_file("/var/log/syslog")
+        assert "END SSH HOST KEY FINGERPRINTS" not in syslog
--
cgit v1.2.3


From a64b73808857fa7b4f262422ce2c87eedbce10d5 Mon Sep 17 00:00:00 2001
From: Johnson Shi
Date: Mon, 22 Feb 2021 13:50:59 -0800
Subject: Azure: Support for VMs without ephemeral resource disks. (#800)

Changes:
* Only merge in default Azure cloud ephemeral disk configs during
  DataSourceAzure._get_data() if the ephemeral disk exists.
* DataSourceAzure.address_ephemeral_resize() (which is invoked in
  DataSourceAzure.activate()) should only set up the ephemeral disk if the
  disk exists.

Azure VMs may or may not come with ephemeral resource disks depending on
the VM SKU. For VM SKUs that come with ephemeral resource disks, the Azure
platform guarantees that the ephemeral resource disk is attached to the VM
before the VM is booted. For VM SKUs that do not come with ephemeral
resource disks, cloud-init currently attempts to wait and set up a
non-existent ephemeral resource disk, which wastes boot time. It also
causes disk setup modules to fail (due to non-existent references to the
ephemeral resource disk).

udevadm settle is invoked by cloud-init very early in boot, before
DataSourceAzure's _get_data() and activate() methods. Within
DataSourceAzure's _get_data() and activate() methods, the ephemeral
resource disk path should exist if the VM SKU comes with an ephemeral
resource disk, and should not exist if the VM SKU does not.
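Illustratively, the gating added here reduces to an existence check ahead
of the config merge. A minimal sketch (not the datasource code itself: it
borrows the names from the patch below, but substitutes a plain dict merge
and placeholder defaults for util.mergemanydict and the real config):

    import os

    RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource'
    BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG = {'disk_setup': {}, 'fs_setup': []}

    def merge_cfg(crawled_cfg: dict) -> dict:
        if os.path.exists(RESOURCE_DISK_PATH):
            # SKUs with a resource disk have it attached before boot, so
            # it is safe to lay the ephemeral disk defaults underneath.
            return {**BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG, **crawled_cfg}
        # No resource disk: skip the defaults so the disk_setup/fs_setup
        # modules never reference a non-existent device.
        return crawled_cfg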
LP: #1901011 --- cloudinit/sources/DataSourceAzure.py | 53 +++++++++++++---------- integration-requirements.txt | 2 +- tests/integration_tests/bugs/test_lp1901011.py | 58 ++++++++++++++++++++++++++ tests/unittests/test_datasource/test_azure.py | 52 +++++++++++++++++------ 4 files changed, 130 insertions(+), 35 deletions(-) create mode 100644 tests/integration_tests/bugs/test_lp1901011.py (limited to 'tests/integration_tests') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 748a9716..cee630f7 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -270,7 +270,7 @@ BUILTIN_DS_CONFIG = { } # RELEASE_BLOCKER: Xenial and earlier apply_network_config default is False -BUILTIN_CLOUD_CONFIG = { +BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG = { 'disk_setup': { 'ephemeral0': {'table_type': 'gpt', 'layout': [100], @@ -618,8 +618,26 @@ class DataSourceAzure(sources.DataSource): maybe_remove_ubuntu_network_config_scripts() # Process crawled data and augment with various config defaults - self.cfg = util.mergemanydict( - [crawled_data['cfg'], BUILTIN_CLOUD_CONFIG]) + + # Only merge in default cloud config related to the ephemeral disk + # if the ephemeral disk exists + devpath = RESOURCE_DISK_PATH + if os.path.exists(devpath): + report_diagnostic_event( + "Ephemeral resource disk '%s' exists. " + "Merging default Azure cloud ephemeral disk configs." + % devpath, + logger_func=LOG.debug) + self.cfg = util.mergemanydict( + [crawled_data['cfg'], BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG]) + else: + report_diagnostic_event( + "Ephemeral resource disk '%s' does not exist. " + "Not merging default Azure cloud ephemeral disk configs." + % devpath, + logger_func=LOG.debug) + self.cfg = crawled_data['cfg'] + self._metadata_imds = crawled_data['metadata']['imds'] self.metadata = util.mergemanydict( [crawled_data['metadata'], DEFAULT_METADATA]) @@ -1468,26 +1486,17 @@ def can_dev_be_reformatted(devpath, preserve_ntfs): @azure_ds_telemetry_reporter -def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, +def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, is_new_instance=False, preserve_ntfs=False): - # wait for ephemeral disk to come up - naplen = .2 - with events.ReportEventStack( - name="wait-for-ephemeral-disk", - description="wait for ephemeral disk", - parent=azure_ds_reporter - ): - missing = util.wait_for_files([devpath], - maxwait=maxwait, - naplen=naplen, - log_pre="Azure ephemeral disk: ") - - if missing: - report_diagnostic_event( - "ephemeral device '%s' did not appear after %d seconds." % - (devpath, maxwait), - logger_func=LOG.warning) - return + if not os.path.exists(devpath): + report_diagnostic_event( + "Ephemeral resource disk '%s' does not exist." % devpath, + logger_func=LOG.debug) + return + else: + report_diagnostic_event( + "Ephemeral resource disk '%s' exists." 
% devpath, + logger_func=LOG.debug) result = False msg = None diff --git a/integration-requirements.txt b/integration-requirements.txt index c64b3b26..6b596426 100644 --- a/integration-requirements.txt +++ b/integration-requirements.txt @@ -1,5 +1,5 @@ # PyPI requirements for cloud-init integration testing # https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html # -pycloudlib @ git+https://github.com/canonical/pycloudlib.git@3a6c668fed769f00d83d1e6bea7d68953787cc38 +pycloudlib @ git+https://github.com/canonical/pycloudlib.git@da8445325875674394ffd85aaefaa3d2d0e0020d pytest diff --git a/tests/integration_tests/bugs/test_lp1901011.py b/tests/integration_tests/bugs/test_lp1901011.py new file mode 100644 index 00000000..2b47f0a8 --- /dev/null +++ b/tests/integration_tests/bugs/test_lp1901011.py @@ -0,0 +1,58 @@ +"""Integration test for LP: #1901011 + +Ensure an ephemeral disk exists after boot. + +See https://github.com/canonical/cloud-init/pull/800 +""" +import pytest + +from tests.integration_tests.clouds import IntegrationCloud + + +@pytest.mark.azure +@pytest.mark.parametrize('instance_type,is_ephemeral', [ + ('Standard_DS1_v2', True), + ('Standard_D2s_v4', False), +]) +def test_ephemeral(instance_type, is_ephemeral, + session_cloud: IntegrationCloud, setup_image): + if is_ephemeral: + expected_log = ( + "Ephemeral resource disk '/dev/disk/cloud/azure_resource' exists. " + "Merging default Azure cloud ephemeral disk configs." + ) + else: + expected_log = ( + "Ephemeral resource disk '/dev/disk/cloud/azure_resource' does " + "not exist. Not merging default Azure cloud ephemeral disk " + "configs." + ) + + with session_cloud.launch( + launch_kwargs={'instance_type': instance_type} + ) as client: + # Verify log file + log = client.read_from_file('/var/log/cloud-init.log') + assert expected_log in log + + # Verify devices + dev_links = client.execute('ls /dev/disk/cloud') + assert 'azure_root' in dev_links + assert 'azure_root-part1' in dev_links + if is_ephemeral: + assert 'azure_resource' in dev_links + assert 'azure_resource-part1' in dev_links + + # Verify mounts + blks = client.execute('lsblk -pPo NAME,TYPE,MOUNTPOINT') + root_device = client.execute( + 'realpath /dev/disk/cloud/azure_root-part1' + ) + assert 'NAME="{}" TYPE="part" MOUNTPOINT="/"'.format( + root_device) in blks + if is_ephemeral: + ephemeral_device = client.execute( + 'realpath /dev/disk/cloud/azure_resource-part1' + ) + assert 'NAME="{}" TYPE="part" MOUNTPOINT="/mnt"'.format( + ephemeral_device) in blks diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 152a2e1a..f597c723 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1354,23 +1354,51 @@ scbus-1 on xpt0 bus 0 for mypk in mypklist: self.assertIn(mypk['value'], dsrc.metadata['public-keys']) - def test_default_ephemeral(self): - # make sure the ephemeral device works + def test_default_ephemeral_configs_ephemeral_exists(self): + # make sure the ephemeral configs are correct if disk present odata = {} data = {'ovfcontent': construct_valid_ovf_env(data=odata), 'sys_cfg': {}} - dsrc = self._get_ds(data) - ret = dsrc.get_data() - self.assertTrue(ret) - cfg = dsrc.get_config_obj() + orig_exists = dsaz.os.path.exists + + def changed_exists(path): + return True if path == dsaz.RESOURCE_DISK_PATH else orig_exists( + path) + + with mock.patch(MOCKPATH + 'os.path.exists', new=changed_exists): + dsrc = self._get_ds(data) + ret = 
dsrc.get_data()
+            self.assertTrue(ret)
+            cfg = dsrc.get_config_obj()
+
+        self.assertEqual(dsrc.device_name_to_device("ephemeral0"),
+                         dsaz.RESOURCE_DISK_PATH)
+        assert 'disk_setup' in cfg
+        assert 'fs_setup' in cfg
+        self.assertIsInstance(cfg['disk_setup'], dict)
+        self.assertIsInstance(cfg['fs_setup'], list)
+
+    def test_default_ephemeral_configs_ephemeral_does_not_exist(self):
+        # make sure the ephemeral configs are correct if disk not present
+        odata = {}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': {}}
+
+        orig_exists = dsaz.os.path.exists
+
+        def changed_exists(path):
+            return False if path == dsaz.RESOURCE_DISK_PATH else orig_exists(
+                path)
+
+        with mock.patch(MOCKPATH + 'os.path.exists', new=changed_exists):
+            dsrc = self._get_ds(data)
+            ret = dsrc.get_data()
+            self.assertTrue(ret)
+            cfg = dsrc.get_config_obj()
 
-        self.assertEqual(dsrc.device_name_to_device("ephemeral0"),
-                         dsaz.RESOURCE_DISK_PATH)
-        assert 'disk_setup' in cfg
-        assert 'fs_setup' in cfg
-        self.assertIsInstance(cfg['disk_setup'], dict)
-        self.assertIsInstance(cfg['fs_setup'], list)
+        assert 'disk_setup' not in cfg
+        assert 'fs_setup' not in cfg
 
     def test_provide_disk_aliases(self):
         # Make sure that user can affect disk aliases
--
cgit v1.2.3


From 62f7a8b17f0528869662e987aa59a5c248c18e02 Mon Sep 17 00:00:00 2001
From: Paride Legovini
Date: Wed, 3 Mar 2021 14:44:36 +0100
Subject: Fix the TestApt tests using apt-key on Xenial and Hirsute (#823)

* Xenial issue

  The `apt-key finger` format changed since Xenial. Sample Xenial output:

    pub   4096R/991BC93C 2018-09-17
          Key fingerprint = F6EC B376 2474 EDA9 D21B  7022 8719 20D1 991B C93C

  Sample Focal output:

    pub   rsa4096 2016-04-12 [SC]
          EB4C 1BFD 4F04 2F6D DDCC  EC91 7721 F63B D38B 4796

  What didn't change is the format of the key fingerprint, which should be
  enough to ensure that the right key is in place across all the supported
  releases.

* Hirsute issue

  TestApt::test_ppa_source also fails on Hirsute because of a difference
  in how the PPA keys are added. On Focal this command:

    add-apt-repository ppa:simplestreams-dev/trunk

  installs /etc/apt/trusted.gpg.d/simplestreams-dev_ubuntu_trunk.gpg, while
  on Hirsute the file is named simplestreams-dev-ubuntu-trunk.gpg. The
  filename is part of the `apt-key finger` output, and thus the test fails.

  Only checking for the presence of the key fingerprint in apt-key also
  covers this case.
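Concretely, the updated assertions reduce to a substring check on the
fingerprint alone. A sketch of the idea (hypothetical helper, for
illustration only; the real tests below assert directly on `apt-key`
output):

    TEST_KEY = "1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF"

    def assert_key_present(apt_key_output: str) -> None:
        # Stable across Xenial, Focal and Hirsute alike: only the
        # fingerprint itself must appear, regardless of the surrounding
        # layout or of the keyring filename that `apt-key finger` prints.
        assert TEST_KEY in apt_key_output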
LP: #1916629 --- tests/integration_tests/modules/test_apt.py | 20 +++----------------- 1 file changed, 3 insertions(+), 17 deletions(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/modules/test_apt.py b/tests/integration_tests/modules/test_apt.py index 5e3d474c..c0c8321c 100644 --- a/tests/integration_tests/modules/test_apt.py +++ b/tests/integration_tests/modules/test_apt.py @@ -86,25 +86,11 @@ EXPECTED_REGEXES = [ r"deb-src http://badsecurity.ubuntu.com/ubuntu [a-z]+-security multiverse", ] -TEST_KEYSERVER_KEY = """\ -pub rsa1024 2013-12-09 [SC] - 7260 0DB1 5B8E 4C8B 1964 B868 038A CC97 C660 A937 -uid [ unknown] Launchpad PPA for Ryan Harper -""" +TEST_KEYSERVER_KEY = "7260 0DB1 5B8E 4C8B 1964 B868 038A CC97 C660 A937" -TEST_PPA_KEY = """\ -/etc/apt/trusted.gpg.d/simplestreams-dev_ubuntu_trunk.gpg ---------------------------------------------------------- -pub rsa4096 2016-05-04 [SC] - 3552 C902 B4DD F7BD 3842 1821 015D 28D7 4416 14D8 -uid [ unknown] Launchpad PPA for simplestreams-dev -""" +TEST_PPA_KEY = "3552 C902 B4DD F7BD 3842 1821 015D 28D7 4416 14D8" -TEST_KEY = """\ -pub rsa4096 2016-03-04 [SC] - 1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF -uid [ unknown] Launchpad PPA for cloud init development team -""" +TEST_KEY = "1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF" @pytest.mark.ci -- cgit v1.2.3 From 3dd3de7c381e870c6bc9b9b7a84d452b57594686 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 3 Mar 2021 14:40:07 -0600 Subject: integration_tests: add OpenStack as a platform (#804) --- integration-requirements.txt | 2 +- tests/integration_tests/clouds.py | 42 +++++++++++++++++++++++-- tests/integration_tests/conftest.py | 2 ++ tests/integration_tests/integration_settings.py | 8 +++++ 4 files changed, 51 insertions(+), 3 deletions(-) (limited to 'tests/integration_tests') diff --git a/integration-requirements.txt b/integration-requirements.txt index 6b596426..fb4c3ba1 100644 --- a/integration-requirements.txt +++ b/integration-requirements.txt @@ -1,5 +1,5 @@ # PyPI requirements for cloud-init integration testing # https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html # -pycloudlib @ git+https://github.com/canonical/pycloudlib.git@da8445325875674394ffd85aaefaa3d2d0e0020d +pycloudlib @ git+https://github.com/canonical/pycloudlib.git@5975c7e60be4a1e95814909a35592634cc145938 pytest diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index 9527a413..09a44b3c 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -1,8 +1,17 @@ # This file is part of cloud-init. See LICENSE file for license information. from abc import ABC, abstractmethod import logging - -from pycloudlib import EC2, GCE, Azure, OCI, LXDContainer, LXDVirtualMachine +from uuid import UUID + +from pycloudlib import ( + EC2, + GCE, + Azure, + OCI, + LXDContainer, + LXDVirtualMachine, + Openstack, +) from pycloudlib.lxd.instance import LXDInstance import cloudinit @@ -311,3 +320,32 @@ class LxdVmCloud(_LxdIntegrationCloud): self._profile_list = self.cloud_instance.build_necessary_profiles( release) return self._profile_list + + +class OpenstackCloud(IntegrationCloud): + datasource = 'openstack' + integration_instance_cls = IntegrationInstance + + def _get_cloud_instance(self): + if not integration_settings.OPENSTACK_NETWORK: + raise Exception( + 'OPENSTACK_NETWORK must be set to a valid Openstack network. 
'
+                'If using the openstack CLI, try `openstack network list`'
+            )
+        return Openstack(
+            tag='openstack-integration-test',
+            network=integration_settings.OPENSTACK_NETWORK,
+        )
+
+    def _get_initial_image(self):
+        image = ImageSpecification.from_os_image()
+        try:
+            UUID(image.image_id)
+        except ValueError as e:
+            raise Exception(
+                'When using Openstack, `OS_IMAGE` MUST be specified with '
+                'a 36-character UUID image ID. Passing in a release name is '
+                'not valid here.\n'
+                'OS image id: {}'.format(image.image_id)
+            ) from e
+        return image.image_id
diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py
index 61ad8a71..0b93c94f 100644
--- a/tests/integration_tests/conftest.py
+++ b/tests/integration_tests/conftest.py
@@ -20,6 +20,7 @@ from tests.integration_tests.clouds import (
     LxdVmCloud,
     OciCloud,
     _LxdIntegrationCloud,
+    OpenstackCloud,
 )
 from tests.integration_tests.instances import (
     CloudInitSource,
@@ -38,6 +39,7 @@ platforms = {
     'oci': OciCloud,
     'lxd_container': LxdContainerCloud,
     'lxd_vm': LxdVmCloud,
+    'openstack': OpenstackCloud,
 }
 os_list = ["ubuntu"]
 
diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py
index 157d34ad..0703be58 100644
--- a/tests/integration_tests/integration_settings.py
+++ b/tests/integration_tests/integration_settings.py
@@ -21,6 +21,7 @@ RUN_UNSTABLE = False
 # ec2
 # gce
 # oci
+# openstack
 PLATFORM = 'lxd_container'
 
 # The cloud-specific instance type to run. E.g., a1.medium on AWS
@@ -89,6 +90,13 @@ PUBLIC_SSH_KEY = None
 # for the keypair. (Defaults to pycloudlib's default behaviour.)
 KEYPAIR_NAME = None
 
+##################################################################
+# OPENSTACK SETTINGS
+##################################################################
+# Network to use for Openstack. Should be one of the names/ids found
+# in `openstack network list`
+OPENSTACK_NETWORK = None
+
 ##################################################################
 # USER SETTINGS OVERRIDES
 ##################################################################
--
cgit v1.2.3


From 121bc04cdf0e6732fe143b7419131dc250c13384 Mon Sep 17 00:00:00 2001
From: Daniel Watkins
Date: Mon, 8 Mar 2021 12:50:57 -0500
Subject: net: exclude OVS internal interfaces in get_interfaces (#829)

`get_interfaces` is used in two ways, broadly: firstly, to determine the
available interfaces when converting cloud network configuration formats
to cloud-init's network configuration formats; and, secondly, to ensure
that any interfaces which are specified in network configuration are (a)
available, and (b) named correctly.

The first of these is unaffected by this commit, as no clouds support Open
vSwitch configuration in their network configuration formats.

For the second, we check that MAC addresses of physical devices are
unique. In some OVS configurations, there are OVS-created devices which
have duplicate MAC addresses, either with each other or with physical
devices. As these interfaces are created by OVS, we can be confident that
(a) they will be available when appropriate, and (b) that OVS will name
them correctly. As such, this commit excludes any OVS-internal interfaces
from the set of interfaces returned by `get_interfaces`.
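Conceptually, the exclusion filters the candidate NIC list against the
interfaces OVS itself reports as internal. A standalone sketch (simplified
from the patch below, which additionally caches results and tolerates an
installed-but-not-yet-running OVS):

    import subprocess

    def ovs_internal_interfaces() -> list:
        out = subprocess.run(
            ["ovs-vsctl", "--format", "csv", "--no-headings",
             "--timeout", "10", "--columns", "name",
             "find", "interface", "type=internal"],
            capture_output=True, text=True, check=True,
        ).stdout
        return out.splitlines()

    def physical_candidates(all_ifaces: list) -> list:
        internal = set(ovs_internal_interfaces())
        # Drop OVS-managed devices: OVS guarantees their availability and
        # naming, so cloud-init need not validate them.
        return [name for name in all_ifaces if name not in internal]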
LP: #1912844 --- cloudinit/net/__init__.py | 62 +++++++++++ cloudinit/net/tests/test_init.py | 119 +++++++++++++++++++++ cloudinit/sources/helpers/tests/test_openstack.py | 5 + cloudinit/sources/tests/test_oracle.py | 4 + tests/integration_tests/bugs/test_lp1912844.py | 103 ++++++++++++++++++ .../unittests/test_datasource/test_configdrive.py | 8 ++ tests/unittests/test_net.py | 20 ++++ 7 files changed, 321 insertions(+) create mode 100644 tests/integration_tests/bugs/test_lp1912844.py (limited to 'tests/integration_tests') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index de65e7af..385b7bcc 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -6,6 +6,7 @@ # This file is part of cloud-init. See LICENSE file for license information. import errno +import functools import ipaddress import logging import os @@ -19,6 +20,19 @@ from cloudinit.url_helper import UrlError, readurl LOG = logging.getLogger(__name__) SYS_CLASS_NET = "/sys/class/net/" DEFAULT_PRIMARY_INTERFACE = 'eth0' +OVS_INTERNAL_INTERFACE_LOOKUP_CMD = [ + "ovs-vsctl", + "--format", + "csv", + "--no-headings", + "--timeout", + "10", + "--columns", + "name", + "find", + "interface", + "type=internal", +] def natural_sort_key(s, _nsre=re.compile('([0-9]+)')): @@ -133,6 +147,52 @@ def master_is_openvswitch(devname): return os.path.exists(ovs_path) +@functools.lru_cache(maxsize=None) +def openvswitch_is_installed() -> bool: + """Return a bool indicating if Open vSwitch is installed in the system.""" + ret = bool(subp.which("ovs-vsctl")) + if not ret: + LOG.debug( + "ovs-vsctl not in PATH; not detecting Open vSwitch interfaces" + ) + return ret + + +@functools.lru_cache(maxsize=None) +def get_ovs_internal_interfaces() -> list: + """Return a list of the names of OVS internal interfaces on the system. + + These will all be strings, and are used to exclude OVS-specific interface + from cloud-init's network configuration handling. + """ + try: + out, _err = subp.subp(OVS_INTERNAL_INTERFACE_LOOKUP_CMD) + except subp.ProcessExecutionError as exc: + if "database connection failed" in exc.stderr: + LOG.info( + "Open vSwitch is not yet up; no interfaces will be detected as" + " OVS-internal" + ) + return [] + raise + else: + return out.splitlines() + + +def is_openvswitch_internal_interface(devname: str) -> bool: + """Returns True if this is an OVS internal interface. + + If OVS is not installed or not yet running, this will return False. + """ + if not openvswitch_is_installed(): + return False + ovs_bridges = get_ovs_internal_interfaces() + if devname in ovs_bridges: + LOG.debug("Detected %s as an OVS interface", devname) + return True + return False + + def is_netfailover(devname, driver=None): """ netfailover driver uses 3 nics, master, primary and standby. this returns True if the device is either the primary or standby @@ -884,6 +944,8 @@ def get_interfaces(blacklist_drivers=None) -> list: # skip nics that have no mac (00:00....) 
if name != 'lo' and mac == zero_mac[:len(mac)]: continue + if is_openvswitch_internal_interface(name): + continue # skip nics that have drivers blacklisted driver = device_driver(name) if driver in blacklist_drivers: diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py index 0535387a..946f8ee2 100644 --- a/cloudinit/net/tests/test_init.py +++ b/cloudinit/net/tests/test_init.py @@ -391,6 +391,10 @@ class TestGetDeviceList(CiTestCase): self.assertCountEqual(['eth0', 'eth1'], net.get_devicelist()) +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False), +) class TestGetInterfaceMAC(CiTestCase): def setUp(self): @@ -1224,6 +1228,121 @@ class TestNetFailOver(CiTestCase): self.assertFalse(net.is_netfailover(devname, driver)) +class TestOpenvswitchIsInstalled: + """Test cloudinit.net.openvswitch_is_installed. + + Uses the ``clear_lru_cache`` local autouse fixture to allow us to test + despite the ``lru_cache`` decorator on the unit under test. + """ + + @pytest.fixture(autouse=True) + def clear_lru_cache(self): + net.openvswitch_is_installed.cache_clear() + + @pytest.mark.parametrize( + "expected,which_return", [(True, "/some/path"), (False, None)] + ) + @mock.patch("cloudinit.net.subp.which") + def test_mirrors_which_result(self, m_which, expected, which_return): + m_which.return_value = which_return + assert expected == net.openvswitch_is_installed() + + @mock.patch("cloudinit.net.subp.which") + def test_only_calls_which_once(self, m_which): + net.openvswitch_is_installed() + net.openvswitch_is_installed() + assert 1 == m_which.call_count + + +@mock.patch("cloudinit.net.subp.subp", return_value=("", "")) +class TestGetOVSInternalInterfaces: + """Test cloudinit.net.get_ovs_internal_interfaces. + + Uses the ``clear_lru_cache`` local autouse fixture to allow us to test + despite the ``lru_cache`` decorator on the unit under test. 
+ """ + @pytest.fixture(autouse=True) + def clear_lru_cache(self): + net.get_ovs_internal_interfaces.cache_clear() + + def test_command_used(self, m_subp): + """Test we use the correct command when we call subp""" + net.get_ovs_internal_interfaces() + + assert [ + mock.call(net.OVS_INTERNAL_INTERFACE_LOOKUP_CMD) + ] == m_subp.call_args_list + + def test_subp_contents_split_and_returned(self, m_subp): + """Test that the command output is appropriately mangled.""" + stdout = "iface1\niface2\niface3\n" + m_subp.return_value = (stdout, "") + + assert [ + "iface1", + "iface2", + "iface3", + ] == net.get_ovs_internal_interfaces() + + def test_database_connection_error_handled_gracefully(self, m_subp): + """Test that the error indicating OVS is down is handled gracefully.""" + m_subp.side_effect = ProcessExecutionError( + stderr="database connection failed" + ) + + assert [] == net.get_ovs_internal_interfaces() + + def test_other_errors_raised(self, m_subp): + """Test that only database connection errors are handled.""" + m_subp.side_effect = ProcessExecutionError() + + with pytest.raises(ProcessExecutionError): + net.get_ovs_internal_interfaces() + + def test_only_runs_once(self, m_subp): + """Test that we cache the value.""" + net.get_ovs_internal_interfaces() + net.get_ovs_internal_interfaces() + + assert 1 == m_subp.call_count + + +@mock.patch("cloudinit.net.get_ovs_internal_interfaces") +@mock.patch("cloudinit.net.openvswitch_is_installed") +class TestIsOpenVSwitchInternalInterface: + def test_false_if_ovs_not_installed( + self, m_openvswitch_is_installed, _m_get_ovs_internal_interfaces + ): + """Test that OVS' absence returns False.""" + m_openvswitch_is_installed.return_value = False + + assert not net.is_openvswitch_internal_interface("devname") + + @pytest.mark.parametrize( + "detected_interfaces,devname,expected_return", + [ + ([], "devname", False), + (["notdevname"], "devname", False), + (["devname"], "devname", True), + (["some", "other", "devices", "and", "ours"], "ours", True), + ], + ) + def test_return_value_based_on_detected_interfaces( + self, + m_openvswitch_is_installed, + m_get_ovs_internal_interfaces, + detected_interfaces, + devname, + expected_return, + ): + """Test that the detected interfaces are used correctly.""" + m_openvswitch_is_installed.return_value = True + m_get_ovs_internal_interfaces.return_value = detected_interfaces + assert expected_return == net.is_openvswitch_internal_interface( + devname + ) + + class TestIsIpAddress: """Tests for net.is_ip_address. diff --git a/cloudinit/sources/helpers/tests/test_openstack.py b/cloudinit/sources/helpers/tests/test_openstack.py index 2bde1e3f..95fb9743 100644 --- a/cloudinit/sources/helpers/tests/test_openstack.py +++ b/cloudinit/sources/helpers/tests/test_openstack.py @@ -1,10 +1,15 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+from unittest import mock
 
 from cloudinit.sources.helpers import openstack
 from cloudinit.tests import helpers as test_helpers
 
 
+@mock.patch(
+    "cloudinit.net.is_openvswitch_internal_interface",
+    mock.Mock(return_value=False)
+)
 class TestConvertNetJson(test_helpers.CiTestCase):
 
     def test_phy_types(self):
diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py
index a7bbdfd9..dcf33b9b 100644
--- a/cloudinit/sources/tests/test_oracle.py
+++ b/cloudinit/sources/tests/test_oracle.py
@@ -173,6 +173,10 @@ class TestIsPlatformViable(test_helpers.CiTestCase):
         m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
 
 
+@mock.patch(
+    "cloudinit.net.is_openvswitch_internal_interface",
+    mock.Mock(return_value=False)
+)
 class TestNetworkConfigFromOpcImds:
     def test_no_secondary_nics_does_not_mutate_input(self, oracle_ds):
         oracle_ds._vnics_data = [{}]
diff --git a/tests/integration_tests/bugs/test_lp1912844.py b/tests/integration_tests/bugs/test_lp1912844.py
new file mode 100644
index 00000000..efafae50
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1912844.py
@@ -0,0 +1,103 @@
+"""Integration test for LP: #1912844
+
+cloud-init should ignore OVS-internal interfaces when performing its own
+interface determination: these interfaces are handled fully by OVS, so
+cloud-init should never need to touch them.
+
+This test is a semi-synthetic reproducer for the bug. It uses a similar
+network configuration, tweaked slightly to DHCP in a way that will succeed
+even on "failed" boots. The exact bug doesn't reproduce with the NoCloud
+datasource, because it runs at init-local time (whereas the MAAS datasource,
+from the report, runs only at init (network) time): this means that the
+networking code runs before OVS creates its interfaces (which happens after
+init-local but, of course, before networking is up), and so doesn't generate
+the traceback that they cause. We work around this by calling
+``get_interfaces_by_mac` directly in the test code.
+"""
+import pytest
+
+from tests.integration_tests import random_mac_address
+
+MAC_ADDRESS = random_mac_address()
+
+NETWORK_CONFIG = """\
+bonds:
+  bond0:
+    interfaces:
+    - enp5s0
+    macaddress: {0}
+    mtu: 1500
+bridges:
+  ovs-br:
+    interfaces:
+    - bond0
+    macaddress: {0}
+    mtu: 1500
+    openvswitch: {{}}
+    dhcp4: true
+ethernets:
+  enp5s0:
+    mtu: 1500
+    set-name: enp5s0
+    match:
+      macaddress: {0}
+version: 2
+vlans:
+  ovs-br.100:
+    id: 100
+    link: ovs-br
+    mtu: 1500
+  ovs-br.200:
+    id: 200
+    link: ovs-br
+    mtu: 1500
+""".format(MAC_ADDRESS)
+
+
+SETUP_USER_DATA = """\
+#cloud-config
+packages:
+- openvswitch-switch
+"""
+
+
+@pytest.fixture
+def ovs_enabled_session_cloud(session_cloud):
+    """A session_cloud wrapper, to use an OVS-enabled image for tests.
+
+    This implementation is complicated by wanting to use ``session_cloud``s
+    snapshot cleanup/retention logic, to avoid having to reimplement that here.
+ """ + old_snapshot_id = session_cloud.snapshot_id + with session_cloud.launch( + user_data=SETUP_USER_DATA, + ) as instance: + instance.instance.clean() + session_cloud.snapshot_id = instance.snapshot() + + yield session_cloud + + try: + session_cloud.delete_snapshot() + finally: + session_cloud.snapshot_id = old_snapshot_id + + +@pytest.mark.lxd_vm +def test_get_interfaces_by_mac_doesnt_traceback(ovs_enabled_session_cloud): + """Launch our OVS-enabled image and confirm the bug doesn't reproduce.""" + launch_kwargs = { + "config_dict": { + "user.network-config": NETWORK_CONFIG, + "volatile.eth0.hwaddr": MAC_ADDRESS, + }, + } + with ovs_enabled_session_cloud.launch( + launch_kwargs=launch_kwargs, + ) as client: + result = client.execute( + "python3 -c" + "'from cloudinit.net import get_interfaces_by_mac;" + "get_interfaces_by_mac()'" + ) + assert result.ok diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 6f830cc6..2e2b7847 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -494,6 +494,10 @@ class TestConfigDriveDataSource(CiTestCase): self.assertEqual('config-disk (/dev/anything)', cfg_ds.subplatform) +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) class TestNetJson(CiTestCase): def setUp(self): super(TestNetJson, self).setUp() @@ -654,6 +658,10 @@ class TestNetJson(CiTestCase): self.assertEqual(out_data, conv_data) +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) class TestConvertNetworkData(CiTestCase): with_logs = True diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 38d934d4..cb636f41 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -2933,6 +2933,10 @@ iface eth1 inet dhcp self.assertEqual(0, mock_settle.call_count) +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) class TestRhelSysConfigRendering(CiTestCase): with_logs = True @@ -3620,6 +3624,10 @@ USERCTL=no expected, self._render_and_read(network_config=v2data)) +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) class TestOpenSuseSysConfigRendering(CiTestCase): with_logs = True @@ -5037,6 +5045,10 @@ class TestNetRenderers(CiTestCase): self.assertTrue(result) +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) class TestGetInterfaces(CiTestCase): _data = {'bonds': ['bond1'], 'bridges': ['bridge1'], @@ -5186,6 +5198,10 @@ class TestInterfaceHasOwnMac(CiTestCase): self.assertFalse(interface_has_own_mac("eth0")) +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) class TestGetInterfacesByMac(CiTestCase): _data = {'bonds': ['bond1'], 'bridges': ['bridge1'], @@ -5342,6 +5358,10 @@ class TestInterfacesSorting(CiTestCase): ['enp0s3', 'enp0s8', 'enp0s13', 'enp1s2', 'enp2s0', 'enp2s3']) +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) class TestGetIBHwaddrsByInterface(CiTestCase): _ib_addr = '80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56' -- cgit v1.2.3 From 74e1e50d4c3f1d3e3c6973ad74b06919b3738e34 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Thu, 11 Mar 2021 11:18:50 -0500 Subject: integration_tests: mount more paths IN_PLACE (#838) This mounts the full 
directories that we install into systems over their corresponding paths
within the system under test, getting us slightly closer to testing what a
package would install.
---
 tests/integration_tests/clouds.py | 44 ++++++++++++++++++++++++---------------
 1 file changed, 27 insertions(+), 17 deletions(-)

(limited to 'tests/integration_tests')

diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py
index 09a44b3c..a6026309 100644
--- a/tests/integration_tests/clouds.py
+++ b/tests/integration_tests/clouds.py
@@ -1,6 +1,7 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 from abc import ABC, abstractmethod
 import logging
+import os.path
 from uuid import UUID
 
 from pycloudlib import (
@@ -262,23 +263,32 @@ class _LxdIntegrationCloud(IntegrationCloud):
 
     @staticmethod
     def _mount_source(instance: LXDInstance):
-        target_path = '/usr/lib/python3/dist-packages/cloudinit'
-        format_variables = {
-            'name': instance.name,
-            'source_path': cloudinit.__path__[0],
-            'container_path': target_path,
-        }
-        log.info(
-            'Mounting source %(source_path)s directly onto LXD container/vm '
-            'named %(name)s at %(container_path)s',
-            format_variables
-        )
-        command = (
-            'lxc config device add {name} host-cloud-init disk '
-            'source={source_path} '
-            'path={container_path}'
-        ).format(**format_variables)
-        subp(command.split())
+        cloudinit_path = cloudinit.__path__[0]
+        mounts = [
+            (cloudinit_path, '/usr/lib/python3/dist-packages/cloudinit'),
+            (os.path.join(cloudinit_path, '..', 'config', 'cloud.cfg.d'),
+             '/etc/cloud/cloud.cfg.d'),
+            (os.path.join(cloudinit_path, '..', 'templates'),
+             '/etc/cloud/templates'),
+        ]
+        for (n, (source_path, target_path)) in enumerate(mounts):
+            format_variables = {
+                'name': instance.name,
+                'source_path': os.path.realpath(source_path),
+                'container_path': target_path,
+                'idx': n,
+            }
+            log.info(
+                'Mounting source %(source_path)s directly onto LXD'
+                ' container/VM named %(name)s at %(container_path)s',
+                format_variables
+            )
+            command = (
+                'lxc config device add {name} host-cloud-init-{idx} disk '
+                'source={source_path} '
+                'path={container_path}'
+            ).format(**format_variables)
+            subp(command.split())
 
     def _perform_launch(self, launch_kwargs):
         launch_kwargs['inst_type'] = launch_kwargs.pop('instance_type', None)
--
cgit v1.2.3


From d95b448fe106146b7510f7b64f2e83c51943f04d Mon Sep 17 00:00:00 2001
From: James Falcon
Date: Thu, 11 Mar 2021 10:46:49 -0600
Subject: Integration test for #783 (#832)

Newer versions of /etc/sudoers prefer @includedir over #includedir. Ensure
we handle that properly and don't include an additional #includedir when
one isn't warranted.
---
 .../integration_tests/modules/test_users_groups.py | 45 +++++++++++++++++++---
 1 file changed, 40 insertions(+), 5 deletions(-)

(limited to 'tests/integration_tests')

diff --git a/tests/integration_tests/modules/test_users_groups.py b/tests/integration_tests/modules/test_users_groups.py
index ee08d87b..bcb17b7f 100644
--- a/tests/integration_tests/modules/test_users_groups.py
+++ b/tests/integration_tests/modules/test_users_groups.py
@@ -1,16 +1,16 @@
-"""Integration test for the user_groups module.
-
-This test specifies a number of users and groups via user-data, and confirms
-that they have been configured correctly in the system under test.
+"""Integration tests for the user_groups module.
TODO: -* This test assumes that the "ubuntu" user will be created when "default" is +* This module assumes that the "ubuntu" user will be created when "default" is specified; this will need modification to run on other OSes. """ import re import pytest +from tests.integration_tests.clouds import ImageSpecification +from tests.integration_tests.instances import IntegrationInstance + USER_DATA = """\ #cloud-config @@ -45,6 +45,12 @@ AHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/ @pytest.mark.ci @pytest.mark.user_data(USER_DATA) class TestUsersGroups: + """Test users and groups. + + This test specifies a number of users and groups via user-data, and + confirms that they have been configured correctly in the system under test. + """ + @pytest.mark.ubuntu @pytest.mark.parametrize( "getent_args,regex", @@ -86,3 +92,32 @@ class TestUsersGroups: _, groups_str = output.split(":", maxsplit=1) groups = groups_str.split() assert "secret" in groups + + +@pytest.mark.user_data(USER_DATA) +def test_sudoers_includedir(client: IntegrationInstance): + """Ensure we don't add additional #includedir to sudoers. + + Newer versions of /etc/sudoers will use @includedir rather than + #includedir. Ensure we handle that properly and don't include an + additional #includedir when one isn't warranted. + + https://github.com/canonical/cloud-init/pull/783 + """ + if ImageSpecification.from_os_image().release in [ + 'xenial', 'bionic', 'focal' + ]: + raise pytest.skip( + 'Test requires version of sudo installed on groovy and later' + ) + client.execute("sed -i 's/#include/@include/g' /etc/sudoers") + + sudoers = client.read_from_file('/etc/sudoers') + if '@includedir /etc/sudoers.d' not in sudoers: + client.execute("echo '@includedir /etc/sudoers.d' >> /etc/sudoers") + client.instance.clean() + client.restart() + sudoers = client.read_from_file('/etc/sudoers') + + assert '#includedir' not in sudoers + assert sudoers.count('includedir /etc/sudoers.d') == 1 -- cgit v1.2.3 From dae45c3b0d285cbc7b8b22128b5ed295e2c374f8 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Thu, 18 Mar 2021 16:29:15 -0400 Subject: integration_tests: bump pycloudlib dependency (#846) The latest pycloudlib now launches official Ubuntu cloud images for xenial, meaning that `lxc exec` no longer works against them. This commit includes handling for tests which are affected by this change; further details and reasoning in the included comment. --- integration-requirements.txt | 2 +- tests/integration_tests/conftest.py | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) (limited to 'tests/integration_tests') diff --git a/integration-requirements.txt b/integration-requirements.txt index fb4c3ba1..95891356 100644 --- a/integration-requirements.txt +++ b/integration-requirements.txt @@ -1,5 +1,5 @@ # PyPI requirements for cloud-init integration testing # https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html # -pycloudlib @ git+https://github.com/canonical/pycloudlib.git@5975c7e60be4a1e95814909a35592634cc145938 +pycloudlib @ git+https://github.com/canonical/pycloudlib.git@96b146ee1beb99b8e44e36525e18a9a20e00c3f2 pytest diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index 0b93c94f..6f4ce8d3 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -45,6 +45,17 @@ os_list = ["ubuntu"] session_start_time = datetime.datetime.now().strftime('%y%m%d%H%M%S') +XENIAL_LXD_VM_EXEC_MSG = """\ +The default xenial images do not support `exec` for LXD VMs. 
+ +Specify an image known to work using: + + OS_IMAGE=::ubuntu::xenial + +You can re-run specifically tests that require this by passing `-m +lxd_use_exec` to pytest. +""" + def pytest_runtest_setup(item): """Skip tests on unsupported clouds. @@ -216,6 +227,16 @@ def _client(request, fixture_utils, session_cloud: IntegrationCloud): if lxd_use_exec is not None: if not isinstance(session_cloud, _LxdIntegrationCloud): pytest.skip("lxd_use_exec requires LXD") + if isinstance(session_cloud, LxdVmCloud): + image_spec = ImageSpecification.from_os_image() + if image_spec.release == image_spec.image_id == "xenial": + # Why fail instead of skip? We expect that skipped tests will + # be run in a different one of our usual battery of test runs + # (e.g. LXD-only tests are skipped on EC2 but will run in our + # normal LXD test runs). This is not true of this test: it + # can't run in our usual xenial LXD VM test run, and it may not + # run anywhere else. A failure flags up this discrepancy. + pytest.fail(XENIAL_LXD_VM_EXEC_MSG) launch_kwargs["execute_via_ssh"] = False with session_cloud.launch( -- cgit v1.2.3 From c6726c2bbe82b738bd0a7fb308496a497c797d5f Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 19 Mar 2021 08:35:16 -0500 Subject: Fix apt default integration test (#845) The apt default test wasn't ported over from cloud-tests correctly. uri should be specified in the test, but it was not, so the test failed on openstack (and likely other platforms) because without a specified uri, the default uri will vary by platform. I separated this uri test out into a separate test function. Also add openstack specific test for apt configuration with no uri. Other platform-specific tests should be added here over time. --- tests/integration_tests/modules/test_apt.py | 33 +++++++++++++++++++++++------ tox.ini | 1 + 2 files changed, 28 insertions(+), 6 deletions(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/modules/test_apt.py b/tests/integration_tests/modules/test_apt.py index c0c8321c..54711fc0 100644 --- a/tests/integration_tests/modules/test_apt.py +++ b/tests/integration_tests/modules/test_apt.py @@ -200,29 +200,32 @@ class TestApt: assert conf_exists is False -DEFAULT_DATA = """\ +_DEFAULT_DATA = """\ #cloud-config apt: primary: - arches: - default + {uri} security: - arches: - default """ +DEFAULT_DATA = _DEFAULT_DATA.format(uri='') @pytest.mark.ubuntu @pytest.mark.user_data(DEFAULT_DATA) class TestDefaults: - def test_primary(self, class_client: IntegrationInstance): - """Test apt default primary sources. + @pytest.mark.openstack + def test_primary_on_openstack(self, class_client: IntegrationInstance): + """Test apt default primary source on openstack. - Ported from - tests/cloud_tests/testcases/modules/apt_configure_primary.py + When no uri is provided. """ + zone = class_client.execute('cloud-init query v1.availability_zone') sources_list = class_client.read_from_file('/etc/apt/sources.list') - assert 'deb http://archive.ubuntu.com/ubuntu' in sources_list + assert '{}.clouds.archive.ubuntu.com'.format(zone) in sources_list def test_security(self, class_client: IntegrationInstance): """Test apt default security sources. @@ -239,6 +242,24 @@ class TestDefaults: ) +DEFAULT_DATA_WITH_URI = _DEFAULT_DATA.format( + uri='uri: "http://something.random.invalid/ubuntu"' +) + + +@pytest.mark.user_data(DEFAULT_DATA_WITH_URI) +def test_default_primary_with_uri(client: IntegrationInstance): + """Test apt default primary sources. 
+ + Ported from + tests/cloud_tests/testcases/modules/apt_configure_primary.py + """ + sources_list = client.read_from_file('/etc/apt/sources.list') + assert 'archive.ubuntu.com' not in sources_list + + assert 'something.random.invalid' in sources_list + + DISABLED_DATA = """\ #cloud-config apt: diff --git a/tox.ini b/tox.ini index 10efd190..3158ebd5 100644 --- a/tox.ini +++ b/tox.ini @@ -174,6 +174,7 @@ markers = gce: test will only run on GCE platform azure: test will only run on Azure platform oci: test will only run on OCI platform + openstack: test will only run on openstack lxd_config_dict: set the config_dict passed on LXD instance creation lxd_container: test will only run in LXD container lxd_use_exec: `execute` will use `lxc exec` instead of SSH -- cgit v1.2.3 From b794d426b9ab43ea9d6371477466070d86e10668 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 19 Mar 2021 10:06:42 -0400 Subject: write passwords only to serial console, lock down cloud-init-output.log (#847) Prior to this commit, when a user specified configuration which would generate random passwords for users, cloud-init would cause those passwords to be written to the serial console by emitting them on stderr. In the default configuration, any stdout or stderr emitted by cloud-init is also written to `/var/log/cloud-init-output.log`. This file is world-readable, meaning that those randomly-generated passwords were available to be read by any user with access to the system. This presents an obvious security issue. This commit responds to this issue in two ways: * We address the direct issue by moving from writing the passwords to sys.stderr to writing them directly to /dev/console (via util.multi_log); this means that the passwords will never end up in cloud-init-output.log * To avoid future issues like this, we also modify the logging code so that any files created in a log sink subprocess will only be owner/group readable and, if it exists, will be owned by the adm group. This results in `/var/log/cloud-init-output.log` no longer being world-readable, meaning that if there are other parts of the codebase that are emitting sensitive data intended for the serial console, that data is no longer available to all users of the system. LP: #1918303 --- cloudinit/config/cc_set_passwords.py | 5 +- cloudinit/config/tests/test_set_passwords.py | 40 ++++++++++++---- cloudinit/tests/test_util.py | 56 ++++++++++++++++++++++ cloudinit/util.py | 38 +++++++++++++-- .../integration_tests/modules/test_set_password.py | 24 ++++++++++ tests/integration_tests/test_logging.py | 22 +++++++++ tests/unittests/test_util.py | 4 ++ 7 files changed, 173 insertions(+), 16 deletions(-) create mode 100644 tests/integration_tests/test_logging.py (limited to 'tests/integration_tests') diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index d6b5682d..433de751 100755 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -78,7 +78,6 @@ password. 
""" import re -import sys from cloudinit.distros import ug_util from cloudinit import log as logging @@ -214,7 +213,9 @@ def handle(_name, cfg, cloud, log, args): if len(randlist): blurb = ("Set the following 'random' passwords\n", '\n'.join(randlist)) - sys.stderr.write("%s\n%s\n" % blurb) + util.multi_log( + "%s\n%s\n" % blurb, stderr=False, fallback_to_stdout=False + ) if expire: expired_users = [] diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py index daa1ef51..bbe2ee8f 100644 --- a/cloudinit/config/tests/test_set_passwords.py +++ b/cloudinit/config/tests/test_set_passwords.py @@ -74,10 +74,6 @@ class TestSetPasswordsHandle(CiTestCase): with_logs = True - def setUp(self): - super(TestSetPasswordsHandle, self).setUp() - self.add_patch('cloudinit.config.cc_set_passwords.sys.stderr', 'm_err') - def test_handle_on_empty_config(self, *args): """handle logs that no password has changed when config is empty.""" cloud = self.tmp_cloud(distro='ubuntu') @@ -129,10 +125,12 @@ class TestSetPasswordsHandle(CiTestCase): mock.call(['pw', 'usermod', 'ubuntu', '-p', '01-Jan-1970'])], m_subp.call_args_list) + @mock.patch(MODPATH + "util.multi_log") @mock.patch(MODPATH + "util.is_BSD") @mock.patch(MODPATH + "subp.subp") - def test_handle_on_chpasswd_list_creates_random_passwords(self, m_subp, - m_is_bsd): + def test_handle_on_chpasswd_list_creates_random_passwords( + self, m_subp, m_is_bsd, m_multi_log + ): """handle parses command set random passwords.""" m_is_bsd.return_value = False cloud = self.tmp_cloud(distro='ubuntu') @@ -146,10 +144,32 @@ class TestSetPasswordsHandle(CiTestCase): self.assertIn( 'DEBUG: Handling input for chpasswd as list.', self.logs.getvalue()) - self.assertNotEqual( - [mock.call(['chpasswd'], - '\n'.join(valid_random_pwds) + '\n')], - m_subp.call_args_list) + + self.assertEqual(1, m_subp.call_count) + args, _kwargs = m_subp.call_args + self.assertEqual(["chpasswd"], args[0]) + + stdin = args[1] + user_pass = { + user: password + for user, password + in (line.split(":") for line in stdin.splitlines()) + } + + self.assertEqual(1, m_multi_log.call_count) + self.assertEqual( + mock.call(mock.ANY, stderr=False, fallback_to_stdout=False), + m_multi_log.call_args + ) + + self.assertEqual(set(["root", "ubuntu"]), set(user_pass.keys())) + written_lines = m_multi_log.call_args[0][0].splitlines() + for password in user_pass.values(): + for line in written_lines: + if password in line: + break + else: + self.fail("Password not emitted to console") # vi: ts=4 expandtab diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index b7a302f1..e811917e 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -851,4 +851,60 @@ class TestEnsureFile: assert "ab" == kwargs["omode"] +@mock.patch("cloudinit.util.grp.getgrnam") +@mock.patch("cloudinit.util.os.setgid") +@mock.patch("cloudinit.util.os.umask") +class TestRedirectOutputPreexecFn: + """This tests specifically the preexec_fn used in redirect_output.""" + + @pytest.fixture(params=["outfmt", "errfmt"]) + def preexec_fn(self, request): + """A fixture to gather the preexec_fn used by redirect_output. + + This enables simpler direct testing of it, and parameterises any tests + using it to cover both the stdout and stderr code paths. 
+ """ + test_string = "| piped output to invoke subprocess" + if request.param == "outfmt": + args = (test_string, None) + elif request.param == "errfmt": + args = (None, test_string) + with mock.patch("cloudinit.util.subprocess.Popen") as m_popen: + util.redirect_output(*args) + + assert 1 == m_popen.call_count + _args, kwargs = m_popen.call_args + assert "preexec_fn" in kwargs, "preexec_fn not passed to Popen" + return kwargs["preexec_fn"] + + def test_preexec_fn_sets_umask( + self, m_os_umask, _m_setgid, _m_getgrnam, preexec_fn + ): + """preexec_fn should set a mask that avoids world-readable files.""" + preexec_fn() + + assert [mock.call(0o037)] == m_os_umask.call_args_list + + def test_preexec_fn_sets_group_id_if_adm_group_present( + self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn + ): + """We should setgrp to adm if present, so files are owned by them.""" + fake_group = mock.Mock(gr_gid=mock.sentinel.gr_gid) + m_getgrnam.return_value = fake_group + + preexec_fn() + + assert [mock.call("adm")] == m_getgrnam.call_args_list + assert [mock.call(mock.sentinel.gr_gid)] == m_setgid.call_args_list + + def test_preexec_fn_handles_absent_adm_group_gracefully( + self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn + ): + """We should handle an absent adm group gracefully.""" + m_getgrnam.side_effect = KeyError("getgrnam(): name not found: 'adm'") + + preexec_fn() + + assert 0 == m_setgid.call_count + # vi: ts=4 expandtab diff --git a/cloudinit/util.py b/cloudinit/util.py index 769f3425..4e0a72db 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -359,7 +359,7 @@ def find_modules(root_dir): def multi_log(text, console=True, stderr=True, - log=None, log_level=logging.DEBUG): + log=None, log_level=logging.DEBUG, fallback_to_stdout=True): if stderr: sys.stderr.write(text) if console: @@ -368,7 +368,7 @@ def multi_log(text, console=True, stderr=True, with open(conpath, 'w') as wfh: wfh.write(text) wfh.flush() - else: + elif fallback_to_stdout: # A container may lack /dev/console (arguably a container bug). If # it does not exist, then write output to stdout. this will result # in duplicate stderr and stdout messages if stderr was True. @@ -623,6 +623,26 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): if not o_err: o_err = sys.stderr + # pylint: disable=subprocess-popen-preexec-fn + def set_subprocess_umask_and_gid(): + """Reconfigure umask and group ID to create output files securely. + + This is passed to subprocess.Popen as preexec_fn, so it is executed in + the context of the newly-created process. 
It: + + * sets the umask of the process so created files aren't world-readable + * if an adm group exists in the system, sets that as the process' GID + (so that the created file(s) are owned by root:adm) + """ + os.umask(0o037) + try: + group_id = grp.getgrnam("adm").gr_gid + except KeyError: + # No adm group, don't set a group + pass + else: + os.setgid(group_id) + if outfmt: LOG.debug("Redirecting %s to %s", o_out, outfmt) (mode, arg) = outfmt.split(" ", 1) @@ -632,7 +652,12 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): owith = "wb" new_fp = open(arg, owith) elif mode == "|": - proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE) + proc = subprocess.Popen( + arg, + shell=True, + stdin=subprocess.PIPE, + preexec_fn=set_subprocess_umask_and_gid, + ) new_fp = proc.stdin else: raise TypeError("Invalid type for output format: %s" % outfmt) @@ -654,7 +679,12 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): owith = "wb" new_fp = open(arg, owith) elif mode == "|": - proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE) + proc = subprocess.Popen( + arg, + shell=True, + stdin=subprocess.PIPE, + preexec_fn=set_subprocess_umask_and_gid, + ) new_fp = proc.stdin else: raise TypeError("Invalid type for error format: %s" % errfmt) diff --git a/tests/integration_tests/modules/test_set_password.py b/tests/integration_tests/modules/test_set_password.py index b13f76fb..d7cf91a5 100644 --- a/tests/integration_tests/modules/test_set_password.py +++ b/tests/integration_tests/modules/test_set_password.py @@ -116,6 +116,30 @@ class Mixin: # Which are not the same assert shadow_users["harry"] != shadow_users["dick"] + def test_random_passwords_not_stored_in_cloud_init_output_log( + self, class_client + ): + """We should not emit passwords to the in-instance log file. + + LP: #1918303 + """ + cloud_init_output = class_client.read_from_file( + "/var/log/cloud-init-output.log" + ) + assert "dick:" not in cloud_init_output + assert "harry:" not in cloud_init_output + + def test_random_passwords_emitted_to_serial_console(self, class_client): + """We should emit passwords to the serial console. (LP: #1918303)""" + try: + console_log = class_client.instance.console_log() + except NotImplementedError: + # Assume that an exception here means that we can't use the console + # log + pytest.skip("NotImplementedError when requesting console log") + assert "dick:" in console_log + assert "harry:" in console_log + def test_explicit_password_set_correctly(self, class_client): """Test that an explicitly-specified password is set correctly.""" shadow_users, _ = self._fetch_and_parse_etc_shadow(class_client) diff --git a/tests/integration_tests/test_logging.py b/tests/integration_tests/test_logging.py new file mode 100644 index 00000000..b31a0434 --- /dev/null +++ b/tests/integration_tests/test_logging.py @@ -0,0 +1,22 @@ +"""Integration tests relating to cloud-init's logging.""" + + +class TestVarLogCloudInitOutput: + """Integration tests relating to /var/log/cloud-init-output.log.""" + + def test_var_log_cloud_init_output_not_world_readable(self, client): + """ + The log can contain sensitive data, it shouldn't be world-readable. 
+
+        LP: #1918303
+        """
+        # Check the file exists
+        assert client.execute("test -f /var/log/cloud-init-output.log").ok
+
+        # Check its permissions are as we expect
+        perms, user, group = client.execute(
+            "stat -c %a:%U:%G /var/log/cloud-init-output.log"
+        ).split(":")
+        assert "640" == perms
+        assert "root" == user
+        assert "adm" == group
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 857629f1..e5292001 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -572,6 +572,10 @@ class TestMultiLog(helpers.FilesystemMockingTestCase):
         util.multi_log(logged_string)
         self.assertEqual(logged_string, self.stdout.getvalue())
 
+    def test_logs_dont_go_to_stdout_if_fallback_to_stdout_is_false(self):
+        util.multi_log('something', fallback_to_stdout=False)
+        self.assertEqual('', self.stdout.getvalue())
+
     def test_logs_go_to_log_if_given(self):
         log = mock.MagicMock()
         logged_string = 'something very important'
--
cgit v1.2.3


From 13606a12054f4fcf1494ea3068db0640ae6cc3a3 Mon Sep 17 00:00:00 2001
From: dermotbradley
Date: Thu, 25 Mar 2021 21:18:41 +0000
Subject: tools/write-ssh-key-fingerprints: do not display empty header/footer
 (#817)

When output of SSH host keys and/or SSH fingerprints is disabled for all
keys, do not display headers and footers.

Prevent risk of message text being interpreted as "logger" option by
appending "--" to logger options.

Correct syslog output that was tagged with "ec2" regardless of DataSource
in use. Now use "cloud-init" tag instead.

Various "shellcheck" corrections.

Add testcase for disabled output of SSH host keys.
---
 .../modules/test_keys_to_console.py                | 19 +++++++
 tools/write-ssh-key-fingerprints                   | 58 +++++++++++++++-------
 2 files changed, 59 insertions(+), 18 deletions(-)

(limited to 'tests/integration_tests')

diff --git a/tests/integration_tests/modules/test_keys_to_console.py b/tests/integration_tests/modules/test_keys_to_console.py
index 298c9e6d..56dff9a0 100644
--- a/tests/integration_tests/modules/test_keys_to_console.py
+++ b/tests/integration_tests/modules/test_keys_to_console.py
@@ -10,6 +10,11 @@ ssh_fp_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
 ssh_key_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
 """
 
+BLACKLIST_ALL_KEYS_USER_DATA = """\
+#cloud-config
+ssh_fp_console_blacklist: [ssh-dsa, ssh-ecdsa, ssh-ed25519, ssh-rsa, ssh-dss, ecdsa-sha2-nistp256]
+"""  # noqa: E501
+
 DISABLED_USER_DATA = """\
 #cloud-config
 ssh:
@@ -31,6 +36,20 @@ class TestKeysToConsoleBlacklist:
         assert "({})".format(key_type) in syslog
 
 
+@pytest.mark.user_data(BLACKLIST_ALL_KEYS_USER_DATA)
+class TestAllKeysToConsoleBlacklist:
+    """Test that when key blacklist contains all key types that
+    no header/footer are output.
+    """
+    def test_header_excluded(self, class_client):
+        syslog = class_client.read_from_file("/var/log/syslog")
+        assert "BEGIN SSH HOST KEY FINGERPRINTS" not in syslog
+
+    def test_footer_excluded(self, class_client):
+        syslog = class_client.read_from_file("/var/log/syslog")
+        assert "END SSH HOST KEY FINGERPRINTS" not in syslog
+
+
 @pytest.mark.user_data(DISABLED_USER_DATA)
 class TestKeysToConsoleDisabled:
     """Test that output can be fully disabled."""
diff --git a/tools/write-ssh-key-fingerprints b/tools/write-ssh-key-fingerprints
index 2a3dca7c..9409257d 100755
--- a/tools/write-ssh-key-fingerprints
+++ b/tools/write-ssh-key-fingerprints
@@ -1,39 +1,61 @@
 #!/bin/sh
 # This file is part of cloud-init. See LICENSE file for license information.
-logger_opts="-p user.info -t ec2" -# rhels' version of logger_opts does not support long -# for of -s (--stderr), so use short form. -logger_opts="$logger_opts -s" +do_syslog() { + log_message=$1 + + # rhels' version of logger_opts does not support long + # form of -s (--stderr), so use short form. + logger_opts="-s" + + # Need to end the options list with "--" to ensure that any minus symbols + # in the text passed to logger are not interpreted as logger options. + logger_opts="$logger_opts -p user.info -t cloud-init --" + + # shellcheck disable=SC2086 # logger give error if $logger_opts quoted + logger $logger_opts "$log_message" +} + # Redirect stderr to stdout exec 2>&1 fp_blist=",${1}," key_blist=",${2}," -{ -echo -echo "#############################################################" -echo "-----BEGIN SSH HOST KEY FINGERPRINTS-----" + +fingerprint_header_shown=0 for f in /etc/ssh/ssh_host_*key.pub; do [ -f "$f" ] || continue - read ktype line < "$f" + # shellcheck disable=SC2034 # Unused "line" required for word splitting + read -r ktype line < "$f" # skip the key if its type is in the blacklist [ "${fp_blist#*,$ktype,}" = "${fp_blist}" ] || continue - ssh-keygen -l -f "$f" + if [ $fingerprint_header_shown -eq 0 ]; then + do_syslog "#############################################################" + do_syslog "-----BEGIN SSH HOST KEY FINGERPRINTS-----" + fingerprint_header_shown=1 + fi + do_syslog "$(ssh-keygen -l -f "$f")" done -echo "-----END SSH HOST KEY FINGERPRINTS-----" -echo "#############################################################" - -} | logger $logger_opts +if [ $fingerprint_header_shown -eq 1 ]; then + do_syslog "-----END SSH HOST KEY FINGERPRINTS-----" + do_syslog "#############################################################" +fi -echo "-----BEGIN SSH HOST KEY KEYS-----" +key_header_shown=0 for f in /etc/ssh/ssh_host_*key.pub; do [ -f "$f" ] || continue - read ktype line < "$f" + # shellcheck disable=SC2034 # Unused "line" required for word splitting + read -r ktype line < "$f" # skip the key if its type is in the blacklist [ "${key_blist#*,$ktype,}" = "${key_blist}" ] || continue - cat $f + if [ $key_header_shown -eq 0 ]; then + echo "-----BEGIN SSH HOST KEY KEYS-----" + key_header_shown=1 + fi + cat "$f" done -echo "-----END SSH HOST KEY KEYS-----" +if [ $key_header_shown -eq 1 ]; then + echo "-----END SSH HOST KEY KEYS-----" +fi -- cgit v1.2.3 From 0d90596b56db5d306125ead08c571fc8d44d528e Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 15 Apr 2021 10:20:04 -0500 Subject: Emit dots on travis to avoid timeout (#867) The current method of running a background sleep until travis is finished is causing integration test runs to pass even when they should be failing. Instead, update the code to emit dots itself. 
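Judging from the removed .travis.yml block below, tox previously ran in the background while a foreground loop polled /proc and printed dots; the job's exit status therefore came from that loop, never from tox, which is how failing runs could pass. The fix keeps the real work in the foreground and moves the keep-alive output into a helper process, roughly this pattern (a sketch only; the actual helper, emit_dots_on_travis, appears in tests/integration_tests/util.py below):

    import multiprocessing
    import time
    from contextlib import contextmanager

    @contextmanager
    def heartbeat(interval: float = 60.0):
        # Emit periodic output from a child so CI sees activity, while
        # the caller keeps the foreground and its own exit status.
        def emit():
            while True:
                print('.', flush=True)
                time.sleep(interval)

        child = multiprocessing.Process(target=emit)
        child.start()
        try:
            yield
        finally:
            child.terminate()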
--- .travis.yml | 11 +---- tests/integration_tests/bugs/test_lp1813396.py | 2 +- tests/integration_tests/clouds.py | 4 +- tests/integration_tests/log_utils.py | 11 ----- .../modules/test_power_state_change.py | 2 +- tests/integration_tests/util.py | 49 ++++++++++++++++++++++ tox.ini | 2 +- 7 files changed, 56 insertions(+), 25 deletions(-) delete mode 100644 tests/integration_tests/log_utils.py create mode 100644 tests/integration_tests/util.py (limited to 'tests/integration_tests') diff --git a/.travis.yml b/.travis.yml index 690ab644..e112789a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -121,16 +121,7 @@ matrix: # Use sudo to get a new shell where we're in the sbuild group - sudo -E su $USER -c 'sbuild --nolog --no-run-lintian --verbose --dist=xenial cloud-init_*.dsc' - ssh-keygen -P "" -q -f ~/.ssh/id_rsa - - sg lxd -c 'CLOUD_INIT_CLOUD_INIT_SOURCE="$(ls *.deb)" tox -e integration-tests-ci' & - - | - SECONDS=0 - while [ -e /proc/$! ]; do - if [ "$SECONDS" -gt "570" ]; then - echo -n '.' - SECONDS=0 - fi - sleep 10 - done + - sg lxd -c 'CLOUD_INIT_CLOUD_INIT_SOURCE="$(ls *.deb)" tox -e integration-tests-ci' - python: 3.5 env: TOXENV=xenial diff --git a/tests/integration_tests/bugs/test_lp1813396.py b/tests/integration_tests/bugs/test_lp1813396.py index 7ad0e809..68b96b1d 100644 --- a/tests/integration_tests/bugs/test_lp1813396.py +++ b/tests/integration_tests/bugs/test_lp1813396.py @@ -6,7 +6,7 @@ Ensure gpg is called with no tty flag. import pytest from tests.integration_tests.instances import IntegrationInstance -from tests.integration_tests.log_utils import verify_ordered_items_in_text +from tests.integration_tests.util import verify_ordered_items_in_text USER_DATA = """\ diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index a6026309..11b57407 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -25,6 +25,7 @@ from tests.integration_tests.instances import ( IntegrationOciInstance, IntegrationLxdInstance, ) +from tests.integration_tests.util import emit_dots_on_travis try: from typing import Optional @@ -167,7 +168,8 @@ class IntegrationCloud(ABC): "\n".join("{}={}".format(*item) for item in kwargs.items()) ) - pycloudlib_instance = self._perform_launch(kwargs) + with emit_dots_on_travis(): + pycloudlib_instance = self._perform_launch(kwargs) log.info('Launched instance: %s', pycloudlib_instance) instance = self.get_instance(pycloudlib_instance, settings) if kwargs.get('wait', True): diff --git a/tests/integration_tests/log_utils.py b/tests/integration_tests/log_utils.py deleted file mode 100644 index 40baae7b..00000000 --- a/tests/integration_tests/log_utils.py +++ /dev/null @@ -1,11 +0,0 @@ -def verify_ordered_items_in_text(to_verify: list, text: str): - """Assert all items in list appear in order in text. 
- - Examples: - verify_ordered_items_in_text(['a', '1'], 'ab1') # passes - verify_ordered_items_in_text(['1', 'a'], 'ab1') # raises AssertionError - """ - index = 0 - for item in to_verify: - index = text[index:].find(item) - assert index > -1, "Expected item not found: '{}'".format(item) diff --git a/tests/integration_tests/modules/test_power_state_change.py b/tests/integration_tests/modules/test_power_state_change.py index eebe6608..5f3a32ac 100644 --- a/tests/integration_tests/modules/test_power_state_change.py +++ b/tests/integration_tests/modules/test_power_state_change.py @@ -9,7 +9,7 @@ import pytest from tests.integration_tests.clouds import IntegrationCloud from tests.integration_tests.instances import IntegrationInstance -from tests.integration_tests.log_utils import verify_ordered_items_in_text +from tests.integration_tests.util import verify_ordered_items_in_text USER_DATA = """\ #cloud-config diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py new file mode 100644 index 00000000..3ef12358 --- /dev/null +++ b/tests/integration_tests/util.py @@ -0,0 +1,49 @@ +import logging +import multiprocessing +import os +import time +from contextlib import contextmanager + +log = logging.getLogger('integration_testing') + + +def verify_ordered_items_in_text(to_verify: list, text: str): + """Assert all items in list appear in order in text. + + Examples: + verify_ordered_items_in_text(['a', '1'], 'ab1') # passes + verify_ordered_items_in_text(['1', 'a'], 'ab1') # raises AssertionError + """ + index = 0 + for item in to_verify: + index = text[index:].find(item) + assert index > -1, "Expected item not found: '{}'".format(item) + + +@contextmanager +def emit_dots_on_travis(): + """emit a dot every 60 seconds if running on Travis. + + Travis will kill jobs that don't emit output for a certain amount of time. + This context manager spins up a background process which will emit a dot to + stdout every 60 seconds to avoid being killed. + + It should be wrapped selectively around operations that are known to take a + long time. + """ + if os.environ.get('TRAVIS') != "true": + # If we aren't on Travis, don't do anything. + yield + return + + def emit_dots(): + while True: + log.info(".") + time.sleep(60) + + dot_process = multiprocessing.Process(target=emit_dots) + dot_process.start() + try: + yield + finally: + dot_process.terminate() diff --git a/tox.ini b/tox.ini index 3158ebd5..bf8cb78b 100644 --- a/tox.ini +++ b/tox.ini @@ -153,7 +153,7 @@ deps = [testenv:integration-tests-ci] commands = {envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests} -passenv = CLOUD_INIT_* SSH_AUTH_SOCK OS_* +passenv = CLOUD_INIT_* SSH_AUTH_SOCK OS_* TRAVIS deps = -r{toxinidir}/integration-requirements.txt setenv = -- cgit v1.2.3 From 45db197cfc7e3488baae7dc1053c45da070248f6 Mon Sep 17 00:00:00 2001 From: hamalq <81582959+hamalq@users.noreply.github.com> Date: Thu, 15 Apr 2021 16:45:12 -0700 Subject: add prefer_fqdn_over_hostname config option (#859) the above option allows the user to control the behavior of a distro hostname selection if both short hostname and FQDN are supplied. 
If `prefer_fqdn_over_hostname` is true, the FQDN will be selected as
the hostname; if false, the short hostname will be selected.

LP: #1921004
---
 cloudinit/config/cc_set_hostname.py                | 14 ++++-
 cloudinit/config/cc_update_hostname.py             |  8 +++
 cloudinit/distros/__init__.py                      |  7 +++
 cloudinit/distros/freebsd.py                       |  7 +--
 cloudinit/distros/rhel.py                          | 11 ++--
 .../integration_tests/modules/test_set_hostname.py | 17 ++++++
 .../test_handler/test_handler_set_hostname.py      | 69 +++++++++++++++++-----
 7 files changed, 105 insertions(+), 28 deletions(-)

(limited to 'tests/integration_tests')

diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index d4017478..5a59dc32 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -19,7 +19,10 @@ A hostname and fqdn can be provided by specifying a full domain name under the
 key, and the fqdn of the cloud will be used. If a fqdn is specified with the
 ``hostname`` key, it will be handled properly, although it is better to use
 the ``fqdn`` config key. If both ``fqdn`` and ``hostname`` are set,
-it is distro dependent whether ``hostname`` or ``fqdn`` is used.
+it is distro dependent whether ``hostname`` or ``fqdn`` is used,
+unless the ``prefer_fqdn_over_hostname`` option is set. If true and an
+fqdn is provided, the FQDN is used across all distros; if false, the
+short hostname is used.
 
 This module will run in the init-local stage before networking is configured
 if the hostname is set by metadata or user data on the local system.
@@ -38,6 +41,7 @@ based on initial hostname.
 **Config keys**::
 
     preserve_hostname: <true/false>
+    prefer_fqdn_over_hostname: <true/false>
     fqdn: <fqdn>
     hostname: <fqdn/hostname>
 """
@@ -62,6 +66,14 @@ def handle(name, cfg, cloud, log, _args):
         log.debug(("Configuration option 'preserve_hostname' is set,"
                    " not setting the hostname in module %s"), name)
         return
+
+    # Set prefer_fqdn_over_hostname value in distro
+    hostname_fqdn = util.get_cfg_option_bool(cfg,
+                                             "prefer_fqdn_over_hostname",
+                                             None)
+    if hostname_fqdn is not None:
+        cloud.distro.set_option('prefer_fqdn_over_hostname', hostname_fqdn)
+
     (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
 
     # Check for previous successful invocation of set-hostname
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index d5f4eb5a..f4120356 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -27,6 +27,7 @@ is set, then the hostname will not be altered.
 **Config keys**::
 
     preserve_hostname: <true/false>
+    prefer_fqdn_over_hostname: <true/false>
     fqdn: <fqdn>
     hostname: <fqdn/hostname>
 """
@@ -45,6 +46,13 @@ def handle(name, cfg, cloud, log, _args):
                    " not updating the hostname in module %s"), name)
         return
 
+    # Set prefer_fqdn_over_hostname value in distro
+    hostname_fqdn = util.get_cfg_option_bool(cfg,
+                                             "prefer_fqdn_over_hostname",
+                                             None)
+    if hostname_fqdn is not None:
+        cloud.distro.set_option('prefer_fqdn_over_hostname', hostname_fqdn)
+
     (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
     try:
         prev_fn = os.path.join(cloud.get_cpath('data'), "previous-hostname")
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 220bd11f..8b8a647d 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -79,6 +79,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
     shutdown_options_map = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}
 
     _ci_pkl_version = 1
+    prefer_fqdn = False
 
     def __init__(self, name, cfg, paths):
         self._paths = paths
@@ -131,6 +132,9 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
     def get_option(self, opt_name, default=None):
         return self._cfg.get(opt_name, default)
 
+    def set_option(self, opt_name, value=None):
+        self._cfg[opt_name] = value
+
     def set_hostname(self, hostname, fqdn=None):
         writeable_hostname = self._select_hostname(hostname, fqdn)
         self._write_hostname(writeable_hostname, self.hostname_conf_fn)
@@ -259,6 +263,9 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
     def _select_hostname(self, hostname, fqdn):
         # Prefer the short hostname over the long
         # fully qualified domain name
+        if util.get_cfg_option_bool(self._cfg, "prefer_fqdn_over_hostname",
+                                    self.prefer_fqdn) and fqdn:
+            return fqdn
         if not hostname:
             return fqdn
         return hostname
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index dde34d41..9659843f 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -27,12 +27,7 @@ class Distro(cloudinit.distros.bsd.BSD):
     pkg_cmd_remove_prefix = ["pkg", "remove"]
     pkg_cmd_update_prefix = ["pkg", "update"]
     pkg_cmd_upgrade_prefix = ["pkg", "upgrade"]
-
-    def _select_hostname(self, hostname, fqdn):
-        # Should be FQDN if available.
See rc.conf(5) in FreeBSD - if fqdn: - return fqdn - return hostname + prefer_fqdn = True # See rc.conf(5) in FreeBSD def _get_add_member_to_group_cmd(self, member_name, group_name): return ['pw', 'usermod', '-n', member_name, '-G', group_name] diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index c72f7c17..0c00a531 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -50,6 +50,10 @@ class Distro(distros.Distro): } } + # Should be fqdn if we can use it + # See: https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/5/html/deployment_guide/ch-sysconfig # noqa: E501 + prefer_fqdn = True + def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) # This will be used to restrict certain @@ -91,13 +95,6 @@ class Distro(distros.Distro): } rhel_util.update_sysconfig_file(out_fn, host_cfg) - def _select_hostname(self, hostname, fqdn): - # Should be fqdn if we can use it - # See: https://www.centos.org/docs/5/html/Deployment_Guide-en-US/ch-sysconfig.html#s2-sysconfig-network # noqa - if fqdn: - return fqdn - return hostname - def _read_system_hostname(self): if self.uses_systemd(): host_fn = self.systemd_hostname_conf_fn diff --git a/tests/integration_tests/modules/test_set_hostname.py b/tests/integration_tests/modules/test_set_hostname.py index 2bfa403d..e7f7f6b6 100644 --- a/tests/integration_tests/modules/test_set_hostname.py +++ b/tests/integration_tests/modules/test_set_hostname.py @@ -24,6 +24,13 @@ hostname: cloudinit1 fqdn: cloudinit2.i9n.cloud-init.io """ +USER_DATA_PREFER_FQDN = """\ +#cloud-config +prefer_fqdn_over_hostname: {} +hostname: cloudinit1 +fqdn: cloudinit2.test.io +""" + @pytest.mark.ci class TestHostname: @@ -33,6 +40,16 @@ class TestHostname: hostname_output = client.execute("hostname") assert "cloudinit2" in hostname_output.strip() + @pytest.mark.user_data(USER_DATA_PREFER_FQDN.format(True)) + def test_prefer_fqdn(self, client): + hostname_output = client.execute("hostname") + assert "cloudinit2.test.io" in hostname_output.strip() + + @pytest.mark.user_data(USER_DATA_PREFER_FQDN.format(False)) + def test_prefer_short_hostname(self, client): + hostname_output = client.execute("hostname") + assert "cloudinit1" in hostname_output.strip() + @pytest.mark.user_data(USER_DATA_FQDN) def test_hostname_and_fqdn(self, client): hostname_output = client.execute("hostname") diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py index 58abf51a..73641b70 100644 --- a/tests/unittests/test_handler/test_handler_set_hostname.py +++ b/tests/unittests/test_handler/test_handler_set_hostname.py @@ -15,6 +15,7 @@ import os import shutil import tempfile from io import BytesIO +from unittest import mock LOG = logging.getLogger(__name__) @@ -29,14 +30,53 @@ class TestHostname(t_help.FilesystemMockingTestCase): util.ensure_dir(os.path.join(self.tmp, 'data')) self.addCleanup(shutil.rmtree, self.tmp) - def _fetch_distro(self, kind): + def _fetch_distro(self, kind, conf=None): cls = distros.fetch(kind) paths = helpers.Paths({'cloud_dir': self.tmp}) - return cls(kind, {}, paths) + conf = {} if conf is None else conf + return cls(kind, conf, paths) - def test_write_hostname_rhel(self): + def test_debian_write_hostname_prefer_fqdn(self): cfg = { - 'hostname': 'blah.blah.blah.yahoo.com', + 'hostname': 'blah', + 'prefer_fqdn_over_hostname': True, + 'fqdn': 'blah.yahoo.com', + } + distro = self._fetch_distro('debian', cfg) + paths = 
helpers.Paths({'cloud_dir': self.tmp}) + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + cc_set_hostname.handle('cc_set_hostname', + cfg, cc, LOG, []) + contents = util.load_file("/etc/hostname") + self.assertEqual('blah.yahoo.com', contents.strip()) + + @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False) + def test_rhel_write_hostname_prefer_hostname(self, m_uses_systemd): + cfg = { + 'hostname': 'blah', + 'prefer_fqdn_over_hostname': False, + 'fqdn': 'blah.yahoo.com', + } + distro = self._fetch_distro('rhel', cfg) + paths = helpers.Paths({'cloud_dir': self.tmp}) + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + cc_set_hostname.handle('cc_set_hostname', + cfg, cc, LOG, []) + contents = util.load_file("/etc/sysconfig/network", decode=False) + n_cfg = ConfigObj(BytesIO(contents)) + self.assertEqual( + {'HOSTNAME': 'blah'}, + dict(n_cfg)) + + @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False) + def test_write_hostname_rhel(self, m_uses_systemd): + cfg = { + 'hostname': 'blah', + 'fqdn': 'blah.blah.blah.yahoo.com' } distro = self._fetch_distro('rhel') paths = helpers.Paths({'cloud_dir': self.tmp}) @@ -45,15 +85,16 @@ class TestHostname(t_help.FilesystemMockingTestCase): self.patchUtils(self.tmp) cc_set_hostname.handle('cc_set_hostname', cfg, cc, LOG, []) - if not distro.uses_systemd(): - contents = util.load_file("/etc/sysconfig/network", decode=False) - n_cfg = ConfigObj(BytesIO(contents)) - self.assertEqual({'HOSTNAME': 'blah.blah.blah.yahoo.com'}, - dict(n_cfg)) + contents = util.load_file("/etc/sysconfig/network", decode=False) + n_cfg = ConfigObj(BytesIO(contents)) + self.assertEqual( + {'HOSTNAME': 'blah.blah.blah.yahoo.com'}, + dict(n_cfg)) def test_write_hostname_debian(self): cfg = { - 'hostname': 'blah.blah.blah.yahoo.com', + 'hostname': 'blah', + 'fqdn': 'blah.blah.blah.yahoo.com', } distro = self._fetch_distro('debian') paths = helpers.Paths({'cloud_dir': self.tmp}) @@ -65,7 +106,8 @@ class TestHostname(t_help.FilesystemMockingTestCase): contents = util.load_file("/etc/hostname") self.assertEqual('blah', contents.strip()) - def test_write_hostname_sles(self): + @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False) + def test_write_hostname_sles(self, m_uses_systemd): cfg = { 'hostname': 'blah.blah.blah.suse.com', } @@ -75,9 +117,8 @@ class TestHostname(t_help.FilesystemMockingTestCase): cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) cc_set_hostname.handle('cc_set_hostname', cfg, cc, LOG, []) - if not distro.uses_systemd(): - contents = util.load_file(distro.hostname_conf_fn) - self.assertEqual('blah', contents.strip()) + contents = util.load_file(distro.hostname_conf_fn) + self.assertEqual('blah', contents.strip()) def test_multiple_calls_skips_unchanged_hostname(self): """Only new hostname or fqdn values will generate a hostname call.""" -- cgit v1.2.3 From d132356cc361abef2d90d4073438f3ab759d5964 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 19 Apr 2021 11:31:28 -0500 Subject: fix error on upgrade caused by new vendordata2 attributes (#869) In #777, we added 'vendordata2' and 'vendordata2_raw' attributes to the DataSource class, but didn't use the upgrade framework to deal with an unpickle after upgrade. This commit adds the necessary upgrade code. Additionally, added a smaller-scope upgrade test to our integration tests that will be run on every CI run so we catch these issues immediately in the future. 
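Distilled, the fix is the versioned-pickle hook that CloudInitPickleMixin provides: on deserialization the mixin invokes _unpickle(), and the hook backfills attributes that did not exist when the object was serialized. A minimal sketch of the pattern (the full diff follows):

    import abc

    from cloudinit.persistence import CloudInitPickleMixin

    class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
        _ci_pkl_version = 1

        def _unpickle(self, ci_pkl_version: int) -> None:
            # Objects pickled by an older cloud-init predate these
            # attributes; backfill them so post-upgrade boots don't
            # crash with AttributeError.
            if not hasattr(self, 'vendordata2'):
                self.vendordata2 = None
            if not hasattr(self, 'vendordata2_raw'):
                self.vendordata2_raw = None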
LP: #1922739 --- cloudinit/sources/__init__.py | 12 +++++++++++- cloudinit/tests/test_upgrade.py | 4 ++++ tests/integration_tests/clouds.py | 4 ++-- tests/integration_tests/test_upgrade.py | 25 ++++++++++++++++++++++++- 4 files changed, 41 insertions(+), 4 deletions(-) (limited to 'tests/integration_tests') diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 1ad1880d..7d74f8d9 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -24,6 +24,7 @@ from cloudinit import util from cloudinit.atomic_helper import write_json from cloudinit.event import EventType from cloudinit.filters import launch_index +from cloudinit.persistence import CloudInitPickleMixin from cloudinit.reporting import events DSMODE_DISABLED = "disabled" @@ -134,7 +135,7 @@ URLParams = namedtuple( 'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries']) -class DataSource(metaclass=abc.ABCMeta): +class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): dsmode = DSMODE_NETWORK default_locale = 'en_US.UTF-8' @@ -196,6 +197,8 @@ class DataSource(metaclass=abc.ABCMeta): # non-root users sensitive_metadata_keys = ('merged_cfg', 'security-credentials',) + _ci_pkl_version = 1 + def __init__(self, sys_cfg, distro, paths, ud_proc=None): self.sys_cfg = sys_cfg self.distro = distro @@ -218,6 +221,13 @@ class DataSource(metaclass=abc.ABCMeta): else: self.ud_proc = ud_proc + def _unpickle(self, ci_pkl_version: int) -> None: + """Perform deserialization fixes for Paths.""" + if not hasattr(self, 'vendordata2'): + self.vendordata2 = None + if not hasattr(self, 'vendordata2_raw'): + self.vendordata2_raw = None + def __str__(self): return type_utils.obj_name(self) diff --git a/cloudinit/tests/test_upgrade.py b/cloudinit/tests/test_upgrade.py index 05eefd2a..da3ab23b 100644 --- a/cloudinit/tests/test_upgrade.py +++ b/cloudinit/tests/test_upgrade.py @@ -46,3 +46,7 @@ class TestUpgrade: def test_paths_has_run_dir_attribute(self, previous_obj_pkl): assert previous_obj_pkl.paths.run_dir is not None + + def test_vendordata_exists(self, previous_obj_pkl): + assert previous_obj_pkl.vendordata2 is None + assert previous_obj_pkl.vendordata2_raw is None diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index 11b57407..3bbccb44 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -111,14 +111,14 @@ class IntegrationCloud(ABC): # Even if we're using the default key, it may still have a # different name in the clouds, so we need to set it separately. 
self.cloud_instance.key_pair.name = settings.KEYPAIR_NAME - self._released_image_id = self._get_initial_image() + self.released_image_id = self._get_initial_image() self.snapshot_id = None @property def image_id(self): if self.snapshot_id: return self.snapshot_id - return self._released_image_id + return self.released_image_id def emit_settings_to_log(self) -> None: log.info( diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py index c20cb3c1..48e0691b 100644 --- a/tests/integration_tests/test_upgrade.py +++ b/tests/integration_tests/test_upgrade.py @@ -1,4 +1,5 @@ import logging +import os import pytest import time from pathlib import Path @@ -8,6 +9,8 @@ from tests.integration_tests.conftest import ( get_validated_source, session_start_time, ) +from tests.integration_tests.instances import CloudInitSource + log = logging.getLogger('integration_testing') @@ -63,7 +66,7 @@ def test_upgrade(session_cloud: IntegrationCloud): return # type checking doesn't understand that skip raises launch_kwargs = { - 'image_id': session_cloud._get_initial_image(), + 'image_id': session_cloud.released_image_id, } image = ImageSpecification.from_os_image() @@ -93,6 +96,26 @@ def test_upgrade(session_cloud: IntegrationCloud): instance.install_new_cloud_init(source, take_snapshot=False) instance.execute('hostname something-else') _restart(instance) + assert instance.execute('cloud-init status --wait --long').ok _output_to_compare(instance, after_path, netcfg_path) log.info('Wrote upgrade test logs to %s and %s', before_path, after_path) + + +@pytest.mark.ci +@pytest.mark.ubuntu +def test_upgrade_package(session_cloud: IntegrationCloud): + if get_validated_source(session_cloud) != CloudInitSource.DEB_PACKAGE: + not_run_message = 'Test only supports upgrading to build deb' + if os.environ.get('TRAVIS'): + # If this isn't running on CI, we should know + pytest.fail(not_run_message) + else: + pytest.skip(not_run_message) + + launch_kwargs = {'image_id': session_cloud.released_image_id} + + with session_cloud.launch(launch_kwargs=launch_kwargs) as instance: + instance.install_deb() + instance.restart() + assert instance.execute('cloud-init status --wait --long').ok -- cgit v1.2.3 From 5c740dcf6f278a180c32863d2b4225b6db925ef0 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 26 Apr 2021 16:41:50 -0400 Subject: test_upgrade: modify test_upgrade_package to run for more sources (#883) This allows us to use it when validating packages from -proposed (and PPAs etc.). 
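The gating both tests now share is worth seeing distilled (names as defined in the diff below; the TRAVIS branch belongs to the subsequent-boot test, which must fail loudly on CI rather than silently skip):

    import os

    import pytest

    source = get_validated_source(session_cloud)
    if not source.installs_new_version():
        if os.environ.get('TRAVIS'):
            # On CI, not exercising an upgrade path is a failure.
            pytest.fail(UNSUPPORTED_INSTALL_METHOD_MSG.format(source))
        pytest.skip(UNSUPPORTED_INSTALL_METHOD_MSG.format(source))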
--- tests/integration_tests/test_upgrade.py | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py index 48e0691b..7478a1b9 100644 --- a/tests/integration_tests/test_upgrade.py +++ b/tests/integration_tests/test_upgrade.py @@ -9,11 +9,13 @@ from tests.integration_tests.conftest import ( get_validated_source, session_start_time, ) -from tests.integration_tests.instances import CloudInitSource log = logging.getLogger('integration_testing') +UNSUPPORTED_INSTALL_METHOD_MSG = ( + "Install method '{}' not supported for this test" +) USER_DATA = """\ #cloud-config hostname: SRU-worked @@ -57,12 +59,10 @@ def _restart(instance): @pytest.mark.sru_2020_11 -def test_upgrade(session_cloud: IntegrationCloud): +def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud): source = get_validated_source(session_cloud) if not source.installs_new_version(): - pytest.skip("Install method '{}' not supported for this test".format( - source - )) + pytest.skip(UNSUPPORTED_INSTALL_METHOD_MSG.format(source)) return # type checking doesn't understand that skip raises launch_kwargs = { @@ -104,18 +104,21 @@ def test_upgrade(session_cloud: IntegrationCloud): @pytest.mark.ci @pytest.mark.ubuntu -def test_upgrade_package(session_cloud: IntegrationCloud): - if get_validated_source(session_cloud) != CloudInitSource.DEB_PACKAGE: - not_run_message = 'Test only supports upgrading to build deb' +def test_subsequent_boot_of_upgraded_package(session_cloud: IntegrationCloud): + source = get_validated_source(session_cloud) + if not source.installs_new_version(): if os.environ.get('TRAVIS'): # If this isn't running on CI, we should know - pytest.fail(not_run_message) + pytest.fail(UNSUPPORTED_INSTALL_METHOD_MSG.format(source)) else: - pytest.skip(not_run_message) + pytest.skip(UNSUPPORTED_INSTALL_METHOD_MSG.format(source)) + return # type checking doesn't understand that skip raises launch_kwargs = {'image_id': session_cloud.released_image_id} with session_cloud.launch(launch_kwargs=launch_kwargs) as instance: - instance.install_deb() + instance.install_new_cloud_init( + source, take_snapshot=False, clean=False + ) instance.restart() assert instance.execute('cloud-init status --wait --long').ok -- cgit v1.2.3 From 864346999702e6b2b8bf7e6244a6608bcead72a5 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 13 May 2021 12:55:41 -0500 Subject: Allow user control over update events (#834) Control is currently limited to boot events, though this should allow us to more easily incorporate HOTPLUG support. Disabling 'instance-first-boot' is not supported as we apply networking config too early in boot to have processed userdata (along with the fact that this would be a pretty big foot-gun). The concept of update events on datasource has been split into supported update events and default update events. Defaults will be used if there is no user-defined update events, but user-defined events won't be supplied if they aren't supported. When applying the networking config, we now check to see if the event is supported by the datasource as well as if it is enabled. 
Configuration looks like: updates: network: when: ['boot'] --- cloudinit/event.py | 69 ++++++++++-- cloudinit/sources/DataSourceAzure.py | 15 ++- cloudinit/sources/DataSourceEc2.py | 10 +- cloudinit/sources/DataSourceRbxCloud.py | 9 +- cloudinit/sources/DataSourceScaleway.py | 10 +- cloudinit/sources/DataSourceSmartOS.py | 8 +- cloudinit/sources/__init__.py | 41 +++++--- cloudinit/sources/tests/test_init.py | 29 +++-- cloudinit/stages.py | 117 +++++++++++++++++---- cloudinit/tests/test_event.py | 26 +++++ cloudinit/tests/test_stages.py | 98 ++++++++++++++--- doc/rtd/index.rst | 1 + doc/rtd/topics/events.rst | 83 +++++++++++++++ .../integration_tests/modules/test_user_events.py | 95 +++++++++++++++++ tests/unittests/test_datasource/test_azure.py | 4 +- tests/unittests/test_datasource/test_smartos.py | 10 +- tox.ini | 2 +- 17 files changed, 545 insertions(+), 82 deletions(-) create mode 100644 cloudinit/tests/test_event.py create mode 100644 doc/rtd/topics/events.rst create mode 100644 tests/integration_tests/modules/test_user_events.py (limited to 'tests/integration_tests') diff --git a/cloudinit/event.py b/cloudinit/event.py index f7b311fb..76a0afc6 100644 --- a/cloudinit/event.py +++ b/cloudinit/event.py @@ -1,17 +1,72 @@ # This file is part of cloud-init. See LICENSE file for license information. - """Classes and functions related to event handling.""" +from enum import Enum +from typing import Dict, Set + +from cloudinit import log as logging + +LOG = logging.getLogger(__name__) + -# Event types which can generate maintenance requests for cloud-init. -class EventType(object): - BOOT = "System boot" - BOOT_NEW_INSTANCE = "New instance first boot" +class EventScope(Enum): + # NETWORK is currently the only scope, but we want to leave room to + # grow other scopes (e.g., STORAGE) without having to make breaking + # changes to the user config + NETWORK = 'network' - # TODO: Cloud-init will grow support for the follow event types: - # UDEV + def __str__(self): # pylint: disable=invalid-str-returned + return self.value + + +class EventType(Enum): + """Event types which can generate maintenance requests for cloud-init.""" + # Cloud-init should grow support for the follow event types: + # HOTPLUG # METADATA_CHANGE # USER_REQUEST + BOOT = "boot" + BOOT_NEW_INSTANCE = "boot-new-instance" + BOOT_LEGACY = "boot-legacy" + + def __str__(self): # pylint: disable=invalid-str-returned + return self.value + + +def userdata_to_events(user_config: dict) -> Dict[EventScope, Set[EventType]]: + """Convert userdata into update config format defined on datasource. + + Userdata is in the form of (e.g): + {'network': {'when': ['boot']}} + + DataSource config is in the form of: + {EventScope.Network: {EventType.BOOT}} + + Take the first and return the second + """ + update_config = {} + for scope, scope_list in user_config.items(): + try: + new_scope = EventScope(scope) + except ValueError as e: + LOG.warning( + "%s! Update data will be ignored for '%s' scope", + str(e), + scope, + ) + continue + try: + new_values = [EventType(x) for x in scope_list['when']] + except ValueError as e: + LOG.warning( + "%s! 
Update data will be ignored for '%s' scope", + str(e), + scope, + ) + new_values = [] + update_config[new_scope] = set(new_values) + + return update_config # vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 2f3390c3..dcdf9f8f 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -22,7 +22,7 @@ import requests from cloudinit import dmi from cloudinit import log as logging from cloudinit import net -from cloudinit.event import EventType +from cloudinit.event import EventScope, EventType from cloudinit.net import device_driver from cloudinit.net.dhcp import EphemeralDHCPv4 from cloudinit import sources @@ -338,6 +338,13 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): class DataSourceAzure(sources.DataSource): dsname = 'Azure' + # Regenerate network config new_instance boot and every boot + default_update_events = {EventScope.NETWORK: { + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT, + EventType.BOOT_LEGACY + }} + _negotiated = False _metadata_imds = sources.UNSET _ci_pkl_version = 1 @@ -352,8 +359,6 @@ class DataSourceAzure(sources.DataSource): BUILTIN_DS_CONFIG]) self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file') self._network_config = None - # Regenerate network config new_instance boot and every boot - self.update_events['network'].add(EventType.BOOT) self._ephemeral_dhcp_ctx = None self.failed_desired_api_version = False self.iso_dev = None @@ -2309,8 +2314,8 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None): LOG.info( 'Removing Ubuntu extended network scripts because' ' cloud-init updates Azure network configuration on the' - ' following event: %s.', - EventType.BOOT) + ' following events: %s.', + [EventType.BOOT.value, EventType.BOOT_LEGACY.value]) logged = True if os.path.isdir(path): util.del_dir(path) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index a2105dc7..8a7f7c60 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -8,6 +8,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. +import copy import os import time @@ -20,7 +21,7 @@ from cloudinit import sources from cloudinit import url_helper as uhelp from cloudinit import util from cloudinit import warnings -from cloudinit.event import EventType +from cloudinit.event import EventScope, EventType LOG = logging.getLogger(__name__) @@ -426,7 +427,12 @@ class DataSourceEc2(sources.DataSource): # Non-VPC (aka Classic) Ec2 instances need to rewrite the # network config file every boot due to MAC address change. 
if self.is_classic_instance(): - self.update_events['network'].add(EventType.BOOT) + self.default_update_events = copy.deepcopy( + self.default_update_events) + self.default_update_events[EventScope.NETWORK].add( + EventType.BOOT) + self.default_update_events[EventScope.NETWORK].add( + EventType.BOOT_LEGACY) else: LOG.warning("Metadata 'network' key not valid: %s.", net_md) self._network_config = result diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py index 0b8994bf..bb69e998 100644 --- a/cloudinit/sources/DataSourceRbxCloud.py +++ b/cloudinit/sources/DataSourceRbxCloud.py @@ -17,7 +17,7 @@ from cloudinit import log as logging from cloudinit import sources from cloudinit import subp from cloudinit import util -from cloudinit.event import EventType +from cloudinit.event import EventScope, EventType LOG = logging.getLogger(__name__) ETC_HOSTS = '/etc/hosts' @@ -206,10 +206,11 @@ def read_user_data_callback(mount_dir): class DataSourceRbxCloud(sources.DataSource): dsname = "RbxCloud" - update_events = {'network': [ + default_update_events = {EventScope.NETWORK: { EventType.BOOT_NEW_INSTANCE, - EventType.BOOT - ]} + EventType.BOOT, + EventType.BOOT_LEGACY + }} def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index 41be7665..7b8974a2 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -31,8 +31,8 @@ from cloudinit import sources from cloudinit import url_helper from cloudinit import util from cloudinit import net +from cloudinit.event import EventScope, EventType from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError -from cloudinit.event import EventType LOG = logging.getLogger(__name__) @@ -172,7 +172,13 @@ def query_data_api(api_type, api_address, retries, timeout): class DataSourceScaleway(sources.DataSource): dsname = "Scaleway" - update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]} + default_update_events = { + EventScope.NETWORK: { + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT, + EventType.BOOT_LEGACY + } + } def __init__(self, sys_cfg, distro, paths): super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index fd292baa..9b16bf8d 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -36,7 +36,7 @@ from cloudinit import serial from cloudinit import sources from cloudinit import subp from cloudinit import util -from cloudinit.event import EventType +from cloudinit.event import EventScope, EventType LOG = logging.getLogger(__name__) @@ -170,6 +170,11 @@ class DataSourceSmartOS(sources.DataSource): smartos_type = sources.UNSET md_client = sources.UNSET + default_update_events = {EventScope.NETWORK: { + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT, + EventType.BOOT_LEGACY + }} def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -181,7 +186,6 @@ class DataSourceSmartOS(sources.DataSource): self.metadata = {} self.network_data = None self._network_config = None - self.update_events['network'].add(EventType.BOOT) self.script_base_d = os.path.join(self.paths.get_cpath("scripts")) diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 7d74f8d9..a07c4b4f 100644 --- a/cloudinit/sources/__init__.py +++ 
b/cloudinit/sources/__init__.py @@ -13,6 +13,7 @@ import copy import json import os from collections import namedtuple +from typing import Dict, List from cloudinit import dmi from cloudinit import importer @@ -22,7 +23,7 @@ from cloudinit import type_utils from cloudinit import user_data as ud from cloudinit import util from cloudinit.atomic_helper import write_json -from cloudinit.event import EventType +from cloudinit.event import EventScope, EventType from cloudinit.filters import launch_index from cloudinit.persistence import CloudInitPickleMixin from cloudinit.reporting import events @@ -175,12 +176,23 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): # The datasource defines a set of supported EventTypes during which # the datasource can react to changes in metadata and regenerate - # network configuration on metadata changes. - # A datasource which supports writing network config on each system boot - # would call update_events['network'].add(EventType.BOOT). + # network configuration on metadata changes. These are defined in + # `supported_network_events`. + # The datasource also defines a set of default EventTypes that the + # datasource can react to. These are the event types that will be used + # if not overridden by the user. + # A datasource requiring to write network config on each system boot + # would call default_update_events['network'].add(EventType.BOOT). # Default: generate network config on new instance id (first boot). - update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])} + supported_update_events = {EventScope.NETWORK: { + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT, + EventType.BOOT_LEGACY, + }} + default_update_events = {EventScope.NETWORK: { + EventType.BOOT_NEW_INSTANCE, + }} # N-tuple listing default values for any metadata-related class # attributes cached on an instance by a process_data runs. These attribute @@ -648,10 +660,12 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): def get_package_mirror_info(self): return self.distro.get_package_mirror_info(data_source=self) - def update_metadata(self, source_event_types): + def update_metadata_if_supported( + self, source_event_types: List[EventType] + ) -> bool: """Refresh cached metadata if the datasource supports this event. - The datasource has a list of update_events which + The datasource has a list of supported_update_events which trigger refreshing all cached metadata as well as refreshing the network configuration. @@ -661,9 +675,9 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): @return True if the datasource did successfully update cached metadata due to source_event_type. """ - supported_events = {} + supported_events = {} # type: Dict[EventScope, set] for event in source_event_types: - for update_scope, update_events in self.update_events.items(): + for update_scope, update_events in self.supported_update_events.items(): # noqa: E501 if event in update_events: if not supported_events.get(update_scope): supported_events[update_scope] = set() @@ -671,7 +685,8 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): for scope, matched_events in supported_events.items(): LOG.debug( "Update datasource metadata and %s config due to events: %s", - scope, ', '.join(matched_events)) + scope.value, + ', '.join([event.value for event in matched_events])) # Each datasource has a cached config property which needs clearing # Once cleared that config property will be regenerated from # current metadata. 
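In practice the scope/event filtering above behaves like this; a usage sketch, assuming ds is an initialized DataSource instance (the variable and scenarios are illustrative, not from the patch):

    from cloudinit.event import EventScope, EventType

    # A datasource that only supports first-boot network updates, i.e.
    #   supported_update_events = {
    #       EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}
    ds.update_metadata_if_supported([EventType.BOOT])
    # -> no scope supports BOOT: nothing is refreshed; this logs
    #    "Datasource ... not updated for events: boot" and returns False.
    ds.update_metadata_if_supported([EventType.BOOT_NEW_INSTANCE])
    # -> supported: cached properties such as _network_config are reset
    #    and get_data() reruns against current metadata, returning True.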
@@ -682,7 +697,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): if result: return True LOG.debug("Datasource %s not updated for events: %s", self, - ', '.join(source_event_types)) + ', '.join([event.value for event in source_event_types])) return False def check_instance_id(self, sys_cfg): @@ -789,7 +804,9 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter): with myrep: LOG.debug("Seeing if we can get any data from %s", cls) s = cls(sys_cfg, distro, paths) - if s.update_metadata([EventType.BOOT_NEW_INSTANCE]): + if s.update_metadata_if_supported( + [EventType.BOOT_NEW_INSTANCE] + ): myrep.message = "found %s data from %s" % (mode, name) return (s, type_utils.obj_name(cls)) except Exception: diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index 1420a988..a2b052a6 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -5,7 +5,7 @@ import inspect import os import stat -from cloudinit.event import EventType +from cloudinit.event import EventScope, EventType from cloudinit.helpers import Paths from cloudinit import importer from cloudinit.sources import ( @@ -618,24 +618,29 @@ class TestDataSource(CiTestCase): self.assertEqual('himom', getattr(self.datasource, cached_attr_name)) self.assertEqual('updated', self.datasource.myattr) + @mock.patch.dict(DataSource.default_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + @mock.patch.dict(DataSource.supported_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) def test_update_metadata_only_acts_on_supported_update_events(self): - """update_metadata won't get_data on unsupported update events.""" - self.datasource.update_events['network'].discard(EventType.BOOT) + """update_metadata_if_supported wont get_data on unsupported events.""" self.assertEqual( - {'network': set([EventType.BOOT_NEW_INSTANCE])}, - self.datasource.update_events) + {EventScope.NETWORK: set([EventType.BOOT_NEW_INSTANCE])}, + self.datasource.default_update_events + ) def fake_get_data(): raise Exception('get_data should not be called') self.datasource.get_data = fake_get_data self.assertFalse( - self.datasource.update_metadata( + self.datasource.update_metadata_if_supported( source_event_types=[EventType.BOOT])) + @mock.patch.dict(DataSource.supported_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) def test_update_metadata_returns_true_on_supported_update_event(self): - """update_metadata returns get_data response on supported events.""" - + """update_metadata_if_supported returns get_data on supported events""" def fake_get_data(): return True @@ -643,14 +648,16 @@ class TestDataSource(CiTestCase): self.datasource._network_config = 'something' self.datasource._dirty_cache = True self.assertTrue( - self.datasource.update_metadata( + self.datasource.update_metadata_if_supported( source_event_types=[ EventType.BOOT, EventType.BOOT_NEW_INSTANCE])) self.assertEqual(UNSET, self.datasource._network_config) + self.assertIn( "DEBUG: Update datasource metadata and network config due to" - " events: New instance first boot", - self.logs.getvalue()) + " events: boot-new-instance", + self.logs.getvalue() + ) class TestRedactSensitiveData(CiTestCase): diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 5bacc85d..bbded1e9 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -8,9 +8,11 @@ import copy import os import pickle import sys +from collections import namedtuple +from typing 
import Dict, Set from cloudinit.settings import ( - FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, RUN_CLOUD_CONFIG) + FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, PER_ONCE, RUN_CLOUD_CONFIG) from cloudinit import handlers @@ -21,7 +23,11 @@ from cloudinit.handlers.jinja_template import JinjaTemplatePartHandler from cloudinit.handlers.shell_script import ShellScriptPartHandler from cloudinit.handlers.upstart_job import UpstartJobPartHandler -from cloudinit.event import EventType +from cloudinit.event import ( + EventScope, + EventType, + userdata_to_events, +) from cloudinit.sources import NetworkConfigSource from cloudinit import cloud @@ -118,6 +124,7 @@ class Init(object): def _initial_subdirs(self): c_dir = self.paths.cloud_dir + run_dir = self.paths.run_dir initial_dirs = [ c_dir, os.path.join(c_dir, 'scripts'), @@ -130,6 +137,7 @@ class Init(object): os.path.join(c_dir, 'handlers'), os.path.join(c_dir, 'sem'), os.path.join(c_dir, 'data'), + os.path.join(run_dir, 'sem'), ] return initial_dirs @@ -341,6 +349,11 @@ class Init(object): return self._previous_iid def is_new_instance(self): + """Return true if this is a new instance. + + If datasource has already been initialized, this will return False, + even on first boot. + """ previous = self.previous_iid() ret = (previous == NO_PREVIOUS_INSTANCE_ID or previous != self.datasource.get_instance_id()) @@ -702,6 +715,46 @@ class Init(object): return (self.distro.generate_fallback_config(), NetworkConfigSource.fallback) + def update_event_enabled( + self, event_source_type: EventType, scope: EventScope = None + ) -> bool: + """Determine if a particular EventType is enabled. + + For the `event_source_type` passed in, check whether this EventType + is enabled in the `updates` section of the userdata. If `updates` + is not enabled in userdata, check if defined as one of the + `default_events` on the datasource. `scope` may be used to + narrow the check to a particular `EventScope`. + + Note that on first boot, userdata may NOT be available yet. In this + case, we only have the data source's `default_update_events`, + so an event that should be enabled in userdata may be denied. 
+ """ + default_events = self.datasource.default_update_events # type: Dict[EventScope, Set[EventType]] # noqa: E501 + user_events = userdata_to_events(self.cfg.get('updates', {})) # type: Dict[EventScope, Set[EventType]] # noqa: E501 + # A value in the first will override a value in the second + allowed = util.mergemanydict([ + copy.deepcopy(user_events), + copy.deepcopy(default_events), + ]) + LOG.debug('Allowed events: %s', allowed) + + if not scope: + scopes = allowed.keys() + else: + scopes = [scope] + scope_values = [s.value for s in scopes] + + for evt_scope in scopes: + if event_source_type in allowed.get(evt_scope, []): + LOG.debug('Event Allowed: scope=%s EventType=%s', + evt_scope.value, event_source_type) + return True + + LOG.debug('Event Denied: scopes=%s EventType=%s', + scope_values, event_source_type) + return False + def _apply_netcfg_names(self, netcfg): try: LOG.debug("applying net config names for %s", netcfg) @@ -709,27 +762,51 @@ class Init(object): except Exception as e: LOG.warning("Failed to rename devices: %s", e) + def _get_per_boot_network_semaphore(self): + return namedtuple('Semaphore', 'semaphore args')( + helpers.FileSemaphores(self.paths.get_runpath('sem')), + ('apply_network_config', PER_ONCE) + ) + + def _network_already_configured(self) -> bool: + sem = self._get_per_boot_network_semaphore() + return sem.semaphore.has_run(*sem.args) + def apply_network_config(self, bring_up): - # get a network config + """Apply the network config. + + Find the config, determine whether to apply it, apply it via + the distro, and optionally bring it up + """ netcfg, src = self._find_networking_config() if netcfg is None: LOG.info("network config is disabled by %s", src) return - # request an update if needed/available - if self.datasource is not NULL_DATA_SOURCE: - if not self.is_new_instance(): - if not self.datasource.update_metadata([EventType.BOOT]): - LOG.debug( - "No network config applied. Neither a new instance" - " nor datasource network update on '%s' event", - EventType.BOOT) - # nothing new, but ensure proper names - self._apply_netcfg_names(netcfg) - return - else: - # refresh netcfg after update - netcfg, src = self._find_networking_config() + def event_enabled_and_metadata_updated(event_type): + return self.update_event_enabled( + event_type, scope=EventScope.NETWORK + ) and self.datasource.update_metadata_if_supported([event_type]) + + def should_run_on_boot_event(): + return (not self._network_already_configured() and + event_enabled_and_metadata_updated(EventType.BOOT)) + + if ( + self.datasource is not NULL_DATA_SOURCE and + not self.is_new_instance() and + not should_run_on_boot_event() and + not event_enabled_and_metadata_updated(EventType.BOOT_LEGACY) + ): + LOG.debug( + "No network config applied. 
Neither a new instance" + " nor datasource network update allowed") + # nothing new, but ensure proper names + self._apply_netcfg_names(netcfg) + return + + # refresh netcfg after update + netcfg, src = self._find_networking_config() # ensure all physical devices in config are present self.distro.networking.wait_for_physdevs(netcfg) @@ -740,8 +817,12 @@ class Init(object): # rendering config LOG.info("Applying network configuration from %s bringup=%s: %s", src, bring_up, netcfg) + + sem = self._get_per_boot_network_semaphore() try: - return self.distro.apply_network_config(netcfg, bring_up=bring_up) + with sem.semaphore.lock(*sem.args): + return self.distro.apply_network_config( + netcfg, bring_up=bring_up) except net.RendererNotFoundError as e: LOG.error("Unable to render networking. Network config is " "likely broken: %s", e) diff --git a/cloudinit/tests/test_event.py b/cloudinit/tests/test_event.py new file mode 100644 index 00000000..3da4c70c --- /dev/null +++ b/cloudinit/tests/test_event.py @@ -0,0 +1,26 @@ +# This file is part of cloud-init. See LICENSE file for license information. +"""Tests related to cloudinit.event module.""" +from cloudinit.event import EventType, EventScope, userdata_to_events + + +class TestEvent: + def test_userdata_to_events(self): + userdata = {'network': {'when': ['boot']}} + expected = {EventScope.NETWORK: {EventType.BOOT}} + assert expected == userdata_to_events(userdata) + + def test_invalid_scope(self, caplog): + userdata = {'networkasdfasdf': {'when': ['boot']}} + userdata_to_events(userdata) + assert ( + "'networkasdfasdf' is not a valid EventScope! Update data " + "will be ignored for 'networkasdfasdf' scope" + ) in caplog.text + + def test_invalid_event(self, caplog): + userdata = {'network': {'when': ['bootasdfasdf']}} + userdata_to_events(userdata) + assert ( + "'bootasdfasdf' is not a valid EventType! Update data " + "will be ignored for 'network' scope" + ) in caplog.text diff --git a/cloudinit/tests/test_stages.py b/cloudinit/tests/test_stages.py index d2d1b37f..a06a2bde 100644 --- a/cloudinit/tests/test_stages.py +++ b/cloudinit/tests/test_stages.py @@ -1,7 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. 
"""Tests related to cloudinit.stages module.""" - import os import stat @@ -11,7 +10,7 @@ from cloudinit import stages from cloudinit import sources from cloudinit.sources import NetworkConfigSource -from cloudinit.event import EventType +from cloudinit.event import EventScope, EventType from cloudinit.util import write_file from cloudinit.tests.helpers import CiTestCase, mock @@ -52,6 +51,8 @@ class TestInit(CiTestCase): 'distro': 'ubuntu', 'paths': {'cloud_dir': self.tmpdir, 'run_dir': self.tmpdir}}} self.init.datasource = FakeDataSource(paths=self.init.paths) + self._real_is_new_instance = self.init.is_new_instance + self.init.is_new_instance = mock.Mock(return_value=True) def test_wb__find_networking_config_disabled(self): """find_networking_config returns no config when disabled.""" @@ -291,6 +292,7 @@ class TestInit(CiTestCase): m_macs.return_value = {'42:42:42:42:42:42': 'eth9'} self.init._find_networking_config = fake_network_config + self.init.apply_network_config(True) self.init.distro.apply_network_config_names.assert_called_with(net_cfg) self.init.distro.apply_network_config.assert_called_with( @@ -299,6 +301,7 @@ class TestInit(CiTestCase): @mock.patch('cloudinit.distros.ubuntu.Distro') def test_apply_network_on_same_instance_id(self, m_ubuntu): """Only call distro.apply_network_config_names on same instance id.""" + self.init.is_new_instance = self._real_is_new_instance old_instance_id = os.path.join( self.init.paths.get_cpath('data'), 'instance-id') write_file(old_instance_id, TEST_INSTANCE_ID) @@ -311,18 +314,19 @@ class TestInit(CiTestCase): return net_cfg, NetworkConfigSource.fallback self.init._find_networking_config = fake_network_config + self.init.apply_network_config(True) self.init.distro.apply_network_config_names.assert_called_with(net_cfg) self.init.distro.apply_network_config.assert_not_called() - self.assertIn( - 'No network config applied. Neither a new instance' - " nor datasource network update on '%s' event" % EventType.BOOT, - self.logs.getvalue()) - - @mock.patch('cloudinit.net.get_interfaces_by_mac') - @mock.patch('cloudinit.distros.ubuntu.Distro') - def test_apply_network_on_datasource_allowed_event(self, m_ubuntu, m_macs): - """Apply network if datasource.update_metadata permits BOOT event.""" + assert ( + "No network config applied. 
Neither a new instance nor datasource " + "network update allowed" + ) in self.logs.getvalue() + + # CiTestCase doesn't work with pytest.mark.parametrize, and moving this + # functionality to a separate class is more cumbersome than it'd be worth + # at the moment, so use this as a simple setup + def _apply_network_setup(self, m_macs): old_instance_id = os.path.join( self.init.paths.get_cpath('data'), 'instance-id') write_file(old_instance_id, TEST_INSTANCE_ID) @@ -338,12 +342,80 @@ class TestInit(CiTestCase): self.init._find_networking_config = fake_network_config self.init.datasource = FakeDataSource(paths=self.init.paths) - self.init.datasource.update_events = {'network': [EventType.BOOT]} + self.init.is_new_instance = mock.Mock(return_value=False) + return net_cfg + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch('cloudinit.distros.ubuntu.Distro') + @mock.patch.dict(sources.DataSource.default_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}}) + def test_apply_network_allowed_when_default_boot( + self, m_ubuntu, m_macs + ): + """Apply network if datasource permits BOOT event.""" + net_cfg = self._apply_network_setup(m_macs) + self.init.apply_network_config(True) - self.init.distro.apply_network_config_names.assert_called_with(net_cfg) + assert mock.call( + net_cfg + ) == self.init.distro.apply_network_config_names.call_args_list[-1] + assert mock.call( + net_cfg, bring_up=True + ) == self.init.distro.apply_network_config.call_args_list[-1] + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch('cloudinit.distros.ubuntu.Distro') + @mock.patch.dict(sources.DataSource.default_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + def test_apply_network_disabled_when_no_default_boot( + self, m_ubuntu, m_macs + ): + """Don't apply network if datasource has no BOOT event.""" + self._apply_network_setup(m_macs) + self.init.apply_network_config(True) + self.init.distro.apply_network_config.assert_not_called() + assert ( + "No network config applied. Neither a new instance nor datasource " + "network update allowed" + ) in self.logs.getvalue() + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch('cloudinit.distros.ubuntu.Distro') + @mock.patch.dict(sources.DataSource.default_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + def test_apply_network_allowed_with_userdata_overrides( + self, m_ubuntu, m_macs + ): + """Apply network if userdata overrides default config""" + net_cfg = self._apply_network_setup(m_macs) + self.init._cfg = {'updates': {'network': {'when': ['boot']}}} + self.init.apply_network_config(True) + self.init.distro.apply_network_config_names.assert_called_with( + net_cfg) self.init.distro.apply_network_config.assert_called_with( net_cfg, bring_up=True) + @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch('cloudinit.distros.ubuntu.Distro') + @mock.patch.dict(sources.DataSource.supported_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + def test_apply_network_disabled_when_unsupported( + self, m_ubuntu, m_macs + ): + """Don't apply network config if unsupported. + + Shouldn't work even when specified as userdata + """ + self._apply_network_setup(m_macs) + + self.init._cfg = {'updates': {'network': {'when': ['boot']}}} + self.init.apply_network_config(True) + self.init.distro.apply_network_config.assert_not_called() + assert ( + "No network config applied. 
Neither a new instance nor datasource " + "network update allowed" + ) in self.logs.getvalue() + + class TestInit_InitializeFilesystem: """Tests for cloudinit.stages.Init._initialize_filesystem. diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst index 10e8228f..33c6b56a 100644 --- a/doc/rtd/index.rst +++ b/doc/rtd/index.rst @@ -49,6 +49,7 @@ Having trouble? We would like to help! topics/format.rst topics/examples.rst + topics/events.rst topics/modules.rst topics/merging.rst diff --git a/doc/rtd/topics/events.rst b/doc/rtd/topics/events.rst new file mode 100644 index 00000000..463208cc --- /dev/null +++ b/doc/rtd/topics/events.rst @@ -0,0 +1,83 @@ +.. _events: + +****************** +Events and Updates +****************** + +Events +====== + +`Cloud-init`_ will fetch and apply cloud and user data configuration +upon several event types. The two most common events for cloud-init +are an instance's first boot and any subsequent boot thereafter (reboot). +In addition to boot events, cloud-init users and vendors are interested +in when devices are added. cloud-init currently supports the following +event types: + +- **BOOT_NEW_INSTANCE**: New instance first boot +- **BOOT**: Any system boot other than 'BOOT_NEW_INSTANCE' +- **BOOT_LEGACY**: Similar to 'BOOT', but applies networking config twice each + boot: once during Local stage, then again in Network stage. As this behavior + was previously the default behavior, this option exists to prevent regressing + such behavior. + +Future work will likely include infrastructure and support for the following +events: + +- **HOTPLUG**: Dynamic add of a system device +- **METADATA_CHANGE**: An instance's metadata has changed +- **USER_REQUEST**: Directed request to update + +Datasource Event Support +======================== + +All :ref:`datasources` by default support the ``BOOT_NEW_INSTANCE`` event. +Each Datasource will declare a set of these events that it is capable of +handling. Datasources may not support all event types. In some cases a system +may be configured to allow a particular event but may be running on +a platform whose datasource cannot support the event. + +Configuring Event Updates +========================= + +Update configuration may be specified via user data, +which can be used to enable or disable handling of specific events. +This configuration will be honored as long as the events are supported by +the datasource. However, configuration will always be applied at first +boot, regardless of the user data specified. + +Updates +~~~~~~~ +Update policy configuration defines which +events are allowed to be handled. This is separate from whether a +particular platform or datasource has the capability for such events. + +**scope**: *<name of the scope>* + +The ``scope`` value is a string which defines the domain in which the +event occurs. Currently the only known scope is ``network``, though more +scopes may be added in the future. Scopes are defined by convention but +arbitrary values can be used. + +**when**: *<list of events to handle>* + +Each ``scope`` requires a ``when`` element to specify which events +are allowed to be handled. + + +Examples +======== + +apply network config every boot +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +On every boot, apply network configuration found in the datasource. + +.. code-block:: shell-session + + # apply network config on every boot + updates: + network: + when: ['boot'] + +.. _Cloud-init: https://launchpad.net/cloud-init +..
vi: textwidth=78 diff --git a/tests/integration_tests/modules/test_user_events.py b/tests/integration_tests/modules/test_user_events.py new file mode 100644 index 00000000..a45cad72 --- /dev/null +++ b/tests/integration_tests/modules/test_user_events.py @@ -0,0 +1,95 @@ +"""Test user-overridable events. + +This is currently limited to applying network config on BOOT events. +""" + +import pytest +import re +import yaml + +from tests.integration_tests.instances import IntegrationInstance + + +def _add_dummy_bridge_to_netplan(client: IntegrationInstance): + # Update netplan configuration to ensure it doesn't change on reboot + netplan = yaml.safe_load( + client.execute('cat /etc/netplan/50-cloud-init.yaml') + ) + # Just a dummy bridge to do nothing + try: + netplan['network']['bridges']['dummy0'] = {'dhcp4': False} + except KeyError: + netplan['network']['bridges'] = {'dummy0': {'dhcp4': False}} + + dumped_netplan = yaml.dump(netplan) + client.write_to_file('/etc/netplan/50-cloud-init.yaml', dumped_netplan) + + +@pytest.mark.lxd_container +@pytest.mark.lxd_vm +@pytest.mark.ec2 +@pytest.mark.gce +@pytest.mark.oci +@pytest.mark.openstack +@pytest.mark.not_xenial +def test_boot_event_disabled_by_default(client: IntegrationInstance): + log = client.read_from_file('/var/log/cloud-init.log') + assert 'Applying network configuration' in log + assert 'dummy0' not in client.execute('ls /sys/class/net') + + _add_dummy_bridge_to_netplan(client) + client.execute('rm /var/log/cloud-init.log') + + client.restart() + log2 = client.read_from_file('/var/log/cloud-init.log') + + # We attempt to apply network config twice on every boot. + # Ensure neither time works. + assert 2 == len( + re.findall(r"Event Denied: scopes=\['network'\] EventType=boot[^-]", + log2) + ) + assert 2 == log2.count( + "Event Denied: scopes=['network'] EventType=boot-legacy" + ) + assert 2 == log2.count( + "No network config applied. 
Neither a new instance" + " nor datasource network update allowed" + ) + + assert 'dummy0' in client.execute('ls /sys/class/net') + + +def _test_network_config_applied_on_reboot(client: IntegrationInstance): + log = client.read_from_file('/var/log/cloud-init.log') + assert 'Applying network configuration' in log + assert 'dummy0' not in client.execute('ls /sys/class/net') + + _add_dummy_bridge_to_netplan(client) + client.execute('rm /var/log/cloud-init.log') + client.restart() + log = client.read_from_file('/var/log/cloud-init.log') + + assert 'Event Allowed: scope=network EventType=boot' in log + assert 'Applying network configuration' in log + assert 'dummy0' not in client.execute('ls /sys/class/net') + + +@pytest.mark.azure +@pytest.mark.not_xenial +def test_boot_event_enabled_by_default(client: IntegrationInstance): + _test_network_config_applied_on_reboot(client) + + +USER_DATA = """\ +#cloud-config +updates: + network: + when: [boot] +""" + + +@pytest.mark.not_xenial +@pytest.mark.user_data(USER_DATA) +def test_boot_event_enabled(client: IntegrationInstance): + _test_network_config_applied_on_reboot(client) diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 742d1faa..54e06119 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -3163,8 +3163,8 @@ class TestRemoveUbuntuNetworkConfigScripts(CiTestCase): expected_logs = [ 'INFO: Removing Ubuntu extended network scripts because cloud-init' - ' updates Azure network configuration on the following event:' - ' System boot.', + ' updates Azure network configuration on the following events:' + " ['boot', 'boot-legacy']", 'Recursively deleting %s' % subdir, 'Attempting to remove %s' % file1] for log in expected_logs: diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 5847a384..9c499672 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -29,7 +29,7 @@ from cloudinit.sources.DataSourceSmartOS import ( convert_smartos_network_data as convert_net, SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ, identify_file) -from cloudinit.event import EventType +from cloudinit.event import EventScope, EventType from cloudinit import helpers as c_helpers from cloudinit.util import (b64e, write_file) @@ -653,8 +653,12 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): def test_reconfig_network_on_boot(self): # Test to ensure that network is configured from metadata on each boot dsrc = self._get_ds(mockdata=MOCK_RETURNS) - self.assertSetEqual(set([EventType.BOOT_NEW_INSTANCE, EventType.BOOT]), - dsrc.update_events['network']) + self.assertSetEqual( + {EventType.BOOT_NEW_INSTANCE, + EventType.BOOT, + EventType.BOOT_LEGACY}, + dsrc.default_update_events[EventScope.NETWORK] + ) class TestIdentifyFile(CiTestCase): diff --git a/tox.ini b/tox.ini index bf8cb78b..a2981b98 100644 --- a/tox.ini +++ b/tox.ini @@ -174,7 +174,7 @@ markers = gce: test will only run on GCE platform azure: test will only run on Azure platform oci: test will only run on OCI platform - openstack: test will only run on openstack + openstack: test will only run on openstack platform lxd_config_dict: set the config_dict passed on LXD instance creation lxd_container: test will only run in LXD container lxd_use_exec: `execute` will use `lxc exec` instead of SSH -- cgit v1.2.3 From 4c3c36297ad199ee9325a48f7e56a9c099ec183f Mon Sep 17 
00:00:00 2001 From: James Falcon Date: Fri, 14 May 2021 14:38:56 -0500 Subject: Add integration test for lp-1920939 (#891) In #856 we added the ability to use partprobe instead of blockdev for reading partitions. Test that partprobe succeeds where blockdev fails. Also add a mechanism to our integration tests to allow a callable to be called between `lxc init` and `lxc start` --- tests/integration_tests/bugs/test_lp1920939.py | 140 +++++++++++++++++++++++++ tests/integration_tests/clouds.py | 19 ++-- tests/integration_tests/conftest.py | 8 +- tox.ini | 1 + 4 files changed, 159 insertions(+), 9 deletions(-) create mode 100644 tests/integration_tests/bugs/test_lp1920939.py (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/bugs/test_lp1920939.py b/tests/integration_tests/bugs/test_lp1920939.py new file mode 100644 index 00000000..408792a6 --- /dev/null +++ b/tests/integration_tests/bugs/test_lp1920939.py @@ -0,0 +1,140 @@ +""" +Test that disk setup can run successfully on a mounted partition when +partprobe is being used. + +lp-1920939 +""" +import json +import os +import pytest +from uuid import uuid4 +from pycloudlib.lxd.instance import LXDInstance + +from cloudinit.subp import subp +from tests.integration_tests.instances import IntegrationInstance + +DISK_PATH = '/tmp/test_disk_setup_{}'.format(uuid4()) + + +def setup_and_mount_lxd_disk(instance: LXDInstance): + subp('lxc config device add {} test-disk-setup-disk disk source={}'.format( + instance.name, DISK_PATH).split()) + + +@pytest.yield_fixture +def create_disk(): + # 640k should be enough for anybody + subp('dd if=/dev/zero of={} bs=1k count=640'.format(DISK_PATH).split()) + yield + os.remove(DISK_PATH) + + +USERDATA = """\ +#cloud-config +disk_setup: + /dev/sdb: + table_type: mbr + layout: [50, 50] + overwrite: True +fs_setup: + - label: test + device: /dev/sdb1 + filesystem: ext4 + - label: test2 + device: /dev/sdb2 + filesystem: ext4 +mounts: +- ["/dev/sdb1", "/mnt1"] +- ["/dev/sdb2", "/mnt2"] +""" + +UPDATED_USERDATA = """\ +#cloud-config +disk_setup: + /dev/sdb: + table_type: mbr + layout: [100] + overwrite: True +fs_setup: + - label: test3 + device: /dev/sdb1 + filesystem: ext4 +mounts: +- ["/dev/sdb1", "/mnt3"] +""" + + +def _verify_first_disk_setup(client, log): + assert 'Traceback' not in log + assert 'WARN' not in log + lsblk = json.loads(client.execute('lsblk --json')) + sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0] + assert len(sdb['children']) == 2 + assert sdb['children'][0]['name'] == 'sdb1' + assert sdb['children'][0]['mountpoint'] == '/mnt1' + assert sdb['children'][1]['name'] == 'sdb2' + assert sdb['children'][1]['mountpoint'] == '/mnt2' + + +@pytest.mark.user_data(USERDATA) +@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk) +@pytest.mark.ubuntu +@pytest.mark.lxd_vm +# Not bionic or xenial because the LXD agent gets in the way of us +# changing the userdata +@pytest.mark.not_bionic +@pytest.mark.not_xenial +def test_disk_setup_when_mounted(create_disk, client: IntegrationInstance): + """Test lp-1920939. + + We insert an extra disk into our VM, format it to have two partitions, + modify our cloud config to mount devices before disk setup, and modify + our userdata to setup a single partition on the disk. + + This allows cloud-init to attempt disk setup on a mounted partition. + When blockdev is in use, it will fail with + "blockdev: ioctl error on BLKRRPART: Device or resource busy" along + with a warning and a traceback. 
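(Roughly speaking: the BLKRRPART ioctl that `blockdev --rereadpt` issues is refused with EBUSY while any partition on the disk is mounted, whereas partprobe updates the kernel's partition view through per-partition BLKPG calls and so is not blocked.)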
When partprobe is in use, everything + should work successfully. + """ + log = client.read_from_file('/var/log/cloud-init.log') + _verify_first_disk_setup(client, log) + + # Update our userdata and cloud.cfg to mount then perform new disk setup + client.write_to_file( + '/var/lib/cloud/seed/nocloud-net/user-data', + UPDATED_USERDATA + ) + client.execute("sed -i 's/write-files/write-files\\n - mounts/' " + "/etc/cloud/cloud.cfg") + + client.execute('cloud-init clean --logs') + client.restart() + + # Re-read the log and assert the new setup works as expected + log = client.read_from_file('/var/log/cloud-init.log') + assert 'Traceback' not in log and 'WARN' not in log + + lsblk = json.loads(client.execute('lsblk --json')) + sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0] + assert len(sdb['children']) == 1 + assert sdb['children'][0]['name'] == 'sdb1' + assert sdb['children'][0]['mountpoint'] == '/mnt3' + + +@pytest.mark.user_data(USERDATA) +@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk) +@pytest.mark.ubuntu +@pytest.mark.lxd_vm +def test_disk_setup_no_partprobe(create_disk, client: IntegrationInstance): + """Ensure disk setup still works as expected without partprobe.""" + # We can't do this part in a bootcmd because the path has already + # been found by the time we get to the bootcmd + client.execute('rm $(which partprobe)') + client.execute('cloud-init clean --logs') + client.restart() + + log = client.read_from_file('/var/log/cloud-init.log') + _verify_first_disk_setup(client, log) + + assert 'partprobe' not in log diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index 3bbccb44..1378b215 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -142,12 +142,12 @@ class IntegrationCloud(ABC): except (ValueError, IndexError): return image.image_id - def _perform_launch(self, launch_kwargs): + def _perform_launch(self, launch_kwargs, **kwargs): pycloudlib_instance = self.cloud_instance.launch(**launch_kwargs) return pycloudlib_instance def launch(self, user_data=None, launch_kwargs=None, - settings=integration_settings): + settings=integration_settings, **kwargs): if launch_kwargs is None: launch_kwargs = {} if self.settings.EXISTING_INSTANCE_ID: @@ -158,21 +158,21 @@ class IntegrationCloud(ABC): self.settings.EXISTING_INSTANCE_ID ) return - kwargs = { + default_launch_kwargs = { 'image_id': self.image_id, 'user_data': user_data, } - kwargs.update(launch_kwargs) + launch_kwargs = {**default_launch_kwargs, **launch_kwargs} log.info( "Launching instance with launch_kwargs:\n%s", - "\n".join("{}={}".format(*item) for item in kwargs.items()) + "\n".join("{}={}".format(*item) for item in launch_kwargs.items()) ) with emit_dots_on_travis(): - pycloudlib_instance = self._perform_launch(kwargs) + pycloudlib_instance = self._perform_launch(launch_kwargs, **kwargs) log.info('Launched instance: %s', pycloudlib_instance) instance = self.get_instance(pycloudlib_instance, settings) - if kwargs.get('wait', True): + if launch_kwargs.get('wait', True): # If we aren't waiting, we can't rely on command execution here log.info( 'cloud-init version: %s', @@ -292,7 +292,7 @@ class _LxdIntegrationCloud(IntegrationCloud): ).format(**format_variables) subp(command.split()) - def _perform_launch(self, launch_kwargs): + def _perform_launch(self, launch_kwargs, **kwargs): launch_kwargs['inst_type'] = launch_kwargs.pop('instance_type', None) wait = launch_kwargs.pop('wait', True) release = launch_kwargs.pop('image_id') @@ -310,6 +310,9 @@ class _LxdIntegrationCloud(IntegrationCloud): ) if
self.settings.CLOUD_INIT_SOURCE == 'IN_PLACE': self._mount_source(pycloudlib_instance) + if 'lxd_setup' in kwargs: + log.info("Running callback specified by 'lxd_setup' mark") + kwargs['lxd_setup'](pycloudlib_instance) pycloudlib_instance.start(wait=wait) return pycloudlib_instance diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index 6f4ce8d3..5a543e39 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -213,6 +213,7 @@ def _client(request, fixture_utils, session_cloud: IntegrationCloud): user_data = getter('user_data') name = getter('instance_name') lxd_config_dict = getter('lxd_config_dict') + lxd_setup = getter('lxd_setup') lxd_use_exec = fixture_utils.closest_marker_args_or( request, 'lxd_use_exec', None ) @@ -238,9 +239,14 @@ def _client(request, fixture_utils, session_cloud: IntegrationCloud): # run anywhere else. A failure flags up this discrepancy. pytest.fail(XENIAL_LXD_VM_EXEC_MSG) launch_kwargs["execute_via_ssh"] = False + local_launch_kwargs = {} + if lxd_setup is not None: + if not isinstance(session_cloud, _LxdIntegrationCloud): + pytest.skip('lxd_setup requires LXD') + local_launch_kwargs['lxd_setup'] = lxd_setup with session_cloud.launch( - user_data=user_data, launch_kwargs=launch_kwargs + user_data=user_data, launch_kwargs=launch_kwargs, **local_launch_kwargs ) as instance: if lxd_use_exec is not None: # Existing instances are not affected by the launch kwargs, so diff --git a/tox.ini b/tox.ini index a2981b98..9374a1cb 100644 --- a/tox.ini +++ b/tox.ini @@ -177,6 +177,7 @@ markers = openstack: test will only run on openstack platform lxd_config_dict: set the config_dict passed on LXD instance creation lxd_container: test will only run in LXD container + lxd_setup: specify callable to be called between init and start lxd_use_exec: `execute` will use `lxc exec` instead of SSH lxd_vm: test will only run in LXD VM not_xenial: test cannot run on the xenial release -- cgit v1.2.3 From 3611befbe4fbe8cddd39ffe6d0c05545d59177d7 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 19 May 2021 12:14:06 -0500 Subject: Add integration test for #868 (#901) Ensure no Traceback when 'chef_license' is set --- tests/integration_tests/bugs/test_gh868.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 tests/integration_tests/bugs/test_gh868.py (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/bugs/test_gh868.py b/tests/integration_tests/bugs/test_gh868.py new file mode 100644 index 00000000..31052dcb --- /dev/null +++ b/tests/integration_tests/bugs/test_gh868.py @@ -0,0 +1,19 @@ +"""Ensure no Traceback when 'chef_license' is set""" +import pytest +from tests.integration_tests.instances import IntegrationInstance + + +USERDATA = """\ +#cloud-config +chef: + install_type: omnibus + chef_license: accept + server_url: https://chef.yourorg.invalid + validation_name: some-validator +""" + + +@pytest.mark.user_data(USERDATA) +def test_chef_license(client: IntegrationInstance): + log = client.read_from_file('/var/log/cloud-init.log') + assert 'Traceback' not in log -- cgit v1.2.3 From 59a3d84562a0fdeb16a4a566371294790d110ad4 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 14 Jun 2021 21:37:15 -0500 Subject: testing: OCI availability domain is now required (SC-59) (#910) --- integration-requirements.txt | 2 +- tests/integration_tests/clouds.py | 9 ++++++++- tests/integration_tests/integration_settings.py | 7 +++++++ 3 files changed, 16 insertions(+), 2
deletions(-) (limited to 'tests/integration_tests') diff --git a/integration-requirements.txt b/integration-requirements.txt index 95891356..8b6a9514 100644 --- a/integration-requirements.txt +++ b/integration-requirements.txt @@ -1,5 +1,5 @@ # PyPI requirements for cloud-init integration testing # https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html # -pycloudlib @ git+https://github.com/canonical/pycloudlib.git@96b146ee1beb99b8e44e36525e18a9a20e00c3f2 +pycloudlib @ git+https://github.com/canonical/pycloudlib.git@c128d4d87b226b9c294de167ed753e644cf0526a pytest diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index 1378b215..4a0d3008 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -247,8 +247,15 @@ class OciCloud(IntegrationCloud): integration_instance_cls = IntegrationOciInstance def _get_cloud_instance(self): + if not integration_settings.ORACLE_AVAILABILITY_DOMAIN: + raise Exception( + 'ORACLE_AVAILABILITY_DOMAIN must be set to a valid ' + 'availability domain. If using the oracle CLI, ' + 'try `oci iam availability-domain list`' + ) return OCI( - tag='oci-integration-test' + tag='oci-integration-test', + availability_domain=integration_settings.ORACLE_AVAILABILITY_DOMAIN ) diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py index 0703be58..e4a790c2 100644 --- a/tests/integration_tests/integration_settings.py +++ b/tests/integration_tests/integration_settings.py @@ -97,6 +97,13 @@ KEYPAIR_NAME = None # in `openstack network list` OPENSTACK_NETWORK = None +################################################################## +# OCI SETTINGS +################################################################## +# Availability domain to use for Oracle. 
Should be one of the names found + # in `oci iam availability-domain list` +ORACLE_AVAILABILITY_DOMAIN = None + ################################################################## # USER SETTINGS OVERRIDES ################################################################## -- cgit v1.2.3 From fbcb224bc12495ba200ab107246349d802c5d8e6 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 17 Jun 2021 14:02:44 -0500 Subject: tests: Add 'adhoc' mark for integration tests (#925) Also new jenkins tox definition --- tests/integration_tests/bugs/test_gh868.py | 1 + tox.ini | 11 ++++++++++- 2 files changed, 11 insertions(+), 1 deletion(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/bugs/test_gh868.py b/tests/integration_tests/bugs/test_gh868.py index 31052dcb..838efca6 100644 --- a/tests/integration_tests/bugs/test_gh868.py +++ b/tests/integration_tests/bugs/test_gh868.py @@ -13,6 +13,7 @@ chef: """ +@pytest.mark.adhoc # Can't be regularly reaching out to chef install script @pytest.mark.user_data(USERDATA) def test_chef_license(client: IntegrationInstance): log = client.read_from_file('/var/log/cloud-init.log') diff --git a/tox.ini b/tox.ini index 9374a1cb..f21e1186 100644 --- a/tox.ini +++ b/tox.ini @@ -157,7 +157,15 @@ passenv = CLOUD_INIT_* SSH_AUTH_SOCK OS_* TRAVIS deps = -r{toxinidir}/integration-requirements.txt setenv = - PYTEST_ADDOPTS="-m ci" + PYTEST_ADDOPTS="-m ci and not adhoc" + +[testenv:integration-tests-jenkins] +commands = {envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests} +passenv = CLOUD_INIT_* SSH_AUTH_SOCK OS_* +deps = + -r{toxinidir}/integration-requirements.txt +setenv = + PYTEST_ADDOPTS="-m not adhoc" [pytest] # TODO: s/--strict/--strict-markers/ once xenial support is dropped @@ -190,3 +198,4 @@ markers = sru_next: test is part of the next SRU verification ubuntu: this test should run on Ubuntu unstable: skip this test because it is flakey + adhoc: only run on adhoc basis, not in any CI environment (travis or jenkins) -- cgit v1.2.3 From 78e89b03ecb29e7df3181b1219a0b5f44b9d7532 Mon Sep 17 00:00:00 2001 From: Robert Schweikert Date: Thu, 1 Jul 2021 12:35:40 -0400 Subject: - Detect a Python version change and clear the cache (#857) summary: Clear cache when a Python version change is detected When a distribution gets updated, it is possible that the Python version changes. Python makes no guarantee that pickle is consistent across versions; as such, we need to purge the cache and start over. Co-authored-by: James Falcon --- cloudinit/cmd/main.py | 30 +++++++++++ cloudinit/cmd/tests/test_main.py | 2 + .../assets/test_version_change.pkl | Bin 0 -> 21 bytes .../modules/test_ssh_auth_key_fingerprints.py | 2 +- .../modules/test_version_change.py | 56 +++++++++++++++++++++ tests/integration_tests/util.py | 4 ++ 6 files changed, 93 insertions(+), 1 deletion(-) create mode 100644 tests/integration_tests/assets/test_version_change.pkl create mode 100644 tests/integration_tests/modules/test_version_change.py (limited to 'tests/integration_tests') diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index baf1381f..21213a4a 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -210,6 +210,35 @@ def attempt_cmdline_url(path, network=True, cmdline=None): (cmdline_name, url, path)) +def purge_cache_on_python_version_change(init): + """Purge the cache if python version changed on us.
+ + There could be changes not represented in our cache (obj.pkl) after we + upgrade to a new version of python, so at that point clear the cache + """ + current_python_version = '%d.%d' % ( + sys.version_info.major, sys.version_info.minor + ) + python_version_path = os.path.join( + init.paths.get_cpath('data'), 'python-version' + ) + if os.path.exists(python_version_path): + cached_python_version = open(python_version_path).read() + # The Python version has changed out from under us, anything that was + # pickled previously is likely useless due to API changes. + if cached_python_version != current_python_version: + LOG.debug('Python version change detected. Purging cache') + init.purge_cache(True) + util.write_file(python_version_path, current_python_version) + else: + if os.path.exists(init.paths.get_ipath_cur('obj_pkl')): + LOG.info( + 'Writing python-version file. ' + 'Cache compatibility status is currently unknown.' + ) + util.write_file(python_version_path, current_python_version) + + def main_init(name, args): deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK] if args.local: @@ -276,6 +305,7 @@ def main_init(name, args): util.logexc(LOG, "Failed to initialize, likely bad things to come!") # Stage 4 path_helper = init.paths + purge_cache_on_python_version_change(init) mode = sources.DSMODE_LOCAL if args.local else sources.DSMODE_NETWORK if mode == sources.DSMODE_NETWORK: diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py index 78b27441..1f5975b0 100644 --- a/cloudinit/cmd/tests/test_main.py +++ b/cloudinit/cmd/tests/test_main.py @@ -17,6 +17,8 @@ myargs = namedtuple('MyArgs', 'debug files force local reporter subcommand') class TestMain(FilesystemMockingTestCase): + with_logs = True + allowed_subp = False def setUp(self): super(TestMain, self).setUp() diff --git a/tests/integration_tests/assets/test_version_change.pkl b/tests/integration_tests/assets/test_version_change.pkl new file mode 100644 index 00000000..65ae93e5 Binary files /dev/null and b/tests/integration_tests/assets/test_version_change.pkl differ diff --git a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py index b9b0d85e..e1946cb1 100644 --- a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py +++ b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py @@ -18,7 +18,7 @@ USER_DATA_SSH_AUTHKEY_DISABLE = """\ no_ssh_fingerprints: true """ -USER_DATA_SSH_AUTHKEY_ENABLE="""\ +USER_DATA_SSH_AUTHKEY_ENABLE = """\ #cloud-config ssh_genkeytypes: - ecdsa diff --git a/tests/integration_tests/modules/test_version_change.py b/tests/integration_tests/modules/test_version_change.py new file mode 100644 index 00000000..4e9ab63f --- /dev/null +++ b/tests/integration_tests/modules/test_version_change.py @@ -0,0 +1,56 @@ +from pathlib import Path + +from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.util import ASSETS_DIR + + +PICKLE_PATH = Path('/var/lib/cloud/instance/obj.pkl') +TEST_PICKLE = ASSETS_DIR / 'test_version_change.pkl' + + +def _assert_no_pickle_problems(log): + assert 'Failed loading pickled blob' not in log + assert 'Traceback' not in log + assert 'WARN' not in log + + +def test_reboot_without_version_change(client: IntegrationInstance): + log = client.read_from_file('/var/log/cloud-init.log') + assert 'Python version change detected' not in log + assert 'Cache compatibility status is currently unknown.' 
not in log + _assert_no_pickle_problems(log) + + client.restart() + log = client.read_from_file('/var/log/cloud-init.log') + assert 'Python version change detected' not in log + assert 'Could not determine Python version used to write cache' not in log + _assert_no_pickle_problems(log) + + # Now ensure that loading a bad pickle gives us problems + client.push_file(TEST_PICKLE, PICKLE_PATH) + client.restart() + log = client.read_from_file('/var/log/cloud-init.log') + assert 'Failed loading pickled blob from {}'.format(PICKLE_PATH) in log + + +def test_cache_purged_on_version_change(client: IntegrationInstance): + # Start by pushing the invalid pickle so we'll hit an error if the + # cache didn't actually get purged + client.push_file(TEST_PICKLE, PICKLE_PATH) + client.execute("echo '1.0' > /var/lib/cloud/data/python-version") + client.restart() + log = client.read_from_file('/var/log/cloud-init.log') + assert 'Python version change detected. Purging cache' in log + _assert_no_pickle_problems(log) + + +def test_log_message_on_missing_version_file(client: IntegrationInstance): + # Start by pushing a pickle so we can see the log message + client.push_file(TEST_PICKLE, PICKLE_PATH) + client.execute("rm /var/lib/cloud/data/python-version") + client.restart() + log = client.read_from_file('/var/log/cloud-init.log') + assert ( + 'Writing python-version file. ' + 'Cache compatibility status is currently unknown.' + ) in log diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py index 3ef12358..8d726bb2 100644 --- a/tests/integration_tests/util.py +++ b/tests/integration_tests/util.py @@ -3,10 +3,14 @@ import multiprocessing import os import time from contextlib import contextmanager +from pathlib import Path log = logging.getLogger('integration_testing') +ASSETS_DIR = Path('tests/integration_tests/assets') + + def verify_ordered_items_in_text(to_verify: list, text: str): """Assert all items in list appear in order in text. -- cgit v1.2.3 From 36aeb49cb966d6ab576d4ce7cd2e815079c541a3 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 2 Jul 2021 18:01:58 -0500 Subject: testing: simplify test_upgrade.py (#932) test_upgrade.py was outputting a ton of stuff that had to be manually collected and verified. This commit adds more assertions to the test and outputs directly to the logs rather than separate files. 
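In outline, the reworked comparison boils down to the following before/after pattern (a minimal illustrative sketch, not the literal test code; `instance` and `source` stand in for the fixture objects the real test receives, and the command list is abbreviated):

    import logging

    LOG = logging.getLogger('integration_testing.test_upgrade')

    def upgrade_and_compare(instance, source):
        checks = ['hostname', 'cloud-id', 'cat /run/cloud-init/result.json']
        pre = {cmd: instance.execute(cmd) for cmd in checks}
        instance.install_new_cloud_init(source, take_snapshot=False)
        instance.restart()
        post = {cmd: instance.execute(cmd) for cmd in checks}
        for cmd in checks:
            # state that must survive the upgrade is asserted in place...
            assert pre[cmd] == post[cmd]
        # ...while timing output is simply logged, not written to files
        LOG.info('systemd-analyze after upgrade:\n%s',
                 instance.execute('systemd-analyze'))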
--- tests/integration_tests/clouds.py | 2 +- tests/integration_tests/test_upgrade.py | 164 ++++++++++++++++++-------------- 2 files changed, 96 insertions(+), 70 deletions(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index 4a0d3008..f2362b5d 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -157,7 +157,7 @@ class IntegrationCloud(ABC): self.instance = self.cloud_instance.get_instance( self.settings.EXISTING_INSTANCE_ID ) - return + return self.instance default_launch_kwargs = { 'image_id': self.image_id, 'user_data': user_data, } diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py index 7478a1b9..5c7a2e1e 100644 --- a/tests/integration_tests/test_upgrade.py +++ b/tests/integration_tests/test_upgrade.py @@ -1,17 +1,35 @@ +import json import logging import os import pytest -import time -from pathlib import Path -from tests.integration_tests.clouds import ImageSpecification, IntegrationCloud -from tests.integration_tests.conftest import ( - get_validated_source, - session_start_time, -) +from tests.integration_tests.clouds import IntegrationCloud +from tests.integration_tests.conftest import get_validated_source + + +LOG = logging.getLogger('integration_testing.test_upgrade') + +LOG_TEMPLATE = """\n\ +=== `systemd-analyze` before: +{pre_systemd_analyze} +=== `systemd-analyze` after: +{post_systemd_analyze} +=== `systemd-analyze blame` before (first 10 lines): +{pre_systemd_blame} +=== `systemd-analyze blame` after (first 10 lines): +{post_systemd_blame} -log = logging.getLogger('integration_testing') +=== `cloud-init analyze show` before: +{pre_analyze_totals} +=== `cloud-init analyze show` after: +{post_analyze_totals} + +=== `cloud-init analyze blame` before (first 10 lines): +{pre_cloud_blame} +=== `cloud-init analyze blame` after (first 10 lines): +{post_cloud_blame} +""" UNSUPPORTED_INSTALL_METHOD_MSG = ( "Install method '{}' not supported for this test" @@ -22,43 +40,6 @@ hostname: SRU-worked """ -def _output_to_compare(instance, file_path, netcfg_path): - commands = [ - 'hostname', - 'dpkg-query --show cloud-init', - 'cat /run/cloud-init/result.json', - # 'cloud-init init' helps us understand if our pickling upgrade paths - # have broken across re-constitution of a cached datasource. Some - # platforms invalidate their datasource cache on reboot, so we run - # it here to ensure we get a dirty run.
- 'cloud-init init', - 'grep Trace /var/log/cloud-init.log', - 'cloud-id', - 'cat {}'.format(netcfg_path), - 'systemd-analyze', - 'systemd-analyze blame', - 'cloud-init analyze show', - 'cloud-init analyze blame', - ] - with file_path.open('w') as f: - for command in commands: - f.write('===== {} ====='.format(command) + '\n') - f.write(instance.execute(command) + '\n') - - -def _restart(instance): - # work around pad.lv/1908287 - instance.restart() - if not instance.execute('cloud-init status --wait --long').ok: - for _ in range(10): - time.sleep(5) - result = instance.execute('cloud-init status --wait --long') - if result.ok: - return - raise Exception("Cloud-init didn't finish starting up") - - -@pytest.mark.sru_2020_11 def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud): source = get_validated_source(session_cloud) if not source.installs_new_version(): @@ -69,37 +50,82 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud): 'image_id': session_cloud.released_image_id, } - image = ImageSpecification.from_os_image() - - # Get the paths to write test logs - output_dir = Path(session_cloud.settings.LOCAL_LOG_PATH) - output_dir.mkdir(parents=True, exist_ok=True) - base_filename = 'test_upgrade_{platform}_{os}_{{stage}}_{time}.log'.format( - platform=session_cloud.settings.PLATFORM, - os=image.release, - time=session_start_time, - ) - before_path = output_dir / base_filename.format(stage='before') - after_path = output_dir / base_filename.format(stage='after') - - # Get the network cfg file - netcfg_path = '/dev/null' - if image.os == 'ubuntu': - netcfg_path = '/etc/netplan/50-cloud-init.yaml' - if image.release == 'xenial': - netcfg_path = '/etc/network/interfaces.d/50-cloud-init.cfg' - with session_cloud.launch( launch_kwargs=launch_kwargs, user_data=USER_DATA, ) as instance: - _output_to_compare(instance, before_path, netcfg_path) + # get pre values + pre_hostname = instance.execute('hostname') + pre_cloud_id = instance.execute('cloud-id') + pre_result = instance.execute('cat /run/cloud-init/result.json') + pre_network = instance.execute('cat /etc/netplan/50-cloud-init.yaml') + pre_systemd_analyze = instance.execute('systemd-analyze') + pre_systemd_blame = instance.execute('systemd-analyze blame') + pre_cloud_analyze = instance.execute('cloud-init analyze show') + pre_cloud_blame = instance.execute('cloud-init analyze blame') + + # Ensure no issues pre-upgrade + assert not json.loads(pre_result)['v1']['errors'] + + log = instance.read_from_file('/var/log/cloud-init.log') + assert 'Traceback' not in log + assert 'WARN' not in log + + # Upgrade and reboot instance.install_new_cloud_init(source, take_snapshot=False) instance.execute('hostname something-else') - _restart(instance) + instance.restart() assert instance.execute('cloud-init status --wait --long').ok - _output_to_compare(instance, after_path, netcfg_path) - log.info('Wrote upgrade test logs to %s and %s', before_path, after_path) + # 'cloud-init init' helps us understand if our pickling upgrade paths + # have broken across re-constitution of a cached datasource. Some + # platforms invalidate their datasource cache on reboot, so we run + # it here to ensure we get a dirty run. 
+ assert instance.execute('cloud-init init').ok + + # get post values + post_hostname = instance.execute('hostname') + post_cloud_id = instance.execute('cloud-id') + post_result = instance.execute('cat /run/cloud-init/result.json') + post_network = instance.execute('cat /etc/netplan/50-cloud-init.yaml') + post_systemd_analyze = instance.execute('systemd-analyze') + post_systemd_blame = instance.execute('systemd-analyze blame') + post_cloud_analyze = instance.execute('cloud-init analyze show') + post_cloud_blame = instance.execute('cloud-init analyze blame') + + # Ensure no issues post-upgrade + assert not json.loads(post_result)['v1']['errors'] + + log = instance.read_from_file('/var/log/cloud-init.log') + assert 'Traceback' not in log + assert 'WARN' not in log + + # Ensure important things stayed the same + assert pre_hostname == post_hostname + assert pre_cloud_id == post_cloud_id + assert pre_result == post_result + assert pre_network == post_network + + # Calculate and log all the boot numbers + pre_analyze_totals = [ + x for x in pre_cloud_analyze.splitlines() + if x.startswith('Finished stage') or x.startswith('Total Time') + ] + post_analyze_totals = [ + x for x in post_cloud_analyze.splitlines() + if x.startswith('Finished stage') or x.startswith('Total Time') + ] + + # pylint: disable=logging-format-interpolation + LOG.info(LOG_TEMPLATE.format( + pre_systemd_analyze=pre_systemd_analyze, + post_systemd_analyze=post_systemd_analyze, + pre_systemd_blame='\n'.join(pre_systemd_blame.splitlines()[:10]), + post_systemd_blame='\n'.join(post_systemd_blame.splitlines()[:10]), + pre_analyze_totals='\n'.join(pre_analyze_totals), + post_analyze_totals='\n'.join(post_analyze_totals), + pre_cloud_blame='\n'.join(pre_cloud_blame.splitlines()[:10]), + post_cloud_blame='\n'.join(post_cloud_blame.splitlines()[:10]), + )) @pytest.mark.ci -- cgit v1.2.3 From 9b52405c6f0de5e00d5ee9c1d13540425d8f6bf5 Mon Sep 17 00:00:00 2001 From: Emanuele Giuseppe Esposito Date: Mon, 12 Jul 2021 20:21:02 +0200 Subject: ssh-util: allow cloudinit to merge all ssh keys into a custom user file, defined in AuthorizedKeysFile (#937) This patch aims to fix LP1911680 by analyzing the files provided in sshd_config and merging all keys into a user-specific file. Also introduces additional tests to cover this specific case. The file is picked by analyzing the path given in AuthorizedKeysFile. If it points inside the current user folder (path is /home/user/*), it means it is a user-specific file, so we can copy all user-keys there. If it contains a %u or %h, it means that there will be a specific authorized_keys file for each user, so we can copy all user-keys there. If no path points to a user-specific file, for example when only /etc/ssh/authorized_keys is given, default to ~/.ssh/authorized_keys. Note that if there are more than one user-specific file, the last one will be picked.
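In rough pseudocode, that selection rule amounts to the following (an illustrative sketch under the description above, not the exact ssh_util code; see the diff below for the real implementation):

    import os

    def pick_user_keyfile(configured_paths, rendered_paths, home):
        # default when nothing user-specific is configured
        chosen = os.path.join(home, '.ssh', 'authorized_keys')
        for raw, rendered in zip(configured_paths, rendered_paths):
            # user-specific means it used %u/%h or resolves under $HOME
            if '%u' in raw or '%h' in raw or rendered.startswith(home + '/'):
                chosen = rendered  # the last user-specific match wins
        return chosen

    # pick_user_keyfile(['/etc/ssh/authorized_keys', '%h/.ssh/keys2'],
    #                   ['/etc/ssh/authorized_keys', '/home/bob/.ssh/keys2'],
    #                   '/home/bob') -> '/home/bob/.ssh/keys2'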
Signed-off-by: Emanuele Giuseppe Esposito Co-authored-by: James Falcon LP: #1911680 RHBZ:1862967 --- cloudinit/ssh_util.py | 22 +- tests/integration_tests/assets/keys/id_rsa.test1 | 38 ++++ .../integration_tests/assets/keys/id_rsa.test1.pub | 1 + tests/integration_tests/assets/keys/id_rsa.test2 | 38 ++++ .../integration_tests/assets/keys/id_rsa.test2.pub | 1 + tests/integration_tests/assets/keys/id_rsa.test3 | 38 ++++ .../integration_tests/assets/keys/id_rsa.test3.pub | 1 + .../integration_tests/modules/test_ssh_keysfile.py | 85 +++++++ tests/integration_tests/util.py | 15 +- tests/unittests/test_sshutil.py | 246 ++++++++++++++++++++- 10 files changed, 470 insertions(+), 15 deletions(-) create mode 100644 tests/integration_tests/assets/keys/id_rsa.test1 create mode 100644 tests/integration_tests/assets/keys/id_rsa.test1.pub create mode 100644 tests/integration_tests/assets/keys/id_rsa.test2 create mode 100644 tests/integration_tests/assets/keys/id_rsa.test2.pub create mode 100644 tests/integration_tests/assets/keys/id_rsa.test3 create mode 100644 tests/integration_tests/assets/keys/id_rsa.test3.pub create mode 100644 tests/integration_tests/modules/test_ssh_keysfile.py (limited to 'tests/integration_tests') diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index c08042d6..89057262 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -252,13 +252,15 @@ def render_authorizedkeysfile_paths(value, homedir, username): def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): (ssh_dir, pw_ent) = users_ssh_info(username) default_authorizedkeys_file = os.path.join(ssh_dir, 'authorized_keys') + user_authorizedkeys_file = default_authorizedkeys_file auth_key_fns = [] with util.SeLinuxGuard(ssh_dir, recursive=True): try: ssh_cfg = parse_ssh_config_map(sshd_cfg_file) + key_paths = ssh_cfg.get("authorizedkeysfile", + "%h/.ssh/authorized_keys") auth_key_fns = render_authorizedkeysfile_paths( - ssh_cfg.get("authorizedkeysfile", "%h/.ssh/authorized_keys"), - pw_ent.pw_dir, username) + key_paths, pw_ent.pw_dir, username) except (IOError, OSError): # Give up and use a default key filename @@ -267,8 +269,22 @@ def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): "config from %r, using 'AuthorizedKeysFile' file " "%r instead", DEF_SSHD_CFG, auth_key_fns[0]) + # check if one of the keys is the user's one + for key_path, auth_key_fn in zip(key_paths.split(), auth_key_fns): + if any([ + '%u' in key_path, + '%h' in key_path, + auth_key_fn.startswith('{}/'.format(pw_ent.pw_dir)) + ]): + user_authorizedkeys_file = auth_key_fn + + if user_authorizedkeys_file != default_authorizedkeys_file: + LOG.debug( + "AuthorizedKeysFile has an user-specific authorized_keys, " + "using %s", user_authorizedkeys_file) + # always store all the keys in the user's private file - return (default_authorizedkeys_file, parse_authorized_keys(auth_key_fns)) + return (user_authorizedkeys_file, parse_authorized_keys(auth_key_fns)) def setup_user_keys(keys, username, options=None): diff --git a/tests/integration_tests/assets/keys/id_rsa.test1 b/tests/integration_tests/assets/keys/id_rsa.test1 new file mode 100644 index 00000000..bd4c822e --- /dev/null +++ b/tests/integration_tests/assets/keys/id_rsa.test1 @@ -0,0 +1,38 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn +NhAAAAAwEAAQAAAYEAtRlG96aJ23URvAgO/bBsuLl+lquc350aSwV98/i8vlvOn5GVcHye +t/rXQg4lZ4s0owG3kWyQFY8nvTk+G+UNU8fN0anAzBDi+4MzsejkF9scjTMFmXVrIpICqV 
+3bYQNjPv6r+ubQdkD01du3eB9t5/zl84gtshp0hBdofyz8u1/A25s7fVU67GyI7PdKvaS+ +yvJSInZnb2e9VQzfJC+qAnN7gUZatBKjdgUtJeiUUeDaVnaS17b0aoT9iBO0sIcQtOTBlY +lCjFt1TAMLZ64Hj3SfGZB7Yj0Z+LzFB2IWX1zzsjI68YkYPKOSL/NYhQU9e55kJQ7WnngN +HY/2n/A7dNKSFDmgM5c9IWgeZ7fjpsfIYAoJ/CAxFIND+PEHd1gCS6xoEhaUVyh5WH/Xkw +Kv1nx4AiZ2BFCE+75kySRLZUJ+5y0r3DU5ktMXeURzVIP7pu0R8DCul+GU+M/+THyWtAEO +geaNJ6fYpo2ipDhbmTYt3kk2lMIapRxGBFs+37sdAAAFgGGJssNhibLDAAAAB3NzaC1yc2 +EAAAGBALUZRvemidt1EbwIDv2wbLi5fparnN+dGksFffP4vL5bzp+RlXB8nrf610IOJWeL +NKMBt5FskBWPJ705PhvlDVPHzdGpwMwQ4vuDM7Ho5BfbHI0zBZl1ayKSAqld22EDYz7+q/ +rm0HZA9NXbt3gfbef85fOILbIadIQXaH8s/LtfwNubO31VOuxsiOz3Sr2kvsryUiJ2Z29n +vVUM3yQvqgJze4FGWrQSo3YFLSXolFHg2lZ2kte29GqE/YgTtLCHELTkwZWJQoxbdUwDC2 +euB490nxmQe2I9Gfi8xQdiFl9c87IyOvGJGDyjki/zWIUFPXueZCUO1p54DR2P9p/wO3TS +khQ5oDOXPSFoHme346bHyGAKCfwgMRSDQ/jxB3dYAkusaBIWlFcoeVh/15MCr9Z8eAImdg +RQhPu+ZMkkS2VCfuctK9w1OZLTF3lEc1SD+6btEfAwrpfhlPjP/kx8lrQBDoHmjSen2KaN +oqQ4W5k2Ld5JNpTCGqUcRgRbPt+7HQAAAAMBAAEAAAGBAJJCTOd70AC2ptEGbR0EHHqADT +Wgefy7A94tHFEqxTy0JscGq/uCGimaY7kMdbcPXT59B4VieWeAC2cuUPP0ZHQSfS5ke7oT +tU3N47U+0uBVbNS4rUAH7bOo2o9wptnOA5x/z+O+AARRZ6tEXQOd1oSy4gByLf2Wkh2QTi +vP6Hln1vlFgKEzcXg6G8fN3MYWxKRhWmZM3DLERMvorlqqSBLcs5VvfZfLKcsKWTExioAq +KgwEjYm8T9+rcpsw1xBus3j9k7wCI1Sus6PCDjq0pcYKLMYM7p8ygnU2tRYrOztdIxgWRA +w/1oenm1Mqq2tV5xJcBCwCLOGe6SFwkIRywOYc57j5McH98Xhhg9cViyyBdXy/baF0mro+ +qPhOsWDxqwD4VKZ9UmQ6O8kPNKcc7QcIpFJhcO0g9zbp/MT0KueaWYrTKs8y4lUkTT7Xz6 ++MzlR122/JwlAbBo6Y2kWtB+y+XwBZ0BfyJsm2czDhKm7OI5KfuBNhq0tFfKwOlYBq4QAA +AMAyvUof1R8LLISkdO3EFTKn5RGNkPPoBJmGs6LwvU7NSjjLj/wPQe4jsIBc585tvbrddp +60h72HgkZ5tqOfdeBYOKqX0qQQBHUEvI6M+NeQTQRev8bCHMLXQ21vzpClnrwNzlja359E +uTRfiPRwIlyPLhOUiClBDSAnBI9h82Hkk3zzsQ/xGfsPB7iOjRbW69bMRSVCRpeweCVmWC +77DTsEOq69V2TdljhQNIXE5OcOWonIlfgPiI74cdd+dLhzc/AAAADBAO1/JXd2kYiRyNkZ +aXTLcwiSgBQIYbobqVP3OEtTclr0P1JAvby3Y4cCaEhkenx+fBqgXAku5lKM+U1Q9AEsMk +cjIhaDpb43rU7GPjMn4zHwgGsEKd5pC1yIQ2PlK+cHanAdsDjIg+6RR+fuvid/mBeBOYXb +Py0sa3HyekLJmCdx4UEyNASoiNaGFLQVAqo+RACsXy6VMxFH5dqDYlvwrfUQLwxJmse9Vb +GEuuPAsklNugZqssC2XOIujFVUpslduQAAAMEAwzVHQVtsc3icCSzEAARpDTUdTbI29OhB +/FMBnjzS9/3SWfLuBOSm9heNCHs2jdGNb8cPdKZuY7S9Fx6KuVUPyTbSSYkjj0F4fTeC9g +0ym4p4UWYdF67WSWwLORkaG8K0d+G/CXkz8hvKUg6gcZWKBHAE1ROrHu1nsc8v7mkiKq4I +bnTw5Q9TgjbWcQWtgPq0wXyyl/K8S1SFdkMCTOHDD0RQ+jTV2WNGVwFTodIRHenX+Rw2g4 +CHbTWbsFrHR1qFAAAACmphbWVzQG5ld3Q= +-----END OPENSSH PRIVATE KEY----- diff --git a/tests/integration_tests/assets/keys/id_rsa.test1.pub b/tests/integration_tests/assets/keys/id_rsa.test1.pub new file mode 100644 index 00000000..3d2e26e1 --- /dev/null +++ b/tests/integration_tests/assets/keys/id_rsa.test1.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC1GUb3ponbdRG8CA79sGy4uX6Wq5zfnRpLBX3z+Ly+W86fkZVwfJ63+tdCDiVnizSjAbeRbJAVjye9OT4b5Q1Tx83RqcDMEOL7gzOx6OQX2xyNMwWZdWsikgKpXdthA2M+/qv65tB2QPTV27d4H23n/OXziC2yGnSEF2h/LPy7X8Dbmzt9VTrsbIjs90q9pL7K8lIidmdvZ71VDN8kL6oCc3uBRlq0EqN2BS0l6JRR4NpWdpLXtvRqhP2IE7SwhxC05MGViUKMW3VMAwtnrgePdJ8ZkHtiPRn4vMUHYhZfXPOyMjrxiRg8o5Iv81iFBT17nmQlDtaeeA0dj/af8Dt00pIUOaAzlz0haB5nt+Omx8hgCgn8IDEUg0P48Qd3WAJLrGgSFpRXKHlYf9eTAq/WfHgCJnYEUIT7vmTJJEtlQn7nLSvcNTmS0xd5RHNUg/um7RHwMK6X4ZT4z/5MfJa0AQ6B5o0np9imjaKkOFuZNi3eSTaUwhqlHEYEWz7fux0= test1@host diff --git a/tests/integration_tests/assets/keys/id_rsa.test2 b/tests/integration_tests/assets/keys/id_rsa.test2 new file mode 100644 index 00000000..5854d901 --- /dev/null +++ b/tests/integration_tests/assets/keys/id_rsa.test2 @@ -0,0 +1,38 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn 
+NhAAAAAwEAAQAAAYEAvK50D2PWOc4ikyHVRJS6tDhqzjL5cKiivID4p1X8BYCVw83XAEGO +LnItUyVXHNADlh6fpVq1NY6A2JVtygoPF6ZFx8ph7IWMmnhDdnxLLyGsbhd1M1tiXJD/R+ +3WnGHRJ4PKrQavMLgqHRrieV3QVVfjFSeo6jX/4TruP6ZmvITMZWJrXaGphxJ/pPykEdkO +i8AmKU9FNviojyPS2nNtj9B/635IdgWvrd7Vf5Ycsw9MR55LWSidwa856RH62Yl6LpEGTH +m1lJiMk1u88JPSqvohhaUkLKkFpcQwcB0m76W1KOyllJsmX8bNXrlZsI+WiiYI7Xl5vQm2 +17DEuNeavtPAtDMxu8HmTg2UJ55Naxehbfe2lx2k5kYGGw3i1O1OVN2pZ2/OB71LucYd/5 +qxPaz03wswcGOJYGPkNc40vdES/Scc7Yt8HsnZuzqkyOgzn0HiUCzoYUYLYTpLf+yGmwxS +yAEY056aOfkCsboKHOKiOmlJxNaZZFQkX1evep4DAAAFgC7HMbUuxzG1AAAAB3NzaC1yc2 +EAAAGBALyudA9j1jnOIpMh1USUurQ4as4y+XCooryA+KdV/AWAlcPN1wBBji5yLVMlVxzQ +A5Yen6VatTWOgNiVbcoKDxemRcfKYeyFjJp4Q3Z8Sy8hrG4XdTNbYlyQ/0ft1pxh0SeDyq +0GrzC4Kh0a4nld0FVX4xUnqOo1/+E67j+mZryEzGVia12hqYcSf6T8pBHZDovAJilPRTb4 +qI8j0tpzbY/Qf+t+SHYFr63e1X+WHLMPTEeeS1koncGvOekR+tmJei6RBkx5tZSYjJNbvP +CT0qr6IYWlJCypBaXEMHAdJu+ltSjspZSbJl/GzV65WbCPloomCO15eb0JttewxLjXmr7T +wLQzMbvB5k4NlCeeTWsXoW33tpcdpOZGBhsN4tTtTlTdqWdvzge9S7nGHf+asT2s9N8LMH +BjiWBj5DXONL3REv0nHO2LfB7J2bs6pMjoM59B4lAs6GFGC2E6S3/shpsMUsgBGNOemjn5 +ArG6ChziojppScTWmWRUJF9Xr3qeAwAAAAMBAAEAAAGASj/kkEHbhbfmxzujL2/P4Sfqb+ +aDXqAeGkwujbs6h/fH99vC5ejmSMTJrVSeaUo6fxLiBDIj6UWA0rpLEBzRP59BCpRL4MXV +RNxav/+9nniD4Hb+ug0WMhMlQmsH71ZW9lPYqCpfOq7ec8GmqdgPKeaCCEspH7HMVhfYtd +eHylwAC02lrpz1l5/h900sS5G9NaWR3uPA+xbzThDs4uZVkSidjlCNt1QZhDSSk7jA5n34 +qJ5UTGu9WQDZqyxWKND+RIyQuFAPGQyoyCC1FayHO2sEhT5qHuumL14Mn81XpzoXFoKyql +rhBDe+pHhKArBYt92Evch0k1ABKblFxtxLXcvk4Fs7pHi+8k4+Cnazej2kcsu1kURlMZJB +w2QT/8BV4uImbH05LtyscQuwGzpIoxqrnHrvg5VbohStmhoOjYybzqqW3/M0qhkn5JgTiy +dJcHRJisRnAcmbmEchYtLDi6RW1e022H4I9AFXQqyr5HylBq6ugtWcFCsrcX8ibZ8xAAAA +wQCAOPgwae6yZLkrYzRfbxZtGKNmhpI0EtNSDCHYuQQapFZJe7EFENs/VAaIiiut0yajGj +c3aoKcwGIoT8TUM8E3GSNW6+WidUOC7H6W+/6N2OYZHRBACGz820xO+UBCl2oSk+dLBlfr +IQzBGUWn5uVYCs0/2nxfCdFyHtMK8dMF/ypbdG+o1rXz5y9b7PVG6Mn+o1Rjsdkq7VERmy +Pukd8hwATOIJqoKl3TuFyBeYFLqe+0e7uTeswQFw17PF31VjAAAADBAOpJRQb8c6qWqsvv +vkve0uMuL0DfWW0G6+SxjPLcV6aTWL5xu0Grd8uBxDkkHU/CDrAwpchXyuLsvbw21Eje/u +U5k9nLEscWZwcX7odxlK+EfAY2Bf5+Hd9bH5HMzTRJH8KkWK1EppOLPyiDxz4LZGzPLVyv +/1PgSuvXkSWk1KIE4SvSemyxGX2tPVI6uO+URqevfnPOS1tMB7BMQlgkR6eh4bugx9UYx9 +mwlXonNa4dN0iQxZ7N4rKFBbT/uyB2bQAAAMEAzisnkD8k9Tn8uyhxpWLHwb03X4ZUUHDV +zu15e4a8dZ+mM8nHO986913Xz5JujlJKkGwFTvgWkIiR2zqTEauZHARH7gANpaweTm6lPd +E4p2S0M3ulY7xtp9lCFIrDhMPPkGq8SFZB6qhgucHcZSRLq6ZDou3S2IdNOzDTpBtkhRCS +0zFcdTLh3zZweoy8HGbW36bwB6s1CIL76Pd4F64i0Ms9CCCU6b+E5ArFhYQIsXiDbgHWbD +tZRSm2GEgnDGAvAAAACmphbWVzQG5ld3Q= +-----END OPENSSH PRIVATE KEY----- diff --git a/tests/integration_tests/assets/keys/id_rsa.test2.pub b/tests/integration_tests/assets/keys/id_rsa.test2.pub new file mode 100644 index 00000000..f3831a57 --- /dev/null +++ b/tests/integration_tests/assets/keys/id_rsa.test2.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC8rnQPY9Y5ziKTIdVElLq0OGrOMvlwqKK8gPinVfwFgJXDzdcAQY4uci1TJVcc0AOWHp+lWrU1joDYlW3KCg8XpkXHymHshYyaeEN2fEsvIaxuF3UzW2JckP9H7dacYdEng8qtBq8wuCodGuJ5XdBVV+MVJ6jqNf/hOu4/pma8hMxlYmtdoamHEn+k/KQR2Q6LwCYpT0U2+KiPI9Lac22P0H/rfkh2Ba+t3tV/lhyzD0xHnktZKJ3BrznpEfrZiXoukQZMebWUmIyTW7zwk9Kq+iGFpSQsqQWlxDBwHSbvpbUo7KWUmyZfxs1euVmwj5aKJgjteXm9CbbXsMS415q+08C0MzG7weZODZQnnk1rF6Ft97aXHaTmRgYbDeLU7U5U3alnb84HvUu5xh3/mrE9rPTfCzBwY4lgY+Q1zjS90RL9Jxzti3weydm7OqTI6DOfQeJQLOhhRgthOkt/7IabDFLIARjTnpo5+QKxugoc4qI6aUnE1plkVCRfV696ngM= test2@host diff --git a/tests/integration_tests/assets/keys/id_rsa.test3 b/tests/integration_tests/assets/keys/id_rsa.test3 new file mode 100644 index 00000000..2596c762 --- /dev/null +++ 
b/tests/integration_tests/assets/keys/id_rsa.test3 @@ -0,0 +1,38 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn +NhAAAAAwEAAQAAAYEApPG4MdkYQKD57/qreFrh9GRC22y66qZOWZWRjC887rrbvBzO69hV +yJpTIXleJEvpWiHYcjMR5G6NNFsnNtZ4fxDqmSc4vcFj53JsE/XNqLKq6psXadCb5vkNpG +bxA+Z5bJlzJ969PgJIIEbgc86sei4kgR2MuPWqtZbY5GkpNCTqWuLYeFK+14oFruA2nyWH +9MOIRDHK/d597psHy+LTMtymO7ZPhO571abKw6jvvwiSeDxVE9kV7KAQIuM9/S3gftvgQQ +ron3GL34pgmIabdSGdbfHqGDooryJhlbquJZELBN236KgRNTCAjVvUzjjQr1eRP3xssGwV +O6ECBGCQLl/aYogAgtwnwj9iXqtfiLK3EwlgjquU4+JQ0CVtLhG3gIZB+qoMThco0pmHTr +jtfQCwrztsBBFunSa2/CstuV1mQ5O5ZrZ6ACo9yPRBNkns6+CiKdtMtCtzi3k2RDz9jpYm +Pcak03Lr7IkdC1Tp6+jA+//yPHSO1o4CqW89IQzNAAAFgEUd7lZFHe5WAAAAB3NzaC1yc2 +EAAAGBAKTxuDHZGECg+e/6q3ha4fRkQttsuuqmTlmVkYwvPO6627wczuvYVciaUyF5XiRL +6Voh2HIzEeRujTRbJzbWeH8Q6pknOL3BY+dybBP1zaiyquqbF2nQm+b5DaRm8QPmeWyZcy +fevT4CSCBG4HPOrHouJIEdjLj1qrWW2ORpKTQk6lri2HhSvteKBa7gNp8lh/TDiEQxyv3e +fe6bB8vi0zLcpju2T4Tue9WmysOo778Ikng8VRPZFeygECLjPf0t4H7b4EEK6J9xi9+KYJ +iGm3UhnW3x6hg6KK8iYZW6riWRCwTdt+ioETUwgI1b1M440K9XkT98bLBsFTuhAgRgkC5f +2mKIAILcJ8I/Yl6rX4iytxMJYI6rlOPiUNAlbS4Rt4CGQfqqDE4XKNKZh0647X0AsK87bA +QRbp0mtvwrLbldZkOTuWa2egAqPcj0QTZJ7OvgoinbTLQrc4t5NkQ8/Y6WJj3GpNNy6+yJ +HQtU6evowPv/8jx0jtaOAqlvPSEMzQAAAAMBAAEAAAGAGaqbdPZJNdVWzyb8g6/wtSzc0n +Qq6dSTIJGLonq/So69HpqFAGIbhymsger24UMGvsXBfpO/1wH06w68HWZmPa+OMeLOi4iK +WTuO4dQ/+l5DBlq32/lgKSLcIpb6LhcxEdsW9j9Mx1dnjc45owun/yMq/wRwH1/q/nLIsV +JD3R9ZcGcYNDD8DWIm3D17gmw+qbG7hJES+0oh4n0xS2KyZpm7LFOEMDVEA8z+hE/HbryQ +vjD1NC91n+qQWD1wKfN3WZDRwip3z1I5VHMpvXrA/spHpa9gzHK5qXNmZSz3/dfA1zHjCR +2dHjJnrIUH8nyPfw8t+COC+sQBL3Nr0KUWEFPRM08cOcQm4ctzg17aDIZBONjlZGKlReR8 +1zfAw84Q70q2spLWLBLXSFblHkaOfijEbejIbaz2UUEQT27WD7RHAORdQlkx7eitk66T9d +DzIq/cpYhm5Fs8KZsh3PLldp9nsHbD2Oa9J9LJyI4ryuIW0mVwRdvPSiiYi3K+mDCpAAAA +wBe+ugEEJ+V7orb1f4Zez0Bd4FNkEc52WZL4CWbaCtM+ZBg5KnQ6xW14JdC8IS9cNi/I5P +yLsBvG4bWPLGgQruuKY6oLueD6BFnKjqF6ACUCiSQldh4BAW1nYc2U48+FFvo3ZQyudFSy +QEFlhHmcaNMDo0AIJY5Xnq2BG3nEX7AqdtZ8hhenHwLCRQJatDwSYBHDpSDdh9vpTnGp/2 +0jBz25Ko4UANzvSAc3sA4yN3jfpoM366TgdNf8x3g1v7yljQAAAMEA0HSQjzH5nhEwB58k +mYYxnBYp1wb86zIuVhAyjZaeinvBQSTmLow8sXIHcCVuD3CgBezlU2SX5d9YuvRU9rcthi +uzn4wWnbnzYy4SwzkMJXchUAkumFVD8Hq5TNPh2Z+033rLLE08EhYypSeVpuzdpFoStaS9 +3DUZA2bR/zLZI9MOVZRUcYImNegqIjOYHY8Sbj3/0QPV6+WpUJFMPvvedWhfaOsRMTA6nr +VLG4pxkrieVl0UtuRGbzD/exXhXVi7AAAAwQDKkJj4ez/+KZFYlZQKiV0BrfUFcgS6ElFM +2CZIEagCtu8eedrwkNqx2FUX33uxdvUTr4c9I3NvWeEEGTB9pgD4lh1x/nxfuhyGXtimFM +GnznGV9oyz0DmKlKiKSEGwWf5G+/NiiCwwVJ7wsQQm7TqNtkQ9b8MhWWXC7xlXKUs7dmTa +e8AqAndCCMEnbS1UQFO/R5PNcZXkFWDggLQ/eWRYKlrXgdnUgH6h0saOcViKpNJBUXb3+x +eauhOY52PS/BcAAAAKamFtZXNAbmV3dAE= +-----END OPENSSH PRIVATE KEY----- diff --git a/tests/integration_tests/assets/keys/id_rsa.test3.pub b/tests/integration_tests/assets/keys/id_rsa.test3.pub new file mode 100644 index 00000000..057db632 --- /dev/null +++ b/tests/integration_tests/assets/keys/id_rsa.test3.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCk8bgx2RhAoPnv+qt4WuH0ZELbbLrqpk5ZlZGMLzzuutu8HM7r2FXImlMheV4kS+laIdhyMxHkbo00Wyc21nh/EOqZJzi9wWPncmwT9c2osqrqmxdp0Jvm+Q2kZvED5nlsmXMn3r0+AkggRuBzzqx6LiSBHYy49aq1ltjkaSk0JOpa4th4Ur7XigWu4DafJYf0w4hEMcr93n3umwfL4tMy3KY7tk+E7nvVpsrDqO+/CJJ4PFUT2RXsoBAi4z39LeB+2+BBCuifcYvfimCYhpt1IZ1t8eoYOiivImGVuq4lkQsE3bfoqBE1MICNW9TOONCvV5E/fGywbBU7oQIEYJAuX9piiACC3CfCP2Jeq1+IsrcTCWCOq5Tj4lDQJW0uEbeAhkH6qgxOFyjSmYdOuO19ALCvO2wEEW6dJrb8Ky25XWZDk7lmtnoAKj3I9EE2Sezr4KIp20y0K3OLeTZEPP2OliY9xqTTcuvsiR0LVOnr6MD7//I8dI7WjgKpbz0hDM0= test3@host diff --git 
a/tests/integration_tests/modules/test_ssh_keysfile.py b/tests/integration_tests/modules/test_ssh_keysfile.py new file mode 100644 index 00000000..f82d7649 --- /dev/null +++ b/tests/integration_tests/modules/test_ssh_keysfile.py @@ -0,0 +1,85 @@ +import paramiko +import pytest +from io import StringIO +from paramiko.ssh_exception import SSHException + +from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.util import get_test_rsa_keypair + +TEST_USER1_KEYS = get_test_rsa_keypair('test1') +TEST_USER2_KEYS = get_test_rsa_keypair('test2') +TEST_DEFAULT_KEYS = get_test_rsa_keypair('test3') + +USERDATA = """\ +#cloud-config +bootcmd: + - sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile /etc/ssh/authorized_keys %h/.ssh/authorized_keys2;' /etc/ssh/sshd_config +ssh_authorized_keys: + - {default} +users: +- default +- name: test_user1 + ssh_authorized_keys: + - {user1} +- name: test_user2 + ssh_authorized_keys: + - {user2} +""".format( # noqa: E501 + default=TEST_DEFAULT_KEYS.public_key, + user1=TEST_USER1_KEYS.public_key, + user2=TEST_USER2_KEYS.public_key, +) + + +@pytest.mark.ubuntu +@pytest.mark.user_data(USERDATA) +def test_authorized_keys(client: IntegrationInstance): + expected_keys = [ + ('test_user1', '/home/test_user1/.ssh/authorized_keys2', + TEST_USER1_KEYS), + ('test_user2', '/home/test_user2/.ssh/authorized_keys2', + TEST_USER2_KEYS), + ('ubuntu', '/home/ubuntu/.ssh/authorized_keys2', + TEST_DEFAULT_KEYS), + ('root', '/root/.ssh/authorized_keys2', TEST_DEFAULT_KEYS), + ] + + for user, filename, keys in expected_keys: + contents = client.read_from_file(filename) + if user in ['ubuntu', 'root']: + # Our personal public key gets added by pycloudlib + lines = contents.split('\n') + assert len(lines) == 2 + assert keys.public_key.strip() in contents + else: + assert contents.strip() == keys.public_key.strip() + + # Ensure we can actually connect + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + paramiko_key = paramiko.RSAKey.from_private_key(StringIO( + keys.private_key)) + + # Will fail with AuthenticationException if + # we cannot connect + ssh.connect( + client.instance.ip, + username=user, + pkey=paramiko_key, + look_for_keys=False, + allow_agent=False, + ) + + # Ensure other users can't connect using our key + other_users = [u[0] for u in expected_keys if u[2] != keys] + for other_user in other_users: + with pytest.raises(SSHException): + print('trying to connect as {} with key from {}'.format( + other_user, user)) + ssh.connect( + client.instance.ip, + username=other_user, + pkey=paramiko_key, + look_for_keys=False, + allow_agent=False, + ) diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py index 8d726bb2..ce62ffc8 100644 --- a/tests/integration_tests/util.py +++ b/tests/integration_tests/util.py @@ -3,12 +3,15 @@ import multiprocessing import os import time from contextlib import contextmanager +from collections import namedtuple from pathlib import Path -log = logging.getLogger('integration_testing') +log = logging.getLogger('integration_testing') +key_pair = namedtuple('key_pair', 'public_key private_key') ASSETS_DIR = Path('tests/integration_tests/assets') +KEY_PATH = ASSETS_DIR / 'keys' def verify_ordered_items_in_text(to_verify: list, text: str): @@ -51,3 +54,13 @@ def emit_dots_on_travis(): yield finally: dot_process.terminate() + + +def get_test_rsa_keypair(key_name: str = 'test1') -> key_pair: + private_key_path = KEY_PATH / 'id_rsa.{}'.format(key_name) +
public_key_path = KEY_PATH / 'id_rsa.{}.pub'.format(key_name) + with public_key_path.open() as public_file: + public_key = public_file.read() + with private_key_path.open() as private_file: + private_key = private_file.read() + return key_pair(public_key, private_key) diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index fd1d1bac..bcb8044f 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -570,20 +570,33 @@ class TestBasicAuthorizedKeyParse(test_helpers.CiTestCase): ssh_util.render_authorizedkeysfile_paths( "%h/.keys", "/homedirs/bobby", "bobby")) + def test_all(self): + self.assertEqual( + ["/homedirs/bobby/.keys", "/homedirs/bobby/.secret/keys", + "/keys/path1", "/opt/bobby/keys"], + ssh_util.render_authorizedkeysfile_paths( + "%h/.keys .secret/keys /keys/path1 /opt/%u/keys", + "/homedirs/bobby", "bobby")) + class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): @patch("cloudinit.ssh_util.pwd.getpwnam") def test_multiple_authorizedkeys_file_order1(self, m_getpwnam): - fpw = FakePwEnt(pw_name='bobby', pw_dir='/home2/bobby') + fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') m_getpwnam.return_value = fpw - authorized_keys = self.tmp_path('authorized_keys') + user_ssh_folder = "%s/.ssh" % fpw.pw_dir + + # /tmp/home2/bobby/.ssh/authorized_keys = rsa + authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder) util.write_file(authorized_keys, VALID_CONTENT['rsa']) - user_keys = self.tmp_path('user_keys') + # /tmp/home2/bobby/.ssh/user_keys = dsa + user_keys = self.tmp_path('user_keys', dir=user_ssh_folder) util.write_file(user_keys, VALID_CONTENT['dsa']) - sshd_config = self.tmp_path('sshd_config') + # /tmp/sshd_config + sshd_config = self.tmp_path('sshd_config', dir="/tmp") util.write_file( sshd_config, "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys) @@ -593,33 +606,244 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): fpw.pw_name, sshd_config) content = ssh_util.update_authorized_keys(auth_key_entries, []) - self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn) + self.assertEqual(user_keys, auth_key_fn) self.assertTrue(VALID_CONTENT['rsa'] in content) self.assertTrue(VALID_CONTENT['dsa'] in content) @patch("cloudinit.ssh_util.pwd.getpwnam") def test_multiple_authorizedkeys_file_order2(self, m_getpwnam): - fpw = FakePwEnt(pw_name='suzie', pw_dir='/home/suzie') + fpw = FakePwEnt(pw_name='suzie', pw_dir='/tmp/home/suzie') m_getpwnam.return_value = fpw - authorized_keys = self.tmp_path('authorized_keys') + user_ssh_folder = "%s/.ssh" % fpw.pw_dir + + # /tmp/home/suzie/.ssh/authorized_keys = rsa + authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder) util.write_file(authorized_keys, VALID_CONTENT['rsa']) - user_keys = self.tmp_path('user_keys') + # /tmp/home/suzie/.ssh/user_keys = dsa + user_keys = self.tmp_path('user_keys', dir=user_ssh_folder) util.write_file(user_keys, VALID_CONTENT['dsa']) - sshd_config = self.tmp_path('sshd_config') + # /tmp/sshd_config + sshd_config = self.tmp_path('sshd_config', dir="/tmp") util.write_file( sshd_config, - "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys) + "AuthorizedKeysFile %s %s" % (user_keys, authorized_keys) ) (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( - fpw.pw_name, sshd_config + fpw.pw_name, sshd_config) + content = ssh_util.update_authorized_keys(auth_key_entries, []) + + self.assertEqual(authorized_keys, auth_key_fn) + 
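# Editorial note on test_all above (a sketch mirroring its assertion, not
# part of the patch): it exercises every token form sshd_config allows in a
# single AuthorizedKeysFile line:
#     render_authorizedkeysfile_paths(
#         "%h/.keys .secret/keys /keys/path1 /opt/%u/keys",
#         "/homedirs/bobby", "bobby")
#     -> ["/homedirs/bobby/.keys",        # %h expands to the home directory
#         "/homedirs/bobby/.secret/keys", # relative paths resolve under %h
#         "/keys/path1",                  # absolute paths pass through
#         "/opt/bobby/keys"]              # %u expands to the user name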
self.assertTrue(VALID_CONTENT['rsa'] in content) + self.assertTrue(VALID_CONTENT['dsa'] in content) + + @patch("cloudinit.ssh_util.pwd.getpwnam") + def test_multiple_authorizedkeys_file_local_global(self, m_getpwnam): + fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') + m_getpwnam.return_value = fpw + user_ssh_folder = "%s/.ssh" % fpw.pw_dir + + # /tmp/home2/bobby/.ssh/authorized_keys = rsa + authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder) + util.write_file(authorized_keys, VALID_CONTENT['rsa']) + + # /tmp/home2/bobby/.ssh/user_keys = dsa + user_keys = self.tmp_path('user_keys', dir=user_ssh_folder) + util.write_file(user_keys, VALID_CONTENT['dsa']) + + # /tmp/etc/ssh/authorized_keys = ecdsa + authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys', + dir="/tmp") + util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) + + # /tmp/sshd_config + sshd_config = self.tmp_path('sshd_config', dir="/tmp") + util.write_file( + sshd_config, + "AuthorizedKeysFile %s %s %s" % (authorized_keys_global, + user_keys, authorized_keys) + ) + + (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( + fpw.pw_name, sshd_config) + content = ssh_util.update_authorized_keys(auth_key_entries, []) + + self.assertEqual(authorized_keys, auth_key_fn) + self.assertTrue(VALID_CONTENT['rsa'] in content) + self.assertTrue(VALID_CONTENT['ecdsa'] in content) + self.assertTrue(VALID_CONTENT['dsa'] in content) + + @patch("cloudinit.ssh_util.pwd.getpwnam") + def test_multiple_authorizedkeys_file_local_global2(self, m_getpwnam): + fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') + m_getpwnam.return_value = fpw + user_ssh_folder = "%s/.ssh" % fpw.pw_dir + + # /tmp/home2/bobby/.ssh/authorized_keys2 = rsa + authorized_keys = self.tmp_path('authorized_keys2', + dir=user_ssh_folder) + util.write_file(authorized_keys, VALID_CONTENT['rsa']) + + # /tmp/home2/bobby/.ssh/user_keys3 = dsa + user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder) + util.write_file(user_keys, VALID_CONTENT['dsa']) + + # /tmp/etc/ssh/authorized_keys = ecdsa + authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys', + dir="/tmp") + util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) + + # /tmp/sshd_config + sshd_config = self.tmp_path('sshd_config', dir="/tmp") + util.write_file( + sshd_config, + "AuthorizedKeysFile %s %s %s" % (authorized_keys_global, + authorized_keys, user_keys) + ) + + (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( + fpw.pw_name, sshd_config) + content = ssh_util.update_authorized_keys(auth_key_entries, []) + + self.assertEqual(user_keys, auth_key_fn) + self.assertTrue(VALID_CONTENT['rsa'] in content) + self.assertTrue(VALID_CONTENT['ecdsa'] in content) + self.assertTrue(VALID_CONTENT['dsa'] in content) + + @patch("cloudinit.ssh_util.pwd.getpwnam") + def test_multiple_authorizedkeys_file_global(self, m_getpwnam): + fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') + m_getpwnam.return_value = fpw + + # /tmp/etc/ssh/authorized_keys = rsa + authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys', + dir="/tmp") + util.write_file(authorized_keys_global, VALID_CONTENT['rsa']) + + # /tmp/sshd_config + sshd_config = self.tmp_path('sshd_config') + util.write_file( + sshd_config, + "AuthorizedKeysFile %s" % (authorized_keys_global) ) + + (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( + fpw.pw_name, sshd_config) content = ssh_util.update_authorized_keys(auth_key_entries, []) 
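# Editorial note (not part of the patch): the pattern these tests establish
# is that update_authorized_keys() merges keys from every file named in
# AuthorizedKeysFile, while extract_authorized_keys() chooses the last listed
# path under the user's own home directory as auth_key_fn, the file
# cloud-init will actually write to; when no user-writable path is listed it
# falls back to %h/.ssh/authorized_keys, as the assertions just below show
# for the global-only case.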
self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn) self.assertTrue(VALID_CONTENT['rsa'] in content) + + @patch("cloudinit.ssh_util.pwd.getpwnam") + def test_multiple_authorizedkeys_file_multiuser(self, m_getpwnam): + fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') + m_getpwnam.return_value = fpw + user_ssh_folder = "%s/.ssh" % fpw.pw_dir + # /tmp/home2/bobby/.ssh/authorized_keys2 = rsa + authorized_keys = self.tmp_path('authorized_keys2', + dir=user_ssh_folder) + util.write_file(authorized_keys, VALID_CONTENT['rsa']) + # /tmp/home2/bobby/.ssh/user_keys3 = dsa + user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder) + util.write_file(user_keys, VALID_CONTENT['dsa']) + + fpw2 = FakePwEnt(pw_name='suzie', pw_dir='/tmp/home/suzie') + user_ssh_folder = "%s/.ssh" % fpw2.pw_dir + # /tmp/home/suzie/.ssh/authorized_keys2 = ssh-xmss@openssh.com + authorized_keys2 = self.tmp_path('authorized_keys2', + dir=user_ssh_folder) + util.write_file(authorized_keys2, + VALID_CONTENT['ssh-xmss@openssh.com']) + + # /tmp/etc/ssh/authorized_keys = ecdsa + authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys2', + dir="/tmp") + util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) + + # /tmp/sshd_config + sshd_config = self.tmp_path('sshd_config', dir="/tmp") + util.write_file( + sshd_config, + "AuthorizedKeysFile %s %%h/.ssh/authorized_keys2 %s" % + (authorized_keys_global, user_keys) + ) + + # process first user + (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( + fpw.pw_name, sshd_config) + content = ssh_util.update_authorized_keys(auth_key_entries, []) + + self.assertEqual(user_keys, auth_key_fn) + self.assertTrue(VALID_CONTENT['rsa'] in content) + self.assertTrue(VALID_CONTENT['ecdsa'] in content) + self.assertTrue(VALID_CONTENT['dsa'] in content) + self.assertFalse(VALID_CONTENT['ssh-xmss@openssh.com'] in content) + + m_getpwnam.return_value = fpw2 + # process second user + (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( + fpw2.pw_name, sshd_config) + content = ssh_util.update_authorized_keys(auth_key_entries, []) + + self.assertEqual(authorized_keys2, auth_key_fn) + self.assertTrue(VALID_CONTENT['ssh-xmss@openssh.com'] in content) + self.assertTrue(VALID_CONTENT['ecdsa'] in content) + self.assertTrue(VALID_CONTENT['dsa'] in content) + self.assertFalse(VALID_CONTENT['rsa'] in content) + + @patch("cloudinit.ssh_util.pwd.getpwnam") + def test_multiple_authorizedkeys_file_multiuser2(self, m_getpwnam): + fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home/bobby') + m_getpwnam.return_value = fpw + user_ssh_folder = "%s/.ssh" % fpw.pw_dir + # /tmp/home/bobby/.ssh/authorized_keys2 = rsa + authorized_keys = self.tmp_path('authorized_keys2', + dir=user_ssh_folder) + util.write_file(authorized_keys, VALID_CONTENT['rsa']) + # /tmp/home/bobby/.ssh/user_keys3 = dsa + user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder) + util.write_file(user_keys, VALID_CONTENT['dsa']) + + fpw2 = FakePwEnt(pw_name='badguy', pw_dir='/tmp/home/badguy') + user_ssh_folder = "%s/.ssh" % fpw2.pw_dir + # /tmp/home/badguy/home/bobby = "" + authorized_keys2 = self.tmp_path('home/bobby', dir="/tmp/home/badguy") + + # /tmp/etc/ssh/authorized_keys = ecdsa + authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys2', + dir="/tmp") + util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) + + # /tmp/sshd_config + sshd_config = self.tmp_path('sshd_config', dir="/tmp") + util.write_file( + sshd_config, + "AuthorizedKeysFile %s 
%%h/.ssh/authorized_keys2 %s %s" % + (authorized_keys_global, user_keys, authorized_keys2) + ) + + # process first user + (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( + fpw.pw_name, sshd_config) + content = ssh_util.update_authorized_keys(auth_key_entries, []) + + self.assertEqual(user_keys, auth_key_fn) + self.assertTrue(VALID_CONTENT['rsa'] in content) + self.assertTrue(VALID_CONTENT['ecdsa'] in content) + self.assertTrue(VALID_CONTENT['dsa'] in content) + + m_getpwnam.return_value = fpw2 + # process second user + (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( + fpw2.pw_name, sshd_config) + content = ssh_util.update_authorized_keys(auth_key_entries, []) + + # badguy should not take the key from the other user! + self.assertEqual(authorized_keys2, auth_key_fn) + self.assertTrue(VALID_CONTENT['ecdsa'] in content) self.assertTrue(VALID_CONTENT['dsa'] in content) + self.assertFalse(VALID_CONTENT['rsa'] in content) # vi: ts=4 expandtab -- cgit v1.2.3 From eacb0353803263934aa2ac827c37e461c87cb107 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 15 Jul 2021 17:52:21 -0500 Subject: Fix MIME policy failure on python version upgrade (#934) Python 3.6 added a new `policy` attribute to `MIMEMultipart`. MIMEMultipart may be part of the cached object pickle of a datasource. Upgrading from an old version of python to 3.6+ will cause the datasource to be invalid after pickle load. This commit uses the upgrade framework to attempt to access the mime message and fail early (thus discarding the cache) if we cannot. Commit 78e89b03 should fix this issue more generally. --- cloudinit/sources/__init__.py | 18 + cloudinit/stages.py | 2 + .../integration_tests/assets/trusty_with_mime.pkl | 572 +++++++++++++++++++++ .../integration_tests/modules/test_persistence.py | 30 ++ 4 files changed, 622 insertions(+) create mode 100644 tests/integration_tests/assets/trusty_with_mime.pkl create mode 100644 tests/integration_tests/modules/test_persistence.py (limited to 'tests/integration_tests') diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index a07c4b4f..9d25b0ee 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -75,6 +75,10 @@ NetworkConfigSource = namedtuple('NetworkConfigSource', _NETCFG_SOURCE_NAMES)(*_NETCFG_SOURCE_NAMES) +class DatasourceUnpickleUserDataError(Exception): + """Raised when userdata is unable to be unpickled due to python upgrades""" + + class DataSourceNotFoundException(Exception): pass @@ -239,6 +243,20 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): self.vendordata2 = None if not hasattr(self, 'vendordata2_raw'): self.vendordata2_raw = None + if hasattr(self, 'userdata') and self.userdata is not None: + # If userdata stores MIME data, on < python3.6 it will be + # missing the 'policy' attribute that exists on >=python3.6. + # Calling str() on the userdata will attempt to access this + # policy attribute. This will raise an exception, causing + # the pickle load to fail, so cloud-init will discard the cache + try: + str(self.userdata) + except AttributeError as e: + LOG.debug( + "Unable to unpickle datasource: %s." 
+ " Ignoring current cache.", e + ) + raise DatasourceUnpickleUserDataError() from e def __str__(self): return type_utils.obj_name(self) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 3688be2e..06e0d9b1 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -1070,6 +1070,8 @@ def _pkl_load(fname): return None try: return pickle.loads(pickle_contents) + except sources.DatasourceUnpickleUserDataError: + return None except Exception: util.logexc(LOG, "Failed loading pickled blob from %s", fname) return None diff --git a/tests/integration_tests/assets/trusty_with_mime.pkl b/tests/integration_tests/assets/trusty_with_mime.pkl new file mode 100644 index 00000000..a4089ecf --- /dev/null +++ b/tests/integration_tests/assets/trusty_with_mime.pkl @@ -0,0 +1,572 @@ +ccopy_reg +_reconstructor +p1 +(ccloudinit.sources.DataSourceNoCloud +DataSourceNoCloudNet +p2 +c__builtin__ +object +p3 +NtRp4 +(dp5 +S'paths' +p6 +g1 +(ccloudinit.helpers +Paths +p7 +g3 +NtRp8 +(dp9 +S'lookups' +p10 +(dp11 +S'cloud_config' +p12 +S'cloud-config.txt' +p13 +sS'userdata' +p14 +S'user-data.txt.i' +p15 +sS'vendordata' +p16 +S'vendor-data.txt.i' +p17 +sS'userdata_raw' +p18 +S'user-data.txt' +p19 +sS'boothooks' +p20 +g20 +sS'scripts' +p21 +g21 +sS'sem' +p22 +g22 +sS'data' +p23 +g23 +sS'vendor_scripts' +p24 +S'scripts/vendor' +p25 +sS'handlers' +p26 +g26 +sS'obj_pkl' +p27 +S'obj.pkl' +p28 +sS'vendordata_raw' +p29 +S'vendor-data.txt' +p30 +sS'vendor_cloud_config' +p31 +S'vendor-cloud-config.txt' +p32 +ssS'template_tpl' +p33 +S'/etc/cloud/templates/%s.tmpl' +p34 +sS'cfgs' +p35 +(dp36 +S'cloud_dir' +p37 +S'/var/lib/cloud/' +p38 +sS'templates_dir' +p39 +S'/etc/cloud/templates/' +p40 +sS'upstart_dir' +p41 +S'/etc/init/' +p42 +ssS'cloud_dir' +p43 +g38 +sS'datasource' +p44 +NsS'upstart_conf_d' +p45 +g42 +sS'boot_finished' +p46 +S'/var/lib/cloud/instance/boot-finished' +p47 +sS'instance_link' +p48 +S'/var/lib/cloud/instance' +p49 +sS'seed_dir' +p50 +S'/var/lib/cloud/seed' +p51 +sbsS'supported_seed_starts' +p52 +(S'http://' +p53 +S'https://' +p54 +S'ftp://' +p55 +tp56 +sS'sys_cfg' +p57 +(dp58 +S'output' +p59 +(dp60 +S'all' +p61 +S'| tee -a /var/log/cloud-init-output.log' +p62 +ssS'users' +p63 +(lp64 +S'default' +p65 +asS'def_log_file' +p66 +S'/var/log/cloud-init.log' +p67 +sS'cloud_final_modules' +p68 +(lp69 +S'rightscale_userdata' +p70 +aS'scripts-vendor' +p71 +aS'scripts-per-once' +p72 +aS'scripts-per-boot' +p73 +aS'scripts-per-instance' +p74 +aS'scripts-user' +p75 +aS'ssh-authkey-fingerprints' +p76 +aS'keys-to-console' +p77 +aS'phone-home' +p78 +aS'final-message' +p79 +aS'power-state-change' +p80 +asS'disable_root' +p81 +I01 +sS'syslog_fix_perms' +p82 +S'syslog:adm' +p83 +sS'log_cfgs' +p84 +(lp85 +(lp86 +S'[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n' +p87 +aS'[handler_cloudLogHandler]\nclass=handlers.SysLogHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=("/dev/log", handlers.SysLogHandler.LOG_USER)\n' +p88 +aa(lp89 +g87 
+aS"[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n" +p90 +aasS'cloud_init_modules' +p91 +(lp92 +S'migrator' +p93 +aS'seed_random' +p94 +aS'bootcmd' +p95 +aS'write-files' +p96 +aS'growpart' +p97 +aS'resizefs' +p98 +aS'set_hostname' +p99 +aS'update_hostname' +p100 +aS'update_etc_hosts' +p101 +aS'ca-certs' +p102 +aS'rsyslog' +p103 +aS'users-groups' +p104 +aS'ssh' +p105 +asS'preserve_hostname' +p106 +I00 +sS'_log' +p107 +(lp108 +g87 +ag90 +ag88 +asS'datasource_list' +p109 +(lp110 +S'NoCloud' +p111 +aS'ConfigDrive' +p112 +aS'OpenNebula' +p113 +aS'Azure' +p114 +aS'AltCloud' +p115 +aS'OVF' +p116 +aS'MAAS' +p117 +aS'GCE' +p118 +aS'OpenStack' +p119 +aS'CloudSigma' +p120 +aS'Ec2' +p121 +aS'CloudStack' +p122 +aS'SmartOS' +p123 +aS'None' +p124 +asS'vendor_data' +p125 +(dp126 +S'prefix' +p127 +(lp128 +sS'enabled' +p129 +I01 +ssS'cloud_config_modules' +p130 +(lp131 +S'emit_upstart' +p132 +aS'disk_setup' +p133 +aS'mounts' +p134 +aS'ssh-import-id' +p135 +aS'locale' +p136 +aS'set-passwords' +p137 +aS'grub-dpkg' +p138 +aS'apt-pipelining' +p139 +aS'apt-configure' +p140 +aS'package-update-upgrade-install' +p141 +aS'landscape' +p142 +aS'timezone' +p143 +aS'puppet' +p144 +aS'chef' +p145 +aS'salt-minion' +p146 +aS'mcollective' +p147 +aS'disable-ec2-metadata' +p148 +aS'runcmd' +p149 +aS'byobu' +p150 +assg14 +(iemail.mime.multipart +MIMEMultipart +p151 +(dp152 +S'_headers' +p153 +(lp154 +(S'Content-Type' +p155 +S'multipart/mixed; boundary="===============4291038100093149247=="' +tp156 +a(S'MIME-Version' +p157 +S'1.0' +p158 +tp159 +a(S'Number-Attachments' +p160 +S'1' +tp161 +asS'_payload' +p162 +(lp163 +(iemail.mime.base +MIMEBase +p164 +(dp165 +g153 +(lp166 +(g157 +g158 +tp167 +a(S'Content-Type' +p168 +S'text/x-not-multipart' +tp169 +a(S'Content-Disposition' +p170 +S'attachment; filename="part-001"' +tp171 +asg162 +S'' +sS'_charset' +p172 +NsS'_default_type' +p173 +S'text/plain' +p174 +sS'preamble' +p175 +NsS'defects' +p176 +(lp177 +sS'_unixfrom' +p178 +NsS'epilogue' +p179 +Nsbasg172 +Nsg173 +g174 +sg175 +Nsg176 +(lp180 +sg178 +Nsg179 +Nsbsg16 +S'#cloud-config\n{}\n\n' +p181 +sg18 +S'Content-Type: multipart/mixed; boundary="===============1378281702283945349=="\nMIME-Version: 1.0\n\n--===============1378281702283945349==\nContent-Type: text/x-shellscript; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: base64\nContent-Disposition: attachment; filename="script1.sh"\n\nIyEvYmluL3NoCgplY2hvICdoaScgPiAvdmFyL3RtcC9oaQo=\n\n--===============1378281702283945349==\nContent-Type: text/x-shellscript; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: base64\nContent-Disposition: attachment; filename="script2.sh"\n\nIyEvYmluL2Jhc2gKCmVjaG8gJ2hpMicgPiAvdmFyL3RtcC9oaTIK\n\n--===============1378281702283945349==--\n\n#cloud-config\n# final_message: |\n# This is my final message!\n# $version\n# $timestamp\n# $datasource\n# $uptime\n# updates:\n# network:\n# when: [\'hotplug\']\n' +p182 +sg29 +NsS'dsmode' +p183 +S'net' +p184 +sS'seed' +p185 +S'/var/lib/cloud/seed/nocloud-net' +p186 +sS'cmdline_id' +p187 +S'ds=nocloud-net' +p188 +sS'ud_proc' +p189 +g1 +(ccloudinit.user_data +UserDataProcessor +p190 +g3 +NtRp191 +(dp192 +g6 +g8 +sS'ssl_details' +p193 +(dp194 +sbsg50 +g186 +sS'ds_cfg' +p195 +(dp196 +sS'distro' +p197 +g1 +(ccloudinit.distros.ubuntu +Distro +p198 +g3 +NtRp199 +(dp200 +S'osfamily' +p201 +S'debian' +p202 +sS'_paths' +p203 +g8 +sS'name' +p204 +S'ubuntu' +p205 +sS'_runner' +p206 +g1 +(ccloudinit.helpers +Runners +p207 +g3 
+NtRp208 +(dp209 +g6 +g8 +sS'sems' +p210 +(dp211 +sbsS'_cfg' +p212 +(dp213 +S'paths' +p214 +(dp215 +g37 +g38 +sg39 +g40 +sg41 +g42 +ssS'default_user' +p216 +(dp217 +S'shell' +p218 +S'/bin/bash' +p219 +sS'name' +p220 +S'ubuntu' +p221 +sS'sudo' +p222 +(lp223 +S'ALL=(ALL) NOPASSWD:ALL' +p224 +asS'lock_passwd' +p225 +I01 +sS'gecos' +p226 +S'Ubuntu' +p227 +sS'groups' +p228 +(lp229 +S'adm' +p230 +aS'audio' +p231 +aS'cdrom' +p232 +aS'dialout' +p233 +aS'dip' +p234 +aS'floppy' +p235 +aS'netdev' +p236 +aS'plugdev' +p237 +aS'sudo' +p238 +aS'video' +p239 +assS'package_mirrors' +p240 +(lp241 +(dp242 +S'arches' +p243 +(lp244 +S'i386' +p245 +aS'amd64' +p246 +asS'failsafe' +p247 +(dp248 +S'security' +p249 +S'http://security.ubuntu.com/ubuntu' +p250 +sS'primary' +p251 +S'http://archive.ubuntu.com/ubuntu' +p252 +ssS'search' +p253 +(dp254 +S'security' +p255 +(lp256 +sS'primary' +p257 +(lp258 +S'http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/' +p259 +aS'http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/' +p260 +aS'http://%(region)s.clouds.archive.ubuntu.com/ubuntu/' +p261 +assa(dp262 +S'arches' +p263 +(lp264 +S'armhf' +p265 +aS'armel' +p266 +aS'default' +p267 +asS'failsafe' +p268 +(dp269 +S'security' +p270 +S'http://ports.ubuntu.com/ubuntu-ports' +p271 +sS'primary' +p272 +S'http://ports.ubuntu.com/ubuntu-ports' +p273 +ssasS'ssh_svcname' +p274 +S'ssh' +p275 +ssbsS'metadata' +p276 +(dp277 +g183 +g184 +sS'local-hostname' +p278 +S'me' +p279 +sS'instance-id' +p280 +S'me' +p281 +ssb. \ No newline at end of file diff --git a/tests/integration_tests/modules/test_persistence.py b/tests/integration_tests/modules/test_persistence.py new file mode 100644 index 00000000..00fdeaea --- /dev/null +++ b/tests/integration_tests/modules/test_persistence.py @@ -0,0 +1,30 @@ +# This file is part of cloud-init. See LICENSE file for license information. +"""Test the behavior of loading/discarding pickle data""" +from pathlib import Path + +import pytest + +from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.util import ( + ASSETS_DIR, + verify_ordered_items_in_text, +) + + +PICKLE_PATH = Path('/var/lib/cloud/instance/obj.pkl') +TEST_PICKLE = ASSETS_DIR / 'trusty_with_mime.pkl' + + +@pytest.mark.lxd_container +def test_log_message_on_missing_version_file(client: IntegrationInstance): + client.push_file(TEST_PICKLE, PICKLE_PATH) + client.restart() + assert client.execute('cloud-init status --wait').ok + log = client.read_from_file('/var/log/cloud-init.log') + verify_ordered_items_in_text([ + "Unable to unpickle datasource: 'MIMEMultipart' object has no " + "attribute 'policy'. Ignoring current cache.", + 'no cache found', + 'Searching for local data source', + 'SUCCESS: found local data from DataSourceNoCloud' + ], log) -- cgit v1.2.3 From 184c836a16e9954a2cba11ae21f07923077ec904 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 19 Jul 2021 14:13:21 -0500 Subject: Initial hotplug support (#936) Adds a udev script which will invoke a hotplug hook script on all net add events. The script will write some udev arguments to a systemd FIFO socket (to ensure we have only instance of cloud-init running at a time), which is then read by a new service that calls a new 'cloud-init devel hotplug-hook' command to handle the new event. 
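As a sketch of the plumbing (the FIFO path, unit names and option names all
appear in the diff below; the concrete sysfs path and exact argument string
are illustrative assumptions):

    # udev net event
    #   -> tools/hook-hotplug writes one argument line into the FIFO that
    #      cloud-init-hotplugd.socket owns:
    #        /run/cloud-init/hook-hotplug-cmd
    #        <- "--devpath=/sys/class/net/eth1 --subsystem=net --udevaction=add"
    #   -> cloud-init-hotplugd.service reads that line and execs:
    #        cloud-init devel hotplug-hook --devpath=/sys/class/net/eth1 \
    #            --subsystem=net --udevaction=add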
This hotplug-hook command will:
 - Fetch the pickled datasource
 - Verify that the hotplug event is supported/enabled
 - Update the metadata for the datasource
 - Ensure the hotplugged device exists within the datasource
 - Apply the config change on the datasource metadata
 - Bring up the new interface (or apply global network configuration)
 - Save the updated metadata back to the pickle cache

Also scattered in some unrelated typing where helpful
---
 bash_completion/cloud-init                      |   5 +-
 cloudinit/cmd/devel/hotplug_hook.py             | 236 ++++++++++++++++++++++++
 cloudinit/cmd/devel/parser.py                   |   3 +
 cloudinit/distros/__init__.py                   |  11 +-
 cloudinit/event.py                              |   1 +
 cloudinit/net/activators.py                     | 174 +++++++++++++----
 cloudinit/sources/DataSourceConfigDrive.py      |  10 +-
 cloudinit/sources/DataSourceEc2.py              |   7 +
 cloudinit/sources/DataSourceOpenStack.py        |  11 +-
 cloudinit/sources/__init__.py                   |   3 +-
 cloudinit/stages.py                             |   4 +-
 doc/rtd/topics/cli.rst                          |   4 +
 doc/rtd/topics/events.rst                       |  10 +-
 packages/redhat/cloud-init.spec.in              |   7 +
 setup.py                                        |   2 +
 systemd/cloud-init-generator.tmpl               |   0
 systemd/cloud-init-hotplugd.service             |  22 +++
 systemd/cloud-init-hotplugd.socket              |  13 ++
 tests/integration_tests/modules/test_hotplug.py |  94 ++++++++++
 tests/unittests/cmd/devel/test_hotplug_hook.py  | 218 ++++++++++++++++++++++
 tests/unittests/test_net_activators.py          | 135 ++++++++++----
 tools/hook-hotplug                              |  21 +++
 udev/10-cloud-init-hook-hotplug.rules           |   6 +
 23 files changed, 906 insertions(+), 91 deletions(-)
 create mode 100644 cloudinit/cmd/devel/hotplug_hook.py
 mode change 100755 => 100644 systemd/cloud-init-generator.tmpl
 create mode 100644 systemd/cloud-init-hotplugd.service
 create mode 100644 systemd/cloud-init-hotplugd.socket
 create mode 100644 tests/integration_tests/modules/test_hotplug.py
 create mode 100644 tests/unittests/cmd/devel/test_hotplug_hook.py
 create mode 100755 tools/hook-hotplug
 create mode 100644 udev/10-cloud-init-hook-hotplug.rules
(limited to 'tests/integration_tests')

diff --git a/bash_completion/cloud-init b/bash_completion/cloud-init
index a9577e9d..b9f137b1 100644
--- a/bash_completion/cloud-init
+++ b/bash_completion/cloud-init
@@ -28,7 +28,7 @@ _cloudinit_complete()
             COMPREPLY=($(compgen -W "--help --tarfile --include-userdata" -- $cur_word))
             ;;
         devel)
-            COMPREPLY=($(compgen -W "--help schema net-convert" -- $cur_word))
+            COMPREPLY=($(compgen -W "--help hotplug-hook schema net-convert" -- $cur_word))
             ;;
         dhclient-hook)
             COMPREPLY=($(compgen -W "--help up down" -- $cur_word))
@@ -64,6 +64,9 @@ _cloudinit_complete()
         --frequency)
             COMPREPLY=($(compgen -W "--help instance always once" -- $cur_word))
             ;;
+        hotplug-hook)
+            COMPREPLY=($(compgen -W "--help" -- $cur_word))
+            ;;
         net-convert)
             COMPREPLY=($(compgen -W "--help --network-data --kind --directory --output-kind" -- $cur_word))
             ;;
diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py
new file mode 100644
index 00000000..0282f24a
--- /dev/null
+++ b/cloudinit/cmd/devel/hotplug_hook.py
@@ -0,0 +1,236 @@
+# This file is part of cloud-init. See LICENSE file for license information.
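# Editorial note (not part of the patch): get_parser below defines the new
# subcommand's full surface; --devpath, --subsystem (only 'net' for now) and
# --udevaction ('add' or 'remove') are all required, e.g.:
#     cloud-init devel hotplug-hook -d /sys/class/net/eth1 -s net -u add
# (the device path shown is illustrative only).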
+"""Handle reconfiguration on hotplug events""" +import abc +import argparse +import os +import time + +from cloudinit import log +from cloudinit import reporting +from cloudinit.event import EventScope, EventType +from cloudinit.net import activators, read_sys_net_safe +from cloudinit.net.network_state import parse_net_config_data +from cloudinit.reporting import events +from cloudinit.stages import Init +from cloudinit.sources import DataSource + + +LOG = log.getLogger(__name__) +NAME = 'hotplug-hook' + + +def get_parser(parser=None): + """Build or extend an arg parser for hotplug-hook utility. + + @param parser: Optional existing ArgumentParser instance representing the + subcommand which will be extended to support the args of this utility. + + @returns: ArgumentParser with proper argument configuration. + """ + if not parser: + parser = argparse.ArgumentParser(prog=NAME, description=__doc__) + + parser.description = __doc__ + parser.add_argument("-d", "--devpath", required=True, + metavar="PATH", + help="sysfs path to hotplugged device") + parser.add_argument("-s", "--subsystem", required=True, + help="subsystem to act on", + choices=['net']) + parser.add_argument("-u", "--udevaction", required=True, + help="action to take", + choices=['add', 'remove']) + + return parser + + +class UeventHandler(abc.ABC): + def __init__(self, id, datasource, devpath, action, success_fn): + self.id = id + self.datasource = datasource # type: DataSource + self.devpath = devpath + self.action = action + self.success_fn = success_fn + + @abc.abstractmethod + def apply(self): + raise NotImplementedError() + + @property + @abc.abstractmethod + def config(self): + raise NotImplementedError() + + @abc.abstractmethod + def device_detected(self) -> bool: + raise NotImplementedError() + + def detect_hotplugged_device(self): + detect_presence = None + if self.action == 'add': + detect_presence = True + elif self.action == 'remove': + detect_presence = False + else: + raise ValueError('Unknown action: %s' % self.action) + + if detect_presence != self.device_detected(): + raise RuntimeError( + 'Failed to detect %s in updated metadata' % self.id) + + def success(self): + return self.success_fn() + + def update_metadata(self): + result = self.datasource.update_metadata_if_supported([ + EventType.HOTPLUG]) + if not result: + raise RuntimeError( + 'Datasource %s not updated for ' + 'event %s' % (self.datasource, EventType.HOTPLUG) + ) + return result + + +class NetHandler(UeventHandler): + def __init__(self, datasource, devpath, action, success_fn): + # convert devpath to mac address + id = read_sys_net_safe(os.path.basename(devpath), 'address') + super().__init__(id, datasource, devpath, action, success_fn) + + def apply(self): + self.datasource.distro.apply_network_config( + self.config, + bring_up=False, + ) + interface_name = os.path.basename(self.devpath) + activator = activators.select_activator() + if self.action == 'add': + if not activator.bring_up_interface(interface_name): + raise RuntimeError( + 'Failed to bring up device: {}'.format(self.devpath)) + elif self.action == 'remove': + if not activator.bring_down_interface(interface_name): + raise RuntimeError( + 'Failed to bring down device: {}'.format(self.devpath)) + + @property + def config(self): + return self.datasource.network_config + + def device_detected(self) -> bool: + netstate = parse_net_config_data(self.config) + found = [ + iface for iface in netstate.iter_interfaces() + if iface.get('mac_address') == self.id + ] + LOG.debug('Ifaces with ID=%s : 
%s', self.id, found)
+        return len(found) > 0
+
+
+SUBSYSTEM_PROPERTIES_MAP = {
+    'net': (NetHandler, EventScope.NETWORK),
+}
+
+
+def handle_hotplug(
+        hotplug_init: Init, devpath, subsystem, udevaction
+):
+    handler_cls, event_scope = SUBSYSTEM_PROPERTIES_MAP.get(
+        subsystem, (None, None)
+    )
+    if handler_cls is None:
+        raise Exception(
+            'hotplug-hook: cannot handle events for subsystem: {}'.format(
+                subsystem))
+
+    LOG.debug('Fetching datasource')
+    datasource = hotplug_init.fetch(existing="trust")
+
+    if not hotplug_init.update_event_enabled(
+        event_source_type=EventType.HOTPLUG,
+        scope=EventScope.NETWORK
+    ):
+        LOG.debug('hotplug not enabled for event of type %s', event_scope)
+        return
+
+    LOG.debug('Creating %s event handler', subsystem)
+    event_handler = handler_cls(
+        datasource=datasource,
+        devpath=devpath,
+        action=udevaction,
+        success_fn=hotplug_init._write_to_cache
+    )  # type: UeventHandler
+    wait_times = [1, 3, 5, 10, 30]
+    for attempt, wait in enumerate(wait_times):
+        LOG.debug(
+            'subsystem=%s update attempt %s/%s',
+            subsystem,
+            attempt,
+            len(wait_times)
+        )
+        try:
+            LOG.debug('Refreshing metadata')
+            event_handler.update_metadata()
+            LOG.debug('Detecting device in updated metadata')
+            event_handler.detect_hotplugged_device()
+            LOG.debug('Applying config change')
+            event_handler.apply()
+            LOG.debug('Updating cache')
+            event_handler.success()
+            break
+        except Exception as e:
+            LOG.debug('Exception while processing hotplug event. %s', e)
+            time.sleep(wait)
+            last_exception = e
+    else:
+        raise last_exception  # type: ignore
+
+
+def handle_args(name, args):
+    # Note that if an exception happens between now and when logging is
+    # setup, we'll only see it in the journal
+    hotplug_reporter = events.ReportEventStack(
+        name, __doc__, reporting_enabled=True
+    )
+
+    hotplug_init = Init(ds_deps=[], reporter=hotplug_reporter)
+    hotplug_init.read_cfg()
+
+    log.setupLogging(hotplug_init.cfg)
+    if 'reporting' in hotplug_init.cfg:
+        reporting.update_configuration(hotplug_init.cfg.get('reporting'))
+
+    # Logging isn't going to be setup until now
+    LOG.debug(
+        '%s called with the following arguments: {udevaction: %s, '
+        'subsystem: %s, devpath: %s}',
+        name, args.udevaction, args.subsystem, args.devpath
+    )
+
+    with hotplug_reporter:
+        try:
+            handle_hotplug(
+                hotplug_init=hotplug_init,
+                devpath=args.devpath,
+                subsystem=args.subsystem,
+                udevaction=args.udevaction,
+            )
+        except Exception:
+            LOG.exception('Received fatal exception handling hotplug!')
+            raise
+
+    LOG.debug('Exiting hotplug handler')
+    reporting.flush_events()
+
+
+if __name__ == '__main__':
+    args = get_parser().parse_args()
+    handle_args(NAME, args)
diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py
index 1a3c46a4..be304630 100644
--- a/cloudinit/cmd/devel/parser.py
+++ b/cloudinit/cmd/devel/parser.py
@@ -7,6 +7,7 @@ import argparse
 from cloudinit.config import schema
+from . import hotplug_hook
 from . import net_convert
 from . import render
 from .
import make_mime @@ -21,6 +22,8 @@ def get_parser(parser=None): subparsers.required = True subcmds = [ + (hotplug_hook.NAME, hotplug_hook.__doc__, + hotplug_hook.get_parser, hotplug_hook.handle_args), ('schema', 'Validate cloud-config files for document schema', schema.get_parser, schema.handle_schema_args), (net_convert.NAME, net_convert.__doc__, diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 2caa8bc2..7bdf2197 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -206,8 +206,15 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): def generate_fallback_config(self): return net.generate_fallback_config() - def apply_network_config(self, netconfig, bring_up=False): - # apply network config netconfig + def apply_network_config(self, netconfig, bring_up=False) -> bool: + """Apply the network config. + + If bring_up is True, attempt to bring up the passed in devices. If + devices is None, attempt to bring up devices returned by + _write_network_config. + + Returns True if any devices failed to come up, otherwise False. + """ # This method is preferred to apply_network which only takes # a much less complete network config format (interfaces(5)). network_state = parse_net_config_data(netconfig) diff --git a/cloudinit/event.py b/cloudinit/event.py index 76a0afc6..53ad4c25 100644 --- a/cloudinit/event.py +++ b/cloudinit/event.py @@ -29,6 +29,7 @@ class EventType(Enum): BOOT = "boot" BOOT_NEW_INSTANCE = "boot-new-instance" BOOT_LEGACY = "boot-legacy" + HOTPLUG = 'hotplug' def __str__(self): # pylint: disable=invalid-str-returned return self.value diff --git a/cloudinit/net/activators.py b/cloudinit/net/activators.py index 34fee3bf..84aaafc9 100644 --- a/cloudinit/net/activators.py +++ b/cloudinit/net/activators.py @@ -15,31 +15,80 @@ from cloudinit.net.sysconfig import NM_CFG_FILE LOG = logging.getLogger(__name__) +def _alter_interface(cmd, device_name) -> bool: + LOG.debug("Attempting command %s for device %s", cmd, device_name) + try: + (_out, err) = subp.subp(cmd) + if len(err): + LOG.warning("Running %s resulted in stderr output: %s", + cmd, err) + return True + except subp.ProcessExecutionError: + util.logexc(LOG, "Running interface command %s failed", cmd) + return False + + class NetworkActivator(ABC): @staticmethod @abstractmethod def available() -> bool: + """Return True if activator is available, otherwise return False.""" raise NotImplementedError() @staticmethod @abstractmethod def bring_up_interface(device_name: str) -> bool: + """Bring up interface. + + Return True is successful, otherwise return False + """ + raise NotImplementedError() + + @staticmethod + @abstractmethod + def bring_down_interface(device_name: str) -> bool: + """Bring down interface. + + Return True is successful, otherwise return False + """ raise NotImplementedError() @classmethod def bring_up_interfaces(cls, device_names: Iterable[str]) -> bool: - all_succeeded = True - for device in device_names: - if not cls.bring_up_interface(device): - all_succeeded = False - return all_succeeded + """Bring up specified list of interfaces. + + Return True is successful, otherwise return False + """ + return all(cls.bring_up_interface(device) for device in device_names) @classmethod def bring_up_all_interfaces(cls, network_state: NetworkState) -> bool: + """Bring up all interfaces. 
+ + Return True is successful, otherwise return False + """ return cls.bring_up_interfaces( [i['name'] for i in network_state.iter_interfaces()] ) + @classmethod + def bring_down_interfaces(cls, device_names: Iterable[str]) -> bool: + """Bring down specified list of interfaces. + + Return True is successful, otherwise return False + """ + return all(cls.bring_down_interface(device) for device in device_names) + + @classmethod + def bring_down_all_interfaces(cls, network_state: NetworkState) -> bool: + """Bring down all interfaces. + + Return True is successful, otherwise return False + """ + return cls.bring_down_interfaces( + [i['name'] for i in network_state.iter_interfaces()] + ) + class IfUpDownActivator(NetworkActivator): # Note that we're not overriding bring_up_interfaces to pass something @@ -53,24 +102,27 @@ class IfUpDownActivator(NetworkActivator): @staticmethod def bring_up_interface(device_name: str) -> bool: - """Bring up interface using ifup.""" + """Bring up interface using ifup. + + Return True is successful, otherwise return False + """ cmd = ['ifup', device_name] - LOG.debug("Attempting to run bring up interface %s using command %s", - device_name, cmd) - try: - (_out, err) = subp.subp(cmd) - if len(err): - LOG.warning("Running %s resulted in stderr output: %s", - cmd, err) - return True - except subp.ProcessExecutionError: - util.logexc(LOG, "Running interface command %s failed", cmd) - return False + return _alter_interface(cmd, device_name) + + @staticmethod + def bring_down_interface(device_name: str) -> bool: + """Bring up interface using ifup. + + Return True is successful, otherwise return False + """ + cmd = ['ifdown', device_name] + return _alter_interface(cmd, device_name) class NetworkManagerActivator(NetworkActivator): @staticmethod def available(target=None) -> bool: + """ Return true if network manager can be used on this system.""" config_present = os.path.isfile( subp.target_path(target, path=NM_CFG_FILE) ) @@ -79,44 +131,86 @@ class NetworkManagerActivator(NetworkActivator): @staticmethod def bring_up_interface(device_name: str) -> bool: - try: - subp.subp(['nmcli', 'connection', 'up', device_name]) - except subp.ProcessExecutionError: - util.logexc(LOG, "nmcli failed to bring up {}".format(device_name)) - return False - return True + """Bring up interface using nmcli. + + Return True is successful, otherwise return False + """ + cmd = ['nmcli', 'connection', 'up', 'ifname', device_name] + return _alter_interface(cmd, device_name) + + @staticmethod + def bring_down_interface(device_name: str) -> bool: + """Bring down interface using nmcli. + + Return True is successful, otherwise return False + """ + cmd = ['nmcli', 'connection', 'down', device_name] + return _alter_interface(cmd, device_name) class NetplanActivator(NetworkActivator): + NETPLAN_CMD = ['netplan', 'apply'] + @staticmethod def available(target=None) -> bool: + """ Return true if netplan can be used on this system.""" return netplan_available(target=target) - @staticmethod - def _apply_netplan(): - LOG.debug('Applying current netplan config') - try: - subp.subp(['netplan', 'apply'], capture=True) - except subp.ProcessExecutionError: - util.logexc(LOG, "netplan apply failed") - return False - return True - @staticmethod def bring_up_interface(device_name: str) -> bool: + """Apply netplan config. 
+ + Return True is successful, otherwise return False + """ LOG.debug("Calling 'netplan apply' rather than " - "bringing up individual interfaces") - return NetplanActivator._apply_netplan() + "altering individual interfaces") + return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all') @staticmethod def bring_up_interfaces(device_names: Iterable[str]) -> bool: + """Apply netplan config. + + Return True is successful, otherwise return False + """ LOG.debug("Calling 'netplan apply' rather than " - "bringing up individual interfaces") - return NetplanActivator._apply_netplan() + "altering individual interfaces") + return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all') @staticmethod def bring_up_all_interfaces(network_state: NetworkState) -> bool: - return NetplanActivator._apply_netplan() + """Apply netplan config. + + Return True is successful, otherwise return False + """ + return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all') + + @staticmethod + def bring_down_interface(device_name: str) -> bool: + """Apply netplan config. + + Return True is successful, otherwise return False + """ + LOG.debug("Calling 'netplan apply' rather than " + "altering individual interfaces") + return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all') + + @staticmethod + def bring_down_interfaces(device_names: Iterable[str]) -> bool: + """Apply netplan config. + + Return True is successful, otherwise return False + """ + LOG.debug("Calling 'netplan apply' rather than " + "altering individual interfaces") + return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all') + + @staticmethod + def bring_down_all_interfaces(network_state: NetworkState) -> bool: + """Apply netplan config. + + Return True is successful, otherwise return False + """ + return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all') # This section is mostly copied and pasted from renderers.py. An abstract @@ -153,4 +247,6 @@ def select_activator(priority=None, target=None) -> Type[NetworkActivator]: raise RuntimeError( "No available network activators found%s. Searched " "through list: %s" % (tmsg, priority)) - return found[0] + selected = found[0] + LOG.debug('Using selected activator: %s', selected) + return selected diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 62756cf7..19c8d126 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -12,9 +12,8 @@ from cloudinit import log as logging from cloudinit import sources from cloudinit import subp from cloudinit import util - +from cloudinit.event import EventScope, EventType from cloudinit.net import eni - from cloudinit.sources.DataSourceIBMCloud import get_ibm_platform from cloudinit.sources.helpers import openstack @@ -37,6 +36,13 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): dsname = 'ConfigDrive' + supported_update_events = {EventScope.NETWORK: { + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT, + EventType.BOOT_LEGACY, + EventType.HOTPLUG, + }} + def __init__(self, sys_cfg, distro, paths): super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths) self.source = None diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 8a7f7c60..700437b0 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -76,6 +76,13 @@ class DataSourceEc2(sources.DataSource): # Whether we want to get network configuration from the metadata service. 
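# Editorial note (not part of the patch): a datasource opts in to hotplug by
# listing EventType.HOTPLUG under EventScope.NETWORK in
# supported_update_events, as ConfigDrive does above and Ec2/OpenStack do
# below. Even on a supporting datasource the event stays inert until user
# data enables it:
#     #cloud-config
#     updates:
#       network:
#         when: ['hotplug']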
perform_dhcp_setup = False + supported_update_events = {EventScope.NETWORK: { + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT, + EventType.BOOT_LEGACY, + EventType.HOTPLUG, + }} + def __init__(self, sys_cfg, distro, paths): super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) self.metadata_address = None diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 619a171e..a85b71d7 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -8,11 +8,11 @@ import time from cloudinit import dmi from cloudinit import log as logging -from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError from cloudinit import sources from cloudinit import url_helper from cloudinit import util - +from cloudinit.event import EventScope, EventType +from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError from cloudinit.sources.helpers import openstack from cloudinit.sources import DataSourceOracle as oracle @@ -46,6 +46,13 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): # Whether we want to get network configuration from the metadata service. perform_dhcp_setup = False + supported_update_events = {EventScope.NETWORK: { + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT, + EventType.BOOT_LEGACY, + EventType.HOTPLUG + }} + def __init__(self, sys_cfg, distro, paths): super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths) self.metadata_address = None diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 9d25b0ee..bf6bf139 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -23,6 +23,7 @@ from cloudinit import type_utils from cloudinit import user_data as ud from cloudinit import util from cloudinit.atomic_helper import write_json +from cloudinit.distros import Distro from cloudinit.event import EventScope, EventType from cloudinit.filters import launch_index from cloudinit.persistence import CloudInitPickleMixin @@ -215,7 +216,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): _ci_pkl_version = 1 - def __init__(self, sys_cfg, distro, paths, ud_proc=None): + def __init__(self, sys_cfg, distro: Distro, paths, ud_proc=None): self.sys_cfg = sys_cfg self.distro = distro self.paths = paths diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 06e0d9b1..bc164fa0 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -241,7 +241,7 @@ class Init(object): else: return (None, "cache invalid in datasource: %s" % ds) - def _get_data_source(self, existing): + def _get_data_source(self, existing) -> sources.DataSource: if self.datasource is not NULL_DATA_SOURCE: return self.datasource @@ -267,7 +267,7 @@ class Init(object): cfg_list, pkg_list, self.reporter) LOG.info("Loaded datasource %s - %s", dsname, ds) - self.datasource = ds + self.datasource = ds # type: sources.DataSource # Ensure we adjust our path members datasource # now that we have one (thus allowing ipath to be used) self._reset() diff --git a/doc/rtd/topics/cli.rst b/doc/rtd/topics/cli.rst index 0ff230b5..b6115ed6 100644 --- a/doc/rtd/topics/cli.rst +++ b/doc/rtd/topics/cli.rst @@ -119,6 +119,10 @@ Current subcommands: schema errors locally without the need for deployment. Schema validation is work in progress and supports a subset of cloud-config modules. + * ``hotplug-hook``: respond to newly added system devices by retrieving + updated system metadata and bringing up/down the corresponding device. 
+ This command is intended to be called via a systemd service and is + not considered user-accessible except for debugging purposes. .. _cli_features: diff --git a/doc/rtd/topics/events.rst b/doc/rtd/topics/events.rst index 463208cc..984e7577 100644 --- a/doc/rtd/topics/events.rst +++ b/doc/rtd/topics/events.rst @@ -20,11 +20,11 @@ event types: boot: once during Local stage, then again in Network stage. As this behavior was previously the default behavior, this option exists to prevent regressing such behavior. +- **HOTPLUG**: Dynamic add of a system device Future work will likely include infrastructure and support for the following events: -- **HOTPLUG**: Dynamic add of a system device - **METADATA_CHANGE**: An instance's metadata has change - **USER_REQUEST**: Directed request to update @@ -64,6 +64,12 @@ arbitrary values can be used. Each ``scope`` requires a ``when`` element to specify which events are to allowed to be handled. +Hotplug +======= +When the hotplug event is supported by the data source and configured in +user data, cloud-init will respond to the addition or removal of network +interfaces to the system. In addition to fetching and updating the system +metadata, cloud-init will also bring up/down the newly added interface. Examples ======== @@ -77,7 +83,7 @@ On every boot, apply network configuration found in the datasource. # apply network config on every boot updates: network: - when: ['boot'] + when: ['boot', 'hotplug'] .. _Cloud-init: https://launchpad.net/cloud-init .. vi: textwidth=78 diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in index 16138012..b930709b 100644 --- a/packages/redhat/cloud-init.spec.in +++ b/packages/redhat/cloud-init.spec.in @@ -119,6 +119,12 @@ version_pys=$(cd "$RPM_BUILD_ROOT" && find . -name version.py -type f) ( cd "$RPM_BUILD_ROOT" && sed -i "s,@@PACKAGED_VERSION@@,%{version}-%{release}," $version_pys ) +# patch hotplug /usr/libexec script path +hotplug_file=$(cd "$RPM_BUILD_ROOT" && find . 
-name 10-cloud-init-hook-hotplug.rules -type f) + +( cd "$RPM_BUILD_ROOT" && + sed -i "s,/usr/lib,%{_libexecdir}," $hotplug_file ) + %clean rm -rf $RPM_BUILD_ROOT @@ -172,6 +178,7 @@ fi %files /lib/udev/rules.d/66-azure-ephemeral.rules +/lib/udev/rules.d/10-cloud-init-hook-hotplug.rules %if "%{init_system}" == "systemd" /usr/lib/systemd/system-generators/cloud-init-generator diff --git a/setup.py b/setup.py index dcbe0843..7fa03e63 100755 --- a/setup.py +++ b/setup.py @@ -128,6 +128,7 @@ INITSYS_FILES = { 'systemd': [render_tmpl(f) for f in (glob('systemd/*.tmpl') + glob('systemd/*.service') + + glob('systemd/*.socket') + glob('systemd/*.target')) if (is_f(f) and not is_generator(f))], 'systemd.generators': [ @@ -249,6 +250,7 @@ data_files = [ (ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')), (ETC + '/cloud/templates', glob('templates/*')), (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify', + 'tools/hook-hotplug', 'tools/uncloud-init', 'tools/write-ssh-key-fingerprints']), (USR + '/share/bash-completion/completions', diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl old mode 100755 new mode 100644 diff --git a/systemd/cloud-init-hotplugd.service b/systemd/cloud-init-hotplugd.service new file mode 100644 index 00000000..b64632ef --- /dev/null +++ b/systemd/cloud-init-hotplugd.service @@ -0,0 +1,22 @@ +# Paired with cloud-init-hotplugd.socket to read from the FIFO +# /run/cloud-init/hook-hotplug-cmd which is created during a udev network +# add or remove event as processed by 10-cloud-init-hook-hotplug.rules. + +# On start, read args from the FIFO, process and provide structured arguments +# to `cloud-init devel hotplug-hook` which will setup or teardown network +# devices as configured by user-data. + +# Known bug with an enforcing SELinux policy: LP: #1936229 +# cloud-init-hotplud.service will read args from file descriptor 3 + +[Unit] +Description=cloud-init hotplug hook daemon +After=cloud-init-hotplugd.socket + +[Service] +Type=simple +ExecStart=/bin/bash -c 'read args <&3; echo "args=$args"; \ + exec /usr/bin/cloud-init devel hotplug-hook $args; \ + exit 0' +SyslogIdentifier=cloud-init-hotplugd +TimeoutStopSec=5 diff --git a/systemd/cloud-init-hotplugd.socket b/systemd/cloud-init-hotplugd.socket new file mode 100644 index 00000000..aa093016 --- /dev/null +++ b/systemd/cloud-init-hotplugd.socket @@ -0,0 +1,13 @@ +# cloud-init-hotplugd.socket listens on the FIFO file +# /run/cloud-init/hook-hotplug-cmd which is created during a udev network +# add or remove event as processed by 10-cloud-init-hook-hotplug.rules. 
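# Editorial note (not part of the patch): ListenFIFO tells systemd to create
# the named pipe itself and hand it to the paired service as the first
# inherited descriptor; inherited descriptors start at fd 3, which is why
# cloud-init-hotplugd.service reads the argument line with 'read args <&3'.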
+ +# Known bug with an enforcing SELinux policy: LP: #1936229 +[Unit] +Description=cloud-init hotplug hook socket + +[Socket] +ListenFIFO=/run/cloud-init/hook-hotplug-cmd + +[Install] +WantedBy=cloud-init.target diff --git a/tests/integration_tests/modules/test_hotplug.py b/tests/integration_tests/modules/test_hotplug.py new file mode 100644 index 00000000..b683566f --- /dev/null +++ b/tests/integration_tests/modules/test_hotplug.py @@ -0,0 +1,94 @@ +import pytest +import time +import yaml +from collections import namedtuple + +from tests.integration_tests.instances import IntegrationInstance + +USER_DATA = """\ +#cloud-config +updates: + network: + when: ['hotplug'] +""" + +ip_addr = namedtuple('ip_addr', 'interface state ip4 ip6') + + +def _wait_till_hotplug_complete(client, expected_runs=1): + for _ in range(60): + log = client.read_from_file('/var/log/cloud-init.log') + if log.count('Exiting hotplug handler') == expected_runs: + return log + time.sleep(1) + raise Exception('Waiting for hotplug handler failed') + + +def _get_ip_addr(client): + ips = [] + lines = client.execute('ip --brief addr').split('\n') + for line in lines: + attributes = line.split() + interface, state = attributes[0], attributes[1] + ip4_cidr = attributes[2] if len(attributes) > 2 else None + ip6_cidr = attributes[3] if len(attributes) > 3 else None + ip4 = ip4_cidr.split('/')[0] if ip4_cidr else None + ip6 = ip6_cidr.split('/')[0] if ip6_cidr else None + ip = ip_addr(interface, state, ip4, ip6) + ips.append(ip) + return ips + + +@pytest.mark.openstack +@pytest.mark.user_data(USER_DATA) +def test_hotplug_add_remove(client: IntegrationInstance): + ips_before = _get_ip_addr(client) + log = client.read_from_file('/var/log/cloud-init.log') + assert 'Exiting hotplug handler' not in log + + # Add new NIC + added_ip = client.instance.add_network_interface() + _wait_till_hotplug_complete(client) + ips_after_add = _get_ip_addr(client) + new_addition = [ip for ip in ips_after_add if ip.ip4 == added_ip][0] + + assert len(ips_after_add) == len(ips_before) + 1 + assert added_ip not in [ip.ip4 for ip in ips_before] + assert added_ip in [ip.ip4 for ip in ips_after_add] + assert new_addition.state == 'UP' + + netplan_cfg = client.read_from_file('/etc/netplan/50-cloud-init.yaml') + config = yaml.safe_load(netplan_cfg) + assert new_addition.interface in config['network']['ethernets'] + + # Remove new NIC + client.instance.remove_network_interface(added_ip) + _wait_till_hotplug_complete(client, expected_runs=2) + ips_after_remove = _get_ip_addr(client) + assert len(ips_after_remove) == len(ips_before) + assert added_ip not in [ip.ip4 for ip in ips_after_remove] + + netplan_cfg = client.read_from_file('/etc/netplan/50-cloud-init.yaml') + config = yaml.safe_load(netplan_cfg) + assert new_addition.interface not in config['network']['ethernets'] + + +@pytest.mark.openstack +def test_no_hotplug_in_userdata(client: IntegrationInstance): + ips_before = _get_ip_addr(client) + log = client.read_from_file('/var/log/cloud-init.log') + assert 'Exiting hotplug handler' not in log + + # Add new NIC + client.instance.add_network_interface() + _wait_till_hotplug_complete(client) + log = client.read_from_file('/var/log/cloud-init.log') + assert 'hotplug not enabled for event of type network' in log + + ips_after_add = _get_ip_addr(client) + if len(ips_after_add) == len(ips_before) + 1: + # We can see the device, but it should not have been brought up + new_ip = [ip for ip in ips_after_add if ip not in ips_before][0] + assert new_ip.state == 'DOWN' + 
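#         A sketch (not part of the patch; addresses illustrative): sample
#         'ip --brief addr' output that _get_ip_addr above would parse:
#             lo      UNKNOWN  127.0.0.1/8 ::1/128
#             eth0    UP       10.0.0.5/24 fe80::1/64
#         each row becomes ip_addr(interface, state, ip4, ip6), e.g.
#             ip_addr('eth0', 'UP', '10.0.0.5', 'fe80::1')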
else: + assert len(ips_after_add) == len(ips_before) diff --git a/tests/unittests/cmd/devel/test_hotplug_hook.py b/tests/unittests/cmd/devel/test_hotplug_hook.py new file mode 100644 index 00000000..63d2490e --- /dev/null +++ b/tests/unittests/cmd/devel/test_hotplug_hook.py @@ -0,0 +1,218 @@ +import pytest +from collections import namedtuple +from unittest import mock +from unittest.mock import call + +from cloudinit.cmd.devel.hotplug_hook import handle_hotplug +from cloudinit.distros import Distro +from cloudinit.event import EventType +from cloudinit.net.activators import NetworkActivator +from cloudinit.net.network_state import NetworkState +from cloudinit.sources import DataSource +from cloudinit.stages import Init + + +hotplug_args = namedtuple('hotplug_args', 'udevaction, subsystem, devpath') +FAKE_MAC = '11:22:33:44:55:66' + + +@pytest.yield_fixture +def mocks(): + m_init = mock.MagicMock(spec=Init) + m_distro = mock.MagicMock(spec=Distro) + m_datasource = mock.MagicMock(spec=DataSource) + m_datasource.distro = m_distro + m_init.datasource = m_datasource + m_init.fetch.return_value = m_datasource + + read_sys_net = mock.patch( + 'cloudinit.cmd.devel.hotplug_hook.read_sys_net_safe', + return_value=FAKE_MAC + ) + + m_network_state = mock.MagicMock(spec=NetworkState) + parse_net = mock.patch( + 'cloudinit.cmd.devel.hotplug_hook.parse_net_config_data', + return_value=m_network_state + ) + + m_activator = mock.MagicMock(spec=NetworkActivator) + select_activator = mock.patch( + 'cloudinit.cmd.devel.hotplug_hook.activators.select_activator', + return_value=m_activator + ) + + sleep = mock.patch('time.sleep') + + read_sys_net.start() + parse_net.start() + select_activator.start() + m_sleep = sleep.start() + + yield namedtuple('mocks', 'm_init m_network_state m_activator m_sleep')( + m_init=m_init, + m_network_state=m_network_state, + m_activator=m_activator, + m_sleep=m_sleep, + ) + + read_sys_net.stop() + parse_net.stop() + select_activator.stop() + sleep.stop() + + +class TestUnsupportedActions: + def test_unsupported_subsystem(self, mocks): + with pytest.raises( + Exception, + match='cannot handle events for subsystem: not_real' + ): + handle_hotplug( + hotplug_init=mocks.m_init, + devpath='/dev/fake', + subsystem='not_real', + udevaction='add' + ) + + def test_unsupported_udevaction(self, mocks): + with pytest.raises(ValueError, match='Unknown action: not_real'): + handle_hotplug( + hotplug_init=mocks.m_init, + devpath='/dev/fake', + udevaction='not_real', + subsystem='net' + ) + + +class TestHotplug: + def test_succcessful_add(self, mocks): + init = mocks.m_init + mocks.m_network_state.iter_interfaces.return_value = [{ + 'mac_address': FAKE_MAC, + }] + handle_hotplug( + hotplug_init=init, + devpath='/dev/fake', + udevaction='add', + subsystem='net' + ) + init.datasource.update_metadata_if_supported.assert_called_once_with([ + EventType.HOTPLUG + ]) + mocks.m_activator.bring_up_interface.assert_called_once_with('fake') + mocks.m_activator.bring_down_interface.assert_not_called() + init._write_to_cache.assert_called_once_with() + + def test_successful_remove(self, mocks): + init = mocks.m_init + mocks.m_network_state.iter_interfaces.return_value = [{}] + handle_hotplug( + hotplug_init=init, + devpath='/dev/fake', + udevaction='remove', + subsystem='net' + ) + init.datasource.update_metadata_if_supported.assert_called_once_with([ + EventType.HOTPLUG + ]) + mocks.m_activator.bring_down_interface.assert_called_once_with('fake') + mocks.m_activator.bring_up_interface.assert_not_called() + 
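# Editorial note (not part of the patch): test_retry below pins the backoff
# schedule to handle_hotplug's wait_times of [1, 3, 5, 10, 30] seconds; the
# handler retries the full update/detect/apply cycle once per entry and
# re-raises the last exception only after every attempt fails.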
init._write_to_cache.assert_called_once_with() + + def test_update_event_disabled(self, mocks, caplog): + init = mocks.m_init + init.update_event_enabled.return_value = False + handle_hotplug( + hotplug_init=init, + devpath='/dev/fake', + udevaction='remove', + subsystem='net' + ) + assert 'hotplug not enabled for event of type' in caplog.text + init.datasource.update_metadata_if_supported.assert_not_called() + mocks.m_activator.bring_up_interface.assert_not_called() + mocks.m_activator.bring_down_interface.assert_not_called() + init._write_to_cache.assert_not_called() + + def test_update_metadata_failed(self, mocks): + mocks.m_init.datasource.update_metadata_if_supported.return_value = \ + False + with pytest.raises( + RuntimeError, match='Datasource .* not updated for event hotplug' + ): + handle_hotplug( + hotplug_init=mocks.m_init, + devpath='/dev/fake', + udevaction='remove', + subsystem='net' + ) + + def test_detect_hotplugged_device_not_detected_on_add(self, mocks): + mocks.m_network_state.iter_interfaces.return_value = [{}] + with pytest.raises( + RuntimeError, + match='Failed to detect {} in updated metadata'.format(FAKE_MAC) + ): + handle_hotplug( + hotplug_init=mocks.m_init, + devpath='/dev/fake', + udevaction='add', + subsystem='net' + ) + + def test_detect_hotplugged_device_detected_on_remove(self, mocks): + mocks.m_network_state.iter_interfaces.return_value = [{ + 'mac_address': FAKE_MAC, + }] + with pytest.raises( + RuntimeError, + match='Failed to detect .* in updated metadata' + ): + handle_hotplug( + hotplug_init=mocks.m_init, + devpath='/dev/fake', + udevaction='remove', + subsystem='net' + ) + + def test_apply_failed_on_add(self, mocks): + mocks.m_network_state.iter_interfaces.return_value = [{ + 'mac_address': FAKE_MAC, + }] + mocks.m_activator.bring_up_interface.return_value = False + with pytest.raises( + RuntimeError, match='Failed to bring up device: /dev/fake' + ): + handle_hotplug( + hotplug_init=mocks.m_init, + devpath='/dev/fake', + udevaction='add', + subsystem='net' + ) + + def test_apply_failed_on_remove(self, mocks): + mocks.m_network_state.iter_interfaces.return_value = [{}] + mocks.m_activator.bring_down_interface.return_value = False + with pytest.raises( + RuntimeError, match='Failed to bring down device: /dev/fake' + ): + handle_hotplug( + hotplug_init=mocks.m_init, + devpath='/dev/fake', + udevaction='remove', + subsystem='net' + ) + + def test_retry(self, mocks): + with pytest.raises(RuntimeError): + handle_hotplug( + hotplug_init=mocks.m_init, + devpath='/dev/fake', + udevaction='add', + subsystem='net' + ) + assert mocks.m_sleep.call_count == 5 + assert mocks.m_sleep.call_args_list == [ + call(1), call(3), call(5), call(10), call(30) + ] diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py index f11486ff..db825c35 100644 --- a/tests/unittests/test_net_activators.py +++ b/tests/unittests/test_net_activators.py @@ -35,32 +35,8 @@ ethernets: dhcp4: true """ -IF_UP_DOWN_AVAILABLE_CALLS = [ - (('ifquery',), {'search': ['/sbin', '/usr/sbin'], 'target': None}), - (('ifup',), {'search': ['/sbin', '/usr/sbin'], 'target': None}), - (('ifdown',), {'search': ['/sbin', '/usr/sbin'], 'target': None}), -] - -IF_UP_DOWN_CALL_LIST = [ - ((['ifup', 'eth0'], ), {}), - ((['ifup', 'eth1'], ), {}), -] - -NETPLAN_AVAILABLE_CALLS = [ - (('netplan',), {'search': ['/usr/sbin', '/sbin'], 'target': None}), -] - NETPLAN_CALL_LIST = [ - ((['netplan', 'apply'], ), {'capture': True}), -] - -NETWORK_MANAGER_AVAILABLE_CALLS = [ - (('nmcli',), 
{'target': None}), -] - -NETWORK_MANAGER_CALL_LIST = [ - ((['nmcli', 'connection', 'up', 'eth0'], ), {}), - ((['nmcli', 'connection', 'up', 'eth1'], ), {}), + ((['netplan', 'apply'], ), {}), ] @@ -126,23 +102,54 @@ class TestSearchAndSelect: select_activator() -@pytest.mark.parametrize('activator, available_calls, expected_call_list', [ - (IfUpDownActivator, IF_UP_DOWN_AVAILABLE_CALLS, IF_UP_DOWN_CALL_LIST), - (NetplanActivator, NETPLAN_AVAILABLE_CALLS, NETPLAN_CALL_LIST), - (NetworkManagerActivator, NETWORK_MANAGER_AVAILABLE_CALLS, - NETWORK_MANAGER_CALL_LIST), +IF_UP_DOWN_AVAILABLE_CALLS = [ + (('ifquery',), {'search': ['/sbin', '/usr/sbin'], 'target': None}), + (('ifup',), {'search': ['/sbin', '/usr/sbin'], 'target': None}), + (('ifdown',), {'search': ['/sbin', '/usr/sbin'], 'target': None}), +] + +NETPLAN_AVAILABLE_CALLS = [ + (('netplan',), {'search': ['/usr/sbin', '/sbin'], 'target': None}), +] + +NETWORK_MANAGER_AVAILABLE_CALLS = [ + (('nmcli',), {'target': None}), +] + + +@pytest.mark.parametrize('activator, available_calls', [ + (IfUpDownActivator, IF_UP_DOWN_AVAILABLE_CALLS), + (NetplanActivator, NETPLAN_AVAILABLE_CALLS), + (NetworkManagerActivator, NETWORK_MANAGER_AVAILABLE_CALLS), ]) -class TestIfUpDownActivator: +class TestActivatorsAvailable: def test_available( - self, activator, available_calls, expected_call_list, available_mocks + self, activator, available_calls, available_mocks ): activator.available() assert available_mocks.m_which.call_args_list == available_calls + +IF_UP_DOWN_BRING_UP_CALL_LIST = [ + ((['ifup', 'eth0'], ), {}), + ((['ifup', 'eth1'], ), {}), +] + +NETWORK_MANAGER_BRING_UP_CALL_LIST = [ + ((['nmcli', 'connection', 'up', 'ifname', 'eth0'], ), {}), + ((['nmcli', 'connection', 'up', 'ifname', 'eth1'], ), {}), +] + + +@pytest.mark.parametrize('activator, expected_call_list', [ + (IfUpDownActivator, IF_UP_DOWN_BRING_UP_CALL_LIST), + (NetplanActivator, NETPLAN_CALL_LIST), + (NetworkManagerActivator, NETWORK_MANAGER_BRING_UP_CALL_LIST), +]) +class TestActivatorsBringUp: @patch('cloudinit.subp.subp', return_value=('', '')) def test_bring_up_interface( - self, m_subp, activator, available_calls, expected_call_list, - available_mocks + self, m_subp, activator, expected_call_list, available_mocks ): activator.bring_up_interface('eth0') assert len(m_subp.call_args_list) == 1 @@ -150,16 +157,14 @@ class TestIfUpDownActivator: @patch('cloudinit.subp.subp', return_value=('', '')) def test_bring_up_interfaces( - self, m_subp, activator, available_calls, expected_call_list, - available_mocks + self, m_subp, activator, expected_call_list, available_mocks ): activator.bring_up_interfaces(['eth0', 'eth1']) assert expected_call_list == m_subp.call_args_list @patch('cloudinit.subp.subp', return_value=('', '')) def test_bring_up_all_interfaces_v1( - self, m_subp, activator, available_calls, expected_call_list, - available_mocks + self, m_subp, activator, expected_call_list, available_mocks ): network_state = parse_net_config_data(load(V1_CONFIG)) activator.bring_up_all_interfaces(network_state) @@ -168,10 +173,60 @@ class TestIfUpDownActivator: @patch('cloudinit.subp.subp', return_value=('', '')) def test_bring_up_all_interfaces_v2( - self, m_subp, activator, available_calls, expected_call_list, - available_mocks + self, m_subp, activator, expected_call_list, available_mocks ): network_state = parse_net_config_data(load(V2_CONFIG)) activator.bring_up_all_interfaces(network_state) for call in m_subp.call_args_list: assert call in expected_call_list + + 
+
+IF_UP_DOWN_BRING_DOWN_CALL_LIST = [
+    ((['ifdown', 'eth0'], ), {}),
+    ((['ifdown', 'eth1'], ), {}),
+]
+
+NETWORK_MANAGER_BRING_DOWN_CALL_LIST = [
+    ((['nmcli', 'connection', 'down', 'eth0'], ), {}),
+    ((['nmcli', 'connection', 'down', 'eth1'], ), {}),
+]
+
+
+@pytest.mark.parametrize('activator, expected_call_list', [
+    (IfUpDownActivator, IF_UP_DOWN_BRING_DOWN_CALL_LIST),
+    (NetplanActivator, NETPLAN_CALL_LIST),
+    (NetworkManagerActivator, NETWORK_MANAGER_BRING_DOWN_CALL_LIST),
+])
+class TestActivatorsBringDown:
+    @patch('cloudinit.subp.subp', return_value=('', ''))
+    def test_bring_down_interface(
+        self, m_subp, activator, expected_call_list, available_mocks
+    ):
+        activator.bring_down_interface('eth0')
+        assert len(m_subp.call_args_list) == 1
+        assert m_subp.call_args_list[0] == expected_call_list[0]
+
+    @patch('cloudinit.subp.subp', return_value=('', ''))
+    def test_bring_down_interfaces(
+        self, m_subp, activator, expected_call_list, available_mocks
+    ):
+        activator.bring_down_interfaces(['eth0', 'eth1'])
+        assert expected_call_list == m_subp.call_args_list
+
+    @patch('cloudinit.subp.subp', return_value=('', ''))
+    def test_bring_down_all_interfaces_v1(
+        self, m_subp, activator, expected_call_list, available_mocks
+    ):
+        network_state = parse_net_config_data(load(V1_CONFIG))
+        activator.bring_down_all_interfaces(network_state)
+        for call in m_subp.call_args_list:
+            assert call in expected_call_list
+
+    @patch('cloudinit.subp.subp', return_value=('', ''))
+    def test_bring_down_all_interfaces_v2(
+        self, m_subp, activator, expected_call_list, available_mocks
+    ):
+        network_state = parse_net_config_data(load(V2_CONFIG))
+        activator.bring_down_all_interfaces(network_state)
+        for call in m_subp.call_args_list:
+            assert call in expected_call_list
diff --git a/tools/hook-hotplug b/tools/hook-hotplug
new file mode 100755
index 00000000..34e95929
--- /dev/null
+++ b/tools/hook-hotplug
@@ -0,0 +1,21 @@
+#!/bin/bash
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# This script checks if cloud-init has hotplug hooked and if
+# cloud-init has finished; if so invoke cloud-init hotplug-hook
+
+is_finished() {
+    [ -e /run/cloud-init/result.json ]
+}
+
+if is_finished; then
+    # open cloud-init's hotplug-hook fifo rw
+    exec 3<>/run/cloud-init/hook-hotplug-cmd
+    env_params=(
+        --devpath="${DEVPATH}"
+        --subsystem="${SUBSYSTEM}"
+        --udevaction="${ACTION}"
+    )
+    # write params to cloud-init's hotplug-hook fifo
+    echo "${env_params[@]}" >&3
+fi
diff --git a/udev/10-cloud-init-hook-hotplug.rules b/udev/10-cloud-init-hook-hotplug.rules
new file mode 100644
index 00000000..2e382679
--- /dev/null
+++ b/udev/10-cloud-init-hook-hotplug.rules
@@ -0,0 +1,6 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+# Handle device adds only
+ACTION!="add|remove", GOTO="cloudinit_end"
+LABEL="cloudinit_hook"
+SUBSYSTEM=="net|block", RUN+="/usr/lib/cloud-init/hook-hotplug"
+LABEL="cloudinit_end"
-- cgit v1.2.3


From 824977bd58bae601600682f134bfec00b0c69bbd Mon Sep 17 00:00:00 2001
From: James Falcon
Date: Thu, 29 Jul 2021 12:29:46 -0500
Subject: testing: fix test_ssh_import_id.py (#954)

test_ssh_import_id.py occasionally fails because cloud-init finishes
before the keys have been fully imported. A retry has been added to
the test.
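As a rough sketch of the retry semantics being added (the function
below is a hypothetical illustration, not part of the patch): the
decorator swallows exceptions for up to `tries` attempts, sleeping
`delay` seconds between attempts, and re-raises only the last error
if every attempt fails.

    from tests.integration_tests.util import retry

    attempts = {'count': 0}

    @retry(tries=5, delay=1)
    def eventually_succeeds():
        # Fails twice, then passes; the decorator absorbs the early
        # failures and sleeps between attempts.
        attempts['count'] += 1
        if attempts['count'] < 3:
            raise RuntimeError('not ready yet')

    eventually_succeeds()  # passes on the third attempt
    assert attempts['count'] == 3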
---
 .../modules/test_ssh_import_id.py        |  6 +++++
 tests/integration_tests/util.py          | 30 ++++++++++++++++++++++
 2 files changed, 36 insertions(+)

(limited to 'tests/integration_tests')

diff --git a/tests/integration_tests/modules/test_ssh_import_id.py b/tests/integration_tests/modules/test_ssh_import_id.py
index 3db573b5..b90fe95f 100644
--- a/tests/integration_tests/modules/test_ssh_import_id.py
+++ b/tests/integration_tests/modules/test_ssh_import_id.py
@@ -12,6 +12,7 @@ TODO:
 
 import pytest
 
+from tests.integration_tests.util import retry
 
 USER_DATA = """\
 #cloud-config
@@ -26,6 +27,11 @@ ssh_import_id:
 class TestSshImportId:
 
     @pytest.mark.user_data(USER_DATA)
+    # Retry is needed here because ssh import id is one of the last modules
+    # run, and it fires off a web request, then continues with the rest of
+    # cloud-init. It is possible cloud-init's status is "done" before the
+    # id's have been fully imported.
+    @retry(tries=30, delay=1)
     def test_ssh_import_id(self, client):
         ssh_output = client.read_from_file(
             "/home/ubuntu/.ssh/authorized_keys")
diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py
index ce62ffc8..80430eab 100644
--- a/tests/integration_tests/util.py
+++ b/tests/integration_tests/util.py
@@ -1,3 +1,4 @@
+import functools
 import logging
 import multiprocessing
 import os
@@ -64,3 +65,32 @@ def get_test_rsa_keypair(key_name: str = 'test1') -> key_pair:
     with private_key_path.open() as private_file:
         private_key = private_file.read()
     return key_pair(public_key, private_key)
+
+
+def retry(*, tries: int = 30, delay: int = 1):
+    """Decorator for retries.
+
+    Retry a function until code no longer raises an exception or
+    max tries is reached.
+
+    Example:
+        @retry(tries=5, delay=1)
+        def try_something_that_may_not_be_ready():
+            ...
+    """
+    def _retry(func):
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            last_error = None
+            for _ in range(tries):
+                try:
+                    func(*args, **kwargs)
+                    break
+                except Exception as e:
+                    last_error = e
+                    time.sleep(delay)
+            else:
+                if last_error:
+                    raise last_error
+        return wrapper
+    return _retry
-- cgit v1.2.3


From 758acf976f2cb67a85411467fa5fca2ea17a2283 Mon Sep 17 00:00:00 2001
From: Mal Graty
Date: Tue, 3 Aug 2021 17:08:26 +0100
Subject: Implementing device_aliases as described in docs (#945)

Implement missing device_aliases feature

The device_aliases key has been documented as part of disk_setup for
years; however, the feature was never implemented. This implements the
feature as documented, allowing usercfg (rather than dsconfig) to
create a mapping of device names.

This is not to be confused with disk_aliases, a very similar map that
exists solely for use by datasources.
LP: #1867532 --- cloudinit/config/cc_disk_setup.py | 13 +- cloudinit/config/cc_mounts.py | 17 +- tests/integration_tests/bugs/test_lp1920939.py | 140 --------------- tests/integration_tests/modules/test_disk_setup.py | 192 +++++++++++++++++++++ .../unittests/test_handler/test_handler_mounts.py | 9 + tools/.github-cla-signers | 1 + 6 files changed, 225 insertions(+), 147 deletions(-) delete mode 100644 tests/integration_tests/bugs/test_lp1920939.py create mode 100644 tests/integration_tests/modules/test_disk_setup.py (limited to 'tests/integration_tests') diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index 22af3813..3ec49ca5 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -125,9 +125,15 @@ def handle(_name, cfg, cloud, log, _args): See doc/examples/cloud-config-disk-setup.txt for documentation on the format. """ + device_aliases = cfg.get("device_aliases", {}) + + def alias_to_device(cand): + name = device_aliases.get(cand) + return cloud.device_name_to_device(name or cand) or name + disk_setup = cfg.get("disk_setup") if isinstance(disk_setup, dict): - update_disk_setup_devices(disk_setup, cloud.device_name_to_device) + update_disk_setup_devices(disk_setup, alias_to_device) log.debug("Partitioning disks: %s", str(disk_setup)) for disk, definition in disk_setup.items(): if not isinstance(definition, dict): @@ -145,7 +151,7 @@ def handle(_name, cfg, cloud, log, _args): fs_setup = cfg.get("fs_setup") if isinstance(fs_setup, list): log.debug("setting up filesystems: %s", str(fs_setup)) - update_fs_setup_devices(fs_setup, cloud.device_name_to_device) + update_fs_setup_devices(fs_setup, alias_to_device) for definition in fs_setup: if not isinstance(definition, dict): log.warning("Invalid file system definition: %s" % definition) @@ -174,7 +180,8 @@ def update_disk_setup_devices(disk_setup, tformer): del disk_setup[transformed] disk_setup[transformed] = disk_setup[origname] - disk_setup[transformed]['_origname'] = origname + if isinstance(disk_setup[transformed], dict): + disk_setup[transformed]['_origname'] = origname del disk_setup[origname] LOG.debug("updated disk_setup device entry '%s' to '%s'", origname, transformed) diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index c22d1698..eeb008d2 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -123,7 +123,7 @@ def _is_block_device(device_path, partition_path=None): return os.path.exists(sys_path) -def sanitize_devname(startname, transformer, log): +def sanitize_devname(startname, transformer, log, aliases=None): log.debug("Attempting to determine the real name of %s", startname) # workaround, allow user to specify 'ephemeral' @@ -137,9 +137,14 @@ def sanitize_devname(startname, transformer, log): return startname device_path, partition_number = util.expand_dotted_devname(devname) + orig = device_path + + if aliases: + device_path = aliases.get(device_path, device_path) + if orig != device_path: + log.debug("Mapped device alias %s to %s", orig, device_path) if is_meta_device_name(device_path): - orig = device_path device_path = transformer(device_path) if not device_path: return None @@ -394,6 +399,8 @@ def handle(_name, cfg, cloud, log, _args): fstab_devs[toks[0]] = line fstab_lines.append(line) + device_aliases = cfg.get("device_aliases", {}) + for i in range(len(cfgmnt)): # skip something that wasn't a list if not isinstance(cfgmnt[i], list): @@ -402,7 +409,8 @@ def handle(_name, cfg, cloud, log, _args): 
continue start = str(cfgmnt[i][0]) - sanitized = sanitize_devname(start, cloud.device_name_to_device, log) + sanitized = sanitize_devname(start, cloud.device_name_to_device, log, + aliases=device_aliases) if sanitized != start: log.debug("changed %s => %s" % (start, sanitized)) @@ -444,7 +452,8 @@ def handle(_name, cfg, cloud, log, _args): # entry has the same device name for defmnt in defmnts: start = defmnt[0] - sanitized = sanitize_devname(start, cloud.device_name_to_device, log) + sanitized = sanitize_devname(start, cloud.device_name_to_device, log, + aliases=device_aliases) if sanitized != start: log.debug("changed default device %s => %s" % (start, sanitized)) diff --git a/tests/integration_tests/bugs/test_lp1920939.py b/tests/integration_tests/bugs/test_lp1920939.py deleted file mode 100644 index 408792a6..00000000 --- a/tests/integration_tests/bugs/test_lp1920939.py +++ /dev/null @@ -1,140 +0,0 @@ -""" -Test that disk setup can run successfully on a mounted partition when -partprobe is being used. - -lp-1920939 -""" -import json -import os -import pytest -from uuid import uuid4 -from pycloudlib.lxd.instance import LXDInstance - -from cloudinit.subp import subp -from tests.integration_tests.instances import IntegrationInstance - -DISK_PATH = '/tmp/test_disk_setup_{}'.format(uuid4()) - - -def setup_and_mount_lxd_disk(instance: LXDInstance): - subp('lxc config device add {} test-disk-setup-disk disk source={}'.format( - instance.name, DISK_PATH).split()) - - -@pytest.yield_fixture -def create_disk(): - # 640k should be enough for anybody - subp('dd if=/dev/zero of={} bs=1k count=640'.format(DISK_PATH).split()) - yield - os.remove(DISK_PATH) - - -USERDATA = """\ -#cloud-config -disk_setup: - /dev/sdb: - table_type: mbr - layout: [50, 50] - overwrite: True -fs_setup: - - label: test - device: /dev/sdb1 - filesystem: ext4 - - label: test2 - device: /dev/sdb2 - filesystem: ext4 -mounts: -- ["/dev/sdb1", "/mnt1"] -- ["/dev/sdb2", "/mnt2"] -""" - -UPDATED_USERDATA = """\ -#cloud-config -disk_setup: - /dev/sdb: - table_type: mbr - layout: [100] - overwrite: True -fs_setup: - - label: test3 - device: /dev/sdb1 - filesystem: ext4 -mounts: -- ["/dev/sdb1", "/mnt3"] -""" - - -def _verify_first_disk_setup(client, log): - assert 'Traceback' not in log - assert 'WARN' not in log - lsblk = json.loads(client.execute('lsblk --json')) - sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0] - assert len(sdb['children']) == 2 - assert sdb['children'][0]['name'] == 'sdb1' - assert sdb['children'][0]['mountpoint'] == '/mnt1' - assert sdb['children'][1]['name'] == 'sdb2' - assert sdb['children'][1]['mountpoint'] == '/mnt2' - - -@pytest.mark.user_data(USERDATA) -@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk) -@pytest.mark.ubuntu -@pytest.mark.lxd_vm -# Not bionic or xenial because the LXD agent gets in the way of us -# changing the userdata -@pytest.mark.not_bionic -@pytest.mark.not_xenial -def test_disk_setup_when_mounted(create_disk, client: IntegrationInstance): - """Test lp-1920939. - - We insert an extra disk into our VM, format it to have two partitions, - modify our cloud config to mount devices before disk setup, and modify - our userdata to setup a single partition on the disk. - - This allows cloud-init to attempt disk setup on a mounted partition. - When blockdev is in use, it will fail with - "blockdev: ioctl error on BLKRRPART: Device or resource busy" along - with a warning and a traceback. When partprobe is in use, everything - should work successfully. 
- """ - log = client.read_from_file('/var/log/cloud-init.log') - _verify_first_disk_setup(client, log) - - # Update our userdata and cloud.cfg to mount then perform new disk setup - client.write_to_file( - '/var/lib/cloud/seed/nocloud-net/user-data', - UPDATED_USERDATA - ) - client.execute("sed -i 's/write-files/write-files\\n - mounts/' " - "/etc/cloud/cloud.cfg") - - client.execute('cloud-init clean --logs') - client.restart() - - # Assert new setup works as expected - assert 'Traceback' not in log - assert 'WARN' not in log - - lsblk = json.loads(client.execute('lsblk --json')) - sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0] - assert len(sdb['children']) == 1 - assert sdb['children'][0]['name'] == 'sdb1' - assert sdb['children'][0]['mountpoint'] == '/mnt3' - - -@pytest.mark.user_data(USERDATA) -@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk) -@pytest.mark.ubuntu -@pytest.mark.lxd_vm -def test_disk_setup_no_partprobe(create_disk, client: IntegrationInstance): - """Ensure disk setup still works as expected without partprobe.""" - # We can't do this part in a bootcmd because the path has already - # been found by the time we get to the bootcmd - client.execute('rm $(which partprobe)') - client.execute('cloud-init clean --logs') - client.restart() - - log = client.read_from_file('/var/log/cloud-init.log') - _verify_first_disk_setup(client, log) - - assert 'partprobe' not in log diff --git a/tests/integration_tests/modules/test_disk_setup.py b/tests/integration_tests/modules/test_disk_setup.py new file mode 100644 index 00000000..1fc96c52 --- /dev/null +++ b/tests/integration_tests/modules/test_disk_setup.py @@ -0,0 +1,192 @@ +import json +import os +import pytest +from uuid import uuid4 +from pycloudlib.lxd.instance import LXDInstance + +from cloudinit.subp import subp +from tests.integration_tests.instances import IntegrationInstance + +DISK_PATH = '/tmp/test_disk_setup_{}'.format(uuid4()) + + +def setup_and_mount_lxd_disk(instance: LXDInstance): + subp('lxc config device add {} test-disk-setup-disk disk source={}'.format( + instance.name, DISK_PATH).split()) + + +@pytest.yield_fixture +def create_disk(): + # 640k should be enough for anybody + subp('dd if=/dev/zero of={} bs=1k count=640'.format(DISK_PATH).split()) + yield + os.remove(DISK_PATH) + + +ALIAS_USERDATA = """\ +#cloud-config +device_aliases: + my_alias: /dev/sdb +disk_setup: + my_alias: + table_type: mbr + layout: [50, 50] + overwrite: True +fs_setup: +- label: fs1 + device: my_alias.1 + filesystem: ext4 +- label: fs2 + device: my_alias.2 + filesystem: ext4 +mounts: +- ["my_alias.1", "/mnt1"] +- ["my_alias.2", "/mnt2"] +""" + + +@pytest.mark.user_data(ALIAS_USERDATA) +@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk) +@pytest.mark.ubuntu +@pytest.mark.lxd_vm +class TestDeviceAliases: + """Test devices aliases work on disk setup/mount""" + + def test_device_alias(self, create_disk, client: IntegrationInstance): + log = client.read_from_file('/var/log/cloud-init.log') + assert ( + "updated disk_setup device entry 'my_alias' to '/dev/sdb'" + ) in log + assert 'changed my_alias.1 => /dev/sdb1' in log + assert 'changed my_alias.2 => /dev/sdb2' in log + assert 'WARN' not in log + assert 'Traceback' not in log + + lsblk = json.loads(client.execute('lsblk --json')) + sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0] + assert len(sdb['children']) == 2 + assert sdb['children'][0]['name'] == 'sdb1' + assert sdb['children'][0]['mountpoint'] == '/mnt1' + assert 
sdb['children'][1]['name'] == 'sdb2' + assert sdb['children'][1]['mountpoint'] == '/mnt2' + + +PARTPROBE_USERDATA = """\ +#cloud-config +disk_setup: + /dev/sdb: + table_type: mbr + layout: [50, 50] + overwrite: True +fs_setup: + - label: test + device: /dev/sdb1 + filesystem: ext4 + - label: test2 + device: /dev/sdb2 + filesystem: ext4 +mounts: +- ["/dev/sdb1", "/mnt1"] +- ["/dev/sdb2", "/mnt2"] +""" + +UPDATED_PARTPROBE_USERDATA = """\ +#cloud-config +disk_setup: + /dev/sdb: + table_type: mbr + layout: [100] + overwrite: True +fs_setup: + - label: test3 + device: /dev/sdb1 + filesystem: ext4 +mounts: +- ["/dev/sdb1", "/mnt3"] +""" + + +@pytest.mark.user_data(PARTPROBE_USERDATA) +@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk) +@pytest.mark.ubuntu +@pytest.mark.lxd_vm +class TestPartProbeAvailability: + """Test disk setup works with partprobe + + Disk setup can run successfully on a mounted partition when + partprobe is being used. + + lp-1920939 + """ + + def _verify_first_disk_setup(self, client, log): + assert 'Traceback' not in log + assert 'WARN' not in log + lsblk = json.loads(client.execute('lsblk --json')) + sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0] + assert len(sdb['children']) == 2 + assert sdb['children'][0]['name'] == 'sdb1' + assert sdb['children'][0]['mountpoint'] == '/mnt1' + assert sdb['children'][1]['name'] == 'sdb2' + assert sdb['children'][1]['mountpoint'] == '/mnt2' + + # Not bionic or xenial because the LXD agent gets in the way of us + # changing the userdata + @pytest.mark.not_bionic + @pytest.mark.not_xenial + def test_disk_setup_when_mounted( + self, create_disk, client: IntegrationInstance + ): + """Test lp-1920939. + + We insert an extra disk into our VM, format it to have two partitions, + modify our cloud config to mount devices before disk setup, and modify + our userdata to setup a single partition on the disk. + + This allows cloud-init to attempt disk setup on a mounted partition. + When blockdev is in use, it will fail with + "blockdev: ioctl error on BLKRRPART: Device or resource busy" along + with a warning and a traceback. When partprobe is in use, everything + should work successfully. 
+ """ + log = client.read_from_file('/var/log/cloud-init.log') + self._verify_first_disk_setup(client, log) + + # Update our userdata and cloud.cfg to mount then perform new disk + # setup + client.write_to_file( + '/var/lib/cloud/seed/nocloud-net/user-data', + UPDATED_PARTPROBE_USERDATA, + ) + client.execute( + "sed -i 's/write-files/write-files\\n - mounts/' " + "/etc/cloud/cloud.cfg" + ) + + client.execute('cloud-init clean --logs') + client.restart() + + # Assert new setup works as expected + assert 'Traceback' not in log + assert 'WARN' not in log + + lsblk = json.loads(client.execute('lsblk --json')) + sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0] + assert len(sdb['children']) == 1 + assert sdb['children'][0]['name'] == 'sdb1' + assert sdb['children'][0]['mountpoint'] == '/mnt3' + + def test_disk_setup_no_partprobe( + self, create_disk, client: IntegrationInstance + ): + """Ensure disk setup still works as expected without partprobe.""" + # We can't do this part in a bootcmd because the path has already + # been found by the time we get to the bootcmd + client.execute('rm $(which partprobe)') + client.execute('cloud-init clean --logs') + client.restart() + + log = client.read_from_file('/var/log/cloud-init.log') + self._verify_first_disk_setup(client, log) + + assert 'partprobe' not in log diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py index e87069f6..69e8b30d 100644 --- a/tests/unittests/test_handler/test_handler_mounts.py +++ b/tests/unittests/test_handler/test_handler_mounts.py @@ -133,6 +133,15 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase): disk_path, cc_mounts.sanitize_devname(disk_path, None, mock.Mock())) + def test_device_aliases_remapping(self): + disk_path = '/dev/sda' + self.mock_existence_of_disk(disk_path) + self.assertEqual(disk_path, + cc_mounts.sanitize_devname('mydata', + lambda x: None, + mock.Mock(), + {'mydata': disk_path})) + class TestSwapFileCreation(test_helpers.FilesystemMockingTestCase): diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index e2979ed4..3c2c6d14 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -32,6 +32,7 @@ klausenbusk landon912 lucasmoura lungj +mal mamercad manuelisimo marlluslustosa -- cgit v1.2.3 From 13b6a8575f813699d406f5cab3424c2beffba26f Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 6 Aug 2021 17:36:21 -0500 Subject: testing: port remaining cloud tests to integration testing framework (SC-191) (#955) This should enable us to remove the cloud-tests entirely. --- tests/integration_tests/modules/test_combined.py | 175 +++++++++++++++++++++ .../modules/test_command_output.py | 23 +++ .../integration_tests/modules/test_ntp_servers.py | 89 +++++++++-- tests/integration_tests/modules/test_snap.py | 2 +- 4 files changed, 278 insertions(+), 11 deletions(-) create mode 100644 tests/integration_tests/modules/test_combined.py create mode 100644 tests/integration_tests/modules/test_command_output.py (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py new file mode 100644 index 00000000..97b59558 --- /dev/null +++ b/tests/integration_tests/modules/test_combined.py @@ -0,0 +1,175 @@ +# This file is part of cloud-init. See LICENSE file for license information. +"""A set of somewhat unrelated tests that can be combined into a single +instance launch. 
Generally tests should only be added here if a failure +of the test would be unlikely to affect the running of another test using +the same instance launch. Most independent module coherence tests can go +here. +""" +import json +import pytest +import re +from datetime import date + +from tests.integration_tests.clouds import ImageSpecification +from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.util import verify_ordered_items_in_text + +USER_DATA = """\ +#cloud-config +apt: + primary: + - arches: [default] + uri: http://us.archive.ubuntu.com/ubuntu/ +byobu_by_default: enable +final_message: | + This is my final message! + $version + $timestamp + $datasource + $uptime +locale: en_GB.UTF-8 +locale_configfile: /etc/default/locale +ntp: + servers: ['ntp.ubuntu.com'] +""" + + +@pytest.mark.ci +@pytest.mark.user_data(USER_DATA) +class TestCombined: + def test_final_message(self, class_client: IntegrationInstance): + """Test that final_message module works as expected. + + Also tests LP 1511485: final_message is silent + """ + client = class_client + log = client.read_from_file('/var/log/cloud-init.log') + today = date.today().strftime('%a, %d %b %Y') + expected = ( + 'This is my final message!\n' + r'\d+\.\d+.*\n' + '{}.*\n' + 'DataSource.*\n' + r'\d+\.\d+' + ).format(today) + + assert re.search(expected, log) + + def test_ntp_with_apt(self, class_client: IntegrationInstance): + """LP #1628337. + + cloud-init tries to install NTP before even + configuring the archives. + """ + client = class_client + log = client.read_from_file('/var/log/cloud-init.log') + assert 'W: Failed to fetch' not in log + assert 'W: Some index files failed to download' not in log + assert 'E: Unable to locate package ntp' not in log + + def test_byobu(self, class_client: IntegrationInstance): + """Test byobu configured as enabled by default.""" + client = class_client + assert client.execute('test -e "/etc/byobu/autolaunch"').ok + + def test_configured_locale(self, class_client: IntegrationInstance): + """Test locale can be configured correctly.""" + client = class_client + default_locale = client.read_from_file('/etc/default/locale') + assert 'LANG=en_GB.UTF-8' in default_locale + + locale_a = client.execute('locale -a') + verify_ordered_items_in_text([ + 'en_GB.utf8', + 'en_US.utf8' + ], locale_a) + + locale_gen = client.execute( + "cat /etc/locale.gen | grep -v '^#' | uniq" + ) + verify_ordered_items_in_text([ + 'en_GB.UTF-8', + 'en_US.UTF-8' + ], locale_gen) + + def test_no_problems(self, class_client: IntegrationInstance): + """Test no errors, warnings, or tracebacks""" + client = class_client + status_file = client.read_from_file('/run/cloud-init/status.json') + status_json = json.loads(status_file)['v1'] + for stage in ('init', 'init-local', 'modules-config', 'modules-final'): + assert status_json[stage]['errors'] == [] + result_file = client.read_from_file('/run/cloud-init/result.json') + result_json = json.loads(result_file)['v1'] + assert result_json['errors'] == [] + + log = client.read_from_file('/var/log/cloud-init.log') + assert 'WARN' not in log + assert 'Traceback' not in log + + def _check_common_metadata(self, data): + assert data['base64_encoded_keys'] == [] + assert data['merged_cfg'] == 'redacted for non-root user' + + image_spec = ImageSpecification.from_os_image() + assert data['sys_info']['dist'][0] == image_spec.os + + v1_data = data['v1'] + assert re.match(r'\d\.\d+\.\d+-\d+', v1_data['kernel_release']) + assert v1_data['variant'] == image_spec.os + 
assert v1_data['distro'] == image_spec.os + assert v1_data['distro_release'] == image_spec.release + assert v1_data['machine'] == 'x86_64' + assert re.match(r'3.\d\.\d', v1_data['python_version']) + + @pytest.mark.lxd_container + def test_instance_json_lxd(self, class_client: IntegrationInstance): + client = class_client + instance_json_file = client.read_from_file( + '/run/cloud-init/instance-data.json') + + data = json.loads(instance_json_file) + self._check_common_metadata(data) + v1_data = data['v1'] + assert v1_data['cloud_name'] == 'unknown' + assert v1_data['platform'] == 'lxd' + assert v1_data['subplatform'] == ( + 'seed-dir (/var/lib/cloud/seed/nocloud-net)') + assert v1_data['availability_zone'] is None + assert v1_data['instance_id'] == client.instance.name + assert v1_data['local_hostname'] == client.instance.name + assert v1_data['region'] is None + + @pytest.mark.lxd_vm + def test_instance_json_lxd_vm(self, class_client: IntegrationInstance): + client = class_client + instance_json_file = client.read_from_file( + '/run/cloud-init/instance-data.json') + + data = json.loads(instance_json_file) + self._check_common_metadata(data) + v1_data = data['v1'] + assert v1_data['cloud_name'] == 'unknown' + assert v1_data['platform'] == 'lxd' + assert v1_data['subplatform'] == ( + 'seed-dir (/var/lib/cloud/seed/nocloud-net)') + assert v1_data['availability_zone'] is None + assert v1_data['instance_id'] == client.instance.name + assert v1_data['local_hostname'] == client.instance.name + assert v1_data['region'] is None + + @pytest.mark.ec2 + def test_instance_json_ec2(self, class_client: IntegrationInstance): + client = class_client + instance_json_file = client.read_from_file( + '/run/cloud-init/instance-data.json') + data = json.loads(instance_json_file) + v1_data = data['v1'] + assert v1_data['cloud_name'] == 'aws' + assert v1_data['platform'] == 'ec2' + assert v1_data['subplatform'].startswith('metadata') + assert v1_data[ + 'availability_zone'] == client.instance.availability_zone + assert v1_data['instance_id'] == client.instance.name + assert v1_data['local_hostname'].startswith('ip-') + assert v1_data['region'] == client.cloud.cloud_instance.region diff --git a/tests/integration_tests/modules/test_command_output.py b/tests/integration_tests/modules/test_command_output.py new file mode 100644 index 00000000..15033642 --- /dev/null +++ b/tests/integration_tests/modules/test_command_output.py @@ -0,0 +1,23 @@ +"""Integration test for output redirection. + +This test redirects the output of a command to a file and then checks the file. + +(This is ported from +``tests/cloud_tests/testcases/main/command_output_simple.yaml``.)""" +import pytest + +from tests.integration_tests.instances import IntegrationInstance + + +USER_DATA = """\ +#cloud-config +output: { all: "| tee -a /var/log/cloud-init-test-output" } +final_message: "should be last line in cloud-init-test-output file" +""" + + +@pytest.mark.ci +@pytest.mark.user_data(USER_DATA) +def test_runcmd(client: IntegrationInstance): + log = client.read_from_file('/var/log/cloud-init-test-output') + assert 'should be last line in cloud-init-test-output file' in log diff --git a/tests/integration_tests/modules/test_ntp_servers.py b/tests/integration_tests/modules/test_ntp_servers.py index e72389c1..7a799139 100644 --- a/tests/integration_tests/modules/test_ntp_servers.py +++ b/tests/integration_tests/modules/test_ntp_servers.py @@ -1,15 +1,19 @@ -"""Integration test for the ntp module's ``servers`` functionality with ntp. 
+"""Integration test for the ntp module's ntp functionality. This test specifies the use of the `ntp` NTP client, and ensures that the given NTP servers are configured as expected. -(This is ported from ``tests/cloud_tests/testcases/modules/ntp_servers.yaml``.) +(This is ported from ``tests/cloud_tests/testcases/modules/ntp_servers.yaml``, +``tests/cloud_tests/testcases/modules/ntp_pools.yaml``, +and ``tests/cloud_tests/testcases/modules/ntp_chrony.yaml``) """ import re import yaml import pytest +from tests.integration_tests.instances import IntegrationInstance + USER_DATA = """\ #cloud-config ntp: @@ -17,21 +21,26 @@ ntp: servers: - 172.16.15.14 - 172.16.17.18 + pools: + - 0.cloud-init.mypool + - 1.cloud-init.mypool + - 172.16.15.15 """ EXPECTED_SERVERS = yaml.safe_load(USER_DATA)["ntp"]["servers"] +EXPECTED_POOLS = yaml.safe_load(USER_DATA)["ntp"]["pools"] @pytest.mark.ci @pytest.mark.user_data(USER_DATA) class TestNtpServers: - def test_ntp_installed(self, class_client): + def test_ntp_installed(self, class_client: IntegrationInstance): """Test that `ntpd --version` succeeds, indicating installation.""" - result = class_client.execute("ntpd --version") - assert 0 == result.return_code + assert class_client.execute("ntpd --version").ok - def test_dist_config_file_is_empty(self, class_client): + def test_dist_config_file_is_empty(self, + class_client: IntegrationInstance): """Test that the distributed config file is empty. (This test is skipped on all currently supported Ubuntu releases, so @@ -42,7 +51,7 @@ class TestNtpServers: dist_file = class_client.read_from_file("/etc/ntp.conf.dist") assert 0 == len(dist_file.strip().splitlines()) - def test_ntp_entries(self, class_client): + def test_ntp_entries(self, class_client: IntegrationInstance): ntp_conf = class_client.read_from_file("/etc/ntp.conf") for expected_server in EXPECTED_SERVERS: assert re.search( @@ -50,9 +59,69 @@ class TestNtpServers: ntp_conf, re.MULTILINE ) + for expected_pool in EXPECTED_POOLS: + assert re.search( + r"^pool {} iburst".format(expected_pool), + ntp_conf, + re.MULTILINE + ) - def test_ntpq_servers(self, class_client): + def test_ntpq_servers(self, class_client: IntegrationInstance): result = class_client.execute("ntpq -p -w -n") assert result.ok - for expected_server in EXPECTED_SERVERS: - assert expected_server in result.stdout + for expected_server_or_pool in [*EXPECTED_SERVERS, *EXPECTED_POOLS]: + assert expected_server_or_pool in result.stdout + + +CHRONY_DATA = """\ +#cloud-config +ntp: + enabled: true + ntp_client: chrony +""" + + +@pytest.mark.ci +@pytest.mark.user_data(CHRONY_DATA) +def test_chrony(client: IntegrationInstance): + if client.execute('test -f /etc/chrony.conf').ok: + chrony_conf = '/etc/chrony.conf' + else: + chrony_conf = '/etc/chrony/chrony.conf' + contents = client.read_from_file(chrony_conf) + assert '.pool.ntp.org' in contents + + +TIMESYNCD_DATA = """\ +#cloud-config +ntp: + enabled: true + ntp_client: systemd-timesyncd +""" + + +@pytest.mark.ci +@pytest.mark.user_data(TIMESYNCD_DATA) +def test_timesyncd(client: IntegrationInstance): + contents = client.read_from_file( + '/etc/systemd/timesyncd.conf.d/cloud-init.conf' + ) + assert '.pool.ntp.org' in contents + + +EMPTY_NTP = """\ +#cloud-config +ntp: + ntp_client: ntp + pools: [] + servers: [] +""" + + +@pytest.mark.user_data(EMPTY_NTP) +def test_empty_ntp(client: IntegrationInstance): + assert client.execute('ntpd --version').ok + assert client.execute('test -f /etc/ntp.conf.dist').failed + assert 'pool.ntp.org iburst' in 
client.execute( + 'grep -v "^#" /etc/ntp.conf' + ) diff --git a/tests/integration_tests/modules/test_snap.py b/tests/integration_tests/modules/test_snap.py index 481edbaa..652efa68 100644 --- a/tests/integration_tests/modules/test_snap.py +++ b/tests/integration_tests/modules/test_snap.py @@ -4,7 +4,7 @@ This test specifies a command to be executed by the ``snap`` module and then checks that if that command was executed during boot. (This is ported from -``tests/cloud_tests/testcases/modules/runcmd.yaml``.)""" +``tests/cloud_tests/testcases/modules/snap.yaml``.)""" import pytest -- cgit v1.2.3 From 65607405aed2fb5e7797bb181dc947025c10f346 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 13 Aug 2021 15:34:16 -0500 Subject: Only invoke hotplug socket when functionality is enabled (#952) Alters hotplug hook to have a query mechanism checking if the functionality is enabled. This allows us to avoid using the hotplug socket and service when hotplug is disabled. --- cloudinit/cmd/devel/hotplug_hook.py | 123 ++++++++++++++++-------- cloudinit/sources/__init__.py | 18 ++-- tests/integration_tests/modules/test_hotplug.py | 14 ++- tools/hook-hotplug | 9 +- 4 files changed, 112 insertions(+), 52 deletions(-) (limited to 'tests/integration_tests') diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py index 0282f24a..a0058f03 100644 --- a/cloudinit/cmd/devel/hotplug_hook.py +++ b/cloudinit/cmd/devel/hotplug_hook.py @@ -3,6 +3,7 @@ import abc import argparse import os +import sys import time from cloudinit import log @@ -12,7 +13,7 @@ from cloudinit.net import activators, read_sys_net_safe from cloudinit.net.network_state import parse_net_config_data from cloudinit.reporting import events from cloudinit.stages import Init -from cloudinit.sources import DataSource +from cloudinit.sources import DataSource, DataSourceNotFoundException LOG = log.getLogger(__name__) @@ -31,15 +32,35 @@ def get_parser(parser=None): parser = argparse.ArgumentParser(prog=NAME, description=__doc__) parser.description = __doc__ - parser.add_argument("-d", "--devpath", required=True, - metavar="PATH", - help="sysfs path to hotplugged device") - parser.add_argument("-s", "--subsystem", required=True, - help="subsystem to act on", - choices=['net']) - parser.add_argument("-u", "--udevaction", required=True, - help="action to take", - choices=['add', 'remove']) + parser.add_argument( + "-s", "--subsystem", required=True, + help="subsystem to act on", + choices=['net'] + ) + + subparsers = parser.add_subparsers( + title='Hotplug Action', + dest='hotplug_action' + ) + subparsers.required = True + + subparsers.add_parser( + 'query', + help='query if hotplug is enabled for given subsystem' + ) + + parser_handle = subparsers.add_parser( + 'handle', help='handle the hotplug event') + parser_handle.add_argument( + "-d", "--devpath", required=True, + metavar="PATH", + help="sysfs path to hotplugged device" + ) + parser_handle.add_argument( + "-u", "--udevaction", required=True, + help="action to take", + choices=['add', 'remove'] + ) return parser @@ -133,27 +154,42 @@ SUBSYSTEM_PROPERTES_MAP = { } -def handle_hotplug( - hotplug_init: Init, devpath, subsystem, udevaction -): - handler_cls, event_scope = SUBSYSTEM_PROPERTES_MAP.get( - subsystem, (None, None) - ) - if handler_cls is None: +def is_enabled(hotplug_init, subsystem): + try: + scope = SUBSYSTEM_PROPERTES_MAP[subsystem][1] + except KeyError as e: raise Exception( 'hotplug-hook: cannot handle events for subsystem: {}'.format( - subsystem)) + 
subsystem) + ) from e + + return hotplug_init.update_event_enabled( + event_source_type=EventType.HOTPLUG, + scope=scope + ) + +def initialize_datasource(hotplug_init, subsystem): LOG.debug('Fetching datasource') datasource = hotplug_init.fetch(existing="trust") - if not hotplug_init.update_event_enabled( - event_source_type=EventType.HOTPLUG, - scope=EventScope.NETWORK - ): - LOG.debug('hotplug not enabled for event of type %s', event_scope) + if not datasource.get_supported_events([EventType.HOTPLUG]): + LOG.debug('hotplug not supported for event of type %s', subsystem) return + if not is_enabled(hotplug_init, subsystem): + LOG.debug('hotplug not enabled for event of type %s', subsystem) + return + return datasource + + +def handle_hotplug( + hotplug_init: Init, devpath, subsystem, udevaction +): + datasource = initialize_datasource(hotplug_init, subsystem) + if not datasource: + return + handler_cls = SUBSYSTEM_PROPERTES_MAP[subsystem][0] LOG.debug('Creating %s event handler', subsystem) event_handler = handler_cls( datasource=datasource, @@ -200,29 +236,36 @@ def handle_args(name, args): log.setupLogging(hotplug_init.cfg) if 'reporting' in hotplug_init.cfg: reporting.update_configuration(hotplug_init.cfg.get('reporting')) - # Logging isn't going to be setup until now LOG.debug( - '%s called with the following arguments: {udevaction: %s, ' - 'subsystem: %s, devpath: %s}', - name, args.udevaction, args.subsystem, args.devpath - ) - LOG.debug( - '%s called with the following arguments:\n' - 'udevaction: %s\n' - 'subsystem: %s\n' - 'devpath: %s', - name, args.udevaction, args.subsystem, args.devpath + '%s called with the following arguments: {' + 'hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}', + name, + args.hotplug_action, + args.subsystem, + args.udevaction if 'udevaction' in args else None, + args.devpath if 'devpath' in args else None, ) with hotplug_reporter: try: - handle_hotplug( - hotplug_init=hotplug_init, - devpath=args.devpath, - subsystem=args.subsystem, - udevaction=args.udevaction, - ) + if args.hotplug_action == 'query': + try: + datasource = initialize_datasource( + hotplug_init, args.subsystem) + except DataSourceNotFoundException: + print( + "Unable to determine hotplug state. 
No datasource " + "detected") + sys.exit(1) + print('enabled' if datasource else 'disabled') + else: + handle_hotplug( + hotplug_init=hotplug_init, + devpath=args.devpath, + subsystem=args.subsystem, + udevaction=args.udevaction, + ) except Exception: LOG.exception('Received fatal exception handling hotplug!') raise diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index bf6bf139..cc7e1c3c 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -679,6 +679,16 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): def get_package_mirror_info(self): return self.distro.get_package_mirror_info(data_source=self) + def get_supported_events(self, source_event_types: List[EventType]): + supported_events = {} # type: Dict[EventScope, set] + for event in source_event_types: + for update_scope, update_events in self.supported_update_events.items(): # noqa: E501 + if event in update_events: + if not supported_events.get(update_scope): + supported_events[update_scope] = set() + supported_events[update_scope].add(event) + return supported_events + def update_metadata_if_supported( self, source_event_types: List[EventType] ) -> bool: @@ -694,13 +704,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): @return True if the datasource did successfully update cached metadata due to source_event_type. """ - supported_events = {} # type: Dict[EventScope, set] - for event in source_event_types: - for update_scope, update_events in self.supported_update_events.items(): # noqa: E501 - if event in update_events: - if not supported_events.get(update_scope): - supported_events[update_scope] = set() - supported_events[update_scope].add(event) + supported_events = self.get_supported_events(source_event_types) for scope, matched_events in supported_events.items(): LOG.debug( "Update datasource metadata and %s config due to events: %s", diff --git a/tests/integration_tests/modules/test_hotplug.py b/tests/integration_tests/modules/test_hotplug.py index b683566f..a42d1c8c 100644 --- a/tests/integration_tests/modules/test_hotplug.py +++ b/tests/integration_tests/modules/test_hotplug.py @@ -48,7 +48,7 @@ def test_hotplug_add_remove(client: IntegrationInstance): # Add new NIC added_ip = client.instance.add_network_interface() - _wait_till_hotplug_complete(client) + _wait_till_hotplug_complete(client, expected_runs=2) ips_after_add = _get_ip_addr(client) new_addition = [ip for ip in ips_after_add if ip.ip4 == added_ip][0] @@ -63,7 +63,7 @@ def test_hotplug_add_remove(client: IntegrationInstance): # Remove new NIC client.instance.remove_network_interface(added_ip) - _wait_till_hotplug_complete(client, expected_runs=2) + _wait_till_hotplug_complete(client, expected_runs=4) ips_after_remove = _get_ip_addr(client) assert len(ips_after_remove) == len(ips_before) assert added_ip not in [ip.ip4 for ip in ips_after_remove] @@ -72,6 +72,10 @@ def test_hotplug_add_remove(client: IntegrationInstance): config = yaml.safe_load(netplan_cfg) assert new_addition.interface not in config['network']['ethernets'] + assert 'enabled' == client.execute( + 'cloud-init devel hotplug-hook -s net query' + ) + @pytest.mark.openstack def test_no_hotplug_in_userdata(client: IntegrationInstance): @@ -83,7 +87,7 @@ def test_no_hotplug_in_userdata(client: IntegrationInstance): client.instance.add_network_interface() _wait_till_hotplug_complete(client) log = client.read_from_file('/var/log/cloud-init.log') - assert 'hotplug not enabled for event of type network' in log + assert 
"Event Denied: scopes=['network'] EventType=hotplug" in log ips_after_add = _get_ip_addr(client) if len(ips_after_add) == len(ips_before) + 1: @@ -92,3 +96,7 @@ def test_no_hotplug_in_userdata(client: IntegrationInstance): assert new_ip.state == 'DOWN' else: assert len(ips_after_add) == len(ips_before) + + assert 'disabled' == client.execute( + 'cloud-init devel hotplug-hook -s net query' + ) diff --git a/tools/hook-hotplug b/tools/hook-hotplug index 34e95929..ced268b3 100755 --- a/tools/hook-hotplug +++ b/tools/hook-hotplug @@ -8,12 +8,17 @@ is_finished() { [ -e /run/cloud-init/result.json ] } -if is_finished; then +hotplug_enabled() { + [ "$(cloud-init devel hotplug-hook -s "${SUBSYSTEM}" query)" == "enabled" ] +} + +if is_finished && hotplug_enabled; then # open cloud-init's hotplug-hook fifo rw exec 3<>/run/cloud-init/hook-hotplug-cmd env_params=( - --devpath="${DEVPATH}" --subsystem="${SUBSYSTEM}" + handle + --devpath="${DEVPATH}" --udevaction="${ACTION}" ) # write params to cloud-init's hotplug-hook fifo -- cgit v1.2.3 From b9c96b4f4291b0b8c014817ec6047648dc83845b Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 13 Aug 2021 22:21:52 -0500 Subject: testing: skip upgrade tests on LXD VMs (#980) The issues we see on Bionic VMs don't appear anywhere else, including when invoking kvm directly. It likely has to do with the extra LXD agent setup happening on bionic. Given that we still have Bionic covered on all other platforms, the risk of skipping bionic for LXD VM tests seems low. --- tests/integration_tests/test_upgrade.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py index 5c7a2e1e..376fcc96 100644 --- a/tests/integration_tests/test_upgrade.py +++ b/tests/integration_tests/test_upgrade.py @@ -3,7 +3,7 @@ import logging import os import pytest -from tests.integration_tests.clouds import IntegrationCloud +from tests.integration_tests.clouds import ImageSpecification, IntegrationCloud from tests.integration_tests.conftest import get_validated_source @@ -45,6 +45,15 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud): if not source.installs_new_version(): pytest.skip(UNSUPPORTED_INSTALL_METHOD_MSG.format(source)) return # type checking doesn't understand that skip raises + if (ImageSpecification.from_os_image().release == 'bionic' and + session_cloud.settings.PLATFORM == 'lxd_vm'): + # The issues that we see on Bionic VMs don't appear anywhere + # else, including when calling KVM directly. It likely has to + # do with the extra lxd-agent setup happening on bionic. + # Given that we still have Bionic covered on all other platforms, + # the risk of skipping bionic here seems low enough. + pytest.skip("Upgrade test doesn't run on LXD VMs and bionic") + return launch_kwargs = { 'image_id': session_cloud.released_image_id, -- cgit v1.2.3 From 94679e178613ab5b12327829ca54855ac5b1c1c0 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 20 Aug 2021 11:02:57 -0500 Subject: Add integration test for sensitive jinja substitution (#986) Ensure jinja templates work for both instance-data.json and instance-data-sensitive.json. Test for LP: #1931392 Also removed test_runcmd.py as it's made redundant by this change. 
---
 tests/integration_tests/modules/test_combined.py | 20 +++++++++++++++++++
 tests/integration_tests/modules/test_runcmd.py   | 25 ------------------------
 2 files changed, 20 insertions(+), 25 deletions(-)
 delete mode 100644 tests/integration_tests/modules/test_runcmd.py

(limited to 'tests/integration_tests')

diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py
index 97b59558..27f3c074 100644
--- a/tests/integration_tests/modules/test_combined.py
+++ b/tests/integration_tests/modules/test_combined.py
@@ -15,6 +15,7 @@ from tests.integration_tests.instances import IntegrationInstance
 from tests.integration_tests.util import verify_ordered_items_in_text
 
 USER_DATA = """\
+## template: jinja
 #cloud-config
 apt:
   primary:
@@ -31,6 +32,9 @@ locale: en_GB.UTF-8
 locale_configfile: /etc/default/locale
 ntp:
   servers: ['ntp.ubuntu.com']
+runcmd:
+  - echo {{ds.meta_data.local_hostname}} > /var/tmp/runcmd_output
+  - echo {{merged_cfg.def_log_file}} >> /var/tmp/runcmd_output
 """
 
 
@@ -92,6 +96,22 @@ class TestCombined:
             'en_US.UTF-8'
         ], locale_gen)
 
+    def test_runcmd_with_variable_substitution(
+        self, class_client: IntegrationInstance
+    ):
+        """Test runcmd while including jinja substitution.
+
+        Ensure we can also substitute variables from instance-data-sensitive
+        LP: #1931392
+        """
+        client = class_client
+        expected = [
+            client.execute('hostname').stdout.strip(),
+            '/var/log/cloud-init.log',
+        ]
+        output = client.read_from_file('/var/tmp/runcmd_output')
+        verify_ordered_items_in_text(expected, output)
+
     def test_no_problems(self, class_client: IntegrationInstance):
         """Test no errors, warnings, or tracebacks"""
         client = class_client
diff --git a/tests/integration_tests/modules/test_runcmd.py b/tests/integration_tests/modules/test_runcmd.py
deleted file mode 100644
index 50d1851e..00000000
--- a/tests/integration_tests/modules/test_runcmd.py
+++ /dev/null
@@ -1,25 +0,0 @@
-"""Integration test for the runcmd module.
-
-This test specifies a command to be executed by the ``runcmd`` module
-and then checks if that command was executed during boot.
-
-(This is ported from
-``tests/cloud_tests/testcases/modules/runcmd.yaml``.)"""
-
-import pytest
-
-
-USER_DATA = """\
-#cloud-config
-runcmd:
-  - echo cloud-init run cmd test > /var/tmp/run_cmd
-"""
-
-
-@pytest.mark.ci
-class TestRuncmd:
-
-    @pytest.mark.user_data(USER_DATA)
-    def test_runcmd(self, client):
-        runcmd_output = client.read_from_file("/var/tmp/run_cmd")
-        assert runcmd_output.strip() == "cloud-init run cmd test"
-- cgit v1.2.3


From 7d3f5d750f6111c2716143364ea33486df67c927 Mon Sep 17 00:00:00 2001
From: James Falcon
Date: Fri, 20 Aug 2021 17:09:49 -0500
Subject: Fix home permissions modified by ssh module (SC-338) (#984)

Fix home permissions modified by ssh module

In #956, we updated the file and directory permissions for keys not
in the user's home directory, but unintentionally modified the
permissions within the home directory as well. Those permissions
should not change; this commit restores the previous behavior.
LP: #1940233 --- cloudinit/ssh_util.py | 35 +++++- .../integration_tests/modules/test_ssh_keysfile.py | 132 ++++++++++++++++++--- 2 files changed, 146 insertions(+), 21 deletions(-) (limited to 'tests/integration_tests') diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index b8a3c8f7..9ccadf09 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -321,23 +321,48 @@ def check_create_path(username, filename, strictmodes): home_folder = os.path.dirname(user_pwent.pw_dir) for directory in directories: parent_folder += "/" + directory - if home_folder.startswith(parent_folder): + + # security check, disallow symlinks in the AuthorizedKeysFile path. + if os.path.islink(parent_folder): + LOG.debug( + "Invalid directory. Symlink exists in path: %s", + parent_folder) + return False + + if os.path.isfile(parent_folder): + LOG.debug( + "Invalid directory. File exists in path: %s", + parent_folder) + return False + + if (home_folder.startswith(parent_folder) or + parent_folder == user_pwent.pw_dir): continue - if not os.path.isdir(parent_folder): + if not os.path.exists(parent_folder): # directory does not exist, and permission so far are good: # create the directory, and make it accessible by everyone # but owned by root, as it might be used by many users. with util.SeLinuxGuard(parent_folder): - os.makedirs(parent_folder, mode=0o755, exist_ok=True) - util.chownbyid(parent_folder, root_pwent.pw_uid, - root_pwent.pw_gid) + mode = 0o755 + uid = root_pwent.pw_uid + gid = root_pwent.pw_gid + if parent_folder.startswith(user_pwent.pw_dir): + mode = 0o700 + uid = user_pwent.pw_uid + gid = user_pwent.pw_gid + os.makedirs(parent_folder, mode=mode, exist_ok=True) + util.chownbyid(parent_folder, uid, gid) permissions = check_permissions(username, parent_folder, filename, False, strictmodes) if not permissions: return False + if os.path.islink(filename) or os.path.isdir(filename): + LOG.debug("%s is not a file!", filename) + return False + # check the file if not os.path.exists(filename): # if file does not exist: we need to create it, since the diff --git a/tests/integration_tests/modules/test_ssh_keysfile.py b/tests/integration_tests/modules/test_ssh_keysfile.py index f82d7649..3159feb9 100644 --- a/tests/integration_tests/modules/test_ssh_keysfile.py +++ b/tests/integration_tests/modules/test_ssh_keysfile.py @@ -10,10 +10,10 @@ TEST_USER1_KEYS = get_test_rsa_keypair('test1') TEST_USER2_KEYS = get_test_rsa_keypair('test2') TEST_DEFAULT_KEYS = get_test_rsa_keypair('test3') -USERDATA = """\ +_USERDATA = """\ #cloud-config bootcmd: - - sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile /etc/ssh/authorized_keys %h/.ssh/authorized_keys2;' /etc/ssh/sshd_config + - {bootcmd} ssh_authorized_keys: - {default} users: @@ -24,27 +24,17 @@ users: - name: test_user2 ssh_authorized_keys: - {user2} -""".format( # noqa: E501 +""".format( + bootcmd='{bootcmd}', default=TEST_DEFAULT_KEYS.public_key, user1=TEST_USER1_KEYS.public_key, user2=TEST_USER2_KEYS.public_key, ) -@pytest.mark.ubuntu -@pytest.mark.user_data(USERDATA) -def test_authorized_keys(client: IntegrationInstance): - expected_keys = [ - ('test_user1', '/home/test_user1/.ssh/authorized_keys2', - TEST_USER1_KEYS), - ('test_user2', '/home/test_user2/.ssh/authorized_keys2', - TEST_USER2_KEYS), - ('ubuntu', '/home/ubuntu/.ssh/authorized_keys2', - TEST_DEFAULT_KEYS), - ('root', '/root/.ssh/authorized_keys2', TEST_DEFAULT_KEYS), - ] - +def common_verify(client, expected_keys): for user, filename, keys in expected_keys: + # Ensure key is in the key 
file contents = client.read_from_file(filename) if user in ['ubuntu', 'root']: # Our personal public key gets added by pycloudlib @@ -83,3 +73,113 @@ def test_authorized_keys(client: IntegrationInstance): look_for_keys=False, allow_agent=False, ) + + # Ensure we haven't messed with any /home permissions + # See LP: #1940233 + home_dir = '/home/{}'.format(user) + home_perms = '755' + if user == 'root': + home_dir = '/root' + home_perms = '700' + assert '{} {}'.format(user, home_perms) == client.execute( + 'stat -c "%U %a" {}'.format(home_dir) + ) + if client.execute("test -d {}/.ssh".format(home_dir)).ok: + assert '{} 700'.format(user) == client.execute( + 'stat -c "%U %a" {}/.ssh'.format(home_dir) + ) + assert '{} 600'.format(user) == client.execute( + 'stat -c "%U %a" {}'.format(filename) + ) + + # Also ensure ssh-keygen works as expected + client.execute('mkdir {}/.ssh'.format(home_dir)) + assert client.execute( + "ssh-keygen -b 2048 -t rsa -f {}/.ssh/id_rsa -q -N ''".format( + home_dir) + ).ok + assert client.execute('test -f {}/.ssh/id_rsa'.format(home_dir)) + assert client.execute('test -f {}/.ssh/id_rsa.pub'.format(home_dir)) + + assert 'root 755' == client.execute('stat -c "%U %a" /home') + + +DEFAULT_KEYS_USERDATA = _USERDATA.format(bootcmd='""') + + +@pytest.mark.ubuntu +@pytest.mark.user_data(DEFAULT_KEYS_USERDATA) +def test_authorized_keys_default(client: IntegrationInstance): + expected_keys = [ + ('test_user1', '/home/test_user1/.ssh/authorized_keys', + TEST_USER1_KEYS), + ('test_user2', '/home/test_user2/.ssh/authorized_keys', + TEST_USER2_KEYS), + ('ubuntu', '/home/ubuntu/.ssh/authorized_keys', + TEST_DEFAULT_KEYS), + ('root', '/root/.ssh/authorized_keys', TEST_DEFAULT_KEYS), + ] + common_verify(client, expected_keys) + + +AUTHORIZED_KEYS2_USERDATA = _USERDATA.format(bootcmd=( + "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile " + "/etc/ssh/authorized_keys %h/.ssh/authorized_keys2;' " + "/etc/ssh/sshd_config")) + + +@pytest.mark.ubuntu +@pytest.mark.user_data(AUTHORIZED_KEYS2_USERDATA) +def test_authorized_keys2(client: IntegrationInstance): + expected_keys = [ + ('test_user1', '/home/test_user1/.ssh/authorized_keys2', + TEST_USER1_KEYS), + ('test_user2', '/home/test_user2/.ssh/authorized_keys2', + TEST_USER2_KEYS), + ('ubuntu', '/home/ubuntu/.ssh/authorized_keys2', + TEST_DEFAULT_KEYS), + ('root', '/root/.ssh/authorized_keys2', TEST_DEFAULT_KEYS), + ] + common_verify(client, expected_keys) + + +NESTED_KEYS_USERDATA = _USERDATA.format(bootcmd=( + "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile " + "/etc/ssh/authorized_keys %h/foo/bar/ssh/keys;' " + "/etc/ssh/sshd_config")) + + +@pytest.mark.ubuntu +@pytest.mark.user_data(NESTED_KEYS_USERDATA) +def test_nested_keys(client: IntegrationInstance): + expected_keys = [ + ('test_user1', '/home/test_user1/foo/bar/ssh/keys', + TEST_USER1_KEYS), + ('test_user2', '/home/test_user2/foo/bar/ssh/keys', + TEST_USER2_KEYS), + ('ubuntu', '/home/ubuntu/foo/bar/ssh/keys', + TEST_DEFAULT_KEYS), + ('root', '/root/foo/bar/ssh/keys', TEST_DEFAULT_KEYS), + ] + common_verify(client, expected_keys) + + +EXTERNAL_KEYS_USERDATA = _USERDATA.format(bootcmd=( + "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile " + "/etc/ssh/authorized_keys /etc/ssh/authorized_keys/%u/keys;' " + "/etc/ssh/sshd_config")) + + +@pytest.mark.ubuntu +@pytest.mark.user_data(EXTERNAL_KEYS_USERDATA) +def test_external_keys(client: IntegrationInstance): + expected_keys = [ + ('test_user1', '/etc/ssh/authorized_keys/test_user1/keys', + TEST_USER1_KEYS), + ('test_user2', 
'/etc/ssh/authorized_keys/test_user2/keys',
+         TEST_USER2_KEYS),
+        ('ubuntu', '/etc/ssh/authorized_keys/ubuntu/keys',
+         TEST_DEFAULT_KEYS),
+        ('root', '/etc/ssh/authorized_keys/root/keys', TEST_DEFAULT_KEYS),
+    ]
+    common_verify(client, expected_keys)
-- cgit v1.2.3

From 6803368dec44c8b42196931b3a42d014a10b600d Mon Sep 17 00:00:00 2001
From: James Falcon
Date: Mon, 23 Aug 2021 16:50:40 -0500
Subject: testing: Fix ssh keys integration test (#992)

Home directory permissions changed in hirsute. The integration test
assumed the permissions of earlier releases, so it was fixed to take
both sets of permissions into account.
---
 tests/integration_tests/modules/test_ssh_keysfile.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

(limited to 'tests/integration_tests')

diff --git a/tests/integration_tests/modules/test_ssh_keysfile.py b/tests/integration_tests/modules/test_ssh_keysfile.py
index 3159feb9..5c720578 100644
--- a/tests/integration_tests/modules/test_ssh_keysfile.py
+++ b/tests/integration_tests/modules/test_ssh_keysfile.py
@@ -3,6 +3,7 @@ import pytest
 from io import StringIO
 from paramiko.ssh_exception import SSHException
 
+from tests.integration_tests.clouds import ImageSpecification
 from tests.integration_tests.instances import IntegrationInstance
 from tests.integration_tests.util import get_test_rsa_keypair
 
@@ -77,7 +78,12 @@ def common_verify(client, expected_keys):
         # Ensure we haven't messed with any /home permissions
         # See LP: #1940233
         home_dir = '/home/{}'.format(user)
-        home_perms = '755'
+        # Home permissions aren't consistent between releases. On ubuntu
+        # this can change to 750 once focal is unsupported.
+        if ImageSpecification.from_os_image().release in ("bionic", "focal"):
+            home_perms = '755'
+        else:
+            home_perms = '750'
         if user == 'root':
             home_dir = '/root'
             home_perms = '700'
-- cgit v1.2.3

From 58c2de4c97de6cfa6edbf5319641f2ef71284895 Mon Sep 17 00:00:00 2001
From: Shreenidhi Shedi <53473811+sshedi@users.noreply.github.com>
Date: Wed, 1 Sep 2021 19:53:55 +0530
Subject: Fix `make style-check` errors (#1000)

Use flake8 in place of pyflakes.

Renamed run-pyflakes -> run-flake8
Changed target name to flake8 in Makefile

With pyflakes we can't suppress warnings/errors in a few required
places; flake8 is flexible in that regard (see the inline example
below). flake8 also does the job of pep8, so it is the better choice
here, and the pep8 target was removed from the Makefile along with the
tools/run-pep8 script.
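For instance, flake8 lets an intentionally unused import be silenced in place, something pyflakes offers no mechanism for; this is the pattern applied in several files in this patch:

    from typing import Dict, List  # noqa: F401  # suppress 'imported but unused'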
Included setup.py in flake8 checks --- Makefile | 11 ++++------- cloudinit/cmd/devel/hotplug_hook.py | 3 ++- cloudinit/distros/__init__.py | 2 +- cloudinit/sources/__init__.py | 2 +- cloudinit/stages.py | 2 +- setup.py | 8 ++++++-- tests/integration_tests/clouds.py | 2 +- tests/integration_tests/instances.py | 4 +++- tools/run-flake8 | 17 +++++++++++++++++ tools/run-pep8 | 21 --------------------- tools/run-pyflakes | 17 ----------------- tox.ini | 4 ++-- 12 files changed, 38 insertions(+), 55 deletions(-) create mode 100755 tools/run-flake8 delete mode 100755 tools/run-pep8 delete mode 100755 tools/run-pyflakes (limited to 'tests/integration_tests') diff --git a/Makefile b/Makefile index 5fb0fcbf..0c015dae 100644 --- a/Makefile +++ b/Makefile @@ -18,13 +18,10 @@ all: check check: check_version test yaml -style-check: pep8 $(pyflakes) +style-check: flake8 -pep8: - @$(CWD)/tools/run-pep8 - -pyflakes: - @$(CWD)/tools/run-pyflakes +flake8: + @$(CWD)/tools/run-flake8 unittest: clean_pyc python3 -m pytest -v tests/unittests cloudinit @@ -86,6 +83,6 @@ deb-src: doc: tox -e doc -.PHONY: test pyflakes clean pep8 rpm srpm deb deb-src yaml +.PHONY: test flake8 clean rpm srpm deb deb-src yaml .PHONY: check_version pip-test-requirements pip-requirements clean_pyc .PHONY: unittest style-check doc diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py index a0058f03..d4f0547e 100644 --- a/cloudinit/cmd/devel/hotplug_hook.py +++ b/cloudinit/cmd/devel/hotplug_hook.py @@ -13,7 +13,8 @@ from cloudinit.net import activators, read_sys_net_safe from cloudinit.net.network_state import parse_net_config_data from cloudinit.reporting import events from cloudinit.stages import Init -from cloudinit.sources import DataSource, DataSourceNotFoundException +from cloudinit.sources import DataSource # noqa: F401 +from cloudinit.sources import DataSourceNotFoundException LOG = log.getLogger(__name__) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index a634623a..2e629143 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -16,7 +16,7 @@ import stat import string import urllib.parse from io import StringIO -from typing import Any, Mapping +from typing import Any, Mapping # noqa: F401 from cloudinit import importer from cloudinit import log as logging diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index cc7e1c3c..54b8240a 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -13,7 +13,7 @@ import copy import json import os from collections import namedtuple -from typing import Dict, List +from typing import Dict, List # noqa: F401 from cloudinit import dmi from cloudinit import importer diff --git a/cloudinit/stages.py b/cloudinit/stages.py index bc164fa0..80aa9f5e 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -9,7 +9,7 @@ import os import pickle import sys from collections import namedtuple -from typing import Dict, Set +from typing import Dict, Set # noqa: F401 from cloudinit.settings import ( FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, PER_ONCE, RUN_CLOUD_CONFIG) diff --git a/setup.py b/setup.py index 7fa03e63..100575ff 100755 --- a/setup.py +++ b/setup.py @@ -28,9 +28,11 @@ import subprocess RENDERED_TMPD_PREFIX = "RENDERED_TEMPD" VARIANT = None + def is_f(p): return os.path.isfile(p) + def is_generator(p): return '-generator' in p @@ -111,6 +113,7 @@ def render_tmpl(template, mode=None): # return path relative to setup.py return 
os.path.join(os.path.basename(tmpd), bname) + # User can set the variant for template rendering if '--distro' in sys.argv: idx = sys.argv.index('--distro') @@ -166,7 +169,7 @@ elif os.path.isfile('/etc/system-release-cpe'): with open('/etc/system-release-cpe') as f: cpe_data = f.read().rstrip().split(':') - if cpe_data[1] == "\o": + if cpe_data[1] == "\o": # noqa: W605 # URI formated CPE inc = 0 else: @@ -216,7 +219,8 @@ class InitsysInstallData(install): if self.init_system and isinstance(self.init_system, str): self.init_system = self.init_system.split(",") - if len(self.init_system) == 0 and not platform.system().endswith('BSD'): + if (len(self.init_system) == 0 and + not platform.system().endswith('BSD')): self.init_system = ['systemd'] bad = [f for f in self.init_system if f not in INITSYS_TYPES] diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index f2362b5d..32fdc91e 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -28,7 +28,7 @@ from tests.integration_tests.instances import ( from tests.integration_tests.util import emit_dots_on_travis try: - from typing import Optional + from typing import Optional # noqa: F401 except ImportError: pass diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py index 055ec758..63e0e630 100644 --- a/tests/integration_tests/instances.py +++ b/tests/integration_tests/instances.py @@ -13,7 +13,9 @@ from tests.integration_tests import integration_settings try: from typing import TYPE_CHECKING if TYPE_CHECKING: - from tests.integration_tests.clouds import IntegrationCloud + from tests.integration_tests.clouds import ( # noqa: F401 + IntegrationCloud + ) except ImportError: pass diff --git a/tools/run-flake8 b/tools/run-flake8 new file mode 100755 index 00000000..0021cdb9 --- /dev/null +++ b/tools/run-flake8 @@ -0,0 +1,17 @@ +#!/bin/bash + +CR=" +" +pycheck_dirs=( "cloudinit/" "tests/" "tools/" "setup.py" ) + +set -f +if [ $# -eq 0 ]; then + files=( "${pycheck_dirs[@]}" ) +else + files=( "$@" ) +fi + +cmd=( "python3" -m "flake8" "${files[@]}" ) + +echo "Running: " "${cmd[@]}" 1>&2 +exec "${cmd[@]}" diff --git a/tools/run-pep8 b/tools/run-pep8 deleted file mode 100755 index 4bd0bbfb..00000000 --- a/tools/run-pep8 +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -pycheck_dirs=( "cloudinit/" "tests/" "tools/" ) - -CR=" -" -[ "$1" = "-v" ] && { verbose="$1"; shift; } || verbose="" - -set -f -if [ $# -eq 0 ]; then unset IFS - IFS="$CR" - files=( "${bin_files[@]}" "${pycheck_dirs[@]}" ) - unset IFS -else - files=( "$@" ) -fi - -myname=${0##*/} -cmd=( "${myname#run-}" $verbose "${files[@]}" ) -echo "Running: " "${cmd[@]}" 1>&2 -exec "${cmd[@]}" diff --git a/tools/run-pyflakes b/tools/run-pyflakes deleted file mode 100755 index 179afebe..00000000 --- a/tools/run-pyflakes +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -CR=" -" -pycheck_dirs=( "cloudinit/" "tests/" "tools/" ) - -set -f -if [ $# -eq 0 ]; then - files=( "${pycheck_dirs[@]}" ) -else - files=( "$@" ) -fi - -cmd=( "python3" -m "pyflakes" "${files[@]}" ) - -echo "Running: " "${cmd[@]}" 1>&2 -exec "${cmd[@]}" diff --git a/tox.ini b/tox.ini index 27c16ef3..aad286ff 100644 --- a/tox.ini +++ b/tox.ini @@ -13,7 +13,7 @@ passenv= basepython = python3 deps = flake8==3.8.2 -commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/} +commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/ setup.py} # https://github.com/gabrielfalcao/HTTPretty/issues/223 setenv = @@ -119,7 +119,7 @@ deps = 
pytest==3.0.7 [testenv:tip-flake8] -commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/} +commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/ setup.py} deps = flake8 [testenv:tip-pylint] -- cgit v1.2.3 From 023f97d4e64c267b8bd809510b3fc75fcb9da688 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 15 Sep 2021 10:44:26 -0500 Subject: Integration test upgrades for the 21.3-1 SRU (#1001) * Update test_combined.py to allow either valid LXD subplatform * Split jinja templated tests into separate module as they can be more fragile * Move checks for warnings and tracebacks into dedicated utility function. This allows us to work around persistent and expected tracebacks/warnings on particular clouds. * Update test_upgrade.py to allow either valid Azure datasource. /var/lib/waagent or a mounted device are both valid. * Add specificity to test_ntp_servers.py Clouds will often specify their own ntp servers in the ntp configuration files, so make the tests manually specify their own. * Account for additional keys on system in test_ssh_keysfiles.py * Update tests to account for invalid cache test_user_events.py and test_version_change.py both have tests that assume we will have valid ds cache when rebooting. In test_user_events.py, subsequent boots should block applying network on boot if boot event is denied. However, if the cache is invalid, it is valid to apply networking config that boot. In test_version_change.py no cache found won't trigger the expected debug log. Additionally, the pickle used for that test on an older release triggered an unexpected issue that took a different error path. * Ignore bionic in hotplug tests (LP: #1942247) On Bionic, we traceback when attempting to detect the hotplugged device in the updated metadata. This is because Bionic is specifically configured not to provide network metadata. See LP: #1942247 for more details. * Fix date used in test_final_message. In test_final_message, we ensured the variable substitution works as expected. For $timestamp, we compared against the current date. It's possible for the host date to be massively different from the client date, so obtain date on client rather than host. * Remove module success from lp1813396 test. Module may fail unrelatedly (in this case apt-get update is failing), but the test should still pass. * Skip testing events if network is disabled * Ensure we install expected version of cloud-init As part of test setup, we can install cloud-init from various sources, including PROPOSED, PPAs, etc. We were never checking that this install completes successfully, and on OCI, it wasn't completing successfully because of apt locking issues. Code has been updated to retry, and then fail loudly if we can't complete the install. * Remove ubuntu-azure-fips metapkg which mandates FIPS-flavour kernel In test_lp1835584.py * Update test_user_events.py to account for Azure behavior since Azure has a separate service to clear the pickled metadata every boot * Change failure to warning in test_upgrade.py if initial boot errors If there's already a pre-existing cause for warnings or tracebacks, that shouldn't cause the new version to fail. * Add retry to test_random_passwords_emitted_to_serial_console It's possible we haven't retrieved the entire log when the call returns, so retry a few times if the output isn't empty. 
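Several of the fixes above lean on a small retry decorator from tests/integration_tests/util.py, imported in the diffs that follow. Its implementation is not part of this patch; a minimal sketch of the behavior the tests assume (illustrative, not the repository's exact code):

    import functools
    import time

    def retry(*, tries: int = 30, delay: int = 1):
        """Re-run the decorated callable until it stops raising, at most
        `tries` times, sleeping `delay` seconds between attempts."""
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                last_error = None
                for attempt in range(1, tries + 1):
                    try:
                        return func(*args, **kwargs)
                    except Exception as e:
                        last_error = e
                        if attempt < tries:
                            time.sleep(delay)
                raise last_error
            return wrapper
        return decorator

Used as @retry(tries=30, delay=1) on a test or helper, a failed assertion (for example while apt still holds its lock) simply triggers another attempt instead of failing the run outright.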
--- tests/integration_tests/bugs/test_gh632.py | 6 +-- tests/integration_tests/bugs/test_gh868.py | 4 +- tests/integration_tests/bugs/test_lp1813396.py | 1 - tests/integration_tests/bugs/test_lp1835584.py | 3 ++ tests/integration_tests/bugs/test_lp1886531.py | 4 +- tests/integration_tests/bugs/test_lp1898997.py | 4 +- tests/integration_tests/instances.py | 40 +++++++++++-------- tests/integration_tests/modules/test_combined.py | 46 ++++++++++------------ tests/integration_tests/modules/test_disk_setup.py | 10 ++--- tests/integration_tests/modules/test_hotplug.py | 4 ++ .../modules/test_jinja_templating.py | 30 ++++++++++++++ tests/integration_tests/modules/test_lxd_bridge.py | 4 +- .../integration_tests/modules/test_ntp_servers.py | 8 +++- .../integration_tests/modules/test_set_password.py | 12 ++++++ .../integration_tests/modules/test_ssh_keysfile.py | 10 ++++- .../integration_tests/modules/test_user_events.py | 23 ++++++++--- .../modules/test_version_change.py | 25 ++++++++---- tests/integration_tests/test_upgrade.py | 25 +++++++++--- tests/integration_tests/util.py | 34 ++++++++++++++++ 19 files changed, 212 insertions(+), 81 deletions(-) create mode 100644 tests/integration_tests/modules/test_jinja_templating.py (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/bugs/test_gh632.py b/tests/integration_tests/bugs/test_gh632.py index 3c1f9347..f3702a2e 100644 --- a/tests/integration_tests/bugs/test_gh632.py +++ b/tests/integration_tests/bugs/test_gh632.py @@ -3,16 +3,15 @@ Verify that if cloud-init is using DataSourceRbxCloud, there is no traceback if the metadata disk cannot be found. """ - import pytest from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.util import verify_clean_log # With some datasource hacking, we can run this on a NoCloud instance @pytest.mark.lxd_container @pytest.mark.lxd_vm -@pytest.mark.sru_2020_11 def test_datasource_rbx_no_stacktrace(client: IntegrationInstance): client.write_to_file( '/etc/cloud/cloud.cfg.d/90_dpkg.cfg', @@ -26,8 +25,7 @@ def test_datasource_rbx_no_stacktrace(client: IntegrationInstance): client.restart() log = client.read_from_file('/var/log/cloud-init.log') - assert 'WARNING' not in log - assert 'Traceback' not in log + verify_clean_log(log) assert 'Failed to load metadata and userdata' not in log assert ("Getting data from failed") not in log diff --git a/tests/integration_tests/bugs/test_gh868.py b/tests/integration_tests/bugs/test_gh868.py index 838efca6..73c03451 100644 --- a/tests/integration_tests/bugs/test_gh868.py +++ b/tests/integration_tests/bugs/test_gh868.py @@ -1,6 +1,8 @@ """Ensure no Traceback when 'chef_license' is set""" import pytest + from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.util import verify_clean_log USERDATA = """\ @@ -17,4 +19,4 @@ chef: @pytest.mark.user_data(USERDATA) def test_chef_license(client: IntegrationInstance): log = client.read_from_file('/var/log/cloud-init.log') - assert 'Traceback' not in log + verify_clean_log(log) diff --git a/tests/integration_tests/bugs/test_lp1813396.py b/tests/integration_tests/bugs/test_lp1813396.py index 68b96b1d..27d41c2b 100644 --- a/tests/integration_tests/bugs/test_lp1813396.py +++ b/tests/integration_tests/bugs/test_lp1813396.py @@ -29,6 +29,5 @@ def test_gpg_no_tty(client: IntegrationInstance): "'--keyserver=keyserver.ubuntu.com', '--recv-keys', 'E4D304DF'] " "with allowed return codes [0] (shell=False, capture=True)", "Imported key 'E4D304DF' from 
keyserver 'keyserver.ubuntu.com'", - "finish: modules-config/config-apt-configure: SUCCESS", ] verify_ordered_items_in_text(to_verify, log) diff --git a/tests/integration_tests/bugs/test_lp1835584.py b/tests/integration_tests/bugs/test_lp1835584.py index 660d2a2a..732f2179 100644 --- a/tests/integration_tests/bugs/test_lp1835584.py +++ b/tests/integration_tests/bugs/test_lp1835584.py @@ -59,6 +59,9 @@ def _check_iid_insensitive_across_kernel_upgrade( result = instance.execute("apt-get install linux-azure --assume-yes") if not result.ok: pytest.fail("Unable to install linux-azure kernel: {}".format(result)) + # Remove ubuntu-azure-fips metapkg which mandates FIPS-flavour kernel + result = instance.execute("ua disable fips --assume-yes") + assert result.ok, "Unable to disable fips: {}".format(result) instance.restart() new_kernel = instance.execute("uname -r").strip() assert orig_kernel != new_kernel diff --git a/tests/integration_tests/bugs/test_lp1886531.py b/tests/integration_tests/bugs/test_lp1886531.py index 058ea8bb..6dd61222 100644 --- a/tests/integration_tests/bugs/test_lp1886531.py +++ b/tests/integration_tests/bugs/test_lp1886531.py @@ -11,6 +11,8 @@ https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1886531 """ import pytest +from tests.integration_tests.util import verify_clean_log + USER_DATA = """\ #cloud-config @@ -24,4 +26,4 @@ class TestLp1886531: @pytest.mark.user_data(USER_DATA) def test_lp1886531(self, client): log_content = client.read_from_file("/var/log/cloud-init.log") - assert "WARNING" not in log_content + verify_clean_log(log_content) diff --git a/tests/integration_tests/bugs/test_lp1898997.py b/tests/integration_tests/bugs/test_lp1898997.py index bde93d06..909bc690 100644 --- a/tests/integration_tests/bugs/test_lp1898997.py +++ b/tests/integration_tests/bugs/test_lp1898997.py @@ -10,7 +10,9 @@ network configuration, and confirms that the bridge can be used to ping the default gateway. 
""" import pytest + from tests.integration_tests import random_mac_address +from tests.integration_tests.util import verify_clean_log MAC_ADDRESS = random_mac_address() @@ -59,7 +61,7 @@ class TestInterfaceListingWithOpenvSwitch: cloudinit_output = client.read_from_file("/var/log/cloud-init.log") # Confirm that the network configuration was applied successfully - assert "WARN" not in cloudinit_output + verify_clean_log(cloudinit_output) # Confirm that the applied network config created the OVS bridge assert "ovs-br" in client.execute("ip addr") diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py index 63e0e630..8f66bf43 100644 --- a/tests/integration_tests/instances.py +++ b/tests/integration_tests/instances.py @@ -9,6 +9,7 @@ from pycloudlib.instance import BaseInstance from pycloudlib.result import Result from tests.integration_tests import integration_settings +from tests.integration_tests.util import retry try: from typing import TYPE_CHECKING @@ -142,26 +143,31 @@ class IntegrationInstance: snapshot_id = self.snapshot() self.cloud.snapshot_id = snapshot_id + # assert with retry because we can compete with apt already running in the + # background and get: E: Could not get lock /var/lib/apt/lists/lock - open + # (11: Resource temporarily unavailable) + + @retry(tries=30, delay=1) def install_proposed_image(self): log.info('Installing proposed image') - remote_script = ( + assert self.execute( 'echo deb "http://archive.ubuntu.com/ubuntu ' - '$(lsb_release -sc)-proposed main" | ' - 'tee /etc/apt/sources.list.d/proposed.list\n' - 'apt-get update -q\n' - 'apt-get install -qy cloud-init' - ) - self.execute(remote_script) + '$(lsb_release -sc)-proposed main" >> ' + '/etc/apt/sources.list.d/proposed.list' + ).ok + assert self.execute('apt-get update -q').ok + assert self.execute('apt-get install -qy cloud-init').ok + @retry(tries=30, delay=1) def install_ppa(self): log.info('Installing PPA') - remote_script = ( - 'add-apt-repository {repo} -y && ' - 'apt-get update -q && ' - 'apt-get install -qy cloud-init' - ).format(repo=self.settings.CLOUD_INIT_SOURCE) - self.execute(remote_script) + assert self.execute('add-apt-repository {} -y'.format( + self.settings.CLOUD_INIT_SOURCE) + ).ok + assert self.execute('apt-get update -q').ok + assert self.execute('apt-get install -qy cloud-init').ok + @retry(tries=30, delay=1) def install_deb(self): log.info('Installing deb package') deb_path = integration_settings.CLOUD_INIT_SOURCE @@ -170,13 +176,13 @@ class IntegrationInstance: self.push_file( local_path=integration_settings.CLOUD_INIT_SOURCE, remote_path=remote_path) - remote_script = 'dpkg -i {path}'.format(path=remote_path) - self.execute(remote_script) + assert self.execute('dpkg -i {path}'.format(path=remote_path)).ok + @retry(tries=30, delay=1) def upgrade_cloud_init(self): log.info('Upgrading cloud-init to latest version in archive') - self.execute("apt-get update -q") - self.execute("apt-get install -qy cloud-init") + assert self.execute("apt-get update -q").ok + assert self.execute("apt-get install -qy cloud-init").ok def __enter__(self): return self diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py index 27f3c074..9cd1648a 100644 --- a/tests/integration_tests/modules/test_combined.py +++ b/tests/integration_tests/modules/test_combined.py @@ -8,14 +8,15 @@ here. 
import json import pytest import re -from datetime import date from tests.integration_tests.clouds import ImageSpecification from tests.integration_tests.instances import IntegrationInstance -from tests.integration_tests.util import verify_ordered_items_in_text +from tests.integration_tests.util import ( + verify_clean_log, + verify_ordered_items_in_text, +) USER_DATA = """\ -## template: jinja #cloud-config apt: primary: @@ -33,8 +34,7 @@ locale_configfile: /etc/default/locale ntp: servers: ['ntp.ubuntu.com'] runcmd: - - echo {{ds.meta_data.local_hostname}} > /var/tmp/runcmd_output - - echo {{merged_cfg.def_log_file}} >> /var/tmp/runcmd_output + - echo 'hello world' > /var/tmp/runcmd_output """ @@ -44,11 +44,17 @@ class TestCombined: def test_final_message(self, class_client: IntegrationInstance): """Test that final_message module works as expected. - Also tests LP 1511485: final_message is silent + Also tests LP 1511485: final_message is silent. + + It's possible that if this test is run within a minute or so of + midnight that we'll see a failure because the day in the logs + is different from the day specified in the test definition. """ client = class_client log = client.read_from_file('/var/log/cloud-init.log') - today = date.today().strftime('%a, %d %b %Y') + # Get date on host rather than locally as our host could be in a + # wildly different timezone (or more likely recording UTC) + today = client.execute('date "+%a, %d %b %Y"') expected = ( 'This is my final message!\n' r'\d+\.\d+.*\n' @@ -96,21 +102,10 @@ class TestCombined: 'en_US.UTF-8' ], locale_gen) - def test_runcmd_with_variable_substitution( - self, class_client: IntegrationInstance - ): - """Test runcmd, while including jinja substitution. - - Ensure we can also substitue variables from instance-data-sensitive - LP: #1931392 - """ + def test_runcmd(self, class_client: IntegrationInstance): + """Test runcmd works as expected""" client = class_client - expected = [ - client.execute('hostname').stdout.strip(), - '/var/log/cloud-init.log', - ] - output = client.read_from_file('/var/tmp/runcmd_output') - verify_ordered_items_in_text(expected, output) + assert 'hello world' == client.read_from_file('/var/tmp/runcmd_output') def test_no_problems(self, class_client: IntegrationInstance): """Test no errors, warnings, or tracebacks""" @@ -124,8 +119,7 @@ class TestCombined: assert result_json['errors'] == [] log = client.read_from_file('/var/log/cloud-init.log') - assert 'WARN' not in log - assert 'Traceback' not in log + verify_clean_log(log) def _check_common_metadata(self, data): assert data['base64_encoded_keys'] == [] @@ -171,8 +165,10 @@ class TestCombined: v1_data = data['v1'] assert v1_data['cloud_name'] == 'unknown' assert v1_data['platform'] == 'lxd' - assert v1_data['subplatform'] == ( - 'seed-dir (/var/lib/cloud/seed/nocloud-net)') + assert any([ + '/var/lib/cloud/seed/nocloud-net' in v1_data['subplatform'], + '/dev/sr0' in v1_data['subplatform'] + ]) assert v1_data['availability_zone'] is None assert v1_data['instance_id'] == client.instance.name assert v1_data['local_hostname'] == client.instance.name diff --git a/tests/integration_tests/modules/test_disk_setup.py b/tests/integration_tests/modules/test_disk_setup.py index 1fc96c52..9c9edc46 100644 --- a/tests/integration_tests/modules/test_disk_setup.py +++ b/tests/integration_tests/modules/test_disk_setup.py @@ -6,6 +6,7 @@ from pycloudlib.lxd.instance import LXDInstance from cloudinit.subp import subp from tests.integration_tests.instances import 
IntegrationInstance +from tests.integration_tests.util import verify_clean_log DISK_PATH = '/tmp/test_disk_setup_{}'.format(uuid4()) @@ -59,8 +60,7 @@ class TestDeviceAliases: ) in log assert 'changed my_alias.1 => /dev/sdb1' in log assert 'changed my_alias.2 => /dev/sdb2' in log - assert 'WARN' not in log - assert 'Traceback' not in log + verify_clean_log(log) lsblk = json.loads(client.execute('lsblk --json')) sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0] @@ -120,8 +120,7 @@ class TestPartProbeAvailability: """ def _verify_first_disk_setup(self, client, log): - assert 'Traceback' not in log - assert 'WARN' not in log + verify_clean_log(log) lsblk = json.loads(client.execute('lsblk --json')) sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0] assert len(sdb['children']) == 2 @@ -167,8 +166,7 @@ class TestPartProbeAvailability: client.restart() # Assert new setup works as expected - assert 'Traceback' not in log - assert 'WARN' not in log + verify_clean_log(log) lsblk = json.loads(client.execute('lsblk --json')) sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0] diff --git a/tests/integration_tests/modules/test_hotplug.py b/tests/integration_tests/modules/test_hotplug.py index a42d1c8c..88cd8c16 100644 --- a/tests/integration_tests/modules/test_hotplug.py +++ b/tests/integration_tests/modules/test_hotplug.py @@ -40,6 +40,10 @@ def _get_ip_addr(client): @pytest.mark.openstack +# On Bionic, we traceback when attempting to detect the hotplugged +# device in the updated metadata. This is because Bionic is specifically +# configured not to provide network metadata. +@pytest.mark.not_bionic @pytest.mark.user_data(USER_DATA) def test_hotplug_add_remove(client: IntegrationInstance): ips_before = _get_ip_addr(client) diff --git a/tests/integration_tests/modules/test_jinja_templating.py b/tests/integration_tests/modules/test_jinja_templating.py new file mode 100644 index 00000000..35b8ee2d --- /dev/null +++ b/tests/integration_tests/modules/test_jinja_templating.py @@ -0,0 +1,30 @@ +# This file is part of cloud-init. See LICENSE file for license information. +import pytest + +from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.util import verify_ordered_items_in_text + + +USER_DATA = """\ +## template: jinja +#cloud-config +runcmd: + - echo {{v1.local_hostname}} > /var/tmp/runcmd_output + - echo {{merged_cfg._doc}} >> /var/tmp/runcmd_output +""" + + +@pytest.mark.user_data(USER_DATA) +def test_runcmd_with_variable_substitution(client: IntegrationInstance): + """Test jinja substitution. 
+ + Ensure we can also substitute variables from instance-data-sensitive + LP: #1931392 + """ + expected = [ + client.execute('hostname').stdout.strip(), + ('Merged cloud-init system config from /etc/cloud/cloud.cfg and ' + '/etc/cloud/cloud.cfg.d/') + ] + output = client.read_from_file('/var/tmp/runcmd_output') + verify_ordered_items_in_text(expected, output) diff --git a/tests/integration_tests/modules/test_lxd_bridge.py b/tests/integration_tests/modules/test_lxd_bridge.py index cbf11179..65dce3c7 100644 --- a/tests/integration_tests/modules/test_lxd_bridge.py +++ b/tests/integration_tests/modules/test_lxd_bridge.py @@ -6,6 +6,8 @@ import pytest import yaml +from tests.integration_tests.util import verify_clean_log + USER_DATA = """\ #cloud-config @@ -38,7 +40,7 @@ class TestLxdBridge: def test_bridge(self, class_client): """Check that the given bridge is configured""" cloud_init_log = class_client.read_from_file("/var/log/cloud-init.log") - assert "WARN" not in cloud_init_log + verify_clean_log(cloud_init_log) # The bridge should exist assert class_client.execute("ip addr show lxdbr0") diff --git a/tests/integration_tests/modules/test_ntp_servers.py b/tests/integration_tests/modules/test_ntp_servers.py index 7a799139..59241faa 100644 --- a/tests/integration_tests/modules/test_ntp_servers.py +++ b/tests/integration_tests/modules/test_ntp_servers.py @@ -78,6 +78,8 @@ CHRONY_DATA = """\ ntp: enabled: true ntp_client: chrony + servers: + - 172.16.15.14 """ @@ -89,7 +91,7 @@ def test_chrony(client: IntegrationInstance): else: chrony_conf = '/etc/chrony/chrony.conf' contents = client.read_from_file(chrony_conf) - assert '.pool.ntp.org' in contents + assert 'server 172.16.15.14' in contents TIMESYNCD_DATA = """\ @@ -97,6 +99,8 @@ TIMESYNCD_DATA = """\ ntp: enabled: true ntp_client: systemd-timesyncd + servers: + - 172.16.15.14 """ @@ -106,7 +110,7 @@ def test_timesyncd(client: IntegrationInstance): contents = client.read_from_file( '/etc/systemd/timesyncd.conf.d/cloud-init.conf' ) - assert '.pool.ntp.org' in contents + assert 'NTP=172.16.15.14' in contents EMPTY_NTP = """\ diff --git a/tests/integration_tests/modules/test_set_password.py b/tests/integration_tests/modules/test_set_password.py index d7cf91a5..ac9db19d 100644 --- a/tests/integration_tests/modules/test_set_password.py +++ b/tests/integration_tests/modules/test_set_password.py @@ -13,6 +13,8 @@ import crypt import pytest import yaml +from tests.integration_tests.util import retry + COMMON_USER_DATA = """\ #cloud-config @@ -129,6 +131,7 @@ class Mixin: assert "dick:" not in cloud_init_output assert "harry:" not in cloud_init_output + @retry(tries=30, delay=1) def test_random_passwords_emitted_to_serial_console(self, class_client): """We should emit passwords to the serial console. (LP: #1918303)""" try: @@ -137,6 +140,15 @@ class Mixin: # Assume that an exception here means that we can't use the console # log pytest.skip("NotImplementedError when requesting console log") + return + if console_log.lower() == 'no console output': + # This test retries because we might not have the full console log + # on the first fetch. 
However, if we have no console output + # at all, we don't want to keep retrying as that would trigger + # another 5 minute wait on the pycloudlib side, which could + # leave us waiting for a couple hours + pytest.fail('no console output') + return assert "dick:" in console_log assert "harry:" in console_log diff --git a/tests/integration_tests/modules/test_ssh_keysfile.py b/tests/integration_tests/modules/test_ssh_keysfile.py index 5c720578..b39454e6 100644 --- a/tests/integration_tests/modules/test_ssh_keysfile.py +++ b/tests/integration_tests/modules/test_ssh_keysfile.py @@ -38,9 +38,15 @@ def common_verify(client, expected_keys): # Ensure key is in the key file contents = client.read_from_file(filename) if user in ['ubuntu', 'root']: - # Our personal public key gets added by pycloudlib lines = contents.split('\n') - assert len(lines) == 2 + if user == 'root': + # Our personal public key gets added by pycloudlib in + # addition to the default `ssh_authorized_keys` + assert len(lines) == 2 + else: + # Clouds will insert the keys we've added to our accounts + # or for our launches + assert len(lines) >= 2 assert keys.public_key.strip() in contents else: assert contents.strip() == keys.public_key.strip() diff --git a/tests/integration_tests/modules/test_user_events.py b/tests/integration_tests/modules/test_user_events.py index a45cad72..ee8f05ae 100644 --- a/tests/integration_tests/modules/test_user_events.py +++ b/tests/integration_tests/modules/test_user_events.py @@ -31,9 +31,12 @@ def _add_dummy_bridge_to_netplan(client: IntegrationInstance): @pytest.mark.gce @pytest.mark.oci @pytest.mark.openstack +@pytest.mark.azure @pytest.mark.not_xenial def test_boot_event_disabled_by_default(client: IntegrationInstance): log = client.read_from_file('/var/log/cloud-init.log') + if 'network config is disabled' in log: + pytest.skip("network config disabled. Test doesn't apply") assert 'Applying network configuration' in log assert 'dummy0' not in client.execute('ls /sys/class/net') @@ -43,6 +46,12 @@ def test_boot_event_disabled_by_default(client: IntegrationInstance): client.restart() log2 = client.read_from_file('/var/log/cloud-init.log') + if 'cache invalid in datasource' in log2: + # Invalid cache will get cleared, meaning we'll create a new + # "instance" and apply networking config, so events aren't + # really relevant here + pytest.skip("Test only valid for existing instances") + # We attempt to apply network config twice on every boot. # Ensure neither time works. assert 2 == len( @@ -62,25 +71,27 @@ def test_boot_event_disabled_by_default(client: IntegrationInstance): def _test_network_config_applied_on_reboot(client: IntegrationInstance): log = client.read_from_file('/var/log/cloud-init.log') + if 'network config is disabled' in log: + pytest.skip("network config disabled. 
Test doesn't apply") assert 'Applying network configuration' in log assert 'dummy0' not in client.execute('ls /sys/class/net') _add_dummy_bridge_to_netplan(client) client.execute('rm /var/log/cloud-init.log') client.restart() + log = client.read_from_file('/var/log/cloud-init.log') + if 'cache invalid in datasource' in log: + # Invalid cache will get cleared, meaning we'll create a new + # "instance" and apply networking config, so events aren't + # really relevant here + pytest.skip("Test only valid for existing instances") assert 'Event Allowed: scope=network EventType=boot' in log assert 'Applying network configuration' in log assert 'dummy0' not in client.execute('ls /sys/class/net') -@pytest.mark.azure -@pytest.mark.not_xenial -def test_boot_event_enabled_by_default(client: IntegrationInstance): - _test_network_config_applied_on_reboot(client) - - USER_DATA = """\ #cloud-config updates: diff --git a/tests/integration_tests/modules/test_version_change.py b/tests/integration_tests/modules/test_version_change.py index 4e9ab63f..ffea794a 100644 --- a/tests/integration_tests/modules/test_version_change.py +++ b/tests/integration_tests/modules/test_version_change.py @@ -1,7 +1,7 @@ from pathlib import Path from tests.integration_tests.instances import IntegrationInstance -from tests.integration_tests.util import ASSETS_DIR +from tests.integration_tests.util import ASSETS_DIR, verify_clean_log PICKLE_PATH = Path('/var/lib/cloud/instance/obj.pkl') @@ -10,8 +10,7 @@ TEST_PICKLE = ASSETS_DIR / 'test_version_change.pkl' def _assert_no_pickle_problems(log): assert 'Failed loading pickled blob' not in log - assert 'Traceback' not in log - assert 'WARN' not in log + verify_clean_log(log) def test_reboot_without_version_change(client: IntegrationInstance): @@ -30,7 +29,13 @@ def test_reboot_without_version_change(client: IntegrationInstance): client.push_file(TEST_PICKLE, PICKLE_PATH) client.restart() log = client.read_from_file('/var/log/cloud-init.log') - assert 'Failed loading pickled blob from {}'.format(PICKLE_PATH) in log + + # no cache found is an "expected" upgrade error, and + # "Failed" means we're unable to load the pickle + assert any([ + 'Failed loading pickled blob from {}'.format(PICKLE_PATH) in log, + 'no cache found' in log + ]) def test_cache_purged_on_version_change(client: IntegrationInstance): @@ -48,9 +53,13 @@ def test_log_message_on_missing_version_file(client: IntegrationInstance): # Start by pushing a pickle so we can see the log message client.push_file(TEST_PICKLE, PICKLE_PATH) client.execute("rm /var/lib/cloud/data/python-version") + client.execute("rm /var/log/cloud-init.log") client.restart() log = client.read_from_file('/var/log/cloud-init.log') - assert ( - 'Writing python-version file. ' - 'Cache compatibility status is currently unknown.' - ) in log + if 'no cache found' not in log: + # We don't expect the python version file to exist if we have no + # pre-existing cache + assert ( + 'Writing python-version file. ' + 'Cache compatibility status is currently unknown.' 
+ ) in log diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py index 376fcc96..e90a5f9d 100644 --- a/tests/integration_tests/test_upgrade.py +++ b/tests/integration_tests/test_upgrade.py @@ -5,6 +5,7 @@ import pytest from tests.integration_tests.clouds import ImageSpecification, IntegrationCloud from tests.integration_tests.conftest import get_validated_source +from tests.integration_tests.util import verify_clean_log LOG = logging.getLogger('integration_testing.test_upgrade') @@ -73,11 +74,15 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud): pre_cloud_blame = instance.execute('cloud-init analyze blame') # Ensure no issues pre-upgrade + log = instance.read_from_file('/var/log/cloud-init.log') assert not json.loads(pre_result)['v1']['errors'] - log = instance.read_from_file('/var/log/cloud-init.log') - assert 'Traceback' not in log - assert 'WARN' not in log + try: + verify_clean_log(log) + except AssertionError: + LOG.warning( + 'There were errors/warnings/tracebacks pre-upgrade. ' + 'Any failures may be due to pre-upgrade problem') # Upgrade and reboot instance.install_new_cloud_init(source, take_snapshot=False) @@ -105,13 +110,21 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud): assert not json.loads(pre_result)['v1']['errors'] log = instance.read_from_file('/var/log/cloud-init.log') - assert 'Traceback' not in log - assert 'WARN' not in log + verify_clean_log(log) # Ensure important things stayed the same assert pre_hostname == post_hostname assert pre_cloud_id == post_cloud_id - assert pre_result == post_result + try: + assert pre_result == post_result + except AssertionError: + if instance.settings.PLATFORM == 'azure': + pre_json = json.loads(pre_result) + post_json = json.loads(post_result) + assert pre_json['v1']['datasource'].startswith( + 'DataSourceAzure') + assert post_json['v1']['datasource'].startswith( + 'DataSourceAzure') assert pre_network == post_network # Calculate and log all the boot numbers diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py index 80430eab..407096cd 100644 --- a/tests/integration_tests/util.py +++ b/tests/integration_tests/util.py @@ -28,6 +28,40 @@ def verify_ordered_items_in_text(to_verify: list, text: str): assert index > -1, "Expected item not found: '{}'".format(item) +def verify_clean_log(log): + """Assert no unexpected tracebacks or warnings in logs""" + warning_count = log.count('WARN') + expected_warnings = 0 + traceback_count = log.count('Traceback') + expected_tracebacks = 0 + + warning_texts = [ + # Consistently on all Azure launches: + # azure.py[WARNING]: No lease found; using default endpoint + 'No lease found; using default endpoint' + ] + traceback_texts = [] + if 'oracle' in log: + # LP: #1842752 + lease_exists_text = 'Stderr: RTNETLINK answers: File exists' + warning_texts.append(lease_exists_text) + traceback_texts.append(lease_exists_text) + # LP: #1833446 + fetch_error_text = ( + 'UrlError: 404 Client Error: Not Found for url: ' + 'http://169.254.169.254/latest/meta-data/') + warning_texts.append(fetch_error_text) + traceback_texts.append(fetch_error_text) + + for warning_text in warning_texts: + expected_warnings += log.count(warning_text) + for traceback_text in traceback_texts: + expected_tracebacks += log.count(traceback_text) + + assert warning_count == expected_warnings + assert traceback_count == expected_tracebacks + + @contextmanager def emit_dots_on_travis(): """emit a dot every 60 seconds if 
running on Travis. -- cgit v1.2.3 From dc22786980a05129c5971e68ae37b1a9f76f882d Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 17 Sep 2021 16:25:22 -0500 Subject: Set Azure to apply networking config every BOOT (#1023) In #1006, we set Azure to apply networking config every BOOT_NEW_INSTANCE because the BOOT_LEGACY option was causing problems applying networking the second time per boot. However, BOOT_NEW_INSTANCE is also wrong as Azure needs to apply networking once per boot, during init-local phase. --- cloudinit/sources/DataSourceAzure.py | 6 +++++- tests/integration_tests/modules/test_user_events.py | 10 ++++++---- 2 files changed, 11 insertions(+), 5 deletions(-) (limited to 'tests/integration_tests') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 3fb564c8..f8641dfd 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -22,7 +22,7 @@ import requests from cloudinit import dmi from cloudinit import log as logging from cloudinit import net -from cloudinit.event import EventType +from cloudinit.event import EventScope, EventType from cloudinit.net import device_driver from cloudinit.net.dhcp import EphemeralDHCPv4 from cloudinit import sources @@ -339,6 +339,10 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): class DataSourceAzure(sources.DataSource): dsname = 'Azure' + default_update_events = {EventScope.NETWORK: { + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT, + }} _negotiated = False _metadata_imds = sources.UNSET _ci_pkl_version = 1 diff --git a/tests/integration_tests/modules/test_user_events.py b/tests/integration_tests/modules/test_user_events.py index ee8f05ae..fffa0746 100644 --- a/tests/integration_tests/modules/test_user_events.py +++ b/tests/integration_tests/modules/test_user_events.py @@ -31,8 +31,6 @@ def _add_dummy_bridge_to_netplan(client: IntegrationInstance): @pytest.mark.gce @pytest.mark.oci @pytest.mark.openstack -@pytest.mark.azure -@pytest.mark.not_xenial def test_boot_event_disabled_by_default(client: IntegrationInstance): log = client.read_from_file('/var/log/cloud-init.log') if 'network config is disabled' in log: @@ -77,7 +75,7 @@ def _test_network_config_applied_on_reboot(client: IntegrationInstance): assert 'dummy0' not in client.execute('ls /sys/class/net') _add_dummy_bridge_to_netplan(client) - client.execute('rm /var/log/cloud-init.log') + client.execute('echo "" > /var/log/cloud-init.log') client.restart() log = client.read_from_file('/var/log/cloud-init.log') @@ -92,6 +90,11 @@ def _test_network_config_applied_on_reboot(client: IntegrationInstance): assert 'dummy0' not in client.execute('ls /sys/class/net') +@pytest.mark.azure +def test_boot_event_enabled_by_default(client: IntegrationInstance): + _test_network_config_applied_on_reboot(client) + + USER_DATA = """\ #cloud-config updates: @@ -100,7 +103,6 @@ updates: """ -@pytest.mark.not_xenial @pytest.mark.user_data(USER_DATA) def test_boot_event_enabled(client: IntegrationInstance): _test_network_config_applied_on_reboot(client) -- cgit v1.2.3 From c29c5b1d6d872a4b9754d7a1393bd1acde621447 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 27 Sep 2021 13:02:07 -0500 Subject: Skip test_cache_purged_on_version_change on Azure (#1033) --- tests/integration_tests/modules/test_version_change.py | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/modules/test_version_change.py 
b/tests/integration_tests/modules/test_version_change.py
index ffea794a..f28079d4 100644
--- a/tests/integration_tests/modules/test_version_change.py
+++ b/tests/integration_tests/modules/test_version_change.py
@@ -1,5 +1,7 @@
 from pathlib import Path
 
+import pytest
+
 from tests.integration_tests.instances import IntegrationInstance
 from tests.integration_tests.util import ASSETS_DIR, verify_clean_log
 
@@ -38,6 +40,14 @@ def test_reboot_without_version_change(client: IntegrationInstance):
     ])
 
 
+@pytest.mark.ec2
+@pytest.mark.gce
+@pytest.mark.oci
+@pytest.mark.openstack
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+# No Azure because the cache gets purged every reboot, so we'll never
+# get to the point where we need to purge cache due to version change
 def test_cache_purged_on_version_change(client: IntegrationInstance):
     # Start by pushing the invalid pickle so we'll hit an error if the
     # cache didn't actually get purged
-- cgit v1.2.3

From 9c147e8341e287366790e60658f646cdcc59bef2 Mon Sep 17 00:00:00 2001
From: James Falcon
Date: Thu, 7 Oct 2021 11:27:36 -0500
Subject: Allow disabling of network activation (SC-307) (#1048)

In #919 (81299de), we refactored some of the code used to bring up
networks across distros. Previously, the call to bring up network
interfaces during 'init' stage unintentionally resulted in a no-op
such that network interfaces were NEVER brought up by cloud-init, even
if new network interfaces were found after crawling the metadata. The
code was altered to bring up these discovered network interfaces.

On Ubuntu, this results in a 'netplan apply' call during the 'init'
stage for any Ubuntu-based distro on a datasource that has a NETWORK
dependency. On GCE, this additional 'netplan apply' conflicts with the
google-guest-agent service, resulting in an instance that can no
longer be connected to.

This commit adds a 'disable_network_activation' option that can be
enabled in /etc/cloud/cloud.cfg to disable the activation of network
interfaces in the 'init' stage.

LP: #1938299
---
 cloudinit/cmd/main.py                              | 11 +++++-
 cloudinit/cmd/tests/test_main.py                   | 23 ++++++++++++
 cloudinit/distros/__init__.py                      |  3 ++
 doc/rtd/topics/network-config.rst                  | 11 ++++++
 .../datasources/test_network_dependency.py         | 43 ++++++++++++++++++++++
 5 files changed, 89 insertions(+), 2 deletions(-)
 create mode 100644 tests/integration_tests/datasources/test_network_dependency.py

(limited to 'tests/integration_tests')

diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 1de1de99..63186d34 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -239,6 +239,12 @@ def purge_cache_on_python_version_change(init):
     util.write_file(python_version_path, current_python_version)
 
 
+def _should_bring_up_interfaces(init, args):
+    if util.get_cfg_option_bool(init.cfg, 'disable_network_activation'):
+        return False
+    return not args.local
+
+
 def main_init(name, args):
     deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
     if args.local:
@@ -348,6 +354,7 @@ def main_init(name, args):
         util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net"))
 
     # Stage 5
+    bring_up_interfaces = _should_bring_up_interfaces(init, args)
     try:
         init.fetch(existing=existing)
         # if in network mode, and the datasource is local
@@ -367,7 +374,7 @@
         util.logexc(LOG, ("No instance datasource found!"
" Likely bad things to come!")) if not args.force: - init.apply_network_config(bring_up=not args.local) + init.apply_network_config(bring_up=bring_up_interfaces) LOG.debug("[%s] Exiting without datasource", mode) if mode == sources.DSMODE_LOCAL: return (None, []) @@ -388,7 +395,7 @@ def main_init(name, args): # dhcp clients to advertize this hostname to any DDNS services # LP: #1746455. _maybe_set_hostname(init, stage='local', retry_stage='network') - init.apply_network_config(bring_up=bool(mode != sources.DSMODE_LOCAL)) + init.apply_network_config(bring_up=bring_up_interfaces) if mode == sources.DSMODE_LOCAL: if init.datasource.dsmode != mode: diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py index 1f5975b0..2e380848 100644 --- a/cloudinit/cmd/tests/test_main.py +++ b/cloudinit/cmd/tests/test_main.py @@ -4,6 +4,9 @@ from collections import namedtuple import copy import os from io import StringIO +from unittest import mock + +import pytest from cloudinit.cmd import main from cloudinit import safeyaml @@ -162,4 +165,24 @@ class TestMain(FilesystemMockingTestCase): for log in expected_logs: self.assertIn(log, self.stderr.getvalue()) + +class TestShouldBringUpInterfaces: + @pytest.mark.parametrize('cfg_disable,args_local,expected', [ + (True, True, False), + (True, False, False), + (False, True, False), + (False, False, True), + ]) + def test_should_bring_up_interfaces( + self, cfg_disable, args_local, expected + ): + init = mock.Mock() + init.cfg = {'disable_network_activation': cfg_disable} + + args = mock.Mock() + args.local = args_local + + result = main._should_bring_up_interfaces(init, args) + assert result == expected + # vi: ts=4 expandtab diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 63e78591..426a2cf4 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -227,8 +227,11 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): # Now try to bring them up if bring_up: + LOG.debug('Bringing up newly configured network interfaces') network_activator = activators.select_activator() network_activator.bring_up_all_interfaces(network_state) + else: + LOG.debug("Not bringing up newly configured network interfaces") return False def apply_network_config_names(self, netconfig): diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst index 8eb7a31b..494b687a 100644 --- a/doc/rtd/topics/network-config.rst +++ b/doc/rtd/topics/network-config.rst @@ -75,6 +75,17 @@ If `Cloud-init`_ 's networking config has not been disabled, and no other network information is found, then it will proceed to generate a fallback networking configuration. +Disabling Network Activation +---------------------------- + +Some datasources may not be initialized until after network has been brought +up. In this case, cloud-init will attempt to bring up the interfaces specified +by the datasource metadata. 
+ +This behavior can be disabled in the cloud-init configuration dictionary, +merged from ``/etc/cloud/cloud.cfg`` and ``/etc/cloud/cloud.cfg.d/*``:: + + disable_network_activation: true Fallback Network Configuration ============================== diff --git a/tests/integration_tests/datasources/test_network_dependency.py b/tests/integration_tests/datasources/test_network_dependency.py new file mode 100644 index 00000000..2e5e3121 --- /dev/null +++ b/tests/integration_tests/datasources/test_network_dependency.py @@ -0,0 +1,43 @@ +import pytest + +from tests.integration_tests.clouds import IntegrationCloud +from tests.integration_tests.conftest import get_validated_source + + +def _setup_custom_image(session_cloud: IntegrationCloud): + """Like `setup_image` in conftest.py, but with customized content.""" + source = get_validated_source(session_cloud) + if not source.installs_new_version(): + return + client = session_cloud.launch() + + # Insert our "disable_network_activation" file here + client.write_to_file( + '/etc/cloud/cloud.cfg.d/99-disable-network-activation.cfg', + 'disable_network_activation: true\n', + ) + + client.install_new_cloud_init(source) + # Even if we're keeping instances, we don't want to keep this + # one around as it was just for image creation + client.destroy() + + +# This test should be able to work on any cloud whose datasource specifies +# a NETWORK dependency +@pytest.mark.gce +@pytest.mark.ubuntu # Because netplan +def test_network_activation_disabled(session_cloud: IntegrationCloud): + """Test that the network is not activated during init mode.""" + _setup_custom_image(session_cloud) + with session_cloud.launch() as client: + result = client.execute('systemctl status google-guest-agent.service') + if not result.ok: + raise AssertionError('google-guest-agent is not active:\n%s', + result.stdout) + log = client.read_from_file('/var/log/cloud-init.log') + + assert "Running command ['netplan', 'apply']" not in log + + assert 'Not bringing up newly configured network interfaces' in log + assert 'Bringing up newly configured network interfaces' not in log -- cgit v1.2.3 From 150fd30ef81cea5c2b0aff5127e33a5db786aa09 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 19 Oct 2021 14:58:56 -0500 Subject: Update Azure _unpickle (SC-500) (#1067) When self.failed_desired_api_version was added to DataSourceAzure, the attribute was never added to the _unpickle method using the upgrade framework. This commit adds the attribute. 
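The upgrade framework's rule of thumb: pickle restores only the attributes that existed when the cache was written, so _unpickle() must give a safe default to anything added to the class since. Schematically (an illustrative class, not the Azure source; see the diff below for the real change):

    class ExampleDataSource:
        _ci_pkl_version = 1

        def _unpickle(self, ci_pkl_version: int) -> None:
            # A cache written before these attributes existed won't carry
            # them, so default each one on load.
            if not hasattr(self, "failed_desired_api_version"):
                self.failed_desired_api_version = False
            if not hasattr(self, "iso_dev"):
                self.iso_dev = None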
LP: #1946644 --- cloudinit/sources/DataSourceAzure.py | 4 +++- tests/integration_tests/test_upgrade.py | 10 ++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) (limited to 'tests/integration_tests') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index f8641dfd..26c407ef 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -363,7 +363,9 @@ class DataSourceAzure(sources.DataSource): def _unpickle(self, ci_pkl_version: int) -> None: super()._unpickle(ci_pkl_version) - if "iso_dev" not in self.__dict__: + if not hasattr(self, "failed_desired_api_version"): + self.failed_desired_api_version = False + if not hasattr(self, "iso_dev"): self.iso_dev = None def __str__(self): diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py index e90a5f9d..0ba4754c 100644 --- a/tests/integration_tests/test_upgrade.py +++ b/tests/integration_tests/test_upgrade.py @@ -84,11 +84,8 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud): 'There were errors/warnings/tracebacks pre-upgrade. ' 'Any failures may be due to pre-upgrade problem') - # Upgrade and reboot + # Upgrade instance.install_new_cloud_init(source, take_snapshot=False) - instance.execute('hostname something-else') - instance.restart() - assert instance.execute('cloud-init status --wait --long').ok # 'cloud-init init' helps us understand if our pickling upgrade paths # have broken across re-constitution of a cached datasource. Some @@ -96,6 +93,11 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud): # it here to ensure we get a dirty run. assert instance.execute('cloud-init init').ok + # Reboot + instance.execute('hostname something-else') + instance.restart() + assert instance.execute('cloud-init status --wait --long').ok + # get post values post_hostname = instance.execute('hostname') post_cloud_id = instance.execute('cloud-id') -- cgit v1.2.3 From a4236c375ddf78258a8f9252c1d79c665aa4f88b Mon Sep 17 00:00:00 2001 From: Lucendio Date: Mon, 25 Oct 2021 21:31:07 +0200 Subject: Add module 'write-files-deferred' executed in stage 'final' (#916) The main idea is to introduce a second module that takes care of writing files, but in the 'final' stage. While the introduction of a second module would allow for choosing the appropriate place withing the order of modules (and stages), there is no addition top-level directive being added to the cloud configuration schema. Instead, 'write-files' schema is being extended to include a 'defer' attribute used only by the 'write-deffered-files' modules. The new module 'write-deferred-files' reuses as much as possible of the 'write-files' functionality. 
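In user-data terms, deferral is opt-in per file entry: anything marked 'defer: true' is skipped by write-files and handled by write-files-deferred in the 'final' stage, after users exist and packages are installed. A small illustrative cloud-config (the user name and path are made up for the example):

    #cloud-config
    users:
    - default
    - name: myuser
    write_files:
    - path: /home/myuser/greeting
      content: |
        hello world!
      owner: 'myuser:myuser'
      permissions: '0644'
      defer: true   # written in the final stage, once myuser exists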
--- cloudinit/config/cc_write_files.py | 41 ++++++++++-- cloudinit/config/cc_write_files_deferred.py | 55 ++++++++++++++++ config/cloud.cfg.tmpl | 1 + .../integration_tests/modules/test_write_files.py | 21 ++++++ .../test_handler/test_handler_write_files.py | 13 ++++ .../test_handler_write_files_deferred.py | 77 ++++++++++++++++++++++ tests/unittests/test_handler/test_schema.py | 1 + tools/.github-cla-signers | 1 + 8 files changed, 206 insertions(+), 4 deletions(-) create mode 100644 cloudinit/config/cc_write_files_deferred.py create mode 100644 tests/unittests/test_handler/test_handler_write_files_deferred.py (limited to 'tests/integration_tests') diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py index 8601e707..41c75fa2 100644 --- a/cloudinit/config/cc_write_files.py +++ b/cloudinit/config/cc_write_files.py @@ -21,6 +21,7 @@ frequency = PER_INSTANCE DEFAULT_OWNER = "root:root" DEFAULT_PERMS = 0o644 +DEFAULT_DEFER = False UNKNOWN_ENC = 'text/plain' LOG = logging.getLogger(__name__) @@ -90,6 +91,24 @@ schema = { # Create an empty file on the system write_files: - path: /root/CLOUD_INIT_WAS_HERE + """), + dedent("""\ + # Defer writing the file until after the package (Nginx) is + # installed and its user is created alongside + write_files: + - path: /etc/nginx/conf.d/example.com.conf + content: | + server { + server_name example.com; + listen 80; + root /var/www; + location / { + try_files $uri $uri/ $uri.html =404; + } + } + owner: 'nginx:nginx' + permissions: '0640' + defer: true """)], 'frequency': frequency, 'type': 'object', @@ -151,6 +170,15 @@ schema = { ``path`` exists. Default: **false**. """), }, + 'defer': { + 'type': 'boolean', + 'default': DEFAULT_DEFER, + 'description': dedent("""\ + Defer writing the file until 'final' stage, after + users were created, and packages were installed. + Default: **{defer}**. + """.format(defer=DEFAULT_DEFER)), + }, }, 'required': ['path'], 'additionalProperties': False @@ -163,13 +191,18 @@ __doc__ = get_schema_doc(schema) # Supplement python help() def handle(name, cfg, _cloud, log, _args): - files = cfg.get('write_files') - if not files: + validate_cloudconfig_schema(cfg, schema) + file_list = cfg.get('write_files', []) + filtered_files = [ + f for f in file_list if not util.get_cfg_option_bool(f, + 'defer', + DEFAULT_DEFER) + ] + if not filtered_files: log.debug(("Skipping module named %s," " no/empty 'write_files' key in configuration"), name) return - validate_cloudconfig_schema(cfg, schema) - write_files(name, files) + write_files(name, filtered_files) def canonicalize_extraction(encoding_type): diff --git a/cloudinit/config/cc_write_files_deferred.py b/cloudinit/config/cc_write_files_deferred.py new file mode 100644 index 00000000..0c75aa22 --- /dev/null +++ b/cloudinit/config/cc_write_files_deferred.py @@ -0,0 +1,55 @@ +# Copyright (C) 2021 Canonical Ltd. +# +# This file is part of cloud-init. See LICENSE file for license information. 
+
+"""Defer writing certain files"""
+
+from textwrap import dedent
+
+from cloudinit.config.schema import validate_cloudconfig_schema
+from cloudinit import util
+from cloudinit.config.cc_write_files import (
+    schema as write_files_schema, write_files, DEFAULT_DEFER)
+
+
+schema = util.mergemanydict([
+    {
+        'id': 'cc_write_files_deferred',
+        'name': 'Write Deferred Files',
+        'title': dedent("""\
+            write certain files, whose creation has been deferred, during
+            final stage
+        """),
+        'description': dedent("""\
+            This module is based on `'Write Files' <write-files>`__, and
+            will handle all files from the write_files list, that have been
+            marked as deferred and thus are not being processed by the
+            write-files module.
+
+            *Please note that this module is not exposed to the user through
+            its own dedicated top-level directive.*
+        """)
+    },
+    write_files_schema
+])
+
+# Not exposed, because related modules should document this behaviour
+__doc__ = None
+
+
+def handle(name, cfg, _cloud, log, _args):
+    validate_cloudconfig_schema(cfg, schema)
+    file_list = cfg.get('write_files', [])
+    filtered_files = [
+        f for f in file_list if util.get_cfg_option_bool(f,
+                                                         'defer',
+                                                         DEFAULT_DEFER)
+    ]
+    if not filtered_files:
+        log.debug(("Skipping module named %s,"
+                   " no deferred file defined in configuration"), name)
+        return
+    write_files(name, filtered_files)
+
+
+# vi: ts=4 expandtab
diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
index de1d75e5..66c48fd5 100644
--- a/config/cloud.cfg.tmpl
+++ b/config/cloud.cfg.tmpl
@@ -151,6 +151,7 @@ cloud_final_modules:
 {% if variant in ["ubuntu", "unknown"] %}
  - ubuntu-drivers
 {% endif %}
+ - write-files-deferred
  - puppet
  - chef
  - mcollective
diff --git a/tests/integration_tests/modules/test_write_files.py b/tests/integration_tests/modules/test_write_files.py
index 15832ae3..1d532fac 100644
--- a/tests/integration_tests/modules/test_write_files.py
+++ b/tests/integration_tests/modules/test_write_files.py
@@ -21,6 +21,9 @@ B64_CONTENT = base64.b64encode(ASCII_TEXT.encode("utf-8"))
 #
 USER_DATA = """\
 #cloud-config
+users:
+- default
+- name: myuser
 write_files:
 - encoding: b64
   content: {}
@@ -41,6 +44,12 @@ write_files:
     H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
   path: /root/file_gzip
   permissions: '0755'
+- path: '/home/testuser/my-file'
+  content: |
+    echo 'hello world!'
+  defer: true
+  owner: 'myuser'
+  permissions: '0644'
 """.format(B64_CONTENT.decode("ascii"))
 
 
@@ -64,3 +73,15 @@ class TestWriteFiles:
     def test_write_files(self, cmd, expected_out, class_client):
         out = class_client.execute(cmd)
         assert expected_out in out
+
+    def test_write_files_deferred(self, class_client):
+        """Test that write files deferred works as expected.
+
+        Users get created after write_files module runs, so ensure that
+        with `defer: true`, the file gets written with correct ownership.
+ """ + out = class_client.read_from_file("/home/testuser/my-file") + assert "echo 'hello world!'" == out + assert class_client.execute( + 'stat -c "%U %a" /home/testuser/my-file' + ) == 'myuser 644' diff --git a/tests/unittests/test_handler/test_handler_write_files.py b/tests/unittests/test_handler/test_handler_write_files.py index 727681d3..0af92805 100644 --- a/tests/unittests/test_handler/test_handler_write_files.py +++ b/tests/unittests/test_handler/test_handler_write_files.py @@ -189,6 +189,19 @@ class TestWriteFiles(FilesystemMockingTestCase): len(gz_aliases + gz_b64_aliases + b64_aliases) * len(datum)) self.assertEqual(len(expected), flen_expected) + def test_deferred(self): + self.patchUtils(self.tmp) + file_path = '/tmp/deferred.file' + config = { + 'write_files': [ + {'path': file_path, 'defer': True} + ] + } + cc = self.tmp_cloud('ubuntu') + handle('cc_write_file', config, cc, LOG, []) + with self.assertRaises(FileNotFoundError): + util.load_file(file_path) + class TestDecodePerms(CiTestCase): diff --git a/tests/unittests/test_handler/test_handler_write_files_deferred.py b/tests/unittests/test_handler/test_handler_write_files_deferred.py new file mode 100644 index 00000000..57b6934a --- /dev/null +++ b/tests/unittests/test_handler/test_handler_write_files_deferred.py @@ -0,0 +1,77 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import tempfile +import shutil + +from cloudinit.config.cc_write_files_deferred import (handle) +from .test_handler_write_files import (VALID_SCHEMA) +from cloudinit import log as logging +from cloudinit import util + +from cloudinit.tests.helpers import ( + CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema) + +LOG = logging.getLogger(__name__) + + +@skipUnlessJsonSchema() +@mock.patch('cloudinit.config.cc_write_files_deferred.write_files') +class TestWriteFilesDeferredSchema(CiTestCase): + + with_logs = True + + def test_schema_validation_warns_invalid_value(self, + m_write_files_deferred): + """If 'defer' is defined, it must be of type 'bool'.""" + + valid_config = { + 'write_files': [ + {**VALID_SCHEMA.get('write_files')[0], 'defer': True} + ] + } + + invalid_config = { + 'write_files': [ + {**VALID_SCHEMA.get('write_files')[0], 'defer': str('no')} + ] + } + + cc = self.tmp_cloud('ubuntu') + handle('cc_write_files_deferred', valid_config, cc, LOG, []) + self.assertNotIn('Invalid config:', self.logs.getvalue()) + handle('cc_write_files_deferred', invalid_config, cc, LOG, []) + self.assertIn('Invalid config:', self.logs.getvalue()) + self.assertIn("defer: 'no' is not of type 'boolean'", + self.logs.getvalue()) + + +class TestWriteFilesDeferred(FilesystemMockingTestCase): + + with_logs = True + + def setUp(self): + super(TestWriteFilesDeferred, self).setUp() + self.tmp = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, self.tmp) + + def test_filtering_deferred_files(self): + self.patchUtils(self.tmp) + expected = "hello world\n" + config = { + 'write_files': [ + { + 'path': '/tmp/deferred.file', + 'defer': True, + 'content': expected + }, + {'path': '/tmp/not_deferred.file'} + ] + } + cc = self.tmp_cloud('ubuntu') + handle('cc_write_files_deferred', config, cc, LOG, []) + self.assertEqual(util.load_file('/tmp/deferred.file'), expected) + with self.assertRaises(FileNotFoundError): + util.load_file('/tmp/not_deferred.file') + + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py index 6f37ceb7..59f58f7c 100644 --- 
a/tests/unittests/test_handler/test_schema.py
+++ b/tests/unittests/test_handler/test_schema.py
@@ -34,6 +34,7 @@ class GetSchemaTest(CiTestCase):
                 'cc_ubuntu_advantage',
                 'cc_ubuntu_drivers',
                 'cc_write_files',
+                'cc_write_files_deferred',
                 'cc_zypper_add_repo',
                 'cc_chef'
             ],
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index 99f7d99c..fac3fcec 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -42,6 +42,7 @@ jshen28
 klausenbusk
 landon912
 lucasmoura
+lucendio
 lungj
 mal
 mamercad
-- 
cgit v1.2.3 


From 1d01da5d9916d97ef463ba61a36b3f98f8911419 Mon Sep 17 00:00:00 2001
From: James Falcon
Date: Wed, 27 Oct 2021 09:43:34 -0500
Subject: Add "install hotplug" module (SC-476) (#1069)

This commit removes automatically installing udev rules for hotplug
and adds a module to install them instead. Automatically including the
udev rules and checking if hotplug was enabled consumed too many
resources in certain circumstances. Moving the rules to a module
ensures we don't spend extra cycles on hotplug if hotplug functionality
isn't desired.

LP: #1946003
---
 cloudinit/cmd/devel/hotplug_hook.py                |   5 +-
 cloudinit/config/cc_install_hotplug.py             | 136 +++++++++++++++++++++
 cloudinit/stages.py                                |  95 +++++++-------
 config/cloud.cfg.tmpl                              |   1 +
 doc/rtd/topics/modules.rst                         |   1 +
 tests/integration_tests/modules/test_hotplug.py    |  13 +-
 tests/unittests/cmd/devel/test_hotplug_hook.py     |  24 ++--
 .../test_handler/test_handler_install_hotplug.py   | 104 ++++++++++++++++
 tests/unittests/test_handler/test_schema.py        |   3 +-
 tools/hook-hotplug                                 |   6 +-
 udev/10-cloud-init-hook-hotplug.rules              |   6 -
 11 files changed, 328 insertions(+), 66 deletions(-)
 create mode 100644 cloudinit/config/cc_install_hotplug.py
 create mode 100644 tests/unittests/test_handler/test_handler_install_hotplug.py
 delete mode 100644 udev/10-cloud-init-hook-hotplug.rules

(limited to 'tests/integration_tests')

diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py
index d4f0547e..f6f36a00 100644
--- a/cloudinit/cmd/devel/hotplug_hook.py
+++ b/cloudinit/cmd/devel/hotplug_hook.py
@@ -8,6 +8,7 @@ import time
 
 from cloudinit import log
 from cloudinit import reporting
+from cloudinit import stages
 from cloudinit.event import EventScope, EventType
 from cloudinit.net import activators, read_sys_net_safe
 from cloudinit.net.network_state import parse_net_config_data
@@ -164,7 +165,9 @@ def is_enabled(hotplug_init, subsystem):
                 subsystem)
         ) from e
 
-    return hotplug_init.update_event_enabled(
+    return stages.update_event_enabled(
+        datasource=hotplug_init.datasource,
+        cfg=hotplug_init.cfg,
         event_source_type=EventType.HOTPLUG,
         scope=scope
     )
diff --git a/cloudinit/config/cc_install_hotplug.py b/cloudinit/config/cc_install_hotplug.py
new file mode 100644
index 00000000..d6b2a2df
--- /dev/null
+++ b/cloudinit/config/cc_install_hotplug.py
@@ -0,0 +1,136 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Install hotplug udev rules if supported and enabled"""
+import os
+from textwrap import dedent
+
+from cloudinit import util
+from cloudinit import subp
+from cloudinit import stages
+from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.event import EventType, EventScope
+from cloudinit.settings import PER_INSTANCE
+
+
+frequency = PER_INSTANCE
+distros = [ALL_DISTROS]
+
+schema = {
+    "id": "cc_install_hotplug",
+    "name": "Install Hotplug",
+    "title": "Install hotplug if supported and enabled",
+    "description": dedent("""\
+        This module will install the udev rules to enable hotplug if
+        supported by the datasource and enabled in the userdata. The udev
+        rules will be installed as
+        ``/etc/udev/rules.d/10-cloud-init-hook-hotplug.rules``.
+
+        When hotplug is enabled, newly added network devices will be added
+        to the system by cloud-init. After udev detects the event,
+        cloud-init will refresh the instance metadata from the datasource,
+        detect the device in the updated metadata, then apply the updated
+        network configuration.
+
+        Currently supported datasources: Openstack, EC2
+    """),
+    "distros": distros,
+    "examples": [
+        dedent("""\
+            # Enable hotplug of network devices
+            updates:
+              network:
+                when: ["hotplug"]
+        """),
+        dedent("""\
+            # Enable network hotplug alongside boot event
+            updates:
+              network:
+                when: ["boot", "hotplug"]
+        """),
+    ],
+    "frequency": frequency,
+    "type": "object",
+    "properties": {
+        "updates": {
+            "type": "object",
+            "additionalProperties": False,
+            "properties": {
+                "network": {
+                    "type": "object",
+                    "required": ["when"],
+                    "additionalProperties": False,
+                    "properties": {
+                        "when": {
+                            "type": "array",
+                            "additionalProperties": False,
+                            "items": {
+                                "type": "string",
+                                "additionalProperties": False,
+                                "enum": [
+                                    "boot-new-instance",
+                                    "boot-legacy",
+                                    "boot",
+                                    "hotplug",
+                                ]
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+__doc__ = get_schema_doc(schema)
+
+
+HOTPLUG_UDEV_PATH = "/etc/udev/rules.d/10-cloud-init-hook-hotplug.rules"
+HOTPLUG_UDEV_RULES = """\
+# Installed by cloud-init due to network hotplug userdata
+ACTION!="add|remove", GOTO="cloudinit_end"
+LABEL="cloudinit_hook"
+SUBSYSTEM=="net", RUN+="/usr/lib/cloud-init/hook-hotplug"
+LABEL="cloudinit_end"
+"""
+
+
+def handle(_name, cfg, cloud, log, _args):
+    validate_cloudconfig_schema(cfg, schema)
+    network_hotplug_enabled = (
+        'updates' in cfg and
+        'network' in cfg['updates'] and
+        'when' in cfg['updates']['network'] and
+        'hotplug' in cfg['updates']['network']['when']
+    )
+    hotplug_supported = EventType.HOTPLUG in (
+        cloud.datasource.get_supported_events(
+            [EventType.HOTPLUG]).get(EventScope.NETWORK, set())
+    )
+    hotplug_enabled = stages.update_event_enabled(
+        datasource=cloud.datasource,
+        cfg=cfg,
+        event_source_type=EventType.HOTPLUG,
+        scope=EventScope.NETWORK,
+    )
+    if not (hotplug_supported and hotplug_enabled):
+        if os.path.exists(HOTPLUG_UDEV_PATH):
+            log.debug("Uninstalling hotplug, not enabled")
+            util.del_file(HOTPLUG_UDEV_PATH)
+            subp.subp(["udevadm", "control", "--reload-rules"])
+        elif network_hotplug_enabled:
+            log.warning(
+                "Hotplug is unsupported by current datasource. "
+                "Udev rules will NOT be installed."
+ ) + else: + log.debug("Skipping hotplug install, not enabled") + return + if not subp.which("udevadm"): + log.debug("Skipping hotplug install, udevadm not found") + return + + util.write_file( + filename=HOTPLUG_UDEV_PATH, + content=HOTPLUG_UDEV_RULES, + ) + subp.subp(["udevadm", "control", "--reload-rules"]) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 80aa9f5e..731b2982 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -49,6 +49,54 @@ NULL_DATA_SOURCE = None NO_PREVIOUS_INSTANCE_ID = "NO_PREVIOUS_INSTANCE_ID" +def update_event_enabled( + datasource: sources.DataSource, + cfg: dict, + event_source_type: EventType, + scope: EventScope = None +) -> bool: + """Determine if a particular EventType is enabled. + + For the `event_source_type` passed in, check whether this EventType + is enabled in the `updates` section of the userdata. If `updates` + is not enabled in userdata, check if defined as one of the + `default_events` on the datasource. `scope` may be used to + narrow the check to a particular `EventScope`. + + Note that on first boot, userdata may NOT be available yet. In this + case, we only have the data source's `default_update_events`, + so an event that should be enabled in userdata may be denied. + """ + default_events = datasource.default_update_events # type: Dict[EventScope, Set[EventType]] # noqa: E501 + user_events = userdata_to_events(cfg.get('updates', {})) # type: Dict[EventScope, Set[EventType]] # noqa: E501 + # A value in the first will override a value in the second + allowed = util.mergemanydict([ + copy.deepcopy(user_events), + copy.deepcopy(default_events), + ]) + LOG.debug('Allowed events: %s', allowed) + + if not scope: + scopes = allowed.keys() + else: + scopes = [scope] + scope_values = [s.value for s in scopes] + + for evt_scope in scopes: + if event_source_type in allowed.get(evt_scope, []): + LOG.debug( + 'Event Allowed: scope=%s EventType=%s', + evt_scope.value, event_source_type + ) + return True + + LOG.debug( + 'Event Denied: scopes=%s EventType=%s', + scope_values, event_source_type + ) + return False + + class Init(object): def __init__(self, ds_deps=None, reporter=None): if ds_deps is not None: @@ -715,46 +763,6 @@ class Init(object): return (self.distro.generate_fallback_config(), NetworkConfigSource.fallback) - def update_event_enabled( - self, event_source_type: EventType, scope: EventScope = None - ) -> bool: - """Determine if a particular EventType is enabled. - - For the `event_source_type` passed in, check whether this EventType - is enabled in the `updates` section of the userdata. If `updates` - is not enabled in userdata, check if defined as one of the - `default_events` on the datasource. `scope` may be used to - narrow the check to a particular `EventScope`. - - Note that on first boot, userdata may NOT be available yet. In this - case, we only have the data source's `default_update_events`, - so an event that should be enabled in userdata may be denied. 
- """ - default_events = self.datasource.default_update_events # type: Dict[EventScope, Set[EventType]] # noqa: E501 - user_events = userdata_to_events(self.cfg.get('updates', {})) # type: Dict[EventScope, Set[EventType]] # noqa: E501 - # A value in the first will override a value in the second - allowed = util.mergemanydict([ - copy.deepcopy(user_events), - copy.deepcopy(default_events), - ]) - LOG.debug('Allowed events: %s', allowed) - - if not scope: - scopes = allowed.keys() - else: - scopes = [scope] - scope_values = [s.value for s in scopes] - - for evt_scope in scopes: - if event_source_type in allowed.get(evt_scope, []): - LOG.debug('Event Allowed: scope=%s EventType=%s', - evt_scope.value, event_source_type) - return True - - LOG.debug('Event Denied: scopes=%s EventType=%s', - scope_values, event_source_type) - return False - def _apply_netcfg_names(self, netcfg): try: LOG.debug("applying net config names for %s", netcfg) @@ -784,8 +792,11 @@ class Init(object): return def event_enabled_and_metadata_updated(event_type): - return self.update_event_enabled( - event_type, scope=EventScope.NETWORK + return update_event_enabled( + datasource=self.datasource, + cfg=self.cfg, + event_source_type=event_type, + scope=EventScope.NETWORK ) and self.datasource.update_metadata_if_supported([event_type]) def should_run_on_boot_event(): diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 66c48fd5..b66bbe60 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -166,6 +166,7 @@ cloud_final_modules: - scripts-user - ssh-authkey-fingerprints - keys-to-console + - install-hotplug - phone-home - final-message - power-state-change diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst index e30fe0fe..3ca6b9e3 100644 --- a/doc/rtd/topics/modules.rst +++ b/doc/rtd/topics/modules.rst @@ -22,6 +22,7 @@ Modules .. automodule:: cloudinit.config.cc_foo .. automodule:: cloudinit.config.cc_growpart .. automodule:: cloudinit.config.cc_grub_dpkg +.. automodule:: cloudinit.config.cc_install_hotplug .. automodule:: cloudinit.config.cc_keys_to_console .. automodule:: cloudinit.config.cc_landscape .. 
automodule:: cloudinit.config.cc_locale diff --git a/tests/integration_tests/modules/test_hotplug.py b/tests/integration_tests/modules/test_hotplug.py index 88cd8c16..f5abc86f 100644 --- a/tests/integration_tests/modules/test_hotplug.py +++ b/tests/integration_tests/modules/test_hotplug.py @@ -49,10 +49,13 @@ def test_hotplug_add_remove(client: IntegrationInstance): ips_before = _get_ip_addr(client) log = client.read_from_file('/var/log/cloud-init.log') assert 'Exiting hotplug handler' not in log + assert client.execute( + 'test -f /etc/udev/rules.d/10-cloud-init-hook-hotplug.rules' + ).ok # Add new NIC added_ip = client.instance.add_network_interface() - _wait_till_hotplug_complete(client, expected_runs=2) + _wait_till_hotplug_complete(client, expected_runs=1) ips_after_add = _get_ip_addr(client) new_addition = [ip for ip in ips_after_add if ip.ip4 == added_ip][0] @@ -67,7 +70,7 @@ def test_hotplug_add_remove(client: IntegrationInstance): # Remove new NIC client.instance.remove_network_interface(added_ip) - _wait_till_hotplug_complete(client, expected_runs=4) + _wait_till_hotplug_complete(client, expected_runs=2) ips_after_remove = _get_ip_addr(client) assert len(ips_after_remove) == len(ips_before) assert added_ip not in [ip.ip4 for ip in ips_after_remove] @@ -86,12 +89,14 @@ def test_no_hotplug_in_userdata(client: IntegrationInstance): ips_before = _get_ip_addr(client) log = client.read_from_file('/var/log/cloud-init.log') assert 'Exiting hotplug handler' not in log + assert client.execute( + 'test -f /etc/udev/rules.d/10-cloud-init-hook-hotplug.rules' + ).failed # Add new NIC client.instance.add_network_interface() - _wait_till_hotplug_complete(client) log = client.read_from_file('/var/log/cloud-init.log') - assert "Event Denied: scopes=['network'] EventType=hotplug" in log + assert 'hotplug-hook' not in log ips_after_add = _get_ip_addr(client) if len(ips_after_add) == len(ips_before) + 1: diff --git a/tests/unittests/cmd/devel/test_hotplug_hook.py b/tests/unittests/cmd/devel/test_hotplug_hook.py index 63d2490e..e1c64e2f 100644 --- a/tests/unittests/cmd/devel/test_hotplug_hook.py +++ b/tests/unittests/cmd/devel/test_hotplug_hook.py @@ -30,6 +30,11 @@ def mocks(): return_value=FAKE_MAC ) + update_event_enabled = mock.patch( + 'cloudinit.stages.update_event_enabled', + return_value=True, + ) + m_network_state = mock.MagicMock(spec=NetworkState) parse_net = mock.patch( 'cloudinit.cmd.devel.hotplug_hook.parse_net_config_data', @@ -45,6 +50,7 @@ def mocks(): sleep = mock.patch('time.sleep') read_sys_net.start() + update_event_enabled.start() parse_net.start() select_activator.start() m_sleep = sleep.start() @@ -57,6 +63,7 @@ def mocks(): ) read_sys_net.stop() + update_event_enabled.stop() parse_net.stop() select_activator.stop() sleep.stop() @@ -122,13 +129,16 @@ class TestHotplug: def test_update_event_disabled(self, mocks, caplog): init = mocks.m_init - init.update_event_enabled.return_value = False - handle_hotplug( - hotplug_init=init, - devpath='/dev/fake', - udevaction='remove', - subsystem='net' - ) + with mock.patch( + 'cloudinit.stages.update_event_enabled', + return_value=False + ): + handle_hotplug( + hotplug_init=init, + devpath='/dev/fake', + udevaction='remove', + subsystem='net' + ) assert 'hotplug not enabled for event of type' in caplog.text init.datasource.update_metadata_if_supported.assert_not_called() mocks.m_activator.bring_up_interface.assert_not_called() diff --git a/tests/unittests/test_handler/test_handler_install_hotplug.py 
b/tests/unittests/test_handler/test_handler_install_hotplug.py new file mode 100644 index 00000000..19b0cc41 --- /dev/null +++ b/tests/unittests/test_handler/test_handler_install_hotplug.py @@ -0,0 +1,104 @@ +# This file is part of cloud-init. See LICENSE file for license information. +from collections import namedtuple +from unittest import mock + +import pytest + +from cloudinit.config.cc_install_hotplug import ( + handle, + HOTPLUG_UDEV_PATH, + HOTPLUG_UDEV_RULES, +) +from cloudinit.event import EventScope, EventType + + +@pytest.yield_fixture() +def mocks(): + m_update_enabled = mock.patch('cloudinit.stages.update_event_enabled') + m_write = mock.patch('cloudinit.util.write_file', autospec=True) + m_del = mock.patch('cloudinit.util.del_file', autospec=True) + m_subp = mock.patch('cloudinit.subp.subp') + m_which = mock.patch('cloudinit.subp.which', return_value=None) + m_path_exists = mock.patch('os.path.exists', return_value=False) + + yield namedtuple( + 'Mocks', + 'm_update_enabled m_write m_del m_subp m_which m_path_exists' + )( + m_update_enabled.start(), m_write.start(), m_del.start(), + m_subp.start(), m_which.start(), m_path_exists.start() + ) + + m_update_enabled.stop() + m_write.stop() + m_del.stop() + m_subp.stop() + m_which.stop() + m_path_exists.stop() + + +class TestInstallHotplug: + def test_rules_installed_when_supported_and_enabled(self, mocks): + mocks.m_which.return_value = 'udevadm' + mocks.m_update_enabled.return_value = True + m_cloud = mock.MagicMock() + m_cloud.datasource.get_supported_events.return_value = { + EventScope.NETWORK: {EventType.HOTPLUG} + } + + handle(None, {}, m_cloud, mock.Mock(), None) + mocks.m_write.assert_called_once_with( + filename=HOTPLUG_UDEV_PATH, + content=HOTPLUG_UDEV_RULES, + ) + assert mocks.m_subp.call_args_list == [mock.call([ + 'udevadm', 'control', '--reload-rules', + ])] + assert mocks.m_del.call_args_list == [] + + def test_rules_not_installed_when_unsupported(self, mocks): + mocks.m_update_enabled.return_value = True + m_cloud = mock.MagicMock() + m_cloud.datasource.get_supported_events.return_value = {} + + handle(None, {}, m_cloud, mock.Mock(), None) + assert mocks.m_write.call_args_list == [] + assert mocks.m_del.call_args_list == [] + assert mocks.m_subp.call_args_list == [] + + def test_rules_not_installed_when_disabled(self, mocks): + mocks.m_update_enabled.return_value = False + m_cloud = mock.MagicMock() + m_cloud.datasource.get_supported_events.return_value = { + EventScope.NETWORK: {EventType.HOTPLUG} + } + + handle(None, {}, m_cloud, mock.Mock(), None) + assert mocks.m_write.call_args_list == [] + assert mocks.m_del.call_args_list == [] + assert mocks.m_subp.call_args_list == [] + + def test_rules_uninstalled_when_disabled(self, mocks): + mocks.m_path_exists.return_value = True + mocks.m_update_enabled.return_value = False + m_cloud = mock.MagicMock() + m_cloud.datasource.get_supported_events.return_value = {} + + handle(None, {}, m_cloud, mock.Mock(), None) + mocks.m_del.assert_called_with(HOTPLUG_UDEV_PATH) + assert mocks.m_subp.call_args_list == [mock.call([ + 'udevadm', 'control', '--reload-rules', + ])] + assert mocks.m_write.call_args_list == [] + + def test_rules_not_installed_when_no_udevadm(self, mocks): + mocks.m_update_enabled.return_value = True + m_cloud = mock.MagicMock() + m_cloud.datasource.get_supported_events.return_value = { + EventScope.NETWORK: {EventType.HOTPLUG} + } + + handle(None, {}, m_cloud, mock.Mock(), None) + assert mocks.m_del.call_args_list == [] + assert mocks.m_write.call_args_list 
== [] + assert mocks.m_subp.call_args_list == [] diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py index 59f58f7c..1dae223d 100644 --- a/tests/unittests/test_handler/test_schema.py +++ b/tests/unittests/test_handler/test_schema.py @@ -36,7 +36,8 @@ class GetSchemaTest(CiTestCase): 'cc_write_files', 'cc_write_files_deferred', 'cc_zypper_add_repo', - 'cc_chef' + 'cc_chef', + 'cc_install_hotplug', ], [subschema['id'] for subschema in schema['allOf']]) self.assertEqual('cloud-config-schema', schema['id']) diff --git a/tools/hook-hotplug b/tools/hook-hotplug index ced268b3..35bd3da2 100755 --- a/tools/hook-hotplug +++ b/tools/hook-hotplug @@ -8,11 +8,7 @@ is_finished() { [ -e /run/cloud-init/result.json ] } -hotplug_enabled() { - [ "$(cloud-init devel hotplug-hook -s "${SUBSYSTEM}" query)" == "enabled" ] -} - -if is_finished && hotplug_enabled; then +if is_finished; then # open cloud-init's hotplug-hook fifo rw exec 3<>/run/cloud-init/hook-hotplug-cmd env_params=( diff --git a/udev/10-cloud-init-hook-hotplug.rules b/udev/10-cloud-init-hook-hotplug.rules deleted file mode 100644 index 2e382679..00000000 --- a/udev/10-cloud-init-hook-hotplug.rules +++ /dev/null @@ -1,6 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. -# Handle device adds only -ACTION!="add|remove", GOTO="cloudinit_end" -LABEL="cloudinit_hook" -SUBSYSTEM=="net|block", RUN+="/usr/lib/cloud-init/hook-hotplug" -LABEL="cloudinit_end" -- cgit v1.2.3 From 28581988da4b37e3d2423075c64dc1f3bc5da5cc Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Fri, 29 Oct 2021 13:33:33 -0600 Subject: Remove (deprecated) apt-key (#1068) Also, add the "signed by" option to source definitions. This enables users to limit the scope of trust for individual keys. 
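[Editor's note] In sketch form, a hardened source entry of the shape the diff
below consumes looks like this; the filename, repo placeholders, and keyid
here are hypothetical:

    # Hypothetical 'apt: sources:' entry, expressed as the Python dict that
    # add_apt_sources() receives after config parsing.
    sources = {
        'example-repo.list': {
            'source': 'deb [signed-by=$KEY_FILE] $MIRROR $RELEASE main',
            'keyid': 'B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77',
            'keyserver': 'keyserver.ubuntu.com',
        }
    }
    # Because '$KEY_FILE' appears in 'source', the fetched key is dearmored
    # into /etc/apt/cloud-init.gpg.d/example-repo.gpg and that path is
    # substituted for $KEY_FILE when the .list file is rendered, limiting
    # the key's trust to this one repository.
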
LP: #1836336 --- cloudinit/config/cc_apt_configure.py | 135 ++++++++++++++++++-- cloudinit/gpg.py | 30 +++++ doc/examples/cloud-config-apt.txt | 24 +++- tests/integration_tests/modules/test_apt.py | 62 ++++++++-- tests/unittests/test_gpg.py | 81 ++++++++++++ .../unittests/test_handler/test_handler_apt_key.py | 137 +++++++++++++++++++++ .../test_handler/test_handler_apt_source_v1.py | 75 +++++++---- .../test_handler/test_handler_apt_source_v3.py | 85 ++++++++----- 8 files changed, 548 insertions(+), 81 deletions(-) create mode 100644 tests/unittests/test_gpg.py create mode 100644 tests/unittests/test_handler/test_handler_apt_key.py (limited to 'tests/integration_tests') diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 0c9c7925..c3c48bbd 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -11,6 +11,7 @@ import glob import os import re +import pathlib from textwrap import dedent from cloudinit.config.schema import ( @@ -27,6 +28,10 @@ LOG = logging.getLogger(__name__) # this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar') ADD_APT_REPO_MATCH = r"^[\w-]+:\w" +APT_LOCAL_KEYS = '/etc/apt/trusted.gpg' +APT_TRUSTED_GPG_DIR = '/etc/apt/trusted.gpg.d/' +CLOUD_INIT_GPG_DIR = '/etc/apt/cloud-init.gpg.d/' + frequency = PER_INSTANCE distros = ["ubuntu", "debian"] mirror_property = { @@ -139,7 +144,7 @@ schema = { source1: keyid: 'keyid' keyserver: 'keyserverurl' - source: 'deb http:/// xenial main' + source: 'deb [signed-by=$KEY_FILE] http:/// xenial main' source2: source: 'ppa:' source3: @@ -312,7 +317,8 @@ schema = { - ``$MIRROR`` - ``$RELEASE`` - ``$PRIMARY`` - - ``$SECURITY``""") + - ``$SECURITY`` + - ``$KEY_FILE``""") }, 'conf': { 'type': 'string', @@ -381,7 +387,8 @@ schema = { - ``$MIRROR`` - ``$PRIMARY`` - ``$SECURITY`` - - ``$RELEASE``""") + - ``$RELEASE`` + - ``$KEY_FILE``""") } } } @@ -683,7 +690,7 @@ def add_mirror_keys(cfg, target): """Adds any keys included in the primary/security mirror clauses""" for key in ('primary', 'security'): for mirror in cfg.get(key, []): - add_apt_key(mirror, target) + add_apt_key(mirror, target, file_name=key) def generate_sources_list(cfg, release, mirrors, cloud): @@ -714,20 +721,21 @@ def generate_sources_list(cfg, release, mirrors, cloud): util.write_file(aptsrc, disabled, mode=0o644) -def add_apt_key_raw(key, target=None): +def add_apt_key_raw(key, file_name, hardened=False, target=None): """ actual adding of a key as defined in key argument to the system """ LOG.debug("Adding key:\n'%s'", key) try: - subp.subp(['apt-key', 'add', '-'], data=key.encode(), target=target) + name = pathlib.Path(file_name).stem + return apt_key('add', output_file=name, data=key, hardened=hardened) except subp.ProcessExecutionError: LOG.exception("failed to add apt GPG Key to apt keyring") raise -def add_apt_key(ent, target=None): +def add_apt_key(ent, target=None, hardened=False, file_name=None): """ Add key to the system as defined in ent (if any). 
Supports raw keys or keyid's @@ -741,7 +749,10 @@ def add_apt_key(ent, target=None): ent['key'] = gpg.getkeybyid(ent['keyid'], keyserver) if 'key' in ent: - add_apt_key_raw(ent['key'], target) + return add_apt_key_raw( + ent['key'], + file_name or ent['filename'], + hardened=hardened) def update_packages(cloud): @@ -751,9 +762,28 @@ def update_packages(cloud): def add_apt_sources(srcdict, cloud, target=None, template_params=None, aa_repo_match=None): """ - add entries in /etc/apt/sources.list.d for each abbreviated - sources.list entry in 'srcdict'. When rendering template, also - include the values in dictionary searchList + install keys and repo source .list files defined in 'sources' + + for each 'source' entry in the config: + 1. expand template variables and write source .list file in + /etc/apt/sources.list.d/ + 2. install defined keys + 3. update packages via distro-specific method (i.e. apt-key update) + + + @param srcdict: a dict containing elements required + @param cloud: cloud instance object + + Example srcdict value: + { + 'rio-grande-repo': { + 'source': 'deb [signed-by=$KEY_FILE] $MIRROR $RELEASE main', + 'keyid': 'B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77', + 'keyserver': 'pgp.mit.edu' + } + } + + Note: Deb822 format is not supported """ if template_params is None: template_params = {} @@ -770,7 +800,11 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None, if 'filename' not in ent: ent['filename'] = filename - add_apt_key(ent, target) + if 'source' in ent and '$KEY_FILE' in ent['source']: + key_file = add_apt_key(ent, target, hardened=True) + template_params['KEY_FILE'] = key_file + else: + key_file = add_apt_key(ent, target) if 'source' not in ent: continue @@ -1006,7 +1040,7 @@ def get_arch_mirrorconfig(cfg, mirrortype, arch): # select the specification matching the target arch default = None for mirror_cfg_elem in mirror_cfg_list: - arches = mirror_cfg_elem.get("arches") + arches = mirror_cfg_elem.get("arches", []) if arch in arches: return mirror_cfg_elem if "default" in arches: @@ -1089,6 +1123,81 @@ def apply_apt_config(cfg, proxy_fname, config_fname): LOG.debug("no apt config configured, removed %s", config_fname) +def apt_key(command, output_file=None, data=None, hardened=False, + human_output=True): + """apt-key replacement + + commands implemented: 'add', 'list', 'finger' + + @param output_file: name of output gpg file (without .gpg or .asc) + @param data: key contents + @param human_output: list keys formatted for human parsing + @param hardened: write keys to to /etc/apt/cloud-init.gpg.d/ (referred to + with [signed-by] in sources file) + """ + + def _get_key_files(): + """return all apt keys + + /etc/apt/trusted.gpg (if it exists) and all keyfiles (and symlinks to + keyfiles) in /etc/apt/trusted.gpg.d/ are returned + + based on apt-key implementation + """ + key_files = [APT_LOCAL_KEYS] if os.path.isfile(APT_LOCAL_KEYS) else [] + + for file in os.listdir(APT_TRUSTED_GPG_DIR): + if file.endswith('.gpg') or file.endswith('.asc'): + key_files.append(APT_TRUSTED_GPG_DIR + file) + return key_files if key_files else '' + + def apt_key_add(): + """apt-key add + + returns filepath to new keyring, or '/dev/null' when an error occurs + """ + file_name = '/dev/null' + if not output_file: + util.logexc( + LOG, 'Unknown filename, failed to add key: "{}"'.format(data)) + else: + try: + key_dir = \ + CLOUD_INIT_GPG_DIR if hardened else APT_TRUSTED_GPG_DIR + stdout = gpg.dearmor(data) + file_name = '{}{}.gpg'.format(key_dir, output_file) + 
util.write_file(file_name, stdout) + except subp.ProcessExecutionError: + util.logexc(LOG, 'Gpg error, failed to add key: {}'.format( + data)) + except UnicodeDecodeError: + util.logexc(LOG, 'Decode error, failed to add key: {}'.format( + data)) + return file_name + + def apt_key_list(): + """apt-key list + + returns string of all trusted keys (in /etc/apt/trusted.gpg and + /etc/apt/trusted.gpg.d/) + """ + key_list = [] + for key_file in _get_key_files(): + try: + key_list.append(gpg.list(key_file, human_output=human_output)) + except subp.ProcessExecutionError as error: + LOG.warning('Failed to list key "%s": %s', key_file, error) + return '\n'.join(key_list) + + if command == 'add': + return apt_key_add() + elif command == 'finger' or command == 'list': + return apt_key_list() + else: + raise ValueError( + 'apt_key() commands add, list, and finger are currently supported') + + CONFIG_CLEANERS = { 'cloud-init': clean_cloud_init, } diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py index 3780326c..07d682d2 100644 --- a/cloudinit/gpg.py +++ b/cloudinit/gpg.py @@ -14,6 +14,9 @@ import time LOG = logging.getLogger(__name__) +GPG_LIST = ['gpg', '--with-fingerprint', '--no-default-keyring', '--list-keys', + '--keyring'] + def export_armour(key): """Export gpg key, armoured key gets returned""" @@ -27,6 +30,33 @@ def export_armour(key): return armour +def dearmor(key): + """Dearmor gpg key, dearmored key gets returned + + note: man gpg(1) makes no mention of an --armour spelling, only --armor + """ + return subp.subp(["gpg", "--dearmor"], data=key, decode=False)[0] + + +def list(key_file, human_output=False): + """List keys from a keyring with fingerprints. Default to a stable machine + parseable format. + + @param key_file: a string containing a filepath to a key + @param human_output: return output intended for human parsing + """ + cmd = [] + cmd.extend(GPG_LIST) + if not human_output: + cmd.append('--with-colons') + + cmd.append(key_file) + (stdout, stderr) = subp.subp(cmd, capture=True) + if stderr: + LOG.warning('Failed to export armoured key "%s": %s', key_file, stderr) + return stdout + + def recv_key(key, keyserver, retries=(1, 1)): """Receive gpg key from the specified keyserver. diff --git a/doc/examples/cloud-config-apt.txt b/doc/examples/cloud-config-apt.txt index f4392326..7baa141c 100644 --- a/doc/examples/cloud-config-apt.txt +++ b/doc/examples/cloud-config-apt.txt @@ -149,6 +149,7 @@ apt: # security is optional, if not defined it is set to the same value as primary security: - uri: http://security.ubuntu.com/ubuntu + - arches: [default] # If search_dns is set for security the searched pattern is: # -security-mirror @@ -212,14 +213,14 @@ apt: # # The key of each source entry is the filename and will be prepended by # /etc/apt/sources.list.d/ if it doesn't start with a '/'. - # If it doesn't end with .list it will be appended so that apt picks up it's + # If it doesn't end with .list it will be appended so that apt picks up its # configuration. # # Whenever there is no content to be written into such a file, the key is # not used as filename - yet it can still be used as index for merging # configuration. 
# - # The values inside the entries consost of the following optional entries: + # The values inside the entries consist of the following optional entries: # 'source': a sources.list entry (some variable replacements apply) # 'keyid': providing a key to import via shortid or fingerprint # 'key': providing a raw PGP key @@ -276,13 +277,14 @@ apt: my-repo2.list: # 2.4 replacement variables # - # sources can use $MIRROR, $PRIMARY, $SECURITY and $RELEASE replacement - # variables. + # sources can use $MIRROR, $PRIMARY, $SECURITY, $RELEASE and $KEY_FILE + # replacement variables. # They will be replaced with the default or specified mirrors and the # running release. # The entry below would be possibly turned into: # source: deb http://archive.ubuntu.com/ubuntu xenial multiverse - source: deb $MIRROR $RELEASE multiverse + source: deb [signed-by=$KEY_FILE] $MIRROR $RELEASE multiverse + keyid: F430BBA5 my-repo3.list: # this would have the same end effect as 'ppa:curtin-dev/test-archive' @@ -310,9 +312,19 @@ apt: keyid: B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77 keyserver: pgp.mit.edu + ignored5: + # 2.8 signed-by + # + # One can specify [signed-by=$KEY_FILE] in the source definition, which + # will make the key be installed in the directory /etc/cloud-init.gpg.d/ + # and the $KEY_FILE replacement variable will be replaced with the path + # to the specified key. If $KEY_FILE is used, but no key is specified, + # apt update will (rightfully) fail due to an invalid value. + source: deb [signed-by=$KEY_FILE] $MIRROR $RELEASE multiverse + keyid: B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77 my-repo4.list: - # 2.8 raw key + # 2.9 raw key # # The apt signing key can also be specified by providing a pgp public key # block. Providing the PGP key this way is the most robust method for diff --git a/tests/integration_tests/modules/test_apt.py b/tests/integration_tests/modules/test_apt.py index 54711fc0..2c388047 100644 --- a/tests/integration_tests/modules/test_apt.py +++ b/tests/integration_tests/modules/test_apt.py @@ -1,9 +1,11 @@ """Series of integration tests covering apt functionality.""" import re -from tests.integration_tests.clouds import ImageSpecification import pytest +from cloudinit.config import cc_apt_configure +from cloudinit import gpg +from tests.integration_tests.clouds import ImageSpecification from tests.integration_tests.instances import IntegrationInstance @@ -43,6 +45,13 @@ apt: keyid: 441614D8 keyserver: keyserver.ubuntu.com source: "ppa:simplestreams-dev/trunk" + test_signed_by: + keyid: A2EB2DEC0BD7519B7B38BE38376A290EC8068B11 + keyserver: keyserver.ubuntu.com + source: "deb [signed-by=$KEY_FILE] http://ppa.launchpad.net/juju/stable/ubuntu $RELEASE main" + test_bad_key: + key: "" + source: "deb $MIRROR $RELEASE main" test_key: source: "deb http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu $RELEASE main" key: | @@ -91,12 +100,27 @@ TEST_KEYSERVER_KEY = "7260 0DB1 5B8E 4C8B 1964 B868 038A CC97 C660 A937" TEST_PPA_KEY = "3552 C902 B4DD F7BD 3842 1821 015D 28D7 4416 14D8" TEST_KEY = "1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF" +TEST_SIGNED_BY_KEY = "A2EB 2DEC 0BD7 519B 7B38 BE38 376A 290E C806 8B11" @pytest.mark.ci @pytest.mark.ubuntu @pytest.mark.user_data(USER_DATA) class TestApt: + def get_keys(self, class_client: IntegrationInstance): + """Return all keys in /etc/apt/trusted.gpg.d/ and /etc/apt/trusted.gpg + in human readable format. 
Mimics the output of apt-key finger + """ + list_cmd = ' '.join(gpg.GPG_LIST) + ' ' + keys = class_client.execute(list_cmd + cc_apt_configure.APT_LOCAL_KEYS) + print(keys) + files = class_client.execute( + 'ls ' + cc_apt_configure.APT_TRUSTED_GPG_DIR) + for file in files.split(): + path = cc_apt_configure.APT_TRUSTED_GPG_DIR + file + keys += class_client.execute(list_cmd + path) or '' + return keys + def test_sources_list(self, class_client: IntegrationInstance): """Integration test for the apt module's `sources_list` functionality. @@ -152,8 +176,33 @@ class TestApt: 'http://ppa.launchpad.net/simplestreams-dev/trunk/ubuntu' ) in ppa_path_contents - keys = class_client.execute('apt-key finger') - assert TEST_PPA_KEY in keys + assert TEST_PPA_KEY in self.get_keys(class_client) + + def test_signed_by(self, class_client: IntegrationInstance): + """Test the apt signed-by functionality. + """ + release = ImageSpecification.from_os_image().release + source = ( + "deb [signed-by=/etc/apt/cloud-init.gpg.d/test_signed_by.gpg] " + "http://ppa.launchpad.net/juju/stable/ubuntu" + " {} main".format(release)) + print(class_client.execute('cat /var/log/cloud-init.log')) + path_contents = class_client.read_from_file( + '/etc/apt/sources.list.d/test_signed_by.list') + assert path_contents == source + + key = class_client.execute( + 'gpg --no-default-keyring --with-fingerprint --list-keys ' + '--keyring /etc/apt/cloud-init.gpg.d/test_signed_by.gpg') + + assert TEST_SIGNED_BY_KEY in key + + def test_bad_key(self, class_client: IntegrationInstance): + """Test the apt signed-by functionality. + """ + with pytest.raises(OSError): + class_client.read_from_file( + '/etc/apt/trusted.list.d/test_bad_key.gpg') def test_key(self, class_client: IntegrationInstance): """Test the apt key functionality. @@ -168,9 +217,7 @@ class TestApt: assert ( 'http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu' ) in test_archive_contents - - keys = class_client.execute('apt-key finger') - assert TEST_KEY in keys + assert TEST_KEY in self.get_keys(class_client) def test_keyserver(self, class_client: IntegrationInstance): """Test the apt keyserver functionality. @@ -186,8 +233,7 @@ class TestApt: 'http://ppa.launchpad.net/cloud-init-raharper/curtin-dev/ubuntu' ) in test_keyserver_contents - keys = class_client.execute('apt-key finger') - assert TEST_KEYSERVER_KEY in keys + assert TEST_KEYSERVER_KEY in self.get_keys(class_client) def test_os_pipelining(self, class_client: IntegrationInstance): """Test 'os' settings does not write apt config file. 
diff --git a/tests/unittests/test_gpg.py b/tests/unittests/test_gpg.py new file mode 100644 index 00000000..451ffa91 --- /dev/null +++ b/tests/unittests/test_gpg.py @@ -0,0 +1,81 @@ +import pytest +from unittest import mock + +from cloudinit import gpg +from cloudinit import subp + +TEST_KEY_HUMAN = ''' +/etc/apt/cloud-init.gpg.d/my_key.gpg +-------------------------------------------- +pub rsa4096 2021-10-22 [SC] + 3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85 +uid [ unknown] Brett Holman +sub rsa4096 2021-10-22 [A] +sub rsa4096 2021-10-22 [E] +''' + +TEST_KEY_MACHINE = ''' +tru::1:1635129362:0:3:1:5 +pub:-:4096:1:F83F77129A5EBD85:1634912922:::-:::scESCA::::::23::0: +fpr:::::::::3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85: +uid:-::::1634912922::64F1F1D6FA96316752D635D7C6406C52C40713C7::Brett Holman \ +::::::::::0: +sub:-:4096:1:544B39C9A9141F04:1634912922::::::a::::::23: +fpr:::::::::8BD901490D6EC986D03D6F0D544B39C9A9141F04: +sub:-:4096:1:F45D9443F0A87092:1634912922::::::e::::::23: +fpr:::::::::8CCCB332317324F030A45B19F45D9443F0A87092: +''' + +TEST_KEY_FINGERPRINT_HUMAN = \ + '3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85' + +TEST_KEY_FINGERPRINT_MACHINE = \ + '3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85' + + +class TestGPGCommands: + def test_dearmor_bad_value(self): + """This exception is handled by the callee. Ensure it is not caught + internally. + """ + with mock.patch.object( + subp, + 'subp', + side_effect=subp.ProcessExecutionError): + with pytest.raises(subp.ProcessExecutionError): + gpg.dearmor('garbage key value') + + def test_gpg_list_args(self): + """Verify correct command gets called to list keys + """ + no_colons = [ + 'gpg', + '--with-fingerprint', + '--no-default-keyring', + '--list-keys', + '--keyring', + 'key'] + colons = [ + 'gpg', + '--with-fingerprint', + '--no-default-keyring', + '--list-keys', + '--keyring', + '--with-colons', + 'key'] + with mock.patch.object(subp, 'subp', return_value=('', '')) as m_subp: + gpg.list('key') + assert mock.call(colons, capture=True) == m_subp.call_args + + gpg.list('key', human_output=True) + test_calls = mock.call((no_colons), capture=True) + assert test_calls == m_subp.call_args + + def test_gpg_dearmor_args(self): + """Verify correct command gets called to dearmor keys + """ + with mock.patch.object(subp, 'subp', return_value=('', '')) as m_subp: + gpg.dearmor('key') + test_call = mock.call( + ["gpg", "--dearmor"], data='key', decode=False) + assert test_call == m_subp.call_args diff --git a/tests/unittests/test_handler/test_handler_apt_key.py b/tests/unittests/test_handler/test_handler_apt_key.py new file mode 100644 index 00000000..00e5a38d --- /dev/null +++ b/tests/unittests/test_handler/test_handler_apt_key.py @@ -0,0 +1,137 @@ +import os +from unittest import mock + +from cloudinit.config import cc_apt_configure +from cloudinit import subp +from cloudinit import util + +TEST_KEY_HUMAN = ''' +/etc/apt/cloud-init.gpg.d/my_key.gpg +-------------------------------------------- +pub rsa4096 2021-10-22 [SC] + 3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85 +uid [ unknown] Brett Holman +sub rsa4096 2021-10-22 [A] +sub rsa4096 2021-10-22 [E] +''' + +TEST_KEY_MACHINE = ''' +tru::1:1635129362:0:3:1:5 +pub:-:4096:1:F83F77129A5EBD85:1634912922:::-:::scESCA::::::23::0: +fpr:::::::::3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85: +uid:-::::1634912922::64F1F1D6FA96316752D635D7C6406C52C40713C7::Brett Holman \ +::::::::::0: +sub:-:4096:1:544B39C9A9141F04:1634912922::::::a::::::23: +fpr:::::::::8BD901490D6EC986D03D6F0D544B39C9A9141F04: 
+sub:-:4096:1:F45D9443F0A87092:1634912922::::::e::::::23:
+fpr:::::::::8CCCB332317324F030A45B19F45D9443F0A87092:
+'''
+
+TEST_KEY_FINGERPRINT_HUMAN = \
+    '3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85'
+
+TEST_KEY_FINGERPRINT_MACHINE = \
+    '3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85'
+
+
+class TestAptKey:
+    """TestAptKey
+    Class to test apt-key commands
+    """
+    @mock.patch.object(subp, 'subp', return_value=('fakekey', ''))
+    @mock.patch.object(util, 'write_file')
+    def _apt_key_add_success_helper(self, directory, *args, hardened=False):
+        file = cc_apt_configure.apt_key(
+            'add',
+            output_file='my-key',
+            data='fakekey',
+            hardened=hardened)
+        assert file == directory + '/my-key.gpg'
+
+    def test_apt_key_add_success(self):
+        """Verify the correct directory path gets returned for unhardened case
+        """
+        self._apt_key_add_success_helper('/etc/apt/trusted.gpg.d')
+
+    def test_apt_key_add_success_hardened(self):
+        """Verify the correct directory path gets returned for hardened case
+        """
+        self._apt_key_add_success_helper(
+            '/etc/apt/cloud-init.gpg.d',
+            hardened=True)
+
+    def test_apt_key_add_fail_no_file_name(self):
+        """Verify that null filename gets handled correctly
+        """
+        file = cc_apt_configure.apt_key(
+            'add',
+            output_file=None,
+            data='')
+        assert '/dev/null' == file
+
+    def _apt_key_fail_helper(self):
+        file = cc_apt_configure.apt_key(
+            'add',
+            output_file='my-key',
+            data='fakekey')
+        assert file == '/dev/null'
+
+    @mock.patch.object(subp, 'subp', side_effect=subp.ProcessExecutionError)
+    def test_apt_key_add_fail_no_file_name_subproc(self, *args):
+        """Verify that bad key value gets handled correctly
+        """
+        self._apt_key_fail_helper()
+
+    @mock.patch.object(
+        subp, 'subp', side_effect=UnicodeDecodeError('test', b'', 1, 1, ''))
+    def test_apt_key_add_fail_no_file_name_unicode(self, *args):
+        """Verify that bad key encoding gets handled correctly
+        """
+        self._apt_key_fail_helper()
+
+    def _apt_key_list_success_helper(self, finger, key, human_output=True):
+        @mock.patch.object(os, 'listdir', return_value=('/fake/dir/key.gpg',))
+        @mock.patch.object(subp, 'subp', return_value=(key, ''))
+        def mocked_list(*a):
+
+            keys = cc_apt_configure.apt_key('list', human_output)
+            assert finger in keys
+        mocked_list()
+
+    def test_apt_key_list_success_human(self):
+        """Verify expected key output, human
+        """
+        self._apt_key_list_success_helper(
+            TEST_KEY_FINGERPRINT_HUMAN,
+            TEST_KEY_HUMAN)
+
+    def test_apt_key_list_success_machine(self):
+        """Verify expected key output, machine
+        """
+        self._apt_key_list_success_helper(
+            TEST_KEY_FINGERPRINT_MACHINE,
+            TEST_KEY_MACHINE, human_output=False)
+
+    @mock.patch.object(os, 'listdir', return_value=())
+    @mock.patch.object(subp, 'subp', return_value=('', ''))
+    def test_apt_key_list_fail_no_keys(self, *args):
+        """Ensure falsy output for no keys
+        """
+        keys = cc_apt_configure.apt_key('list')
+        assert not keys
+
+    @mock.patch.object(os, 'listdir', return_value=('file_not_gpg_key.txt'))
+    @mock.patch.object(subp, 'subp', return_value=('', ''))
+    def test_apt_key_list_fail_no_keys_file(self, *args):
+        """Ensure non-gpg file is not returned.
+
+        apt-key used file extensions for this, so we do too
+        """
+        assert not cc_apt_configure.apt_key('list')
+
+    @mock.patch.object(subp, 'subp', side_effect=subp.ProcessExecutionError)
+    @mock.patch.object(os, 'listdir', return_value=('bad_gpg_key.gpg'))
+    def test_apt_key_list_fail_bad_key_file(self, *args):
+        """Ensure bad gpg key doesn't throw exception.
+ """ + assert not cc_apt_configure.apt_key('list') diff --git a/tests/unittests/test_handler/test_handler_apt_source_v1.py b/tests/unittests/test_handler/test_handler_apt_source_v1.py index 367971cb..2357d699 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v1.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v1.py @@ -9,6 +9,7 @@ import os import re import shutil import tempfile +import pathlib from unittest import mock from unittest.mock import call @@ -279,16 +280,16 @@ class TestAptSourceConfig(TestCase): """ cfg = self.wrapv1conf(cfg) - with mock.patch.object(subp, 'subp', - return_value=('fakekey 1234', '')) as mockobj: + with mock.patch.object(cc_apt_configure, 'add_apt_key') as mockobj: cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - # check if it added the right ammount of keys + # check if it added the right number of keys calls = [] - for _ in range(keynum): - calls.append(call(['apt-key', 'add', '-'], - data=b'fakekey 1234', - target=None)) + sources = cfg['apt']['sources'] + for src in sources: + print(sources[src]) + calls.append(call(sources[src], None)) + mockobj.assert_has_calls(calls, any_order=True) self.assertTrue(os.path.isfile(filename)) @@ -364,11 +365,17 @@ class TestAptSourceConfig(TestCase): """ cfg = self.wrapv1conf([cfg]) - with mock.patch.object(subp, 'subp') as mockobj: + with mock.patch.object(cc_apt_configure, 'add_apt_key') as mockobj: cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - mockobj.assert_called_with(['apt-key', 'add', '-'], - data=b'fakekey 4321', target=None) + # check if it added the right amount of keys + sources = cfg['apt']['sources'] + calls = [] + for src in sources: + print(sources[src]) + calls.append(call(sources[src], None)) + + mockobj.assert_has_calls(calls, any_order=True) self.assertTrue(os.path.isfile(filename)) @@ -405,12 +412,15 @@ class TestAptSourceConfig(TestCase): cfg = {'key': "fakekey 4242", 'filename': self.aptlistfile} cfg = self.wrapv1conf([cfg]) - - with mock.patch.object(subp, 'subp') as mockobj: + with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - mockobj.assert_called_once_with(['apt-key', 'add', '-'], - data=b'fakekey 4242', target=None) + calls = (call( + 'add', + output_file=pathlib.Path(self.aptlistfile).stem, + data='fakekey 4242', + hardened=False),) + mockobj.assert_has_calls(calls, any_order=True) # filename should be ignored on key only self.assertFalse(os.path.isfile(self.aptlistfile)) @@ -422,16 +432,26 @@ class TestAptSourceConfig(TestCase): cfg = self.wrapv1conf([cfg]) with mock.patch.object(subp, 'subp', - return_value=('fakekey 1212', '')) as mockobj: - cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - - mockobj.assert_called_with(['apt-key', 'add', '-'], - data=b'fakekey 1212', target=None) + return_value=('fakekey 1212', '')): + with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: + cc_apt_configure.handle( + "test", + cfg, + self.fakecloud, + None, + None) + + calls = (call( + 'add', + output_file=pathlib.Path(self.aptlistfile).stem, + data='fakekey 1212', + hardened=False),) + mockobj.assert_has_calls(calls, any_order=True) # filename should be ignored on key only self.assertFalse(os.path.isfile(self.aptlistfile)) - def apt_src_keyid_real(self, cfg, expectedkey): + def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None): """apt_src_keyid_real Test specification of a keyid without source including up to addition of 
the key (add_apt_key_raw mocked to keep the @@ -446,9 +466,14 @@ class TestAptSourceConfig(TestCase): return_value=expectedkey) as mockgetkey: cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - + if is_hardened is not None: + mockkey.assert_called_with( + expectedkey, + self.aptlistfile, + hardened=is_hardened) + else: + mockkey.assert_called_with(expectedkey, self.aptlistfile) mockgetkey.assert_called_with(key, keyserver) - mockkey.assert_called_with(expectedkey, None) # filename should be ignored on key only self.assertFalse(os.path.isfile(self.aptlistfile)) @@ -459,7 +484,7 @@ class TestAptSourceConfig(TestCase): cfg = {'keyid': keyid, 'filename': self.aptlistfile} - self.apt_src_keyid_real(cfg, EXPECTEDKEY) + self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) def test_apt_src_longkeyid_real(self): """test_apt_src_longkeyid_real - Test long keyid including key add""" @@ -467,7 +492,7 @@ class TestAptSourceConfig(TestCase): cfg = {'keyid': keyid, 'filename': self.aptlistfile} - self.apt_src_keyid_real(cfg, EXPECTEDKEY) + self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) def test_apt_src_longkeyid_ks_real(self): """test_apt_src_longkeyid_ks_real - Test long keyid from other ks""" @@ -476,7 +501,7 @@ class TestAptSourceConfig(TestCase): 'keyserver': 'keys.gnupg.net', 'filename': self.aptlistfile} - self.apt_src_keyid_real(cfg, EXPECTEDKEY) + self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) def test_apt_src_ppa(self): """Test adding a ppa""" diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py index d4db610f..20289121 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v3.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py @@ -10,6 +10,7 @@ import re import shutil import socket import tempfile +import pathlib from unittest import TestCase, mock from unittest.mock import call @@ -214,22 +215,24 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): self.aptlistfile3: {'source': 'deb $MIRROR $RELEASE universe'}} self._apt_src_replace_tri(cfg) - def _apt_src_keyid(self, filename, cfg, keynum): + def _apt_src_keyid(self, filename, cfg, keynum, is_hardened=None): """_apt_src_keyid Test specification of a source + keyid """ params = self._get_default_params() - with mock.patch("cloudinit.subp.subp", - return_value=('fakekey 1234', '')) as mockobj: + with mock.patch.object(cc_apt_configure, 'add_apt_key') as mockobj: self._add_apt_sources(cfg, TARGET, template_params=params, aa_repo_match=self.matcher) - # check if it added the right ammount of keys + # check if it added the right number of keys calls = [] - for _ in range(keynum): - calls.append(call(['apt-key', 'add', '-'], data=b'fakekey 1234', - target=TARGET)) + for key in cfg: + if is_hardened is not None: + calls.append(call(cfg[key], hardened=is_hardened)) + else: + calls.append(call(cfg[key], TARGET)) + mockobj.assert_has_calls(calls, any_order=True) self.assertTrue(os.path.isfile(filename)) @@ -248,6 +251,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): 'http://ppa.launchpad.net/' 'smoser/cloud-init-test/ubuntu' ' xenial main'), + 'filename': self.aptlistfile, 'keyid': "03683F77"}} self._apt_src_keyid(self.aptlistfile, cfg, 1) @@ -268,6 +272,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): 'http://ppa.launchpad.net/' 'smoser/cloud-init-test/ubuntu' ' xenial multiverse'), + 'filename': self.aptlistfile3, 'keyid': "03683F77"}} 
self._apt_src_keyid(self.aptlistfile, cfg, 3) @@ -293,15 +298,19 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): 'http://ppa.launchpad.net/' 'smoser/cloud-init-test/ubuntu' ' xenial main'), + 'filename': self.aptlistfile, 'key': "fakekey 4321"}} - with mock.patch.object(subp, 'subp') as mockobj: + with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: self._add_apt_sources(cfg, TARGET, template_params=params, aa_repo_match=self.matcher) - mockobj.assert_any_call(['apt-key', 'add', '-'], data=b'fakekey 4321', - target=TARGET) - + calls = (call( + 'add', + output_file=pathlib.Path(self.aptlistfile).stem, + data='fakekey 4321', + hardened=False),) + mockobj.assert_has_calls(calls, any_order=True) self.assertTrue(os.path.isfile(self.aptlistfile)) contents = util.load_file(self.aptlistfile) @@ -317,12 +326,16 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): params = self._get_default_params() cfg = {self.aptlistfile: {'key': "fakekey 4242"}} - with mock.patch.object(subp, 'subp') as mockobj: + with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: self._add_apt_sources(cfg, TARGET, template_params=params, aa_repo_match=self.matcher) - mockobj.assert_any_call(['apt-key', 'add', '-'], data=b'fakekey 4242', - target=TARGET) + calls = (call( + 'add', + output_file=pathlib.Path(self.aptlistfile).stem, + data='fakekey 4242', + hardened=False),) + mockobj.assert_has_calls(calls, any_order=True) # filename should be ignored on key only self.assertFalse(os.path.isfile(self.aptlistfile)) @@ -331,19 +344,23 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): """test_apt_v3_src_keyidonly - Test keyid without source""" params = self._get_default_params() cfg = {self.aptlistfile: {'keyid': "03683F77"}} - with mock.patch.object(subp, 'subp', - return_value=('fakekey 1212', '')) as mockobj: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) + return_value=('fakekey 1212', '')): + with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: + self._add_apt_sources(cfg, TARGET, template_params=params, + aa_repo_match=self.matcher) - mockobj.assert_any_call(['apt-key', 'add', '-'], data=b'fakekey 1212', - target=TARGET) + calls = (call( + 'add', + output_file=pathlib.Path(self.aptlistfile).stem, + data='fakekey 1212', + hardened=False),) + mockobj.assert_has_calls(calls, any_order=True) # filename should be ignored on key only self.assertFalse(os.path.isfile(self.aptlistfile)) - def apt_src_keyid_real(self, cfg, expectedkey): + def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None): """apt_src_keyid_real Test specification of a keyid without source including up to addition of the key (add_apt_key_raw mocked to keep the @@ -361,7 +378,11 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): mockgetkey.assert_called_with(keycfg['keyid'], keycfg.get('keyserver', 'keyserver.ubuntu.com')) - mockkey.assert_called_with(expectedkey, TARGET) + if is_hardened is not None: + mockkey.assert_called_with( + expectedkey, + keycfg['keyfile'], + hardened=is_hardened) # filename should be ignored on key only self.assertFalse(os.path.isfile(self.aptlistfile)) @@ -369,21 +390,24 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): def test_apt_v3_src_keyid_real(self): """test_apt_v3_src_keyid_real - Test keyid including key add""" keyid = "03683F77" - cfg = {self.aptlistfile: {'keyid': keyid}} + cfg = {self.aptlistfile: {'keyid': keyid, + 'keyfile': self.aptlistfile}} - self.apt_src_keyid_real(cfg, 
EXPECTEDKEY) + self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) def test_apt_v3_src_longkeyid_real(self): """test_apt_v3_src_longkeyid_real Test long keyid including key add""" keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" - cfg = {self.aptlistfile: {'keyid': keyid}} + cfg = {self.aptlistfile: {'keyid': keyid, + 'keyfile': self.aptlistfile}} - self.apt_src_keyid_real(cfg, EXPECTEDKEY) + self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) def test_apt_v3_src_longkeyid_ks_real(self): """test_apt_v3_src_longkeyid_ks_real Test long keyid from other ks""" keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" cfg = {self.aptlistfile: {'keyid': keyid, + 'keyfile': self.aptlistfile, 'keyserver': 'keys.gnupg.net'}} self.apt_src_keyid_real(cfg, EXPECTEDKEY) @@ -393,6 +417,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): keyid = "03683F77" params = self._get_default_params() cfg = {self.aptlistfile: {'keyid': keyid, + 'keyfile': self.aptlistfile, 'keyserver': 'test.random.com'}} # in some test environments only *.ubuntu.com is reachable @@ -405,7 +430,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): aa_repo_match=self.matcher) mockgetkey.assert_called_with('03683F77', 'test.random.com') - mockadd.assert_called_with('fakekey', TARGET) + mockadd.assert_called_with('fakekey', self.aptlistfile, hardened=False) # filename should be ignored on key only self.assertFalse(os.path.isfile(self.aptlistfile)) @@ -1002,10 +1027,12 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""") 'primary': [ {'arches': [arch], 'uri': 'http://test.ubuntu.com/', + 'filename': 'primary', 'key': 'fakekey_primary'}], 'security': [ {'arches': [arch], 'uri': 'http://testsec.ubuntu.com/', + 'filename': 'security', 'key': 'fakekey_security'}] } @@ -1013,8 +1040,8 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""") 'add_apt_key_raw') as mockadd: cc_apt_configure.add_mirror_keys(cfg, TARGET) calls = [ - mock.call('fakekey_primary', TARGET), - mock.call('fakekey_security', TARGET), + mock.call('fakekey_primary', 'primary', hardened=False), + mock.call('fakekey_security', 'security', hardened=False), ] mockadd.assert_has_calls(calls, any_order=True) -- cgit v1.2.3 From 773765346ba543987aa64a1119fa760f0b1cbb6f Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Mon, 1 Nov 2021 14:43:05 -0600 Subject: Add LXD datasource (#1040) Add DataSourceLXD which knows how to talk to the dev-lxd socket to obtain all instance metadata API: https://linuxcontainers.org/lxd/docs/master/dev-lxd. This first branch is to deliver feature parity with the existing NoCloud datasource which is currently used to initialize LXC instances on first boot. Introduce a SocketConnectionPool and LXDSocketAdapter to support performing HTTP GETs on the following routes which are surfaced by the LXD host to all containers: http://unix.socket/1.0/meta-data http://unix.socket/1.0/config/user.user-data http://unix.socket/1.0/config/user.network-config http://unix.socket/1.0/config/user.vendor-data These 4 routes minimally replace the static content provided in the following nocloud-net seed files: /var/lib/cloud/nocloud-net/{meta-data,vendor-data,user-data,network-config} The intent of this commit is to set a foundation for LXD socket communication that will allow us to build network hot-plug features by eventually consuming LXD's websocket upgrade route 1.0/events to react to network, meta-data and user-data config changes over time.
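As an illustration of what the dev-lxd socket interaction looks like, the same meta-data route can be queried with nothing more than the Python standard library. The sketch below is purely illustrative and is not part of this commit (the commit builds the equivalent on top of requests/urllib3 via SocketConnectionPool and LXDSocketAdapter); the LXDUnixConnection class name is invented for the example:

    import http.client
    import socket

    class LXDUnixConnection(http.client.HTTPConnection):
        """HTTPConnection variant that connects to the LXD guest socket."""

        def __init__(self, socket_path="/dev/lxd/sock"):
            super().__init__("localhost")
            self.socket_path = socket_path

        def connect(self):
            # Replace the usual TCP connection with an AF_UNIX stream.
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self.socket_path)

    conn = LXDUnixConnection()
    conn.request("GET", "/1.0/meta-data")
    print(conn.getresponse().read().decode())

Run inside an LXD instance with security.devlxd=true (the default), this prints the same meta-data payload the new datasource crawls.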
In the event that no custom network-config is provided, default to the same network-config definition provided by LXD to the NoCloud network-config seed file. Supplemental features above NoCloud datasource: surface all custom instance data config keys via cloud-init query ds which aids in discoverability of features/tags/labels as well as conditional #cloud-config jinja template operations based on custom config options. TBD: better cloud-init query support for dot-delimited keys --- cloudinit/settings.py | 1 + cloudinit/sources/DataSourceLXD.py | 358 +++++++++++++++++++++ cloudinit/sources/tests/test_lxd.py | 185 +++++++++++ doc/rtd/topics/datasources.rst | 1 + doc/rtd/topics/datasources/lxd.rst | 65 ++++ tests/integration_tests/clouds.py | 9 +- .../datasources/test_lxd_discovery.py | 80 +++++ tests/unittests/test_datasource/test_common.py | 2 + tools/ds-identify | 8 +- 9 files changed, 707 insertions(+), 2 deletions(-) create mode 100644 cloudinit/sources/DataSourceLXD.py create mode 100644 cloudinit/sources/tests/test_lxd.py create mode 100644 doc/rtd/topics/datasources/lxd.rst create mode 100644 tests/integration_tests/datasources/test_lxd_discovery.py (limited to 'tests/integration_tests') diff --git a/cloudinit/settings.py b/cloudinit/settings.py index f69005ea..43c8fa24 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -21,6 +21,7 @@ CFG_BUILTIN = { 'datasource_list': [ 'NoCloud', 'ConfigDrive', + 'LXD', 'OpenNebula', 'DigitalOcean', 'Azure', diff --git a/cloudinit/sources/DataSourceLXD.py b/cloudinit/sources/DataSourceLXD.py new file mode 100644 index 00000000..732b32ff --- /dev/null +++ b/cloudinit/sources/DataSourceLXD.py @@ -0,0 +1,358 @@ + +"""Datasource for LXD, reads /dev/lxd/sock representation of instance data. + +Notes: + * This datasource replaces previous NoCloud datasource for LXD. + * Older LXD images may not have updates for cloud-init so NoCloud may + still be detected on those images. + * Detect LXD datasource when /dev/lxd/sock is an active socket file. + * Info on dev-lxd API: https://linuxcontainers.org/lxd/docs/master/dev-lxd + * TODO( Hotplug support using websockets API 1.0/events ) +""" + +import os + +import requests +from requests.adapters import HTTPAdapter + +# pylint fails to import the two modules below. +# These are imported via requests.packages rather than urllib3 because: +# a.) the provider of the requests package should ensure that urllib3 +# contained in it is consistent/correct. +# b.)
cloud-init does not specifically have a dependency on urllib3 +# +# For future reference, see: +# https://github.com/kennethreitz/requests/pull/2375 +# https://github.com/requests/requests/issues/4104 +# pylint: disable=E0401 +from requests.packages.urllib3.connection import HTTPConnection +from requests.packages.urllib3.connectionpool import HTTPConnectionPool + +import socket +import stat + +from cloudinit import log as logging +from cloudinit import sources, subp, util + +LOG = logging.getLogger(__name__) + +LXD_SOCKET_PATH = "/dev/lxd/sock" +LXD_SOCKET_API_VERSION = "1.0" + +# Config key mappings to alias as top-level instance data keys +CONFIG_KEY_ALIASES = { + "user.user-data": "user-data", + "user.network-config": "network-config", + "user.network_mode": "network_mode", + "user.vendor-data": "vendor-data" +} + + +def generate_fallback_network_config(network_mode: str = "") -> dict: + """Return network config V1 dict representing instance network config.""" + network_v1 = { + "version": 1, + "config": [ + { + "type": "physical", "name": "eth0", + "subnets": [{"type": "dhcp", "control": "auto"}] + } + ] + } + if subp.which("systemd-detect-virt"): + try: + virt_type, _ = subp.subp(['systemd-detect-virt']) + except subp.ProcessExecutionError as err: + LOG.warning( + "Unable to run systemd-detect-virt: %s." + " Rendering default network config.", err + ) + return network_v1 + if virt_type.strip() == "kvm": # instance.type VIRTUAL-MACHINE + arch = util.system_info()["uname"][4] + if arch == "ppc64le": + network_v1["config"][0]["name"] = "enp0s5" + elif arch == "s390x": + network_v1["config"][0]["name"] = "enc9" + else: + network_v1["config"][0]["name"] = "enp5s0" + if network_mode == "link-local": + network_v1["config"][0]["subnets"][0]["control"] = "manual" + elif network_mode not in ("", "dhcp"): + LOG.warning( + "Ignoring unexpected value user.network_mode: %s", network_mode + ) + return network_v1 + + +class SocketHTTPConnection(HTTPConnection): + def __init__(self, socket_path): + super().__init__('localhost') + self.socket_path = socket_path + + def connect(self): + self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + self.sock.connect(self.socket_path) + + +class SocketConnectionPool(HTTPConnectionPool): + def __init__(self, socket_path): + self.socket_path = socket_path + super().__init__('localhost') + + def _new_conn(self): + return SocketHTTPConnection(self.socket_path) + + +class LXDSocketAdapter(HTTPAdapter): + def get_connection(self, url, proxies=None): + return SocketConnectionPool(LXD_SOCKET_PATH) + + +def _maybe_remove_top_network(cfg): + """If network-config contains top level 'network' key, then remove it. + + Some providers of network configuration may provide a top level + 'network' key (LP: #1798117) even though it is not necessary. + + Be friendly and remove it if it really seems so. 
+ + Return the original value if no change or the updated value if changed.""" + if "network" not in cfg: + return cfg + network_val = cfg["network"] + bmsg = 'Top level network key in network-config %s: %s' + if not isinstance(network_val, dict): + LOG.debug(bmsg, "was not a dict", cfg) + return cfg + if len(list(cfg.keys())) != 1: + LOG.debug(bmsg, "had multiple top level keys", cfg) + return cfg + if network_val.get('config') == "disabled": + LOG.debug(bmsg, "was config/disabled", cfg) + elif not all(('config' in network_val, 'version' in network_val)): + LOG.debug(bmsg, "but missing 'config' or 'version'", cfg) + return cfg + LOG.debug(bmsg, "fixed by removing shifting network.", cfg) + return network_val + + +def _raw_instance_data_to_dict(metadata_type: str, metadata_value) -> dict: + """Convert raw instance data from str, bytes, YAML to dict + + :param metadata_type: string, one of: meta-data, vendor-data, user-data + network-config + + :param metadata_value: str, bytes or dict representing instance-data. + + :raises: InvalidMetaDataException on invalid instance-data content. + """ + if isinstance(metadata_value, dict): + return metadata_value + if metadata_value is None: + return {} + try: + parsed_metadata = util.load_yaml(metadata_value) + except AttributeError as exc: # not str or bytes + raise sources.InvalidMetaDataException( + "Invalid {md_type}. Expected str, bytes or dict but found:" + " {value}".format(md_type=metadata_type, value=metadata_value) + ) from exc + if parsed_metadata is None: + raise sources.InvalidMetaDataException( + "Invalid {md_type} format. Expected YAML but found:" + " {value}".format(md_type=metadata_type, value=metadata_value) + ) + return parsed_metadata + + +class DataSourceLXD(sources.DataSource): + + dsname = 'LXD' + + _network_config = sources.UNSET + _crawled_metadata = sources.UNSET + + sensitive_metadata_keys = ( + 'merged_cfg', 'user.meta-data', 'user.vendor-data', 'user.user-data', + ) + + def _is_platform_viable(self) -> bool: + """Check platform environment to report if this datasource may run.""" + return is_platform_viable() + + def _get_data(self) -> bool: + """Crawl LXD socket API instance data and return True on success""" + if not self._is_platform_viable(): + LOG.debug("Not an LXD datasource: No LXD socket found.") + return False + + self._crawled_metadata = util.log_time( + logfunc=LOG.debug, msg='Crawl of metadata service', + func=read_metadata) + self.metadata = _raw_instance_data_to_dict( + "meta-data", self._crawled_metadata.get("meta-data") + ) + if LXD_SOCKET_API_VERSION in self._crawled_metadata: + config = self._crawled_metadata[LXD_SOCKET_API_VERSION].get( + "config", {} + ) + user_metadata = config.get("user.meta-data", {}) + if user_metadata: + user_metadata = _raw_instance_data_to_dict( + "user.meta-data", user_metadata + ) + if not isinstance(self.metadata, dict): + self.metadata = util.mergemanydict( + [util.load_yaml(self.metadata), user_metadata] + ) + if "user-data" in self._crawled_metadata: + self.userdata_raw = self._crawled_metadata["user-data"] + if "network-config" in self._crawled_metadata: + self._network_config = _maybe_remove_top_network( + _raw_instance_data_to_dict( + "network-config", self._crawled_metadata["network-config"] + ) + ) + if "vendor-data" in self._crawled_metadata: + self.vendordata_raw = self._crawled_metadata["vendor-data"] + return True + + def _get_subplatform(self) -> str: + """Return subplatform details for this datasource""" + return "LXD socket API v. {ver} ({socket})".format( + ver=LXD_SOCKET_API_VERSION, socket=LXD_SOCKET_PATH + ) + + def check_instance_id(self, sys_cfg) -> bool: + """Return True if instance_id unchanged.""" + response = read_metadata(metadata_only=True) + md = response.get("meta-data", {}) + if not isinstance(md, dict): + md = util.load_yaml(md) + return md.get("instance-id") == self.metadata.get("instance-id") + + @property + def network_config(self) -> dict: + """Network config read from LXD socket config/user.network-config. + + If none is present, then we generate fallback configuration. + """ + if self._network_config == sources.UNSET: + if self._crawled_metadata.get("network-config"): + self._network_config = self._crawled_metadata.get( + "network-config" + ) + else: + network_mode = self._crawled_metadata.get("network_mode", "") + self._network_config = generate_fallback_network_config( + network_mode + ) + return self._network_config + + +def is_platform_viable() -> bool: + """Return True when this platform appears to have an LXD socket.""" + if os.path.exists(LXD_SOCKET_PATH): + return stat.S_ISSOCK(os.lstat(LXD_SOCKET_PATH).st_mode) + return False + + +def read_metadata( + api_version: str = LXD_SOCKET_API_VERSION, metadata_only: bool = False +) -> dict: + """Fetch metadata from the /dev/lxd/sock routes. + + Perform a number of HTTP GETs on known routes on the devlxd socket API. + Minimally all containers must respond to http://lxd/1.0/meta-data when + the LXD configuration setting `security.devlxd` is true. + + When `security.devlxd` is false, no /dev/lxd/sock file exists. This + datasource will return False from `is_platform_viable` in that case. + + Perform a GET of `/config` and walk all `user.*` + configuration keys, storing all keys and values under a dict key + LXD_SOCKET_API_VERSION: config {...}. + + In the presence of the following optional user config keys, + create top level aliases: + - user.user-data -> user-data + - user.vendor-data -> vendor-data + - user.network-config -> network-config + + :return: + A dict with the following mandatory key: meta-data. + Optional keys: user-data, vendor-data, network-config, network_mode + + Below is a dict representation of all raw + configuration keys and values provided to the container surfaced by + the socket under the /1.0/config/ route. + """ + md = {} + lxd_url = "http://lxd" + version_url = lxd_url + "/" + api_version + "/" + with requests.Session() as session: + session.mount(version_url, LXDSocketAdapter()) + # Raw meta-data as text + md_route = "{route}/meta-data".format(route=version_url) + response = session.get(md_route) + LOG.debug("[GET] [HTTP:%d] %s", response.status_code, md_route) + if not response.ok: + raise sources.InvalidMetaDataException( + "Invalid HTTP response [{code}] from {route}: {resp}".format( + code=response.status_code, + route=md_route, + resp=response.text + ) + ) + + md["meta-data"] = response.text + if metadata_only: + return md # Skip network-data, vendor-data, user-data + + config_url = version_url + "config" + # Represent all advertised/available config routes under + # the dict path {LXD_SOCKET_API_VERSION: {config: {...}}.
+ LOG.debug("[GET] %s", config_url) + config_routes = session.get(config_url).json() + md[LXD_SOCKET_API_VERSION] = { + "config": {}, + "meta-data": md["meta-data"] + } + for config_route in config_routes: + url = "http://lxd{route}".format(route=config_route) + LOG.debug("[GET] %s", url) + response = session.get(url) + if response.ok: + cfg_key = config_route.rpartition("/")[-1] + # Leave raw data values/format unchanged to represent it in + # instance-data.json for cloud-init query or jinja template + # use. + md[LXD_SOCKET_API_VERSION]["config"][cfg_key] = response.text + # Promote common CONFIG_KEY_ALIASES to top-level keys. + if cfg_key in CONFIG_KEY_ALIASES: + md[CONFIG_KEY_ALIASES[cfg_key]] = response.text + else: + LOG.debug("Skipping %s on invalid response", url) + return md + + +# Used to match classes to dependencies +datasources = [ + (DataSourceLXD, (sources.DEP_FILESYSTEM,)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) + + +if __name__ == "__main__": + import argparse + + description = """Query LXD metadata and emit a JSON object.""" + parser = argparse.ArgumentParser(description=description) + parser.parse_args() + print(util.json_dumps(read_metadata())) +# vi: ts=4 expandtab diff --git a/cloudinit/sources/tests/test_lxd.py b/cloudinit/sources/tests/test_lxd.py new file mode 100644 index 00000000..c2027616 --- /dev/null +++ b/cloudinit/sources/tests/test_lxd.py @@ -0,0 +1,185 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from collections import namedtuple +from copy import deepcopy +import stat +from unittest import mock +import yaml + +import pytest + +from cloudinit.sources import DataSourceLXD as lxd, UNSET +DS_PATH = "cloudinit.sources.DataSourceLXD." + + +LStatResponse = namedtuple("lstatresponse", "st_mode") + + +NETWORK_V1 = { + "version": 1, + "config": [ + { + "type": "physical", "name": "eth0", + "subnets": [{"type": "dhcp", "control": "auto"}] + } + ] +} +NETWORK_V1_MANUAL = deepcopy(NETWORK_V1) +NETWORK_V1_MANUAL["config"][0]["subnets"][0]["control"] = "manual" + + +def _add_network_v1_device(devname) -> dict: + """Helper to inject device name into default network v1 config.""" + network_cfg = deepcopy(NETWORK_V1) + network_cfg["config"][0]["name"] = devname + return network_cfg + + +LXD_V1_METADATA = { + "meta-data": "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n", + "network-config": NETWORK_V1, + "user-data": "#cloud-config\npackages: [sl]\n", + "vendor-data": "#cloud-config\nruncmd: ['echo vendor-data']\n", + "1.0": { + "meta-data": "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n", + "config": { + "user.user-data": + "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n", + "user.vendor-data": + "#cloud-config\nruncmd: ['echo vendor-data']\n", + "user.network-config": yaml.safe_dump(NETWORK_V1), + } + } +} + + +@pytest.fixture +def lxd_metadata(): + return LXD_V1_METADATA + + +@pytest.yield_fixture +def lxd_ds(request, paths, lxd_metadata): + """ + Return an instantiated DataSourceLXD. 
+ + This also performs the mocking required for the default test case: + * ``is_platform_viable`` returns True, + * ``read_metadata`` returns ``LXD_V1_METADATA`` + + (This uses the paths fixture for the required helpers.Paths object) + """ + with mock.patch(DS_PATH + "is_platform_viable", return_value=True): + with mock.patch(DS_PATH + "read_metadata", return_value=lxd_metadata): + yield lxd.DataSourceLXD( + sys_cfg={}, distro=mock.Mock(), paths=paths + ) + + +class TestGenerateFallbackNetworkConfig: + + @pytest.mark.parametrize( + "uname_machine,systemd_detect_virt,network_mode,expected", ( + # None for systemd_detect_virt returns None from which + ({}, None, "", NETWORK_V1), + ({}, None, "dhcp", NETWORK_V1), + # invalid network_mode logs warning + ({}, None, "bogus", NETWORK_V1), + ({}, None, "link-local", NETWORK_V1_MANUAL), + ("anything", "lxc\n", "", NETWORK_V1), + # `uname -m` on kvm determines devname + ("x86_64", "kvm\n", "", _add_network_v1_device("enp5s0")), + ("ppc64le", "kvm\n", "", _add_network_v1_device("enp0s5")), + ("s390x", "kvm\n", "", _add_network_v1_device("enc9")) + ) + ) + @mock.patch(DS_PATH + "util.system_info") + @mock.patch(DS_PATH + "subp.subp") + @mock.patch(DS_PATH + "subp.which") + def test_net_v2_based_on_network_mode_virt_type_and_uname_machine( + self, + m_which, + m_subp, + m_system_info, + uname_machine, + systemd_detect_virt, + network_mode, + expected, + caplog + ): + """Return network config v2 based on uname -m, systemd-detect-virt. + + LXC config network_mode of "link-local" will determine whether to set + "activation-mode: manual", leaving the interface down. + """ + if systemd_detect_virt is None: + m_which.return_value = None + m_system_info.return_value = {"uname": ["", "", "", "", uname_machine]} + m_subp.return_value = (systemd_detect_virt, "") + assert expected == lxd.generate_fallback_network_config( + network_mode=network_mode + ) + if systemd_detect_virt is None: + assert 0 == m_subp.call_count + assert 0 == m_system_info.call_count + else: + assert [ + mock.call(["systemd-detect-virt"]) + ] == m_subp.call_args_list + if systemd_detect_virt != "kvm\n": + assert 0 == m_system_info.call_count + else: + assert 1 == m_system_info.call_count + if network_mode not in ("dhcp", "", "link-local"): + assert "Ignoring unexpected value user.network_mode: {}".format( + network_mode + ) in caplog.text + + +class TestDataSourceLXD: + def test_platform_info(self, lxd_ds): + assert "LXD" == lxd_ds.dsname + assert "lxd" == lxd_ds.cloud_name + assert "lxd" == lxd_ds.platform_type + + def test_subplatform(self, lxd_ds): + assert "LXD socket API v. 
1.0 (/dev/lxd/sock)" == lxd_ds.subplatform + + def test__get_data(self, lxd_ds): + """get_data calls read_metadata, setting appropiate instance attrs.""" + assert UNSET == lxd_ds._crawled_metadata + assert UNSET == lxd_ds._network_config + assert None is lxd_ds.userdata_raw + assert True is lxd_ds._get_data() + assert LXD_V1_METADATA == lxd_ds._crawled_metadata + # network-config is dumped from YAML + assert NETWORK_V1 == lxd_ds._network_config + # Any user-data and vendor-data are saved as raw + assert LXD_V1_METADATA["user-data"] == lxd_ds.userdata_raw + assert LXD_V1_METADATA["vendor-data"] == lxd_ds.vendordata_raw + + +class TestIsPlatformViable: + @pytest.mark.parametrize( + "exists,lstat_mode,expected", ( + (False, None, False), + (True, stat.S_IFREG, False), + (True, stat.S_IFSOCK, True), + ) + ) + @mock.patch(DS_PATH + "os.lstat") + @mock.patch(DS_PATH + "os.path.exists") + def test_expected_viable( + self, m_exists, m_lstat, exists, lstat_mode, expected + ): + """Return True only when LXD_SOCKET_PATH exists and is a socket.""" + m_exists.return_value = exists + m_lstat.return_value = LStatResponse(lstat_mode) + assert expected is lxd.is_platform_viable() + m_exists.assert_has_calls([mock.call(lxd.LXD_SOCKET_PATH)]) + if exists: + m_lstat.assert_has_calls([mock.call(lxd.LXD_SOCKET_PATH)]) + else: + assert 0 == m_lstat.call_count + +# vi: ts=4 expandtab diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index f5aee1c2..0ebc0f32 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -39,6 +39,7 @@ The following is a list of documents for each supported datasource: datasources/exoscale.rst datasources/fallback.rst datasources/gce.rst + datasources/lxd.rst datasources/maas.rst datasources/nocloud.rst datasources/opennebula.rst diff --git a/doc/rtd/topics/datasources/lxd.rst b/doc/rtd/topics/datasources/lxd.rst new file mode 100644 index 00000000..3991a4dd --- /dev/null +++ b/doc/rtd/topics/datasources/lxd.rst @@ -0,0 +1,65 @@ +.. _datasource_lxd: + +LXD +=== + +The data source ``LXD`` allows the user to provide custom user-data, +vendor-data, meta-data and network-config to the instance without running +a network service (or even without having a network at all). This datasource +performs HTTP GETs against the `LXD socket device`_ which is provided to each +running LXD container and VM as ``/dev/lxd/sock`` and represents all +instance-metadata as versioned HTTP routes such as: + + - 1.0/meta-data + - 1.0/config/user.meta-data + - 1.0/config/user.vendor-data + - 1.0/config/user.user-data + - 1.0/config/user. + +The LXD socket device ``/dev/lxd/sock`` is only present on containers and VMs +when the instance configuration has ``security.devlxd=true`` (default). +Disabling ``security.devlxd`` configuration setting at initial launch will +ensure that cloud-init uses the :ref:`datasource_nocloud` datasource. +Disabling ``security.devlxd`` ove the life of the container will result in +warnings from cloud-init and cloud-init will keep the originally detected LXD +datasource. + +The LXD datasource provides cloud-init the opportunity to react to meta-data, +vendor-data, user-data and network-config changes and render the updated +configuration across a system reboot. + +One can manipulate what meta-data, vendor-data or user-data is provided to +the launched container using the LXD profiles or +``lxc launch ... 
-c =""`` at initial container launch using one of +the following keys: + + - user.meta-data: YAML metadata which will be appended to base meta-data + - user.vendor-data: YAML which overrides any meta-data values + - user.network-config: YAML representing either :ref:`network_config_v1` or + :ref:`network_config_v2` format + - user.user-data: YAML which takes preference and overrides both meta-data + and vendor-data values + - user.any-key: Custom user configuration key and value pairs can be passed to + cloud-init. Those keys/values will be present in instance-data which can be + used by both `#template: jinja` #cloud-config templates and + the `cloud-init query` command. + + +By default, network configuration from this datasource will be: + +.. code:: yaml + + version: 1 + config: + - type: physical + name: eth0 + subnets: + - type: dhcp + control: auto + +This datasource is intended to replace :ref:`datasource_nocloud` +datasource for LXD instances with a more direct support for LXD APIs instead +of static NoCloud seed files. + +.. _LXD socket device: https://linuxcontainers.org/lxd/docs/master/dev-lxd +.. vi: textwidth=78 diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index 32fdc91e..dee2adff 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -1,7 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. from abc import ABC, abstractmethod +import datetime import logging import os.path +import random +import string from uuid import UUID from pycloudlib import ( @@ -309,8 +312,12 @@ class _LxdIntegrationCloud(IntegrationCloud): except KeyError: profile_list = self._get_or_set_profile_list(release) + prefix = datetime.datetime.utcnow().strftime("cloudinit-%m%d-%H%M%S") + default_name = prefix + "".join( + random.choices(string.ascii_lowercase + string.digits, k=8) + ) pycloudlib_instance = self.cloud_instance.init( - launch_kwargs.pop('name', None), + launch_kwargs.pop('name', default_name), release, profile_list=profile_list, **launch_kwargs diff --git a/tests/integration_tests/datasources/test_lxd_discovery.py b/tests/integration_tests/datasources/test_lxd_discovery.py new file mode 100644 index 00000000..f7d8bfc3 --- /dev/null +++ b/tests/integration_tests/datasources/test_lxd_discovery.py @@ -0,0 +1,80 @@ +import json +import pytest +import yaml + +from tests.integration_tests.clouds import IntegrationCloud +from tests.integration_tests.conftest import get_validated_source +from tests.integration_tests.util import verify_clean_log + + +def _setup_custom_image(session_cloud: IntegrationCloud): + """Like `setup_image` in conftest.py, but with customized content.""" + source = get_validated_source(session_cloud) + if not source.installs_new_version(): + return + client = session_cloud.launch() + + # Insert our "detect_lxd_ds" file here + client.write_to_file( + '/etc/cloud/cloud.cfg.d/99-detect-lxd.cfg', + 'datasource_list: [LXD]\n', + ) + + client.execute('rm -f /etc/netplan/50-cloud-init.yaml') + + client.install_new_cloud_init(source) + # Even if we're keeping instances, we don't want to keep this + # one around as it was just for image creation + client.destroy() + + +# This test should be able to work on any cloud whose datasource specifies +# a NETWORK dependency +@pytest.mark.lxd_container +@pytest.mark.lxd_vm +@pytest.mark.ubuntu # Because netplan +def test_lxd_datasource_discovery(session_cloud: IntegrationCloud): + """Test that DataSourceLXD is detected instead of NoCloud.""" 
+ _setup_custom_image(session_cloud) + nic_dev = "eth0" + if session_cloud.settings.PLATFORM == "lxd_vm": + nic_dev = "enp5s0" + + with session_cloud.launch() as client: + result = client.execute('cloud-init status --long') + if not result.ok: + raise AssertionError('cloud-init failed:\n%s', + result.stderr) + if "DataSourceLXD" not in result.stdout: + raise AssertionError( + 'cloud-init did not discover DataSourceLXD', result.stdout + ) + netplan_yaml = client.execute('cat /etc/netplan/50-cloud-init.yaml') + netplan_cfg = yaml.safe_load(netplan_yaml) + assert { + 'network': {'ethernets': {nic_dev: {'dhcp4': True}}, 'version': 2} + } == netplan_cfg + log = client.read_from_file('/var/log/cloud-init.log') + verify_clean_log(log) + result = client.execute('cloud-id') + if "lxd" != result.stdout: + raise AssertionError( + "cloud-id didn't report lxd. Result: %s", result.stdout + ) + # Validate config instance data represented + data = json.loads(client.read_from_file( + '/run/cloud-init/instance-data.json') + ) + v1 = data["v1"] + ds_cfg = data["ds"] + assert "lxd" == v1["platform"] + assert "LXD socket API v. 1.0 (/dev/lxd/sock)" == v1["subplatform"] + ds_cfg = json.loads(client.execute('cloud-init query ds').stdout) + assert ["config", "meta_data"] == sorted(list(ds_cfg["1.0"].keys())) + assert ["user.meta_data"] == list(ds_cfg["1.0"]["config"].keys()) + assert {"public-keys": v1["public_ssh_keys"][0]} == ( + yaml.safe_load(ds_cfg["1.0"]["config"]["user.meta_data"]) + ) + assert ( + "#cloud-config\ninstance-id" in ds_cfg["1.0"]["meta_data"] + ) diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 00f0a78c..17d53160 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -18,6 +18,7 @@ from cloudinit.sources import ( DataSourceGCE as GCE, DataSourceHetzner as Hetzner, DataSourceIBMCloud as IBMCloud, + DataSourceLXD as LXD, DataSourceMAAS as MAAS, DataSourceNoCloud as NoCloud, DataSourceOpenNebula as OpenNebula, @@ -42,6 +43,7 @@ DEFAULT_LOCAL = [ DigitalOcean.DataSourceDigitalOcean, Hetzner.DataSourceHetzner, IBMCloud.DataSourceIBMCloud, + LXD.DataSourceLXD, NoCloud.DataSourceNoCloud, OpenNebula.DataSourceOpenNebula, Oracle.DataSourceOracle, diff --git a/tools/ds-identify b/tools/ds-identify index c2f710e9..30d4b0f6 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -123,7 +123,7 @@ DS_MAYBE=2 DI_DSNAME="" # this has to match the builtin list in cloud-init, it is what will # be searched if there is no setting found in config. 
-DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ +DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud LXD AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean Vultr AliYun Ec2 GCE OpenNebula OpenStack \ OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud VMware" DI_DSLIST="" @@ -802,6 +802,12 @@ dscheck_MAAS() { return ${DS_NOT_FOUND} } +# LXD datasource requires active /dev/lxd/sock +# https://linuxcontainers.org/lxd/docs/master/dev-lxd +dscheck_LXD() { + [ -S /dev/lxd/sock ] && return ${DS_FOUND} || return ${DS_NOT_FOUND} +} + dscheck_NoCloud() { local fslabel="cidata CIDATA" d="" case " ${DI_KERNEL_CMDLINE} " in -- cgit v1.2.3 From d54e23bf61bb61af9aef3b30f41084d93961b7e3 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 2 Nov 2021 11:11:26 -0500 Subject: testing: Remove calls to 'install_new_cloud_init' (#1092) In our integration tests, a few tests were modifying the environment and then calling 'install_new_cloud_init'. This is problematic because it updates the environment for all future tests. Other instances of 'install_new_cloud_init' aren't problematic because they aren't modifying the underlying environment. --- .../datasources/test_lxd_discovery.py | 100 +++++++++------------ .../datasources/test_network_dependency.py | 33 +++---- 2 files changed, 52 insertions(+), 81 deletions(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/datasources/test_lxd_discovery.py b/tests/integration_tests/datasources/test_lxd_discovery.py index f7d8bfc3..be76e179 100644 --- a/tests/integration_tests/datasources/test_lxd_discovery.py +++ b/tests/integration_tests/datasources/test_lxd_discovery.py @@ -2,30 +2,17 @@ import json import pytest import yaml -from tests.integration_tests.clouds import IntegrationCloud -from tests.integration_tests.conftest import get_validated_source +from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.util import verify_clean_log -def _setup_custom_image(session_cloud: IntegrationCloud): - """Like `setup_image` in conftest.py, but with customized content.""" - source = get_validated_source(session_cloud) - if not source.installs_new_version(): - return - client = session_cloud.launch() - - # Insert our "detect_lxd_ds" file here +def _customize_envionment(client: IntegrationInstance): client.write_to_file( '/etc/cloud/cloud.cfg.d/99-detect-lxd.cfg', 'datasource_list: [LXD]\n', ) - - client.execute('rm -f /etc/netplan/50-cloud-init.yaml') - - client.install_new_cloud_init(source) - # Even if we're keeping instances, we don't want to keep this - # one around as it was just for image creation - client.destroy() + client.execute('cloud-init clean --logs') + client.restart() # This test should be able to work on any cloud whose datasource specifies @@ -33,48 +20,43 @@ def _setup_custom_image(session_cloud: IntegrationCloud): @pytest.mark.lxd_container @pytest.mark.lxd_vm @pytest.mark.ubuntu # Because netplan -def test_lxd_datasource_discovery(session_cloud: IntegrationCloud): +def test_lxd_datasource_discovery(client: IntegrationInstance): """Test that DataSourceLXD is detected instead of NoCloud.""" - _setup_custom_image(session_cloud) - nic_dev = "eth0" - if session_cloud.settings.PLATFORM == "lxd_vm": - nic_dev = "enp5s0" - - with session_cloud.launch() as client: - result = client.execute('cloud-init status --long') - if not result.ok: - raise AssertionError('cloud-init failed:\n%s', - result.stderr) - if "DataSourceLXD" not in result.stdout: - raise 
AssertionError( - 'cloud-init did not discover DataSourceLXD', result.stdout - ) - netplan_yaml = client.execute('cat /etc/netplan/50-cloud-init.yaml') - netplan_cfg = yaml.safe_load(netplan_yaml) - assert { - 'network': {'ethernets': {nic_dev: {'dhcp4': True}}, 'version': 2} - } == netplan_cfg - log = client.read_from_file('/var/log/cloud-init.log') - verify_clean_log(log) - result = client.execute('cloud-id') - if "lxd" != result.stdout: - raise AssertionError( - "cloud-id didn't report lxd. Result: %s", result.stdout - ) - # Validate config instance data represented - data = json.loads(client.read_from_file( - '/run/cloud-init/instance-data.json') + _customize_envionment(client) + nic_dev = "enp5s0" if client.settings.PLATFORM == "lxd_vm" else "eth0" + result = client.execute('cloud-init status --long') + if not result.ok: + raise AssertionError('cloud-init failed:\n%s', result.stderr) + if "DataSourceLXD" not in result.stdout: + raise AssertionError( + 'cloud-init did not discover DataSourceLXD', result.stdout ) - v1 = data["v1"] - ds_cfg = data["ds"] - assert "lxd" == v1["platform"] - assert "LXD socket API v. 1.0 (/dev/lxd/sock)" == v1["subplatform"] - ds_cfg = json.loads(client.execute('cloud-init query ds').stdout) - assert ["config", "meta_data"] == sorted(list(ds_cfg["1.0"].keys())) - assert ["user.meta_data"] == list(ds_cfg["1.0"]["config"].keys()) - assert {"public-keys": v1["public_ssh_keys"][0]} == ( - yaml.safe_load(ds_cfg["1.0"]["config"]["user.meta_data"]) - ) - assert ( - "#cloud-config\ninstance-id" in ds_cfg["1.0"]["meta_data"] + netplan_yaml = client.execute('cat /etc/netplan/50-cloud-init.yaml') + netplan_cfg = yaml.safe_load(netplan_yaml) + assert { + 'network': {'ethernets': {nic_dev: {'dhcp4': True}}, 'version': 2} + } == netplan_cfg + log = client.read_from_file('/var/log/cloud-init.log') + verify_clean_log(log) + result = client.execute('cloud-id') + if result.stdout != "lxd": + raise AssertionError( + "cloud-id didn't report lxd. Result: %s", result.stdout ) + # Validate config instance data represented + data = json.loads(client.read_from_file( + '/run/cloud-init/instance-data.json') + ) + v1 = data["v1"] + ds_cfg = data["ds"] + assert "lxd" == v1["platform"] + assert "LXD socket API v. 
1.0 (/dev/lxd/sock)" == v1["subplatform"] + ds_cfg = json.loads(client.execute('cloud-init query ds').stdout) + assert ["config", "meta_data"] == sorted(list(ds_cfg["1.0"].keys())) + assert ["user.meta_data"] == list(ds_cfg["1.0"]["config"].keys()) + assert {"public-keys": v1["public_ssh_keys"][0]} == ( + yaml.safe_load(ds_cfg["1.0"]["config"]["user.meta_data"]) + ) + assert ( + "#cloud-config\ninstance-id" in ds_cfg["1.0"]["meta_data"] + ) diff --git a/tests/integration_tests/datasources/test_network_dependency.py b/tests/integration_tests/datasources/test_network_dependency.py index 2e5e3121..24e71f9d 100644 --- a/tests/integration_tests/datasources/test_network_dependency.py +++ b/tests/integration_tests/datasources/test_network_dependency.py @@ -1,41 +1,30 @@ import pytest -from tests.integration_tests.clouds import IntegrationCloud -from tests.integration_tests.conftest import get_validated_source +from tests.integration_tests.instances import IntegrationInstance -def _setup_custom_image(session_cloud: IntegrationCloud): - """Like `setup_image` in conftest.py, but with customized content.""" - source = get_validated_source(session_cloud) - if not source.installs_new_version(): - return - client = session_cloud.launch() - +def _customize_envionment(client: IntegrationInstance): # Insert our "disable_network_activation" file here client.write_to_file( '/etc/cloud/cloud.cfg.d/99-disable-network-activation.cfg', 'disable_network_activation: true\n', ) - - client.install_new_cloud_init(source) - # Even if we're keeping instances, we don't want to keep this - # one around as it was just for image creation - client.destroy() + client.execute('cloud-init clean --logs') + client.restart() # This test should be able to work on any cloud whose datasource specifies # a NETWORK dependency @pytest.mark.gce @pytest.mark.ubuntu # Because netplan -def test_network_activation_disabled(session_cloud: IntegrationCloud): +def test_network_activation_disabled(client: IntegrationInstance): """Test that the network is not activated during init mode.""" - _setup_custom_image(session_cloud) - with session_cloud.launch() as client: - result = client.execute('systemctl status google-guest-agent.service') - if not result.ok: - raise AssertionError('google-guest-agent is not active:\n%s', - result.stdout) - log = client.read_from_file('/var/log/cloud-init.log') + _customize_envionment(client) + result = client.execute('systemctl status google-guest-agent.service') + if not result.ok: + raise AssertionError( + 'google-guest-agent is not active:\n%s', result.stdout) + log = client.read_from_file('/var/log/cloud-init.log') assert "Running command ['netplan', 'apply']" not in log -- cgit v1.2.3 From 6421a2026f05267f2112c52417d0c4b036aeaf40 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 9 Nov 2021 08:15:38 -0600 Subject: testing: stop chef test from running on openstack (#1102) Chef tests attempt to reach out to test URLs, which will get blocked by our on our openstack installs. 
--- tests/integration_tests/bugs/test_gh868.py | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/bugs/test_gh868.py b/tests/integration_tests/bugs/test_gh868.py index 73c03451..1119d461 100644 --- a/tests/integration_tests/bugs/test_gh868.py +++ b/tests/integration_tests/bugs/test_gh868.py @@ -16,6 +16,12 @@ chef: @pytest.mark.adhoc # Can't be regularly reaching out to chef install script +@pytest.mark.ec2 +@pytest.mark.gce +@pytest.mark.azure +@pytest.mark.oci +@pytest.mark.lxd_container +@pytest.mark.lxd_vm @pytest.mark.user_data(USERDATA) def test_chef_license(client: IntegrationInstance): log = client.read_from_file('/var/log/cloud-init.log') -- cgit v1.2.3 From 22150a20d94cddf2a41073e71a7f0e44bfb4bc51 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Thu, 11 Nov 2021 12:04:21 -0700 Subject: Add convenience symlink to integration test output (#1105) Integration test runs get unique log directories at /tmp/cloud_init_test_logs/$DATE_TIME. Make /tmp/cloud_init_test_logs/last always point to the most recent integration test directory. --- tests/integration_tests/conftest.py | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index 5a543e39..68b92e4b 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -190,6 +190,15 @@ def _collect_logs(instance: IntegrationInstance, node_id: str, integration_settings.LOCAL_LOG_PATH ) / session_start_time / node_id_path log.info("Writing logs to %s", log_dir) + + # Add a symlink to the latest log output directory + last_symlink = Path(integration_settings.LOCAL_LOG_PATH) / 'last' + if os.path.islink(last_symlink): + os.unlink(last_symlink) + os.symlink( + Path(integration_settings.LOCAL_LOG_PATH) / session_start_time, + last_symlink) + if not log_dir.exists(): log_dir.mkdir(parents=True) tarball_path = log_dir / 'cloud-init.tar.gz' -- cgit v1.2.3 From 918d69a02ec9cb891a5bc987bdce783802b1a743 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 11 Nov 2021 12:30:59 -0700 Subject: tests: specialize lxd_discovery test for lxd_vm vendordata (#1106) On Bionic and Xenial, pycloudlib sets user.vendor-data config in lxd to ensure that lxd-agent is setup on those images. Adapt the lxd_discovery integration test to assert the appropriate user.vendor-data config key exists if we are on xenial or bionic. Also add assertions that /var/lib/cloud/nocloud-net/meta-data still exists in the images because we want NoCloud to be a viable fallback datasource if LXD config security.devlxd = false or LXD datasource discovery encountered an unexpected error. --- .../datasources/test_lxd_discovery.py | 28 +++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/datasources/test_lxd_discovery.py b/tests/integration_tests/datasources/test_lxd_discovery.py index be76e179..93200962 100644 --- a/tests/integration_tests/datasources/test_lxd_discovery.py +++ b/tests/integration_tests/datasources/test_lxd_discovery.py @@ -2,6 +2,7 @@ import json import pytest import yaml +from tests.integration_tests.clouds import ImageSpecification from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.util import verify_clean_log @@ -53,10 +54,35 @@ def test_lxd_datasource_discovery(client: IntegrationInstance): assert "LXD socket API v. 1.0 (/dev/lxd/sock)" == v1["subplatform"] ds_cfg = json.loads(client.execute('cloud-init query ds').stdout) assert ["config", "meta_data"] == sorted(list(ds_cfg["1.0"].keys())) - assert ["user.meta_data"] == list(ds_cfg["1.0"]["config"].keys()) + if ( + client.settings.PLATFORM == "lxd_vm" and + ImageSpecification.from_os_image().release in ("xenial", "bionic") + ): + # pycloudlib injects user.vendor_data for lxd_vm on bionic and xenial + # to start the lxd-agent. + # https://github.com/canonical/pycloudlib/blob/main/pycloudlib/\ + # lxd/defaults.py#L13-L27 + lxd_config_keys = ["user.meta_data", "user.vendor_data"] + else: + lxd_config_keys = ["user.meta_data"] + assert lxd_config_keys == list(ds_cfg["1.0"]["config"].keys()) assert {"public-keys": v1["public_ssh_keys"][0]} == ( yaml.safe_load(ds_cfg["1.0"]["config"]["user.meta_data"]) ) assert ( "#cloud-config\ninstance-id" in ds_cfg["1.0"]["meta_data"] ) + # Assert NoCloud seed data is still present in cloud image metadata + # This will start failing if we redact metadata templates from + # https://cloud-images.ubuntu.com/daily/server/jammy/current/\ + # jammy-server-cloudimg-amd64-lxd.tar.xz + nocloud_metadata = yaml.safe_load( + client.read_from_file( + "/var/lib/cloud/seed/nocloud-net/meta-data" + ) + ) + assert client.instance.name == nocloud_metadata["instance-id"] + assert ( + nocloud_metadata["instance-id"] == nocloud_metadata["local-hostname"] + ) + assert v1["public_ssh_keys"][0] == nocloud_metadata["public-keys"] -- cgit v1.2.3 From f0af9f78796ad73bfc2f8016b9bf744da2f12761 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 15 Nov 2021 16:20:10 -0600 Subject: Some miscellaneous integration test fixes (SC-606) (#1103) - Added to list of expected warnings on Oracle when opc user has no ssh key - Added retries to tests that read from syslog as that can sometimes take time to reflect in the log - Updated test_apt.py to move proxy info into its own test as that can cause failures in updating, which will immediately traceback out of the module and prevent us from running further class tests - Updated test_apt.py to use a more up-to-date ppa in the test_keyserver - Added basic rsyslog test to test_combined.py - Added basic puppet test as test_puppet.py --- tests/integration_tests/modules/test_apt.py | 51 ++++++++++++---------- tests/integration_tests/modules/test_combined.py | 18 ++++++++ .../modules/test_keys_to_console.py | 5 +++ tests/integration_tests/modules/test_puppet.py | 39 +++++++++++++++++ .../modules/test_ssh_auth_key_fingerprints.py | 5 +++ tests/integration_tests/util.py | 9 ++++ 6 files changed, 103 insertions(+), 24 deletions(-) create mode 100644 tests/integration_tests/modules/test_puppet.py (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/modules/test_apt.py b/tests/integration_tests/modules/test_apt.py index 2c388047..2de3e202 100644 --- a/tests/integration_tests/modules/test_apt.py +++ b/tests/integration_tests/modules/test_apt.py @@ -19,10 +19,6 @@ apt: Fix-Broken "true"; } } - proxy: "http://proxy.internal:3128" - http_proxy: "http://squid.internal:3128" - ftp_proxy: "ftp://squid.internal:3128" - https_proxy: "https://squid.internal:3128" primary: - arches: [default] uri: http://badarchive.ubuntu.com/ubuntu @@ -38,9 +34,9 @@ apt: deb-src $SECURITY $RELEASE-security multiverse sources: test_keyserver: - keyid: 72600DB15B8E4C8B1964B868038ACC97C660A937 - keyserver: keyserver.ubuntu.com - source: "deb http://ppa.launchpad.net/cloud-init-raharper/curtin-dev/ubuntu $RELEASE main" + keyid:
110E21D8B0E2A1F0243AF6820856F197B892ACEA + keyserver: keyserver.ubuntu.com + source: "deb http://ppa.launchpad.net/canonical-kernel-team/ppa/ubuntu $RELEASE main" test_ppa: keyid: 441614D8 keyserver: keyserver.ubuntu.com @@ -95,10 +91,8 @@ EXPECTED_REGEXES = [ r"deb-src http://badsecurity.ubuntu.com/ubuntu [a-z]+-security multiverse", ] -TEST_KEYSERVER_KEY = "7260 0DB1 5B8E 4C8B 1964 B868 038A CC97 C660 A937" - +TEST_KEYSERVER_KEY = "110E 21D8 B0E2 A1F0 243A F682 0856 F197 B892 ACEA" TEST_PPA_KEY = "3552 C902 B4DD F7BD 3842 1821 015D 28D7 4416 14D8" - TEST_KEY = "1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF" TEST_SIGNED_BY_KEY = "A2EB 2DEC 0BD7 519B 7B38 BE38 376A 290E C806 8B11" @@ -148,18 +142,6 @@ class TestApt: assert 'Assume-Yes "true";' in apt_config assert 'Fix-Broken "true";' in apt_config - def test_apt_proxy(self, class_client: IntegrationInstance): - """Test the apt proxy functionality. - - Ported from tests/cloud_tests/testcases/modules/apt_configure_proxy.py - """ - out = class_client.read_from_file( - '/etc/apt/apt.conf.d/90cloud-init-aptproxy') - assert 'Acquire::http::Proxy "http://proxy.internal:3128";' in out - assert 'Acquire::http::Proxy "http://squid.internal:3128";' in out - assert 'Acquire::ftp::Proxy "ftp://squid.internal:3128";' in out - assert 'Acquire::https::Proxy "https://squid.internal:3128";' in out - def test_ppa_source(self, class_client: IntegrationInstance): """Test the apt ppa functionality. @@ -186,7 +168,6 @@ class TestApt: "deb [signed-by=/etc/apt/cloud-init.gpg.d/test_signed_by.gpg] " "http://ppa.launchpad.net/juju/stable/ubuntu" " {} main".format(release)) - print(class_client.execute('cat /var/log/cloud-init.log')) path_contents = class_client.read_from_file( '/etc/apt/sources.list.d/test_signed_by.list') assert path_contents == source @@ -230,7 +211,7 @@ class TestApt: ) assert ( - 'http://ppa.launchpad.net/cloud-init-raharper/curtin-dev/ubuntu' + 'http://ppa.launchpad.net/canonical-kernel-team/ppa/ubuntu' ) in test_keyserver_contents assert TEST_KEYSERVER_KEY in self.get_keys(class_client) @@ -342,3 +323,25 @@ class TestDisabled: '/etc/apt/apt.conf.d/90cloud-init-pipelining' ) assert 'Acquire::http::Pipeline-Depth "0";' in conf + + +APT_PROXY_DATA = """\ +#cloud-config +apt: + proxy: "http://proxy.internal:3128" + http_proxy: "http://squid.internal:3128" + ftp_proxy: "ftp://squid.internal:3128" + https_proxy: "https://squid.internal:3128" +""" + + +@pytest.mark.ubuntu +@pytest.mark.user_data(APT_PROXY_DATA) +def test_apt_proxy(client: IntegrationInstance): + """Test the apt proxy data gets written correctly.""" + out = client.read_from_file( + '/etc/apt/apt.conf.d/90cloud-init-aptproxy') + assert 'Acquire::http::Proxy "http://proxy.internal:3128";' in out + assert 'Acquire::http::Proxy "http://squid.internal:3128";' in out + assert 'Acquire::ftp::Proxy "ftp://squid.internal:3128";' in out + assert 'Acquire::https::Proxy "https://squid.internal:3128";' in out diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py index 9cd1648a..57c02f47 100644 --- a/tests/integration_tests/modules/test_combined.py +++ b/tests/integration_tests/modules/test_combined.py @@ -33,8 +33,21 @@ locale: en_GB.UTF-8 locale_configfile: /etc/default/locale ntp: servers: ['ntp.ubuntu.com'] +rsyslog: + configs: + - "*.* @@127.0.0.1" + - filename: 0-basic-config.conf + content: | + module(load="imtcp") + input(type="imtcp" port="514") + $template RemoteLogs,"/var/tmp/rsyslog.log" + *.* ?RemoteLogs + & ~ + remotes: + 
me: "127.0.0.1" runcmd: - echo 'hello world' > /var/tmp/runcmd_output + - logger "My test log" """ @@ -102,6 +115,11 @@ class TestCombined: 'en_US.UTF-8' ], locale_gen) + def test_rsyslog(self, class_client: IntegrationInstance): + """Test rsyslog is configured correctly.""" + client = class_client + assert 'My test log' in client.read_from_file('/var/tmp/rsyslog.log') + def test_runcmd(self, class_client: IntegrationInstance): """Test runcmd works as expected""" client = class_client diff --git a/tests/integration_tests/modules/test_keys_to_console.py b/tests/integration_tests/modules/test_keys_to_console.py index 56dff9a0..39e06b55 100644 --- a/tests/integration_tests/modules/test_keys_to_console.py +++ b/tests/integration_tests/modules/test_keys_to_console.py @@ -4,6 +4,8 @@ ``tests/cloud_tests/testcases/modules/keys_to_console.yaml``.)""" import pytest +from tests.integration_tests.util import retry + BLACKLIST_USER_DATA = """\ #cloud-config ssh_fp_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256] @@ -30,6 +32,9 @@ class TestKeysToConsoleBlacklist: syslog = class_client.read_from_file("/var/log/syslog") assert "({})".format(key_type) not in syslog + # retry decorator here because it can take some time to be reflected + # in syslog + @retry(tries=30, delay=1) @pytest.mark.parametrize("key_type", ["ED25519", "RSA"]) def test_included_keys(self, class_client, key_type): syslog = class_client.read_from_file("/var/log/syslog") diff --git a/tests/integration_tests/modules/test_puppet.py b/tests/integration_tests/modules/test_puppet.py new file mode 100644 index 00000000..f40a6ca3 --- /dev/null +++ b/tests/integration_tests/modules/test_puppet.py @@ -0,0 +1,39 @@ +"""Test installation configuration of puppet module.""" +import pytest + +from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.util import verify_clean_log + +SERVICE_DATA = """\ +#cloud-config +puppet: + install: true + install_type: packages +""" + + +@pytest.mark.user_data(SERVICE_DATA) +def test_puppet_service(client: IntegrationInstance): + """Basic test that puppet gets installed and runs.""" + log = client.read_from_file('/var/log/cloud-init.log') + verify_clean_log(log) + assert client.execute('systemctl is-active puppet').ok + assert "Running command ['puppet', 'agent'" not in log + + +EXEC_DATA = """\ +#cloud-config +puppet: + install: true + install_type: packages + exec: true + exec_args: ['--noop'] +""" + + +@pytest.mark.user_data +@pytest.mark.user_data(EXEC_DATA) +def test_pupet_exec(client: IntegrationInstance): + """Basic test that puppet gets installed and runs.""" + log = client.read_from_file('/var/log/cloud-init.log') + assert "Running command ['puppet', 'agent', '--noop']" in log diff --git a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py index e1946cb1..cf14d0b0 100644 --- a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py +++ b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py @@ -12,6 +12,8 @@ import re import pytest +from tests.integration_tests.util import retry + USER_DATA_SSH_AUTHKEY_DISABLE = """\ #cloud-config @@ -38,6 +40,9 @@ class TestSshAuthkeyFingerprints: "Skipping module named ssh-authkey-fingerprints, " "logging of SSH fingerprints disabled") in cloudinit_output + # retry decorator here because it can take some time to be reflected + # in syslog + @retry(tries=30, delay=1) @pytest.mark.user_data(USER_DATA_SSH_AUTHKEY_ENABLE) 
def test_ssh_authkey_fingerprints_enable(self, client): syslog_output = client.read_from_file("/var/log/syslog") diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py index 407096cd..e40d80fe 100644 --- a/tests/integration_tests/util.py +++ b/tests/integration_tests/util.py @@ -52,6 +52,15 @@ def verify_clean_log(log): 'http://169.254.169.254/latest/meta-data/') warning_texts.append(fetch_error_text) traceback_texts.append(fetch_error_text) + # Oracle has a file in /etc/cloud/cloud.cfg.d that contains + # users: + # - default + # - name: opc + # ssh_redirect_user: true + # This can trigger a warning about opc having no public key + warning_texts.append( + 'Unable to disable SSH logins for opc given ssh_redirect_user' + ) for warning_text in warning_texts: expected_warnings += log.count(warning_text) -- cgit v1.2.3 From 8c52bb3fc530742fce50f7f1061a24f3c453ef94 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 16 Nov 2021 18:04:57 -0600 Subject: integration_test: Speed up CI run time (#1111) Move more tests into test_combined.py and remove the CI mark from module tests that aren't updated often or don't represent core functionality. --- tests/integration_tests/modules/test_apt.py | 1 - tests/integration_tests/modules/test_combined.py | 68 ++++++++++++++++++++++ .../modules/test_command_output.py | 1 - .../integration_tests/modules/test_ntp_servers.py | 3 - .../modules/test_seed_random_data.py | 30 ---------- tests/integration_tests/modules/test_snap.py | 30 ---------- .../modules/test_ssh_import_id.py | 40 ------------- tests/integration_tests/modules/test_timezone.py | 25 -------- 8 files changed, 68 insertions(+), 130 deletions(-) delete mode 100644 tests/integration_tests/modules/test_seed_random_data.py delete mode 100644 tests/integration_tests/modules/test_snap.py delete mode 100644 tests/integration_tests/modules/test_ssh_import_id.py delete mode 100644 tests/integration_tests/modules/test_timezone.py (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/modules/test_apt.py b/tests/integration_tests/modules/test_apt.py index 2de3e202..f5f6c813 100644 --- a/tests/integration_tests/modules/test_apt.py +++ b/tests/integration_tests/modules/test_apt.py @@ -97,7 +97,6 @@ TEST_KEY = "1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF" TEST_SIGNED_BY_KEY = "A2EB 2DEC 0BD7 519B 7B38 BE38 376A 290E C806 8B11" -@pytest.mark.ci @pytest.mark.ubuntu @pytest.mark.user_data(USER_DATA) class TestApt: diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py index 57c02f47..2635d41a 100644 --- a/tests/integration_tests/modules/test_combined.py +++ b/tests/integration_tests/modules/test_combined.py @@ -12,6 +12,7 @@ import re from tests.integration_tests.clouds import ImageSpecification from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.util import ( + retry, verify_clean_log, verify_ordered_items_in_text, ) @@ -33,6 +34,11 @@ locale: en_GB.UTF-8 locale_configfile: /etc/default/locale ntp: servers: ['ntp.ubuntu.com'] +package_update: true +random_seed: + data: 'MYUb34023nD:LFDK10913jk;dfnk:Df' + encoding: raw + file: /root/seed rsyslog: configs: - "*.* @@127.0.0.1" @@ -48,6 +54,14 @@ rsyslog: runcmd: - echo 'hello world' > /var/tmp/runcmd_output - logger "My test log" +snap: + squashfuse_in_container: true + commands: + - snap install hello-world +ssh_import_id: + - gh:powersj + - lp:smoser +timezone: US/Aleutian """ @@ -115,6 +129,20 @@ class 
TestCombined:
             'en_US.UTF-8'
         ], locale_gen)
 
+    def test_random_seed_data(self, class_client: IntegrationInstance):
+        """Integration test for the random seed module.
+
+        This test provides different data to be used as seed data by the
+        ``seed_random`` module. We then check that the seed data was
+        actually used.
+        """
+        client = class_client
+
+        # Only read the first 31 characters, because the rest could be
+        # binary data
+        result = client.execute("head -c 31 < /root/seed")
+        assert result.startswith("MYUb34023nD:LFDK10913jk;dfnk:Df")
+
     def test_rsyslog(self, class_client: IntegrationInstance):
         """Test rsyslog is configured correctly."""
         client = class_client
@@ -125,6 +153,46 @@ class TestCombined:
         client = class_client
         assert 'hello world' == client.read_from_file('/var/tmp/runcmd_output')
 
+    @retry(tries=30, delay=1)
+    def test_ssh_import_id(self, class_client: IntegrationInstance):
+        """Integration test for the ssh_import_id module.
+
+        This test specifies ssh keys to be imported by the ``ssh_import_id``
+        module and then checks that the ssh keys were successfully imported.
+
+        TODO:
+        * This test assumes that SSH keys will be imported into
+          /home/ubuntu; this will need modification to run on other OSes.
+        """
+        client = class_client
+        ssh_output = client.read_from_file(
+            "/home/ubuntu/.ssh/authorized_keys")
+
+        assert '# ssh-import-id gh:powersj' in ssh_output
+        assert '# ssh-import-id lp:smoser' in ssh_output
+
+    def test_snap(self, class_client: IntegrationInstance):
+        """Integration test for the snap module.
+
+        This test specifies a command to be executed by the ``snap`` module
+        and then checks that the command was executed during boot.
+        """
+        client = class_client
+        snap_output = client.execute("snap list")
+        assert "core " in snap_output
+        assert "hello-world " in snap_output
+
+    def test_timezone(self, class_client: IntegrationInstance):
+        """Integration test for the timezone module.
+
+        This test specifies a timezone to be used by the ``timezone`` module
+        and then checks that the timezone was respected during boot.
+ """ + client = class_client + timezone_output = client.execute( + 'date "+%Z" --date="Thu, 03 Nov 2016 00:47:00 -0400"') + assert timezone_output.strip() == "HDT" + def test_no_problems(self, class_client: IntegrationInstance): """Test no errors, warnings, or tracebacks""" client = class_client diff --git a/tests/integration_tests/modules/test_command_output.py b/tests/integration_tests/modules/test_command_output.py index 15033642..8429873f 100644 --- a/tests/integration_tests/modules/test_command_output.py +++ b/tests/integration_tests/modules/test_command_output.py @@ -16,7 +16,6 @@ final_message: "should be last line in cloud-init-test-output file" """ -@pytest.mark.ci @pytest.mark.user_data(USER_DATA) def test_runcmd(client: IntegrationInstance): log = client.read_from_file('/var/log/cloud-init-test-output') diff --git a/tests/integration_tests/modules/test_ntp_servers.py b/tests/integration_tests/modules/test_ntp_servers.py index 59241faa..c777a641 100644 --- a/tests/integration_tests/modules/test_ntp_servers.py +++ b/tests/integration_tests/modules/test_ntp_servers.py @@ -31,7 +31,6 @@ EXPECTED_SERVERS = yaml.safe_load(USER_DATA)["ntp"]["servers"] EXPECTED_POOLS = yaml.safe_load(USER_DATA)["ntp"]["pools"] -@pytest.mark.ci @pytest.mark.user_data(USER_DATA) class TestNtpServers: @@ -83,7 +82,6 @@ ntp: """ -@pytest.mark.ci @pytest.mark.user_data(CHRONY_DATA) def test_chrony(client: IntegrationInstance): if client.execute('test -f /etc/chrony.conf').ok: @@ -104,7 +102,6 @@ ntp: """ -@pytest.mark.ci @pytest.mark.user_data(TIMESYNCD_DATA) def test_timesyncd(client: IntegrationInstance): contents = client.read_from_file( diff --git a/tests/integration_tests/modules/test_seed_random_data.py b/tests/integration_tests/modules/test_seed_random_data.py deleted file mode 100644 index 94e982e0..00000000 --- a/tests/integration_tests/modules/test_seed_random_data.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Integration test for the random seed module. - -This test specifies a command to be executed by the ``seed_random`` module, by -providing a different data to be used as seed data. We will then check -if that seed data was actually used. - -(This is ported from -``tests/cloud_tests/testcases/modules/seed_random_data.yaml``.)""" - -import pytest - - -USER_DATA = """\ -#cloud-config -random_seed: - data: 'MYUb34023nD:LFDK10913jk;dfnk:Df' - encoding: raw - file: /root/seed -""" - - -@pytest.mark.ci -class TestSeedRandomData: - - @pytest.mark.user_data(USER_DATA) - def test_seed_random_data(self, client): - # Only read the first 31 characters, because the rest could be - # binary data - result = client.execute("head -c 31 < /root/seed") - assert result.startswith("MYUb34023nD:LFDK10913jk;dfnk:Df") diff --git a/tests/integration_tests/modules/test_snap.py b/tests/integration_tests/modules/test_snap.py deleted file mode 100644 index 652efa68..00000000 --- a/tests/integration_tests/modules/test_snap.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Integration test for the snap module. - -This test specifies a command to be executed by the ``snap`` module -and then checks that if that command was executed during boot. 
- -(This is ported from -``tests/cloud_tests/testcases/modules/snap.yaml``.)""" - -import pytest - - -USER_DATA = """\ -#cloud-config -package_update: true -snap: - squashfuse_in_container: true - commands: - - snap install hello-world -""" - - -@pytest.mark.ci -@pytest.mark.ubuntu -class TestSnap: - - @pytest.mark.user_data(USER_DATA) - def test_snap(self, client): - snap_output = client.execute("snap list") - assert "core " in snap_output - assert "hello-world " in snap_output diff --git a/tests/integration_tests/modules/test_ssh_import_id.py b/tests/integration_tests/modules/test_ssh_import_id.py deleted file mode 100644 index b90fe95f..00000000 --- a/tests/integration_tests/modules/test_ssh_import_id.py +++ /dev/null @@ -1,40 +0,0 @@ -"""Integration test for the ssh_import_id module. - -This test specifies ssh keys to be imported by the ``ssh_import_id`` module -and then checks that if the ssh keys were successfully imported. - -TODO: -* This test assumes that SSH keys will be imported into the /home/ubuntu; this - will need modification to run on other OSes. - -(This is ported from -``tests/cloud_tests/testcases/modules/ssh_import_id.yaml``.)""" - -import pytest - -from tests.integration_tests.util import retry - -USER_DATA = """\ -#cloud-config -ssh_import_id: - - gh:powersj - - lp:smoser -""" - - -@pytest.mark.ci -@pytest.mark.ubuntu -class TestSshImportId: - - @pytest.mark.user_data(USER_DATA) - # Retry is needed here because ssh import id is one of the last modules - # run, and it fires off a web request, then continues with the rest of - # cloud-init. It is possible cloud-init's status is "done" before the - # id's have been fully imported. - @retry(tries=30, delay=1) - def test_ssh_import_id(self, client): - ssh_output = client.read_from_file( - "/home/ubuntu/.ssh/authorized_keys") - - assert '# ssh-import-id gh:powersj' in ssh_output - assert '# ssh-import-id lp:smoser' in ssh_output diff --git a/tests/integration_tests/modules/test_timezone.py b/tests/integration_tests/modules/test_timezone.py deleted file mode 100644 index 111d53f7..00000000 --- a/tests/integration_tests/modules/test_timezone.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Integration test for the timezone module. - -This test specifies a timezone to be used by the ``timezone`` module -and then checks that if that timezone was respected during boot. - -(This is ported from -``tests/cloud_tests/testcases/modules/timezone.yaml``.)""" - -import pytest - - -USER_DATA = """\ -#cloud-config -timezone: US/Aleutian -""" - - -@pytest.mark.ci -class TestTimezone: - - @pytest.mark.user_data(USER_DATA) - def test_timezone(self, client): - timezone_output = client.execute( - 'date "+%Z" --date="Thu, 03 Nov 2016 00:47:00 -0400"') - assert timezone_output.strip() == "HDT" -- cgit v1.2.3 From 7f03da357e4e72f7fe09e9b35b23ba1d83477f6c Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Wed, 17 Nov 2021 11:35:00 -0700 Subject: testing: add growpart integration test (#1104) Add growpart integration test and associated unit tests Additionally, a small runcmd check for a commented line. 
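For context, the new unit tests pin down the exact command sequence that
cc_growpart's growpart resizer is expected to issue: a '--dry-run' probe
followed by the real resize, with LANG=C (and now TMPDIR) forced into the
environment. A rough sketch of that sequence using cloud-init's subp
wrapper follows; resize_partition and the hard-coded '/tmp' default are
illustrative only, not cloud-init's actual implementation:

    # Sketch of the growpart call sequence asserted by
    # test_force_lang_check_tempfile below; not the real cc_growpart code.
    from cloudinit import subp

    def resize_partition(diskdev, partnum, tmpdir='/tmp'):
        env = {'LANG': 'C', 'TMPDIR': tmpdir}  # LANG=C keeps output parseable
        # Probe first: --dry-run reports what would change without acting.
        subp.subp(['growpart', '--dry-run', diskdev, str(partnum)], env=env)
        # Then perform the actual resize of the partition.
        subp.subp(['growpart', diskdev, str(partnum)], env=env)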
---
 tests/integration_tests/modules/test_combined.py |  2 +
 tests/integration_tests/modules/test_growpart.py | 62 +++++++++++++++++++
 .../test_handler/test_handler_growpart.py        | 69 +++++++++++++++++++++-
 3 files changed, 132 insertions(+), 1 deletion(-)
 create mode 100644 tests/integration_tests/modules/test_growpart.py

(limited to 'tests/integration_tests')

diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py
index 2635d41a..bc19c2a2 100644
--- a/tests/integration_tests/modules/test_combined.py
+++ b/tests/integration_tests/modules/test_combined.py
@@ -53,6 +53,8 @@ rsyslog:
   me: "127.0.0.1"
 runcmd:
   - echo 'hello world' > /var/tmp/runcmd_output
+
+  - #
   - logger "My test log"
 snap:
   squashfuse_in_container: true
diff --git a/tests/integration_tests/modules/test_growpart.py b/tests/integration_tests/modules/test_growpart.py
new file mode 100644
index 00000000..af1e3a15
--- /dev/null
+++ b/tests/integration_tests/modules/test_growpart.py
@@ -0,0 +1,62 @@
+import os
+import pytest
+import pathlib
+import json
+from uuid import uuid4
+from pycloudlib.lxd.instance import LXDInstance
+
+from cloudinit.subp import subp
+from tests.integration_tests.instances import IntegrationInstance
+
+DISK_PATH = '/tmp/test_disk_setup_{}'.format(uuid4())
+
+
+def setup_and_mount_lxd_disk(instance: LXDInstance):
+    subp('lxc config device add {} test-disk-setup-disk disk source={}'.format(
+        instance.name, DISK_PATH).split())
+
+
+@pytest.fixture(scope='class', autouse=True)
+def create_disk():
+    """Create 16M sparse file"""
+    pathlib.Path(DISK_PATH).touch()
+    os.truncate(DISK_PATH, 1 << 24)
+    yield
+    os.remove(DISK_PATH)
+
+
+# Create undersized partition in bootcmd
+ALIAS_USERDATA = """\
+#cloud-config
+bootcmd:
+  - parted /dev/sdb --script \
+    mklabel gpt \
+    mkpart primary 0 1MiB
+  - parted /dev/sdb --script print
+growpart:
+  devices:
+  - "/"
+  - "/dev/sdb1"
+runcmd:
+  - parted /dev/sdb --script print
+"""
+
+
+@pytest.mark.user_data(ALIAS_USERDATA)
+@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk)
+@pytest.mark.ubuntu
+@pytest.mark.lxd_vm
+class TestGrowPart:
+    """Test growpart"""
+
+    def test_grow_part(self, client: IntegrationInstance):
+        """Verify /dev/sdb1 is grown to fill the 16M disk."""
+        log = client.read_from_file('/var/log/cloud-init.log')
+        assert ("cc_growpart.py[INFO]: '/dev/sdb1' resized:"
+                " changed (/dev/sdb, 1) from") in log
+
+        lsblk = json.loads(client.execute('lsblk --json'))
+        sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0]
+        assert len(sdb['children']) == 1
+        assert sdb['children'][0]['name'] == 'sdb1'
+        assert sdb['size'] == '16M'
diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py
index 7f039b79..b7d5d7ba 100644
--- a/tests/unittests/test_handler/test_handler_growpart.py
+++ b/tests/unittests/test_handler/test_handler_growpart.py
@@ -3,16 +3,19 @@
 from cloudinit import cloud
 from cloudinit.config import cc_growpart
 from cloudinit import subp
+from cloudinit import temp_utils
 from cloudinit.tests.helpers import TestCase
 
 import errno
 import logging
 import os
+import shutil
 import re
 import unittest
 from contextlib import ExitStack
 from unittest import mock
+import stat
 
 # growpart:
 #   mode: auto  # off, on, auto, 'growpart'
@@ -58,6 +61,28 @@ usage: gpart add -t type [-a alignment] [-b start] geom
 """
 
 
+class Dir:
+    '''Stub object'''
+    def __init__(self, name):
+        self.name = name
+        self.st_mode = name
+
+    def is_dir(self, *args, **kwargs):
+        return True
+
+    def stat(self,
*args, **kwargs): + return self + + +class Scanner: + '''Stub object''' + def __enter__(self): + return (Dir(''), Dir(''),) + + def __exit__(self, *args): + pass + + class TestDisabled(unittest.TestCase): def setUp(self): super(TestDisabled, self).setUp() @@ -91,6 +116,13 @@ class TestConfig(TestCase): self.cloud_init = None self.handle = cc_growpart.handle + self.tmppath = '/tmp/cloudinit-test-file' + self.tmpdir = os.scandir('/tmp') + self.tmpfile = open(self.tmppath, 'w') + + def tearDown(self): + self.tmpfile.close() + os.remove(self.tmppath) @mock.patch.dict("os.environ", clear=True) def test_no_resizers_auto_is_fine(self): @@ -130,7 +162,42 @@ class TestConfig(TestCase): mockobj.assert_called_once_with( ['growpart', '--help'], env={'LANG': 'C'}) - @mock.patch.dict("os.environ", clear=True) + @mock.patch.dict("os.environ", {'LANG': 'cs_CZ.UTF-8'}, clear=True) + @mock.patch.object(temp_utils, 'mkdtemp', return_value='/tmp/much-random') + @mock.patch.object(stat, 'S_ISDIR', return_value=False) + @mock.patch.object(os.path, 'samestat', return_value=True) + @mock.patch.object(os.path, "join", return_value='/tmp') + @mock.patch.object(os, 'scandir', return_value=Scanner()) + @mock.patch.object(os, 'mkdir') + @mock.patch.object(os, 'unlink') + @mock.patch.object(os, 'rmdir') + @mock.patch.object(os, 'open', return_value=1) + @mock.patch.object(os, 'close') + @mock.patch.object(shutil, 'rmtree') + @mock.patch.object(os, 'lseek', return_value=1024) + @mock.patch.object(os, 'lstat', return_value='interesting metadata') + def test_force_lang_check_tempfile(self, *args, **kwargs): + with mock.patch.object( + subp, + 'subp', + return_value=(HELP_GROWPART_RESIZE, "")) as mockobj: + + ret = cc_growpart.resizer_factory(mode="auto") + self.assertIsInstance(ret, cc_growpart.ResizeGrowPart) + diskdev = '/dev/sdb' + partnum = 1 + partdev = '/dev/sdb' + ret.resize(diskdev, partnum, partdev) + mockobj.assert_has_calls([ + mock.call( + ["growpart", '--dry-run', diskdev, partnum], + env={'LANG': 'C', 'TMPDIR': '/tmp'}), + mock.call( + ["growpart", diskdev, partnum], + env={'LANG': 'C', 'TMPDIR': '/tmp'}), + ]) + + @mock.patch.dict("os.environ", {'LANG': 'cs_CZ.UTF-8'}, clear=True) def test_mode_auto_falls_back_to_gpart(self): with mock.patch.object( subp, 'subp', -- cgit v1.2.3 From 36adb6dab494f23b30c68cf6af04c10ddb693c43 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 18 Nov 2021 14:45:21 -0600 Subject: integration_tests: Ensure log directory exists before symlinking to it (#1110) Also simplify a path and fix a spelling error while in the file --- tests/integration_tests/conftest.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'tests/integration_tests') diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index 68b92e4b..5eab5a45 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -191,16 +191,15 @@ def _collect_logs(instance: IntegrationInstance, node_id: str, ) / session_start_time / node_id_path log.info("Writing logs to %s", log_dir) + if not log_dir.exists(): + log_dir.mkdir(parents=True) + # Add a symlink to the latest log output directory last_symlink = Path(integration_settings.LOCAL_LOG_PATH) / 'last' if os.path.islink(last_symlink): os.unlink(last_symlink) - os.symlink( - Path(integration_settings.LOCAL_LOG_PATH) / session_start_time, - last_symlink) + os.symlink(log_dir.parent, last_symlink) - if not log_dir.exists(): - log_dir.mkdir(parents=True) tarball_path = log_dir / 
'cloud-init.tar.gz'
     instance.pull_file('/var/tmp/cloud-init.tar.gz', tarball_path)
 
@@ -251,7 +250,7 @@ def _client(request, fixture_utils, session_cloud: IntegrationCloud):
     local_launch_kwargs = {}
     if lxd_setup is not None:
         if not isinstance(session_cloud, _LxdIntegrationCloud):
-            pytest.skip('lxd_setup requres LXD')
+            pytest.skip('lxd_setup requires LXD')
         local_launch_kwargs['lxd_setup'] = lxd_setup
 
     with session_cloud.launch(
--
cgit v1.2.3


From c39d4f455d6663948c06c1f8186ab69b24ea0013 Mon Sep 17 00:00:00 2001
From: dermotbradley
Date: Tue, 30 Nov 2021 20:08:42 +0000
Subject: cc_ssh_authkey_fingerprints.py: prevent duplicate messages on
 console (#1081)

When cloud-init is configured to show SSH user key fingerprints during
boot, the same message appears twice on the console for each user. This
appears to be because the util.multi_log call defaults to sending output
both to the console directly and to stderr (which also ends up on the
console). This change sends the messages only to the console directly.

---
 cloudinit/config/cc_ssh_authkey_fingerprints.py |  2 +-
 .../modules/test_keys_to_console.py             | 38 ++++++++++++++++++++++
 2 files changed, 39 insertions(+), 1 deletion(-)

(limited to 'tests/integration_tests')

diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 05d30ad1..5323522c 100755
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -70,7 +70,7 @@ def _pprint_key_entries(user, key_fn, key_entries, hash_meth='sha256',
     if not key_entries:
         message = ("%sno authorized SSH keys fingerprints found for user %s.\n"
                    % (prefix, user))
-        util.multi_log(message)
+        util.multi_log(message, console=True, stderr=False)
         return
     tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options',
                   'Comment']
diff --git a/tests/integration_tests/modules/test_keys_to_console.py b/tests/integration_tests/modules/test_keys_to_console.py
index 39e06b55..e79db3c7 100644
--- a/tests/integration_tests/modules/test_keys_to_console.py
+++ b/tests/integration_tests/modules/test_keys_to_console.py
@@ -23,6 +23,15 @@ ssh:
   emit_keys_to_console: false
 """
 
+ENABLE_KEYS_TO_CONSOLE_USER_DATA = """\
+#cloud-config
+ssh:
+  emit_keys_to_console: true
+users:
+  - default
+  - name: barfoo
+"""
+
 
 @pytest.mark.user_data(BLACKLIST_USER_DATA)
 class TestKeysToConsoleBlacklist:
@@ -70,3 +79,32 @@ class TestKeysToConsoleDisabled:
     def test_footer_excluded(self, class_client):
         syslog = class_client.read_from_file("/var/log/syslog")
         assert "END SSH HOST KEY FINGERPRINTS" not in syslog
+
+
+@pytest.mark.user_data(ENABLE_KEYS_TO_CONSOLE_USER_DATA)
+@pytest.mark.ec2
+@pytest.mark.lxd_container
+@pytest.mark.oci
+@pytest.mark.openstack
+class TestKeysToConsoleEnabled:
+    """Test that output can be enabled or disabled."""
+
+    def test_duplicate_messaging_console_log(self, class_client):
+        class_client.execute('cloud-init status --wait --long').ok
+        try:
+            console_log = class_client.instance.console_log()
+        except NotImplementedError:
+            # Assume that an exception here means that we can't use the console
+            # log
+            pytest.skip("NotImplementedError when requesting console log")
+            return
+        if console_log.lower() == 'no console output':
+            # This test retries because we might not have the full console log
+            # on the first fetch.
However, if we have no console output + # at all, we don't want to keep retrying as that would trigger + # another 5 minute wait on the pycloudlib side, which could + # leave us waiting for a couple hours + pytest.fail('no console output') + return + msg = "no authorized SSH keys fingerprints found for user barfoo." + assert 1 == console_log.count(msg) -- cgit v1.2.3 From cf38c2cbc5875813fbb9858f45e5b95789b7ffea Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 2 Dec 2021 08:51:26 -0600 Subject: Move GCE metadata fetch to init-local (SC-502) (#1122) GCE currently fetches metadata after network has come up. There's no reason we can't fetch at init-local time, so update GCE to fetch at init-local time to be more performant and consistent with other datasources. --- cloudinit/sources/DataSourceGCE.py | 25 ++++++++++++--- tests/integration_tests/modules/test_combined.py | 41 ++++++++++++++++++++++++ tests/unittests/test_datasource/test_common.py | 1 + tests/unittests/test_datasource/test_gce.py | 24 ++++++++++++++ 4 files changed, 87 insertions(+), 4 deletions(-) (limited to 'tests/integration_tests') diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index 9f838bd4..b82fa410 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -4,6 +4,7 @@ import datetime import json +from contextlib import suppress as noop from base64 import b64decode @@ -13,6 +14,7 @@ from cloudinit import log as logging from cloudinit import sources from cloudinit import url_helper from cloudinit import util +from cloudinit.net.dhcp import EphemeralDHCPv4 LOG = logging.getLogger(__name__) @@ -58,6 +60,7 @@ class GoogleMetadataFetcher(object): class DataSourceGCE(sources.DataSource): dsname = 'GCE' + perform_dhcp_setup = False def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -73,10 +76,19 @@ class DataSourceGCE(sources.DataSource): def _get_data(self): url_params = self.get_url_params() - ret = util.log_time( - LOG.debug, 'Crawl of GCE metadata service', - read_md, kwargs={'address': self.metadata_address, - 'url_params': url_params}) + network_context = noop() + if self.perform_dhcp_setup: + network_context = EphemeralDHCPv4(self.fallback_interface) + with network_context: + ret = util.log_time( + LOG.debug, + "Crawl of GCE metadata service", + read_md, + kwargs={ + "address": self.metadata_address, + "url_params": url_params, + }, + ) if not ret['success']: if ret['platform_reports_gce']: @@ -117,6 +129,10 @@ class DataSourceGCE(sources.DataSource): return self.availability_zone.rsplit('-', 1)[0] +class DataSourceGCELocal(DataSourceGCE): + perform_dhcp_setup = True + + def _write_host_key_to_guest_attributes(key_type, key_value): url = '%s/%s/%s' % (GUEST_ATTRIBUTES_URL, HOSTKEY_NAMESPACE, key_type) key_value = key_value.encode('utf-8') @@ -272,6 +288,7 @@ def platform_reports_gce(): # Used to match classes to dependencies. 
datasources = [ + (DataSourceGCELocal, (sources.DEP_FILESYSTEM,)), (DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py index bc19c2a2..758c96fa 100644 --- a/tests/integration_tests/modules/test_combined.py +++ b/tests/integration_tests/modules/test_combined.py @@ -209,6 +209,31 @@ class TestCombined: log = client.read_from_file('/var/log/cloud-init.log') verify_clean_log(log) + def test_correct_datasource_detected( + self, class_client: IntegrationInstance + ): + """Test datasource is detected at the proper boot stage.""" + client = class_client + status_file = client.read_from_file("/run/cloud-init/status.json") + + platform_datasources = { + "azure": "DataSourceAzure [seed=/dev/sr0]", + "ec2": "DataSourceEc2Local", + "gce": "DataSourceGCELocal", + "oci": "DataSourceOracle", + "openstack": "DataSourceOpenStackLocal [net,ver=2]", + "lxd_container": ( + "DataSourceNoCloud " + "[seed=/var/lib/cloud/seed/nocloud-net][dsmode=net]" + ), + "lxd_vm": "DataSourceNoCloud [seed=/dev/sr0][dsmode=net]", + } + + assert ( + platform_datasources[client.settings.PLATFORM] + == json.loads(status_file)["v1"]["datasource"] + ) + def _check_common_metadata(self, data): assert data['base64_encoded_keys'] == [] assert data['merged_cfg'] == 'redacted for non-root user' @@ -277,3 +302,19 @@ class TestCombined: assert v1_data['instance_id'] == client.instance.name assert v1_data['local_hostname'].startswith('ip-') assert v1_data['region'] == client.cloud.cloud_instance.region + + @pytest.mark.gce + def test_instance_json_gce(self, class_client: IntegrationInstance): + client = class_client + instance_json_file = client.read_from_file( + "/run/cloud-init/instance-data.json" + ) + data = json.loads(instance_json_file) + self._check_common_metadata(data) + v1_data = data["v1"] + assert v1_data["cloud_name"] == "gce" + assert v1_data["platform"] == "gce" + assert v1_data["subplatform"].startswith("metadata") + assert v1_data["availability_zone"] == client.instance.zone + assert v1_data["instance_id"] == client.instance.instance_id + assert v1_data["local_hostname"] == client.instance.name diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 17d53160..9089e5de 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -41,6 +41,7 @@ DEFAULT_LOCAL = [ CloudSigma.DataSourceCloudSigma, ConfigDrive.DataSourceConfigDrive, DigitalOcean.DataSourceDigitalOcean, + GCE.DataSourceGCELocal, Hetzner.DataSourceHetzner, IBMCloud.DataSourceIBMCloud, LXD.DataSourceLXD, diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index 80b38f9e..1d91b301 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -360,5 +360,29 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): self.ds.publish_host_keys(hostkeys) m_readurl.assert_has_calls(readurl_expected_calls, any_order=True) + @mock.patch( + "cloudinit.sources.DataSourceGCE.EphemeralDHCPv4", + autospec=True, + ) + @mock.patch( + "cloudinit.sources.DataSourceGCE.DataSourceGCELocal.fallback_interface" + ) + def test_local_datasource_uses_ephemeral_dhcp(self, _m_fallback, m_dhcp): + _set_mock_metadata() + ds = DataSourceGCE.DataSourceGCELocal( + sys_cfg={}, distro=None, paths=None + ) + ds._get_data() + assert m_dhcp.call_count == 1 + + 
@mock.patch(
+        "cloudinit.sources.DataSourceGCE.EphemeralDHCPv4",
+        autospec=True,
+    )
+    def test_datasource_doesnt_use_ephemeral_dhcp(self, m_dhcp):
+        _set_mock_metadata()
+        ds = DataSourceGCE.DataSourceGCE(sys_cfg={}, distro=None, paths=None)
+        ds._get_data()
+        assert m_dhcp.call_count == 0
 
 # vi: ts=4 expandtab
--
cgit v1.2.3


From ff10fc0914a8b29acc23348d7848439a5eb4960a Mon Sep 17 00:00:00 2001
From: James Falcon
Date: Thu, 2 Dec 2021 22:08:34 -0600
Subject: testing: Remove date from final_message test (SC-638) (#1127)

---
 tests/integration_tests/modules/test_combined.py | 19 ++++++-------------
 1 file changed, 6 insertions(+), 13 deletions(-)

(limited to 'tests/integration_tests')

diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py
index 758c96fa..26a8397d 100644
--- a/tests/integration_tests/modules/test_combined.py
+++ b/tests/integration_tests/modules/test_combined.py
@@ -74,23 +74,16 @@ class TestCombined:
         """Test that final_message module works as expected.
 
         Also tests LP 1511485: final_message is silent.
-
-        It's possible that if this test is run within a minute or so of
-        midnight that we'll see a failure because the day in the logs
-        is different from the day specified in the test definition.
         """
         client = class_client
         log = client.read_from_file('/var/log/cloud-init.log')
-        # Get date on host rather than locally as our host could be in a
-        # wildly different timezone (or more likely recording UTC)
-        today = client.execute('date "+%a, %d %b %Y"')
         expected = (
-            'This is my final message!\n'
-            r'\d+\.\d+.*\n'
-            '{}.*\n'
-            'DataSource.*\n'
-            r'\d+\.\d+'
-        ).format(today)
+            "This is my final message!\n"
+            r"\d+\.\d+.*\n"
+            r"\w{3}, \d{2} \w{3} \d{4} \d{2}:\d{2}:\d{2} \+\d{4}\n"  # Datetime
+            "DataSource.*\n"
+            r"\d+\.\d+"
+        )
         assert re.search(expected, log)
--
cgit v1.2.3


From 0fe96a44cde48cc688afe75beb8fd126c8892b8c Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Thu, 2 Dec 2021 21:25:43 -0700
Subject: jinja: provide and document jinja-safe key aliases in instance-data
 (SC-622) (#1123)

Allow #cloud-config and cloud-init query to use underscore-delimited
"jinja-safe" key aliases for any instance-data.json keys containing jinja
operator characters. This provides a means to use Jinja's dot-notation
instead of square brackets and quoting to reference "unsafe" attribute
names.

Support for these aliased keys is available to both #cloud-config
user-data and `cloud-init query`.
For example #cloud-config alias access can look like: {{ ds.config.user_network_config }} - instead of - {{ ds.config["user.network-config"] }} --- cloudinit/cmd/query.py | 134 ++++++++++++++++----- cloudinit/cmd/tests/test_query.py | 71 +++++++++-- cloudinit/handlers/jinja_template.py | 48 ++++++-- cloudinit/sources/DataSourceLXD.py | 26 ++-- cloudinit/sources/tests/test_lxd.py | 71 +++++------ doc/rtd/topics/instancedata.rst | 16 ++- .../datasources/test_lxd_discovery.py | 17 ++- .../modules/test_jinja_templating.py | 12 +- tests/unittests/test_builtin_handlers.py | 68 +++++++---- 9 files changed, 326 insertions(+), 137 deletions(-) (limited to 'tests/integration_tests') diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py index 07db9552..e53cd855 100644 --- a/cloudinit/cmd/query.py +++ b/cloudinit/cmd/query.py @@ -19,7 +19,10 @@ import os import sys from cloudinit.handlers.jinja_template import ( - convert_jinja_instance_data, render_jinja_payload) + convert_jinja_instance_data, + get_jinja_variable_alias, + render_jinja_payload +) from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths from cloudinit import log from cloudinit.sources import ( @@ -93,22 +96,24 @@ def load_userdata(ud_file_path): return util.decomp_gzip(bdata, quiet=False, decode=True) -def handle_args(name, args): - """Handle calls to 'cloud-init query' as a subcommand.""" - paths = None - addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING) - if not any([args.list_keys, args.varname, args.format, args.dump_all]): - LOG.error( - 'Expected one of the options: --all, --format,' - ' --list-keys or varname') - get_parser().print_help() - return 1 +def _read_instance_data(instance_data, user_data, vendor_data) -> dict: + """Return a dict of merged instance-data, vendordata and userdata. + The dict will contain supplemental userdata and vendordata keys sourced + from default user-data and vendor-data files. + + Non-root users will have redacted INSTANCE_JSON_FILE content and redacted + vendordata and userdata values. + + :raise: IOError/OSError on absence of instance-data.json file or invalid + access perms. + """ + paths = None uid = os.getuid() - if not all([args.instance_data, args.user_data, args.vendor_data]): + if not all([instance_data, user_data, vendor_data]): paths = read_cfg_paths() - if args.instance_data: - instance_data_fn = args.instance_data + if instance_data: + instance_data_fn = instance_data else: redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE) if uid == 0: @@ -124,12 +129,12 @@ def handle_args(name, args): instance_data_fn = redacted_data_fn else: instance_data_fn = redacted_data_fn - if args.user_data: - user_data_fn = args.user_data + if user_data: + user_data_fn = user_data else: user_data_fn = os.path.join(paths.instance_link, 'user-data.txt') - if args.vendor_data: - vendor_data_fn = args.vendor_data + if vendor_data: + vendor_data_fn = vendor_data else: vendor_data_fn = os.path.join(paths.instance_link, 'vendor-data.txt') @@ -140,7 +145,7 @@ def handle_args(name, args): LOG.error("No read permission on '%s'. 
Try sudo", instance_data_fn) else: LOG.error('Missing instance-data file: %s', instance_data_fn) - return 1 + raise instance_data = util.load_json(instance_json) if uid != 0: @@ -151,6 +156,65 @@ def handle_args(name, args): else: instance_data['userdata'] = load_userdata(user_data_fn) instance_data['vendordata'] = load_userdata(vendor_data_fn) + return instance_data + + +def _find_instance_data_leaf_by_varname_path( + jinja_vars_without_aliases: dict, jinja_vars_with_aliases: dict, + varname: str, list_keys: bool +): + """Return the value of the dot-delimited varname path in instance-data + + Split a dot-delimited jinja variable name path into components, walk the + path components into the instance_data and look up a matching jinja + variable name or cloud-init's underscore-delimited key aliases. + + :raises: ValueError when varname represents an invalid key name or path or + if list-keys is provided by varname isn't a dict object. + """ + walked_key_path = "" + response = jinja_vars_without_aliases + for key_path_part in varname.split('.'): + try: + # Walk key path using complete aliases dict, yet response + # should only contain jinja_without_aliases + jinja_vars_with_aliases = jinja_vars_with_aliases[key_path_part] + except KeyError as e: + if walked_key_path: + msg = "instance-data '{key_path}' has no '{leaf}'".format( + leaf=key_path_part, key_path=walked_key_path + ) + else: + msg = "Undefined instance-data key '{}'".format(varname) + raise ValueError(msg) from e + if key_path_part in response: + response = response[key_path_part] + else: # We are an underscore_delimited key alias + for key in response: + if get_jinja_variable_alias(key) == key_path_part: + response = response[key] + break + if walked_key_path: + walked_key_path += "." + walked_key_path += key_path_part + return response + + +def handle_args(name, args): + """Handle calls to 'cloud-init query' as a subcommand.""" + addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING) + if not any([args.list_keys, args.varname, args.format, args.dump_all]): + LOG.error( + 'Expected one of the options: --all, --format,' + ' --list-keys or varname') + get_parser().print_help() + return 1 + try: + instance_data = _read_instance_data( + args.instance_data, args.user_data, args.vendor_data + ) + except (IOError, OSError): + return 1 if args.format: payload = '## template: jinja\n{fmt}'.format(fmt=args.format) rendered_payload = render_jinja_payload( @@ -162,20 +226,32 @@ def handle_args(name, args): return 0 return 1 + # If not rendering a structured format above, query output will be either: + # - JSON dump of all instance-data/jinja variables + # - JSON dump of a value at an dict path into the instance-data dict. + # - a list of keys for a specific dict path into the instance-data dict. 
response = convert_jinja_instance_data(instance_data)
     if args.varname:
+        jinja_vars_with_aliases = convert_jinja_instance_data(
+            instance_data, include_key_aliases=True
+        )
         try:
-            for var in args.varname.split('.'):
-                response = response[var]
-        except KeyError:
-            LOG.error('Undefined instance-data key %s', args.varname)
+            response = _find_instance_data_leaf_by_varname_path(
+                jinja_vars_without_aliases=response,
+                jinja_vars_with_aliases=jinja_vars_with_aliases,
+                varname=args.varname,
+                list_keys=args.list_keys
+            )
+        except (KeyError, ValueError) as e:
+            LOG.error(e)
+            return 1
+        if args.list_keys:
+            if not isinstance(response, dict):
+                LOG.error(
+                    "--list-keys provided but '%s' is not a dict",
+                    args.varname
+                )
                 return 1
-        if args.list_keys:
-            if not isinstance(response, dict):
-                LOG.error("--list-keys provided but '%s' is not a dict", var)
-                return 1
-            response = '\n'.join(sorted(response.keys()))
-    elif args.list_keys:
+            response = '\n'.join(sorted(response.keys()))
     if not isinstance(response, str):
         response = util.json_dumps(response)
diff --git a/cloudinit/cmd/tests/test_query.py b/cloudinit/cmd/tests/test_query.py
index c258d321..d96c3945 100644
--- a/cloudinit/cmd/tests/test_query.py
+++ b/cloudinit/cmd/tests/test_query.py
@@ -75,6 +75,40 @@ class TestQuery:
         assert 'usage: query' in out
         assert 1 == m_cli_log.call_count
 
+    @pytest.mark.parametrize(
+        "inst_data,varname,expected_error", (
+            (
+                '{"v1": {"key-2": "value-2"}}',
+                'v1.absent_leaf',
+                "instance-data 'v1' has no 'absent_leaf'\n"
+            ),
+            (
+                '{"v1": {"key-2": "value-2"}}',
+                'absent_key',
+                "Undefined instance-data key 'absent_key'\n"
+            ),
+        )
+    )
+    def test_handle_args_error_on_invalid_varname_paths(
+        self, inst_data, varname, expected_error, caplog, tmpdir
+    ):
+        """Error when varname is not a valid instance-data variable path."""
+        instance_data = tmpdir.join('instance-data')
+        instance_data.write(inst_data)
+        args = self.args(
+            debug=False, dump_all=False, format=None,
+            instance_data=instance_data.strpath,
+            list_keys=False, user_data=None, vendor_data=None, varname=varname
+        )
+        paths, _, _, _ = self._setup_paths(tmpdir)
+        with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths:
+            m_paths.return_value = paths
+            with mock.patch(
+                "cloudinit.cmd.query.addLogHandlerCLI", return_value=""
+            ):
+                assert 1 == query.handle_args('anyname', args)
+        assert expected_error in caplog.text
+
     def test_handle_args_error_on_missing_instance_data(self, caplog, tmpdir):
         """When instance_data file path does not exist, log an error."""
         absent_fn = tmpdir.join('absent')
@@ -166,7 +200,7 @@ class TestQuery:
         assert 0 == query.handle_args('anyname', args)
         out, _err = capsys.readouterr()
         cmd_output = json.loads(out)
-        assert "it worked" == cmd_output['my_var']
+        assert "it worked" == cmd_output['my-var']
         if ud_expected == "ci-b64:":
             ud_expected = "ci-b64:{}".format(b64e(ud_src))
         if vd_expected == "ci-b64:":
@@ -193,8 +227,8 @@ class TestQuery:
             m_getuid.return_value = 0
             assert 0 == query.handle_args('anyname', args)
         expected = (
-            '{\n "my_var": "it worked",\n "userdata": "ud",\n '
-            '"vendordata": "vd"\n}\n'
+            '{\n "my-var": "it worked",\n '
+            '"userdata": "ud",\n "vendordata": "vd"\n}\n'
         )
         out, _err = capsys.readouterr()
         assert expected == out
@@ -211,7 +245,7 @@ class TestQuery:
             m_getuid.return_value = 100
             assert 0 == query.handle_args('anyname', args)
         expected = (
-            '{\n "my_var": "it worked",\n "userdata": "<%s> file:ud",\n'
+            '{\n "my-var": "it worked",\n "userdata": "<%s> file:ud",\n'
             ' "vendordata": "<%s> file:vd"\n}\n' % (
REDACT_SENSITIVE_VALUE, REDACT_SENSITIVE_VALUE ) @@ -233,21 +267,38 @@ class TestQuery: out, _err = capsys.readouterr() assert 'it worked\n' == out - def test_handle_args_returns_nested_varname(self, capsys, tmpdir): + @pytest.mark.parametrize( + 'inst_data,varname,expected', + ( + ( + '{"v1": {"key-2": "value-2"}, "my-var": "it worked"}', + 'v1.key_2', + 'value-2\n' + ), + # Assert no jinja underscore-delimited aliases are reported on CLI + ( + '{"v1": {"something-hyphenated": {"no.underscores":"x",' + ' "no-alias": "y"}}, "my-var": "it worked"}', + 'v1.something_hyphenated', + '{\n "no-alias": "y",\n "no.underscores": "x"\n}\n' + ), + ) + ) + def test_handle_args_returns_nested_varname( + self, inst_data, varname, expected, capsys, tmpdir + ): """If user_data file is a jinja template render instance-data vars.""" instance_data = tmpdir.join('instance-data') - instance_data.write( - '{"v1": {"key-2": "value-2"}, "my-var": "it worked"}' - ) + instance_data.write(inst_data) args = self.args( debug=False, dump_all=False, format=None, instance_data=instance_data.strpath, user_data='ud', - vendor_data='vd', list_keys=False, varname='v1.key_2') + vendor_data='vd', list_keys=False, varname=varname) with mock.patch('os.getuid') as m_getuid: m_getuid.return_value = 100 assert 0 == query.handle_args('anyname', args) out, _err = capsys.readouterr() - assert 'value-2\n' == out + assert expected == out def test_handle_args_returns_standardized_vars_to_top_level_aliases( self, capsys, tmpdir diff --git a/cloudinit/handlers/jinja_template.py b/cloudinit/handlers/jinja_template.py index 5033abbb..de88a5ea 100644 --- a/cloudinit/handlers/jinja_template.py +++ b/cloudinit/handlers/jinja_template.py @@ -1,14 +1,18 @@ # This file is part of cloud-init. See LICENSE file for license information. +import copy from errno import EACCES import os import re +from typing import Optional try: from jinja2.exceptions import UndefinedError as JUndefinedError + from jinja2.lexer import operator_re except ImportError: # No jinja2 dependency JUndefinedError = Exception + operator_re = re.compile(r'[-.]') from cloudinit import handlers from cloudinit import log as logging @@ -97,7 +101,9 @@ def render_jinja_payload_from_file( def render_jinja_payload(payload, payload_fn, instance_data, debug=False): instance_jinja_vars = convert_jinja_instance_data( instance_data, - decode_paths=instance_data.get('base64-encoded-keys', [])) + decode_paths=instance_data.get('base64-encoded-keys', []), + include_key_aliases=True + ) if debug: LOG.debug('Converted jinja variables\n%s', json_dumps(instance_jinja_vars)) @@ -118,7 +124,30 @@ def render_jinja_payload(payload, payload_fn, instance_data, debug=False): return rendered_payload -def convert_jinja_instance_data(data, prefix='', sep='/', decode_paths=()): +def get_jinja_variable_alias(orig_name: str) -> Optional[str]: + """Return a jinja variable alias, replacing any operators with underscores. + + Provide underscore-delimited key aliases to simplify dot-notation + attribute references for keys which contain operators "." or "-". + This provides for simpler short-hand jinja attribute notation + allowing one to avoid quoting keys which contain operators. + {{ ds.v1_0.config.user_network_config }} instead of + {{ ds['v1.0'].config["user.network-config"] }}. + + :param orig_name: String representing a jinja variable name to scrub/alias. + + :return: A string with any jinja operators replaced if needed. Otherwise, + none if no alias required. 
+ """ + alias_name = re.sub(operator_re, '_', orig_name) + if alias_name != orig_name: + return alias_name + return None + + +def convert_jinja_instance_data( + data, prefix='', sep='/', decode_paths=(), include_key_aliases=False +): """Process instance-data.json dict for use in jinja templates. Replace hyphens with underscores for jinja templates and decode any @@ -127,21 +156,24 @@ def convert_jinja_instance_data(data, prefix='', sep='/', decode_paths=()): result = {} decode_paths = [path.replace('-', '_') for path in decode_paths] for key, value in sorted(data.items()): - if '-' in key: - # Standardize keys for use in #cloud-config/shell templates - key = key.replace('-', '_') key_path = '{0}{1}{2}'.format(prefix, sep, key) if prefix else key if key_path in decode_paths: value = b64d(value) if isinstance(value, dict): result[key] = convert_jinja_instance_data( - value, key_path, sep=sep, decode_paths=decode_paths) - if re.match(r'v\d+', key): + value, key_path, sep=sep, decode_paths=decode_paths, + include_key_aliases=include_key_aliases + ) + if re.match(r'v\d+$', key): # Copy values to top-level aliases for subkey, subvalue in result[key].items(): - result[subkey] = subvalue + result[subkey] = copy.deepcopy(subvalue) else: result[key] = value + if include_key_aliases: + alias_name = get_jinja_variable_alias(key) + if alias_name: + result[alias_name] = copy.deepcopy(result[key]) return result # vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceLXD.py b/cloudinit/sources/DataSourceLXD.py index 55ae52a2..469707d2 100644 --- a/cloudinit/sources/DataSourceLXD.py +++ b/cloudinit/sources/DataSourceLXD.py @@ -190,19 +190,16 @@ class DataSourceLXD(sources.DataSource): self.metadata = _raw_instance_data_to_dict( "meta-data", self._crawled_metadata.get("meta-data") ) - if LXD_SOCKET_API_VERSION in self._crawled_metadata: - config = self._crawled_metadata[LXD_SOCKET_API_VERSION].get( - "config", {} + config = self._crawled_metadata.get("config", {}) + user_metadata = config.get("user.meta-data", {}) + if user_metadata: + user_metadata = _raw_instance_data_to_dict( + "user.meta-data", user_metadata + ) + if not isinstance(self.metadata, dict): + self.metadata = util.mergemanydict( + [util.load_yaml(self.metadata), user_metadata] ) - user_metadata = config.get("user.meta-data", {}) - if user_metadata: - user_metadata = _raw_instance_data_to_dict( - "user.meta-data", user_metadata - ) - if not isinstance(self.metadata, dict): - self.metadata = util.mergemanydict( - [util.load_yaml(self.metadata), user_metadata] - ) if "user-data" in self._crawled_metadata: self.userdata_raw = self._crawled_metadata["user-data"] if "network-config" in self._crawled_metadata: @@ -304,7 +301,8 @@ def read_metadata( if metadata_only: return md # Skip network-data, vendor-data, user-data - md[LXD_SOCKET_API_VERSION] = { + md = { + "_metadata_api_version": api_version, # Document API version read "config": {}, "meta-data": md["meta-data"] } @@ -345,7 +343,7 @@ def read_metadata( # Leave raw data values/format unchanged to represent it in # instance-data.json for cloud-init query or jinja template # use. - md[LXD_SOCKET_API_VERSION]["config"][cfg_key] = response.text + md["config"][cfg_key] = response.text # Promote common CONFIG_KEY_ALIASES to top-level keys. 
if cfg_key in CONFIG_KEY_ALIASES: # Due to sort of config_routes, promote cloud-init.* diff --git a/cloudinit/sources/tests/test_lxd.py b/cloudinit/sources/tests/test_lxd.py index fc2a41df..a6e51f3b 100644 --- a/cloudinit/sources/tests/test_lxd.py +++ b/cloudinit/sources/tests/test_lxd.py @@ -42,15 +42,12 @@ LXD_V1_METADATA = { "network-config": NETWORK_V1, "user-data": "#cloud-config\npackages: [sl]\n", "vendor-data": "#cloud-config\nruncmd: ['echo vendor-data']\n", - "1.0": { - "meta-data": "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n", - "config": { - "user.user-data": - "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n", - "user.vendor-data": - "#cloud-config\nruncmd: ['echo vendor-data']\n", - "user.network-config": yaml.safe_dump(NETWORK_V1), - } + "config": { + "user.user-data": + "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n", + "user.vendor-data": + "#cloud-config\nruncmd: ['echo vendor-data']\n", + "user.network-config": yaml.safe_dump(NETWORK_V1), } } @@ -190,8 +187,10 @@ class TestReadMetadata: "http://lxd/1.0/meta-data": "local-hostname: md\n", "http://lxd/1.0/config": "[]", }, - {"1.0": {"config": {}, "meta-data": "local-hostname: md\n"}, - "meta-data": "local-hostname: md\n"}, + { + "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, + "config": {}, "meta-data": "local-hostname: md\n" + }, ["[GET] [HTTP:200] http://lxd/1.0/meta-data", "[GET] [HTTP:200] http://lxd/1.0/config"], ), @@ -211,12 +210,10 @@ class TestReadMetadata: "http://lxd/1.0/config/user.vendor-data": "", # 404 }, { - "1.0": { - "config": { - "user.custom1": "custom1", # Not promoted - "user.network-config": "net-config", - }, - "meta-data": "local-hostname: md\n", + "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, + "config": { + "user.custom1": "custom1", # Not promoted + "user.network-config": "net-config", }, "meta-data": "local-hostname: md\n", "network-config": "net-config", @@ -250,15 +247,13 @@ class TestReadMetadata: "http://lxd/1.0/config/user.vendor-data": "vendor-data", }, { - "1.0": { - "config": { - "user.custom1": "custom1", # Not promoted - "user.meta-data": "meta-data", - "user.network-config": "net-config", - "user.user-data": "user-data", - "user.vendor-data": "vendor-data", - }, - "meta-data": "local-hostname: md\n", + "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, + "config": { + "user.custom1": "custom1", # Not promoted + "user.meta-data": "meta-data", + "user.network-config": "net-config", + "user.user-data": "user-data", + "user.vendor-data": "vendor-data", }, "meta-data": "local-hostname: md\n", "network-config": "net-config", @@ -303,19 +298,17 @@ class TestReadMetadata: "cloud-init.vendor-data", }, { - "1.0": { - "config": { - "user.meta-data": "user.meta-data", - "user.network-config": "user.network-config", - "user.user-data": "user.user-data", - "user.vendor-data": "user.vendor-data", - "cloud-init.network-config": - "cloud-init.network-config", - "cloud-init.user-data": "cloud-init.user-data", - "cloud-init.vendor-data": - "cloud-init.vendor-data", - }, - "meta-data": "local-hostname: md\n", + "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, + "config": { + "user.meta-data": "user.meta-data", + "user.network-config": "user.network-config", + "user.user-data": "user.user-data", + "user.vendor-data": "user.vendor-data", + "cloud-init.network-config": + "cloud-init.network-config", + "cloud-init.user-data": "cloud-init.user-data", + "cloud-init.vendor-data": + "cloud-init.vendor-data", }, "meta-data": "local-hostname: md\n", "network-config": 
"cloud-init.network-config", diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst index 6c17139f..c33b907a 100644 --- a/doc/rtd/topics/instancedata.rst +++ b/doc/rtd/topics/instancedata.rst @@ -530,12 +530,18 @@ Both user-data scripts and **#cloud-config** data support jinja template rendering. When the first line of the provided user-data begins with, **## template: jinja** cloud-init will use jinja to render that file. -Any instance-data-sensitive.json variables are surfaced as dot-delimited -jinja template variables because cloud-config modules are run as 'root' -user. +Any instance-data-sensitive.json variables are surfaced as jinja template +variables because cloud-config modules are run as 'root' user. - -Below are some examples of providing these types of user-data: +.. note:: + cloud-init also provides jinja-safe key aliases for any instance-data.json + keys which contain jinja operator characters such as +, -, ., /, etc. Any + jinja operator will be replaced with underscores in the jinja-safe key + alias. This allows for cloud-init templates to use aliased variable + references which allow for jinja's dot-notation reference such as + ``{{ ds.v1_0.my_safe_key }}`` instead of ``{{ ds["v1.0"]["my/safe-key"] }}``. + +Below are some other examples of using jinja templates in user-data: * Cloud config calling home with the ec2 public hostname and availability-zone diff --git a/tests/integration_tests/datasources/test_lxd_discovery.py b/tests/integration_tests/datasources/test_lxd_discovery.py index 93200962..3f05e906 100644 --- a/tests/integration_tests/datasources/test_lxd_discovery.py +++ b/tests/integration_tests/datasources/test_lxd_discovery.py @@ -53,7 +53,9 @@ def test_lxd_datasource_discovery(client: IntegrationInstance): assert "lxd" == v1["platform"] assert "LXD socket API v. 1.0 (/dev/lxd/sock)" == v1["subplatform"] ds_cfg = json.loads(client.execute('cloud-init query ds').stdout) - assert ["config", "meta_data"] == sorted(list(ds_cfg["1.0"].keys())) + assert ["_doc", "_metadata_api_version", "config", "meta-data"] == sorted( + list(ds_cfg.keys()) + ) if ( client.settings.PLATFORM == "lxd_vm" and ImageSpecification.from_os_image().release in ("xenial", "bionic") @@ -62,15 +64,18 @@ def test_lxd_datasource_discovery(client: IntegrationInstance): # to start the lxd-agent. # https://github.com/canonical/pycloudlib/blob/main/pycloudlib/\ # lxd/defaults.py#L13-L27 - lxd_config_keys = ["user.meta_data", "user.vendor_data"] + # Underscore-delimited aliases exist for any keys containing hyphens or + # dots. 
+ lxd_config_keys = ["user.meta-data", "user.vendor-data"] else: - lxd_config_keys = ["user.meta_data"] - assert lxd_config_keys == list(ds_cfg["1.0"]["config"].keys()) + lxd_config_keys = ["user.meta-data"] + assert "1.0" == ds_cfg["_metadata_api_version"] + assert lxd_config_keys == list(ds_cfg["config"].keys()) assert {"public-keys": v1["public_ssh_keys"][0]} == ( - yaml.safe_load(ds_cfg["1.0"]["config"]["user.meta_data"]) + yaml.safe_load(ds_cfg["config"]["user.meta-data"]) ) assert ( - "#cloud-config\ninstance-id" in ds_cfg["1.0"]["meta_data"] + "#cloud-config\ninstance-id" in ds_cfg["meta-data"] ) # Assert NoCloud seed data is still present in cloud image metadata # This will start failing if we redact metadata templates from diff --git a/tests/integration_tests/modules/test_jinja_templating.py b/tests/integration_tests/modules/test_jinja_templating.py index 35b8ee2d..fe8eff1a 100644 --- a/tests/integration_tests/modules/test_jinja_templating.py +++ b/tests/integration_tests/modules/test_jinja_templating.py @@ -11,6 +11,7 @@ USER_DATA = """\ runcmd: - echo {{v1.local_hostname}} > /var/tmp/runcmd_output - echo {{merged_cfg._doc}} >> /var/tmp/runcmd_output + - echo {{v1['local-hostname']}} >> /var/tmp/runcmd_output """ @@ -18,13 +19,16 @@ runcmd: def test_runcmd_with_variable_substitution(client: IntegrationInstance): """Test jinja substitution. - Ensure we can also substitute variables from instance-data-sensitive - LP: #1931392 + Ensure underscore-delimited aliases exist for hyphenated key and + we can also substitute variables from instance-data-sensitive + LP: #1931392. """ + hostname = client.execute('hostname').stdout.strip() expected = [ - client.execute('hostname').stdout.strip(), + hostname, ('Merged cloud-init system config from /etc/cloud/cloud.cfg and ' - '/etc/cloud/cloud.cfg.d/') + '/etc/cloud/cloud.cfg.d/'), + hostname ] output = client.read_from_file('/var/tmp/runcmd_output') verify_ordered_items_in_text(expected, output) diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py index 30293e9e..230866b9 100644 --- a/tests/unittests/test_builtin_handlers.py +++ b/tests/unittests/test_builtin_handlers.py @@ -5,6 +5,7 @@ import copy import errno import os +import pytest import shutil import tempfile from textwrap import dedent @@ -281,17 +282,44 @@ class TestJinjaTemplatePartHandler(CiTestCase): self.logs.getvalue()) -class TestConvertJinjaInstanceData(CiTestCase): - - def test_convert_instance_data_hyphens_to_underscores(self): - """Replace hyphenated keys with underscores in instance-data.""" - data = {'hyphenated-key': 'hyphenated-val', - 'underscore_delim_key': 'underscore_delimited_val'} - expected_data = {'hyphenated_key': 'hyphenated-val', - 'underscore_delim_key': 'underscore_delimited_val'} - self.assertEqual( - expected_data, - convert_jinja_instance_data(data=data)) +class TestConvertJinjaInstanceData: + + @pytest.mark.parametrize( + "include_key_aliases,data,expected", ( + ( + False, + {'my-key': 'my-val'}, + {'my-key': 'my-val'} + ), + ( + True, + {'my-key': 'my-val'}, + {'my-key': 'my-val', 'my_key': 'my-val'} + ), + ( + False, + {'my.key': 'my.val'}, + {'my.key': 'my.val'} + ), + ( + True, + {'my.key': 'my.val'}, + {'my.key': 'my.val', 'my_key': 'my.val'} + ), + ( + True, + {'my/key': 'my/val'}, + {'my/key': 'my/val', 'my_key': 'my/val'} + ), + ) + ) + def test_convert_instance_data_operators_to_underscores( + self, include_key_aliases, data, expected + ): + """Replace Jinja operators keys with underscores in 
instance-data.""" + assert expected == convert_jinja_instance_data( + data=data, include_key_aliases=include_key_aliases + ) def test_convert_instance_data_promotes_versioned_keys_to_top_level(self): """Any versioned keys are promoted as top-level keys @@ -307,11 +335,10 @@ class TestConvertJinjaInstanceData(CiTestCase): expected_data.update({'v1key1': 'v1.1', 'v2key1': 'v2.1'}) converted_data = convert_jinja_instance_data(data=data) - self.assertCountEqual( - ['ds', 'v1', 'v2', 'v1key1', 'v2key1'], converted_data.keys()) - self.assertEqual( - expected_data, - converted_data) + assert sorted(['ds', 'v1', 'v2', 'v1key1', 'v2key1']) == sorted( + converted_data.keys() + ) + assert expected_data == converted_data def test_convert_instance_data_most_recent_version_of_promoted_keys(self): """The most-recent versioned key value is promoted to top-level.""" @@ -324,9 +351,7 @@ class TestConvertJinjaInstanceData(CiTestCase): 'key3': 'newer v2 key3'}) converted_data = convert_jinja_instance_data(data=data) - self.assertEqual( - expected_data, - converted_data) + assert expected_data == converted_data def test_convert_instance_data_decodes_decode_paths(self): """Any decode_paths provided are decoded by convert_instance_data.""" @@ -336,9 +361,7 @@ class TestConvertJinjaInstanceData(CiTestCase): converted_data = convert_jinja_instance_data( data=data, decode_paths=('key1/subkey1',)) - self.assertEqual( - expected_data, - converted_data) + assert expected_data == converted_data class TestRenderJinjaPayload(CiTestCase): @@ -355,6 +378,7 @@ class TestRenderJinjaPayload(CiTestCase): DEBUG: Converted jinja variables { "hostname": "foo", + "instance-id": "iid", "instance_id": "iid", "v1": { "hostname": "foo" -- cgit v1.2.3 From bae9b11da9ed7dd0b16fe5adeaf4774b7cc628cf Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 15 Dec 2021 20:16:38 -0600 Subject: Adopt Black and isort (SC-700) (#1157) Applied Black and isort, fixed any linting issues, updated tox.ini and CI. 
--- .travis.yml | 4 + CONTRIBUTING.rst | 5 + cloudinit/analyze/__main__.py | 269 +- cloudinit/analyze/dump.py | 71 +- cloudinit/analyze/show.py | 192 +- cloudinit/apport.py | 153 +- cloudinit/atomic_helper.py | 25 +- cloudinit/cloud.py | 14 +- cloudinit/cmd/clean.py | 59 +- cloudinit/cmd/cloud_id.py | 68 +- cloudinit/cmd/devel/__init__.py | 3 +- cloudinit/cmd/devel/hotplug_hook.py | 138 +- cloudinit/cmd/devel/logs.py | 120 +- cloudinit/cmd/devel/make_mime.py | 76 +- cloudinit/cmd/devel/net_convert.py | 145 +- cloudinit/cmd/devel/parser.py | 48 +- cloudinit/cmd/devel/render.py | 54 +- cloudinit/cmd/main.py | 595 ++- cloudinit/cmd/query.py | 170 +- cloudinit/cmd/status.py | 101 +- cloudinit/config/__init__.py | 20 +- cloudinit/config/cc_apk_configure.py | 195 +- cloudinit/config/cc_apt_configure.py | 618 +-- cloudinit/config/cc_apt_pipelining.py | 13 +- cloudinit/config/cc_bootcmd.py | 65 +- cloudinit/config/cc_byobu.py | 27 +- cloudinit/config/cc_ca_certs.py | 84 +- cloudinit/config/cc_chef.py | 659 +-- cloudinit/config/cc_debug.py | 21 +- cloudinit/config/cc_disable_ec2_metadata.py | 25 +- cloudinit/config/cc_disk_setup.py | 334 +- cloudinit/config/cc_emit_upstart.py | 24 +- cloudinit/config/cc_fan.py | 34 +- cloudinit/config/cc_final_message.py | 24 +- cloudinit/config/cc_foo.py | 1 + cloudinit/config/cc_growpart.py | 134 +- cloudinit/config/cc_grub_dpkg.py | 54 +- cloudinit/config/cc_install_hotplug.py | 48 +- cloudinit/config/cc_keys_to_console.py | 36 +- cloudinit/config/cc_landscape.py | 24 +- cloudinit/config/cc_locale.py | 51 +- cloudinit/config/cc_lxd.py | 186 +- cloudinit/config/cc_mcollective.py | 50 +- cloudinit/config/cc_migrator.py | 25 +- cloudinit/config/cc_mounts.py | 172 +- cloudinit/config/cc_ntp.py | 546 ++- .../config/cc_package_update_upgrade_install.py | 30 +- cloudinit/config/cc_phone_home.py | 98 +- cloudinit/config/cc_power_state_change.py | 58 +- cloudinit/config/cc_puppet.py | 194 +- cloudinit/config/cc_refresh_rmc_and_interface.py | 51 +- cloudinit/config/cc_reset_rmc.py | 43 +- cloudinit/config/cc_resizefs.py | 183 +- cloudinit/config/cc_resolv_conf.py | 41 +- cloudinit/config/cc_rh_subscription.py | 240 +- cloudinit/config/cc_rightscale_userdata.py | 31 +- cloudinit/config/cc_rsyslog.py | 86 +- cloudinit/config/cc_runcmd.py | 70 +- cloudinit/config/cc_salt_minion.py | 69 +- cloudinit/config/cc_scripts_per_boot.py | 14 +- cloudinit/config/cc_scripts_per_instance.py | 14 +- cloudinit/config/cc_scripts_per_once.py | 14 +- cloudinit/config/cc_scripts_user.py | 12 +- cloudinit/config/cc_scripts_vendor.py | 22 +- cloudinit/config/cc_seed_random.py | 41 +- cloudinit/config/cc_set_hostname.py | 30 +- cloudinit/config/cc_set_passwords.py | 65 +- cloudinit/config/cc_snap.py | 168 +- cloudinit/config/cc_spacewalk.py | 67 +- cloudinit/config/cc_ssh.py | 106 +- cloudinit/config/cc_ssh_authkey_fingerprints.py | 73 +- cloudinit/config/cc_ssh_import_id.py | 23 +- cloudinit/config/cc_timezone.py | 2 +- cloudinit/config/cc_ubuntu_advantage.py | 154 +- cloudinit/config/cc_ubuntu_drivers.py | 133 +- cloudinit/config/cc_update_etc_hosts.py | 42 +- cloudinit/config/cc_update_hostname.py | 25 +- cloudinit/config/cc_users_groups.py | 39 +- cloudinit/config/cc_write_files.py | 242 +- cloudinit/config/cc_write_files_deferred.py | 22 +- cloudinit/config/cc_yum_add_repo.py | 65 +- cloudinit/config/cc_zypper_add_repo.py | 159 +- cloudinit/config/schema.py | 239 +- cloudinit/cs_utils.py | 20 +- cloudinit/dhclient_hook.py | 21 +- cloudinit/distros/__init__.py | 420 +- 
cloudinit/distros/almalinux.py | 1 + cloudinit/distros/alpine.py | 45 +- cloudinit/distros/amazon.py | 1 - cloudinit/distros/arch.py | 147 +- cloudinit/distros/bsd.py | 66 +- cloudinit/distros/bsd_utils.py | 18 +- cloudinit/distros/centos.py | 1 + cloudinit/distros/cloudlinux.py | 1 + cloudinit/distros/debian.py | 168 +- cloudinit/distros/dragonflybsd.py | 2 +- cloudinit/distros/eurolinux.py | 1 + cloudinit/distros/fedora.py | 1 + cloudinit/distros/freebsd.py | 93 +- cloudinit/distros/gentoo.py | 140 +- cloudinit/distros/miraclelinux.py | 2 + cloudinit/distros/net_util.py | 68 +- cloudinit/distros/netbsd.py | 85 +- cloudinit/distros/networking.py | 13 +- cloudinit/distros/openEuler.py | 1 + cloudinit/distros/openbsd.py | 20 +- cloudinit/distros/opensuse.py | 119 +- cloudinit/distros/parsers/__init__.py | 3 +- cloudinit/distros/parsers/hostname.py | 24 +- cloudinit/distros/parsers/hosts.py | 24 +- cloudinit/distros/parsers/networkmanager_conf.py | 6 +- cloudinit/distros/parsers/resolv_conf.py | 73 +- cloudinit/distros/parsers/sys_conf.py | 38 +- cloudinit/distros/photon.py | 86 +- cloudinit/distros/rhel.py | 76 +- cloudinit/distros/rhel_util.py | 4 +- cloudinit/distros/rocky.py | 1 + cloudinit/distros/sles.py | 1 + cloudinit/distros/ubuntu.py | 33 +- cloudinit/distros/ug_util.py | 106 +- cloudinit/distros/virtuozzo.py | 1 + cloudinit/dmi.py | 68 +- cloudinit/ec2_utils.py | 165 +- cloudinit/event.py | 8 +- cloudinit/filters/launch_index.py | 12 +- cloudinit/gpg.py | 48 +- cloudinit/handlers/__init__.py | 152 +- cloudinit/handlers/boot_hook.py | 21 +- cloudinit/handlers/cloud_config.py | 29 +- cloudinit/handlers/jinja_template.py | 87 +- cloudinit/handlers/shell_script.py | 15 +- cloudinit/handlers/upstart_job.py | 22 +- cloudinit/helpers.py | 111 +- cloudinit/importer.py | 3 +- cloudinit/log.py | 21 +- cloudinit/mergers/__init__.py | 43 +- cloudinit/mergers/m_dict.py | 34 +- cloudinit/mergers/m_list.py | 37 +- cloudinit/mergers/m_str.py | 5 +- cloudinit/net/__init__.py | 579 ++- cloudinit/net/activators.py | 87 +- cloudinit/net/bsd.py | 112 +- cloudinit/net/cmdline.py | 97 +- cloudinit/net/dhcp.py | 194 +- cloudinit/net/eni.py | 454 +- cloudinit/net/freebsd.py | 44 +- cloudinit/net/netbsd.py | 27 +- cloudinit/net/netplan.py | 313 +- cloudinit/net/network_state.py | 734 +-- cloudinit/net/networkd.py | 208 +- cloudinit/net/openbsd.py | 33 +- cloudinit/net/renderer.py | 31 +- cloudinit/net/renderers.py | 40 +- cloudinit/net/sysconfig.py | 886 ++-- cloudinit/net/udev.py | 23 +- cloudinit/netinfo.py | 403 +- cloudinit/patcher.py | 9 +- cloudinit/registry.py | 4 +- cloudinit/reporting/__init__.py | 9 +- cloudinit/reporting/events.py | 97 +- cloudinit/reporting/handlers.py | 128 +- cloudinit/safeyaml.py | 25 +- cloudinit/serial.py | 25 +- cloudinit/settings.py | 82 +- cloudinit/signal_handler.py | 12 +- cloudinit/simpletable.py | 26 +- cloudinit/sources/DataSourceAliYun.py | 18 +- cloudinit/sources/DataSourceAltCloud.py | 113 +- cloudinit/sources/DataSourceAzure.py | 1350 +++--- cloudinit/sources/DataSourceBigstep.py | 9 +- cloudinit/sources/DataSourceCloudSigma.py | 39 +- cloudinit/sources/DataSourceCloudStack.py | 135 +- cloudinit/sources/DataSourceConfigDrive.py | 117 +- cloudinit/sources/DataSourceDigitalOcean.py | 65 +- cloudinit/sources/DataSourceEc2.py | 461 +- cloudinit/sources/DataSourceExoscale.py | 171 +- cloudinit/sources/DataSourceGCE.py | 221 +- cloudinit/sources/DataSourceHetzner.py | 74 +- cloudinit/sources/DataSourceIBMCloud.py | 128 +- cloudinit/sources/DataSourceLXD.py | 61 +- 
cloudinit/sources/DataSourceMAAS.py | 180 +- cloudinit/sources/DataSourceNoCloud.py | 154 +- cloudinit/sources/DataSourceNone.py | 15 +- cloudinit/sources/DataSourceOVF.py | 311 +- cloudinit/sources/DataSourceOpenNebula.py | 190 +- cloudinit/sources/DataSourceOpenStack.py | 129 +- cloudinit/sources/DataSourceOracle.py | 125 +- cloudinit/sources/DataSourceRbxCloud.py | 194 +- cloudinit/sources/DataSourceScaleway.py | 131 +- cloudinit/sources/DataSourceSmartOS.py | 555 ++- cloudinit/sources/DataSourceUpCloud.py | 7 +- cloudinit/sources/DataSourceVMware.py | 13 +- cloudinit/sources/DataSourceVultr.py | 86 +- cloudinit/sources/__init__.py | 385 +- cloudinit/sources/helpers/azure.py | 693 +-- cloudinit/sources/helpers/digitalocean.py | 195 +- cloudinit/sources/helpers/hetzner.py | 15 +- cloudinit/sources/helpers/netlink.py | 187 +- cloudinit/sources/helpers/openstack.py | 438 +- cloudinit/sources/helpers/upcloud.py | 12 +- cloudinit/sources/helpers/vmware/imc/boot_proto.py | 5 +- cloudinit/sources/helpers/vmware/imc/config.py | 59 +- .../helpers/vmware/imc/config_custom_script.py | 45 +- .../sources/helpers/vmware/imc/config_file.py | 7 +- .../sources/helpers/vmware/imc/config_namespace.py | 1 + cloudinit/sources/helpers/vmware/imc/config_nic.py | 84 +- .../sources/helpers/vmware/imc/config_passwd.py | 38 +- .../sources/helpers/vmware/imc/config_source.py | 1 + .../sources/helpers/vmware/imc/guestcust_error.py | 1 + .../sources/helpers/vmware/imc/guestcust_event.py | 1 + .../sources/helpers/vmware/imc/guestcust_state.py | 1 + .../sources/helpers/vmware/imc/guestcust_util.py | 46 +- cloudinit/sources/helpers/vmware/imc/ipv4_mode.py | 11 +- cloudinit/sources/helpers/vmware/imc/nic.py | 33 +- cloudinit/sources/helpers/vmware/imc/nic_base.py | 29 +- cloudinit/sources/helpers/vultr.py | 172 +- cloudinit/ssh_util.py | 172 +- cloudinit/stages.py | 649 +-- cloudinit/subp.py | 165 +- cloudinit/temp_utils.py | 20 +- cloudinit/templater.py | 96 +- cloudinit/type_utils.py | 4 +- cloudinit/url_helper.py | 273 +- cloudinit/user_data.py | 121 +- cloudinit/util.py | 873 ++-- cloudinit/version.py | 9 +- cloudinit/warnings.py | 21 +- conftest.py | 5 +- doc/rtd/conf.py | 30 +- pyproject.toml | 8 + setup.py | 263 +- tests/integration_tests/__init__.py | 8 +- tests/integration_tests/bugs/test_gh570.py | 13 +- tests/integration_tests/bugs/test_gh626.py | 25 +- tests/integration_tests/bugs/test_gh632.py | 20 +- tests/integration_tests/bugs/test_gh668.py | 15 +- tests/integration_tests/bugs/test_gh671.py | 35 +- tests/integration_tests/bugs/test_gh868.py | 3 +- tests/integration_tests/bugs/test_lp1813396.py | 3 +- tests/integration_tests/bugs/test_lp1835584.py | 19 +- tests/integration_tests/bugs/test_lp1886531.py | 2 - tests/integration_tests/bugs/test_lp1897099.py | 13 +- tests/integration_tests/bugs/test_lp1898997.py | 14 +- tests/integration_tests/bugs/test_lp1900837.py | 2 +- tests/integration_tests/bugs/test_lp1901011.py | 49 +- tests/integration_tests/bugs/test_lp1910835.py | 1 - tests/integration_tests/bugs/test_lp1912844.py | 4 +- tests/integration_tests/clouds.py | 163 +- tests/integration_tests/conftest.py | 130 +- .../datasources/test_lxd_discovery.py | 43 +- .../datasources/test_network_dependency.py | 17 +- tests/integration_tests/instances.py | 83 +- tests/integration_tests/integration_settings.py | 13 +- tests/integration_tests/modules/test_apt.py | 88 +- tests/integration_tests/modules/test_ca_certs.py | 1 - tests/integration_tests/modules/test_cli.py | 9 +- 
tests/integration_tests/modules/test_combined.py | 155 +- .../modules/test_command_output.py | 5 +- tests/integration_tests/modules/test_disk_setup.py | 76 +- tests/integration_tests/modules/test_growpart.py | 38 +- tests/integration_tests/modules/test_hotplug.py | 55 +- .../modules/test_jinja_templating.py | 11 +- .../modules/test_keys_to_console.py | 9 +- tests/integration_tests/modules/test_lxd_bridge.py | 2 - .../integration_tests/modules/test_ntp_servers.py | 30 +- .../modules/test_package_update_upgrade_install.py | 18 +- .../integration_tests/modules/test_persistence.py | 26 +- .../modules/test_power_state_change.py | 48 +- tests/integration_tests/modules/test_puppet.py | 6 +- .../integration_tests/modules/test_set_hostname.py | 10 +- .../integration_tests/modules/test_set_password.py | 15 +- .../modules/test_ssh_auth_key_fingerprints.py | 13 +- .../integration_tests/modules/test_ssh_generate.py | 16 +- .../modules/test_ssh_keys_provided.py | 58 +- .../integration_tests/modules/test_ssh_keysfile.py | 159 +- .../integration_tests/modules/test_user_events.py | 50 +- .../integration_tests/modules/test_users_groups.py | 21 +- .../modules/test_version_change.py | 45 +- .../integration_tests/modules/test_write_files.py | 32 +- tests/integration_tests/test_upgrade.py | 120 +- tests/integration_tests/util.py | 39 +- tests/unittests/__init__.py | 1 + tests/unittests/analyze/test_boot.py | 135 +- tests/unittests/analyze/test_dump.py | 213 +- tests/unittests/cmd/devel/test_hotplug_hook.py | 162 +- tests/unittests/cmd/devel/test_logs.py | 232 +- tests/unittests/cmd/devel/test_render.py | 152 +- tests/unittests/cmd/test_clean.py | 179 +- tests/unittests/cmd/test_cloud_id.py | 99 +- tests/unittests/cmd/test_main.py | 223 +- tests/unittests/cmd/test_query.py | 403 +- tests/unittests/cmd/test_status.py | 561 ++- tests/unittests/config/test_apt_conf_v1.py | 68 +- .../config/test_apt_configure_sources_list_v1.py | 131 +- .../config/test_apt_configure_sources_list_v3.py | 158 +- tests/unittests/config/test_apt_key.py | 117 +- tests/unittests/config/test_apt_source_v1.py | 765 ++-- tests/unittests/config/test_apt_source_v3.py | 1220 +++-- tests/unittests/config/test_cc_apk_configure.py | 148 +- tests/unittests/config/test_cc_apt_pipelining.py | 12 +- tests/unittests/config/test_cc_bootcmd.py | 100 +- tests/unittests/config/test_cc_ca_certs.py | 220 +- tests/unittests/config/test_cc_chef.py | 202 +- tests/unittests/config/test_cc_debug.py | 39 +- .../config/test_cc_disable_ec2_metadata.py | 44 +- tests/unittests/config/test_cc_disk_setup.py | 270 +- tests/unittests/config/test_cc_growpart.py | 232 +- tests/unittests/config/test_cc_grub_dpkg.py | 121 +- tests/unittests/config/test_cc_install_hotplug.py | 58 +- tests/unittests/config/test_cc_keys_to_console.py | 18 +- tests/unittests/config/test_cc_landscape.py | 178 +- tests/unittests/config/test_cc_locale.py | 99 +- tests/unittests/config/test_cc_lxd.py | 250 +- tests/unittests/config/test_cc_mcollective.py | 104 +- tests/unittests/config/test_cc_mounts.py | 449 +- tests/unittests/config/test_cc_ntp.py | 682 +-- .../unittests/config/test_cc_power_state_change.py | 74 +- tests/unittests/config/test_cc_puppet.py | 432 +- .../config/test_cc_refresh_rmc_and_interface.py | 162 +- tests/unittests/config/test_cc_resizefs.py | 436 +- tests/unittests/config/test_cc_resolv_conf.py | 76 +- tests/unittests/config/test_cc_rh_subscription.py | 366 +- tests/unittests/config/test_cc_rsyslog.py | 112 +- tests/unittests/config/test_cc_runcmd.py | 74 +- 
tests/unittests/config/test_cc_seed_random.py | 158 +- tests/unittests/config/test_cc_set_hostname.py | 185 +- tests/unittests/config/test_cc_set_passwords.py | 111 +- tests/unittests/config/test_cc_snap.py | 445 +- tests/unittests/config/test_cc_spacewalk.py | 36 +- tests/unittests/config/test_cc_ssh.py | 356 +- tests/unittests/config/test_cc_timezone.py | 31 +- tests/unittests/config/test_cc_ubuntu_advantage.py | 311 +- tests/unittests/config/test_cc_ubuntu_drivers.py | 213 +- tests/unittests/config/test_cc_update_etc_hosts.py | 63 +- tests/unittests/config/test_cc_users_groups.py | 264 +- tests/unittests/config/test_cc_write_files.py | 148 +- .../config/test_cc_write_files_deferred.py | 62 +- tests/unittests/config/test_cc_yum_add_repo.py | 105 +- tests/unittests/config/test_cc_zypper_add_repo.py | 166 +- tests/unittests/config/test_schema.py | 301 +- tests/unittests/distros/__init__.py | 10 +- tests/unittests/distros/test_arch.py | 50 +- tests/unittests/distros/test_bsd_utils.py | 49 +- tests/unittests/distros/test_create_users.py | 252 +- tests/unittests/distros/test_debian.py | 155 +- tests/unittests/distros/test_freebsd.py | 28 +- tests/unittests/distros/test_generic.py | 300 +- tests/unittests/distros/test_gentoo.py | 11 +- tests/unittests/distros/test_hostname.py | 16 +- tests/unittests/distros/test_hosts.py | 36 +- tests/unittests/distros/test_init.py | 273 +- tests/unittests/distros/test_manage_service.py | 33 +- tests/unittests/distros/test_netbsd.py | 11 +- tests/unittests/distros/test_netconfig.py | 605 ++- tests/unittests/distros/test_networking.py | 30 +- tests/unittests/distros/test_opensuse.py | 3 +- tests/unittests/distros/test_photon.py | 42 +- tests/unittests/distros/test_resolv.py | 55 +- tests/unittests/distros/test_sles.py | 3 +- tests/unittests/distros/test_sysconfig.py | 62 +- .../unittests/distros/test_user_data_normalize.py | 383 +- tests/unittests/filters/test_launch_index.py | 23 +- tests/unittests/helpers.py | 191 +- tests/unittests/net/test_dhcp.py | 678 +-- tests/unittests/net/test_init.py | 1368 +++--- tests/unittests/net/test_network_state.py | 82 +- tests/unittests/net/test_networkd.py | 2 +- tests/unittests/runs/test_merge_run.py | 49 +- tests/unittests/runs/test_simple_run.py | 132 +- tests/unittests/sources/helpers/test_netlink.py | 357 +- tests/unittests/sources/helpers/test_openstack.py | 51 +- tests/unittests/sources/test_aliyun.py | 217 +- tests/unittests/sources/test_altcloud.py | 311 +- tests/unittests/sources/test_azure.py | 3174 +++++++------ tests/unittests/sources/test_azure_helper.py | 1138 +++-- tests/unittests/sources/test_cloudsigma.py | 72 +- tests/unittests/sources/test_cloudstack.py | 121 +- tests/unittests/sources/test_common.py | 86 +- tests/unittests/sources/test_configdrive.py | 1100 +++-- tests/unittests/sources/test_digitalocean.py | 283 +- tests/unittests/sources/test_ec2.py | 851 ++-- tests/unittests/sources/test_exoscale.py | 248 +- tests/unittests/sources/test_gce.py | 304 +- tests/unittests/sources/test_hetzner.py | 85 +- tests/unittests/sources/test_ibmcloud.py | 299 +- tests/unittests/sources/test_init.py | 879 ++-- tests/unittests/sources/test_lxd.py | 134 +- tests/unittests/sources/test_maas.py | 147 +- tests/unittests/sources/test_nocloud.py | 320 +- tests/unittests/sources/test_opennebula.py | 888 ++-- tests/unittests/sources/test_openstack.py | 652 +-- tests/unittests/sources/test_oracle.py | 412 +- tests/unittests/sources/test_ovf.py | 1053 +++-- tests/unittests/sources/test_rbx.py | 215 +- 
tests/unittests/sources/test_scaleway.py | 481 +- tests/unittests/sources/test_smartos.py | 956 ++-- tests/unittests/sources/test_upcloud.py | 161 +- tests/unittests/sources/test_vmware.py | 12 +- tests/unittests/sources/test_vultr.py | 375 +- .../unittests/sources/vmware/test_custom_script.py | 61 +- .../sources/vmware/test_guestcust_util.py | 79 +- .../sources/vmware/test_vmware_config_file.py | 430 +- tests/unittests/test__init__.py | 193 +- tests/unittests/test_atomic_helper.py | 4 +- tests/unittests/test_builtin_handlers.py | 405 +- tests/unittests/test_cli.py | 214 +- tests/unittests/test_conftest.py | 10 +- tests/unittests/test_cs_util.py | 39 +- tests/unittests/test_data.py | 526 ++- tests/unittests/test_dhclient_hook.py | 89 +- tests/unittests/test_dmi.py | 90 +- tests/unittests/test_ds_identify.py | 1609 ++++--- tests/unittests/test_ec2_util.py | 376 +- tests/unittests/test_event.py | 16 +- tests/unittests/test_features.py | 36 +- tests/unittests/test_gpg.py | 103 +- tests/unittests/test_helpers.py | 11 +- tests/unittests/test_log.py | 12 +- tests/unittests/test_merging.py | 123 +- tests/unittests/test_net.py | 4833 ++++++++++++-------- tests/unittests/test_net_activators.py | 154 +- tests/unittests/test_net_freebsd.py | 45 +- tests/unittests/test_netinfo.py | 193 +- tests/unittests/test_pathprefix2dict.py | 28 +- tests/unittests/test_registry.py | 21 +- tests/unittests/test_render_cloudcfg.py | 71 +- tests/unittests/test_reporting.py | 379 +- tests/unittests/test_reporting_hyperv.py | 193 +- tests/unittests/test_simpletable.py | 47 +- tests/unittests/test_sshutil.py | 817 ++-- tests/unittests/test_stages.py | 444 +- tests/unittests/test_subp.py | 289 +- tests/unittests/test_temp_utils.py | 118 +- tests/unittests/test_templating.py | 103 +- tests/unittests/test_url_helper.py | 134 +- tests/unittests/test_util.py | 934 ++-- tests/unittests/test_version.py | 11 +- tests/unittests/util.py | 14 +- tools/mock-meta.py | 301 +- tools/validate-yaml.py | 4 +- tox.ini | 28 +- 441 files changed, 43425 insertions(+), 31496 deletions(-) create mode 100644 pyproject.toml (limited to 'tests/integration_tests') diff --git a/.travis.yml b/.travis.yml index 9470cc31..c458db48 100644 --- a/.travis.yml +++ b/.travis.yml @@ -133,6 +133,10 @@ matrix: env: TOXENV=flake8 - python: 3.6 env: TOXENV=pylint + - python: 3.6 + env: TOXENV=black + - python: 3.6 + env: TOXENV=isort - python: 3.7 env: TOXENV=doc # Test all supported Python versions (but at the end, so we schedule diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 06b31497..aa09c61e 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -19,6 +19,7 @@ Before any pull request can be accepted, you must do the following: `tools/.github-cla-signers`_ * Add or update any `unit tests`_ accordingly * Add or update any `integration tests`_ (if applicable) +* Format code (using black and isort) with `tox -e format` * Ensure unit tests and linting pass using `tox`_ * Submit a PR against the `main` branch of the `cloud-init` repository @@ -133,6 +134,10 @@ Do these things for each feature or bug git commit +* Apply black and isort formatting rules with `tox`_:: + + tox -e format + * Run unit tests and lint/formatting checks with `tox`_:: tox diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py index 99e5c203..36a5be78 100644 --- a/cloudinit/analyze/__main__.py +++ b/cloudinit/analyze/__main__.py @@ -5,62 +5,111 @@ import argparse import re import sys +from datetime import datetime from cloudinit.util import json_dumps -from 
datetime import datetime -from . import dump -from . import show + +from . import dump, show def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser( - prog='cloudinit-analyze', - description='Devel tool: Analyze cloud-init logs and data') - subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand') + prog="cloudinit-analyze", + description="Devel tool: Analyze cloud-init logs and data", + ) + subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand") subparsers.required = True parser_blame = subparsers.add_parser( - 'blame', help='Print list of executed stages ordered by time to init') + "blame", help="Print list of executed stages ordered by time to init" + ) parser_blame.add_argument( - '-i', '--infile', action='store', dest='infile', - default='/var/log/cloud-init.log', - help='specify where to read input.') + "-i", + "--infile", + action="store", + dest="infile", + default="/var/log/cloud-init.log", + help="specify where to read input.", + ) parser_blame.add_argument( - '-o', '--outfile', action='store', dest='outfile', default='-', - help='specify where to write output. ') - parser_blame.set_defaults(action=('blame', analyze_blame)) + "-o", + "--outfile", + action="store", + dest="outfile", + default="-", + help="specify where to write output. ", + ) + parser_blame.set_defaults(action=("blame", analyze_blame)) parser_show = subparsers.add_parser( - 'show', help='Print list of in-order events during execution') - parser_show.add_argument('-f', '--format', action='store', - dest='print_format', default='%I%D @%Es +%ds', - help='specify formatting of output.') - parser_show.add_argument('-i', '--infile', action='store', - dest='infile', default='/var/log/cloud-init.log', - help='specify where to read input.') - parser_show.add_argument('-o', '--outfile', action='store', - dest='outfile', default='-', - help='specify where to write output.') - parser_show.set_defaults(action=('show', analyze_show)) + "show", help="Print list of in-order events during execution" + ) + parser_show.add_argument( + "-f", + "--format", + action="store", + dest="print_format", + default="%I%D @%Es +%ds", + help="specify formatting of output.", + ) + parser_show.add_argument( + "-i", + "--infile", + action="store", + dest="infile", + default="/var/log/cloud-init.log", + help="specify where to read input.", + ) + parser_show.add_argument( + "-o", + "--outfile", + action="store", + dest="outfile", + default="-", + help="specify where to write output.", + ) + parser_show.set_defaults(action=("show", analyze_show)) parser_dump = subparsers.add_parser( - 'dump', help='Dump cloud-init events in JSON format') - parser_dump.add_argument('-i', '--infile', action='store', - dest='infile', default='/var/log/cloud-init.log', - help='specify where to read input. ') - parser_dump.add_argument('-o', '--outfile', action='store', - dest='outfile', default='-', - help='specify where to write output. ') - parser_dump.set_defaults(action=('dump', analyze_dump)) + "dump", help="Dump cloud-init events in JSON format" + ) + parser_dump.add_argument( + "-i", + "--infile", + action="store", + dest="infile", + default="/var/log/cloud-init.log", + help="specify where to read input. ", + ) + parser_dump.add_argument( + "-o", + "--outfile", + action="store", + dest="outfile", + default="-", + help="specify where to write output. 
", + ) + parser_dump.set_defaults(action=("dump", analyze_dump)) parser_boot = subparsers.add_parser( - 'boot', help='Print list of boot times for kernel and cloud-init') - parser_boot.add_argument('-i', '--infile', action='store', - dest='infile', default='/var/log/cloud-init.log', - help='specify where to read input. ') - parser_boot.add_argument('-o', '--outfile', action='store', - dest='outfile', default='-', - help='specify where to write output.') - parser_boot.set_defaults(action=('boot', analyze_boot)) + "boot", help="Print list of boot times for kernel and cloud-init" + ) + parser_boot.add_argument( + "-i", + "--infile", + action="store", + dest="infile", + default="/var/log/cloud-init.log", + help="specify where to read input. ", + ) + parser_boot.add_argument( + "-o", + "--outfile", + action="store", + dest="outfile", + default="-", + help="specify where to write output.", + ) + parser_boot.set_defaults(action=("boot", analyze_boot)) return parser @@ -78,61 +127,68 @@ def analyze_boot(name, args): """ infh, outfh = configure_io(args) kernel_info = show.dist_check_timestamp() - status_code, kernel_start, kernel_end, ci_sysd_start = \ - kernel_info + status_code, kernel_start, kernel_end, ci_sysd_start = kernel_info kernel_start_timestamp = datetime.utcfromtimestamp(kernel_start) kernel_end_timestamp = datetime.utcfromtimestamp(kernel_end) ci_sysd_start_timestamp = datetime.utcfromtimestamp(ci_sysd_start) try: - last_init_local = \ - [e for e in _get_events(infh) if e['name'] == 'init-local' and - 'starting search' in e['description']][-1] - ci_start = datetime.utcfromtimestamp(last_init_local['timestamp']) + last_init_local = [ + e + for e in _get_events(infh) + if e["name"] == "init-local" + and "starting search" in e["description"] + ][-1] + ci_start = datetime.utcfromtimestamp(last_init_local["timestamp"]) except IndexError: - ci_start = 'Could not find init-local log-line in cloud-init.log' + ci_start = "Could not find init-local log-line in cloud-init.log" status_code = show.FAIL_CODE - FAILURE_MSG = 'Your Linux distro or container does not support this ' \ - 'functionality.\n' \ - 'You must be running a Kernel Telemetry supported ' \ - 'distro.\nPlease check ' \ - 'https://cloudinit.readthedocs.io/en/latest' \ - '/topics/analyze.html for more ' \ - 'information on supported distros.\n' - - SUCCESS_MSG = '-- Most Recent Boot Record --\n' \ - ' Kernel Started at: {k_s_t}\n' \ - ' Kernel ended boot at: {k_e_t}\n' \ - ' Kernel time to boot (seconds): {k_r}\n' \ - ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \ - ' Time between Kernel end boot and Cloud-init ' \ - 'activation (seconds): {bt_r}\n' \ - ' Cloud-init start: {ci_start}\n' - - CONTAINER_MSG = '-- Most Recent Container Boot Record --\n' \ - ' Container started at: {k_s_t}\n' \ - ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \ - ' Cloud-init start: {ci_start}\n' \ - + FAILURE_MSG = ( + "Your Linux distro or container does not support this " + "functionality.\n" + "You must be running a Kernel Telemetry supported " + "distro.\nPlease check " + "https://cloudinit.readthedocs.io/en/latest" + "/topics/analyze.html for more " + "information on supported distros.\n" + ) + + SUCCESS_MSG = ( + "-- Most Recent Boot Record --\n" + " Kernel Started at: {k_s_t}\n" + " Kernel ended boot at: {k_e_t}\n" + " Kernel time to boot (seconds): {k_r}\n" + " Cloud-init activated by systemd at: {ci_sysd_t}\n" + " Time between Kernel end boot and Cloud-init " + "activation (seconds): {bt_r}\n" + " Cloud-init start: {ci_start}\n" + 
) + + CONTAINER_MSG = ( + "-- Most Recent Container Boot Record --\n" + " Container started at: {k_s_t}\n" + " Cloud-init activated by systemd at: {ci_sysd_t}\n" + " Cloud-init start: {ci_start}\n" + ) status_map = { show.FAIL_CODE: FAILURE_MSG, show.CONTAINER_CODE: CONTAINER_MSG, - show.SUCCESS_CODE: SUCCESS_MSG + show.SUCCESS_CODE: SUCCESS_MSG, } kernel_runtime = kernel_end - kernel_start between_process_runtime = ci_sysd_start - kernel_end kwargs = { - 'k_s_t': kernel_start_timestamp, - 'k_e_t': kernel_end_timestamp, - 'k_r': kernel_runtime, - 'bt_r': between_process_runtime, - 'k_e': kernel_end, - 'k_s': kernel_start, - 'ci_sysd': ci_sysd_start, - 'ci_sysd_t': ci_sysd_start_timestamp, - 'ci_start': ci_start + "k_s_t": kernel_start_timestamp, + "k_e_t": kernel_end_timestamp, + "k_r": kernel_runtime, + "bt_r": between_process_runtime, + "k_e": kernel_end, + "k_s": kernel_start, + "ci_sysd": ci_sysd_start, + "ci_sysd_t": ci_sysd_start_timestamp, + "ci_start": ci_start, } outfh.write(status_map[status_code].format(**kwargs)) @@ -152,15 +208,16 @@ def analyze_blame(name, args): and sorting by record data ('delta') """ (infh, outfh) = configure_io(args) - blame_format = ' %ds (%n)' - r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE) - for idx, record in enumerate(show.show_events(_get_events(infh), - blame_format)): + blame_format = " %ds (%n)" + r = re.compile(r"(^\s+\d+\.\d+)", re.MULTILINE) + for idx, record in enumerate( + show.show_events(_get_events(infh), blame_format) + ): srecs = sorted(filter(r.match, record), reverse=True) - outfh.write('-- Boot Record %02d --\n' % (idx + 1)) - outfh.write('\n'.join(srecs) + '\n') - outfh.write('\n') - outfh.write('%d boot records analyzed\n' % (idx + 1)) + outfh.write("-- Boot Record %02d --\n" % (idx + 1)) + outfh.write("\n".join(srecs) + "\n") + outfh.write("\n") + outfh.write("%d boot records analyzed\n" % (idx + 1)) def analyze_show(name, args): @@ -184,21 +241,25 @@ def analyze_show(name, args): Finished stage: (modules-final) 0.NNN seconds """ (infh, outfh) = configure_io(args) - for idx, record in enumerate(show.show_events(_get_events(infh), - args.print_format)): - outfh.write('-- Boot Record %02d --\n' % (idx + 1)) - outfh.write('The total time elapsed since completing an event is' - ' printed after the "@" character.\n') - outfh.write('The time the event takes is printed after the "+" ' - 'character.\n\n') - outfh.write('\n'.join(record) + '\n') - outfh.write('%d boot records analyzed\n' % (idx + 1)) + for idx, record in enumerate( + show.show_events(_get_events(infh), args.print_format) + ): + outfh.write("-- Boot Record %02d --\n" % (idx + 1)) + outfh.write( + "The total time elapsed since completing an event is" + ' printed after the "@" character.\n' + ) + outfh.write( + 'The time the event takes is printed after the "+" character.\n\n' + ) + outfh.write("\n".join(record) + "\n") + outfh.write("%d boot records analyzed\n" % (idx + 1)) def analyze_dump(name, args): """Dump cloud-init events in json format""" (infh, outfh) = configure_io(args) - outfh.write(json_dumps(_get_events(infh)) + '\n') + outfh.write(json_dumps(_get_events(infh)) + "\n") def _get_events(infile): @@ -211,28 +272,28 @@ def _get_events(infile): def configure_io(args): """Common parsing and setup of input/output files""" - if args.infile == '-': + if args.infile == "-": infh = sys.stdin else: try: - infh = open(args.infile, 'r') + infh = open(args.infile, "r") except OSError: - sys.stderr.write('Cannot open file %s\n' % args.infile) + sys.stderr.write("Cannot open 
file %s\n" % args.infile) sys.exit(1) - if args.outfile == '-': + if args.outfile == "-": outfh = sys.stdout else: try: - outfh = open(args.outfile, 'w') + outfh = open(args.outfile, "w") except OSError: - sys.stderr.write('Cannot open file %s\n' % args.outfile) + sys.stderr.write("Cannot open file %s\n" % args.outfile) sys.exit(1) return (infh, outfh) -if __name__ == '__main__': +if __name__ == "__main__": parser = get_parser() args = parser.parse_args() (name, action_functor) = args.action diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py index 62ad51fe..8e6e3c6a 100644 --- a/cloudinit/analyze/dump.py +++ b/cloudinit/analyze/dump.py @@ -1,21 +1,20 @@ # This file is part of cloud-init. See LICENSE file for license information. import calendar -from datetime import datetime import sys +from datetime import datetime -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util stage_to_description = { - 'finished': 'finished running cloud-init', - 'init-local': 'starting search for local datasources', - 'init-network': 'searching for network datasources', - 'init': 'searching for network datasources', - 'modules-config': 'running config modules', - 'modules-final': 'finalizing modules', - 'modules': 'running modules for', - 'single': 'running single module ', + "finished": "finished running cloud-init", + "init-local": "starting search for local datasources", + "init-network": "searching for network datasources", + "init": "searching for network datasources", + "modules-config": "running config modules", + "modules-final": "finalizing modules", + "modules": "running modules for", + "single": "running single module ", } # logger's asctime format @@ -34,11 +33,11 @@ def parse_timestamp(timestampstr): if timestampstr.split()[0] in months: # Aug 29 22:55:26 FMT = DEFAULT_FMT - if '.' in timestampstr: + if "." in timestampstr: FMT = CLOUD_INIT_JOURNALCTL_FMT - dt = datetime.strptime(timestampstr + " " + - str(datetime.now().year), - FMT) + dt = datetime.strptime( + timestampstr + " " + str(datetime.now().year), FMT + ) timestamp = dt.strftime("%s.%f") elif "," in timestampstr: # 2016-09-12 14:39:20,839 @@ -52,7 +51,7 @@ def parse_timestamp(timestampstr): def parse_timestamp_from_date(timestampstr): - out, _ = subp.subp(['date', '+%s.%3N', '-d', timestampstr]) + out, _ = subp.subp(["date", "+%s.%3N", "-d", timestampstr]) timestamp = out.strip() return float(timestamp) @@ -79,8 +78,8 @@ def parse_ci_logline(line): # Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start: \ # init-local/check-cache: attempting to read from cache [check] - amazon_linux_2_sep = ' cloud-init[' - separators = [' - ', ' [CLOUDINIT] ', amazon_linux_2_sep] + amazon_linux_2_sep = " cloud-init[" + separators = [" - ", " [CLOUDINIT] ", amazon_linux_2_sep] found = False for sep in separators: if sep in line: @@ -99,7 +98,7 @@ def parse_ci_logline(line): if "," in timehost: timestampstr, extra = timehost.split(",") timestampstr += ",%s" % extra.split()[0] - if ' ' in extra: + if " " in extra: hostname = extra.split()[-1] else: hostname = timehost.split()[-1] @@ -111,11 +110,11 @@ def parse_ci_logline(line): eventstr = eventstr.split(maxsplit=1)[1] else: timestampstr = timehost.split(hostname)[0].strip() - if 'Cloud-init v.' in eventstr: - event_type = 'start' - if 'running' in eventstr: - stage_and_timestamp = eventstr.split('running')[1].lstrip() - event_name, _ = stage_and_timestamp.split(' at ') + if "Cloud-init v." 
in eventstr: + event_type = "start" + if "running" in eventstr: + stage_and_timestamp = eventstr.split("running")[1].lstrip() + event_name, _ = stage_and_timestamp.split(" at ") event_name = event_name.replace("'", "").replace(":", "-") if event_name == "init": event_name = "init-network" @@ -128,17 +127,17 @@ def parse_ci_logline(line): event_description = eventstr.split(event_name)[1].strip() event = { - 'name': event_name.rstrip(":"), - 'description': event_description, - 'timestamp': parse_timestamp(timestampstr), - 'origin': 'cloudinit', - 'event_type': event_type.rstrip(":"), + "name": event_name.rstrip(":"), + "description": event_description, + "timestamp": parse_timestamp(timestampstr), + "origin": "cloudinit", + "event_type": event_type.rstrip(":"), } - if event['event_type'] == "finish": + if event["event_type"] == "finish": result = event_description.split(":")[0] - desc = event_description.split(result)[1].lstrip(':').strip() - event['result'] = result - event['description'] = desc.strip() + desc = event_description.split(result)[1].lstrip(":").strip() + event["result"] = result + event["description"] = desc.strip() return event @@ -146,10 +145,10 @@ def parse_ci_logline(line): def dump_events(cisource=None, rawdata=None): events = [] event = None - CI_EVENT_MATCHES = ['start:', 'finish:', 'Cloud-init v.'] + CI_EVENT_MATCHES = ["start:", "finish:", "Cloud-init v."] if not any([cisource, rawdata]): - raise ValueError('Either cisource or rawdata parameters are required') + raise ValueError("Either cisource or rawdata parameters are required") if rawdata: data = rawdata.splitlines() @@ -162,7 +161,7 @@ def dump_events(cisource=None, rawdata=None): try: event = parse_ci_logline(line) except ValueError: - sys.stderr.write('Skipping invalid entry\n') + sys.stderr.write("Skipping invalid entry\n") if event: events.append(event) diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py index 01a4d3e5..5fd9cdfd 100644 --- a/cloudinit/analyze/show.py +++ b/cloudinit/analyze/show.py @@ -8,11 +8,10 @@ import base64 import datetime import json import os -import time import sys +import time -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util from cloudinit.distros import uses_systemd # Example events: @@ -35,24 +34,25 @@ from cloudinit.distros import uses_systemd # } format_key = { - '%d': 'delta', - '%D': 'description', - '%E': 'elapsed', - '%e': 'event_type', - '%I': 'indent', - '%l': 'level', - '%n': 'name', - '%o': 'origin', - '%r': 'result', - '%t': 'timestamp', - '%T': 'total_time', + "%d": "delta", + "%D": "description", + "%E": "elapsed", + "%e": "event_type", + "%I": "indent", + "%l": "level", + "%n": "name", + "%o": "origin", + "%r": "result", + "%t": "timestamp", + "%T": "total_time", } -formatting_help = " ".join(["{0}: {1}".format(k.replace('%', '%%'), v) - for k, v in format_key.items()]) -SUCCESS_CODE = 'successful' -FAIL_CODE = 'failure' -CONTAINER_CODE = 'container' +formatting_help = " ".join( + ["{0}: {1}".format(k.replace("%", "%%"), v) for k, v in format_key.items()] +) +SUCCESS_CODE = "successful" +FAIL_CODE = "failure" +CONTAINER_CODE = "container" TIMESTAMP_UNKNOWN = (FAIL_CODE, -1, -1, -1) @@ -60,7 +60,7 @@ def format_record(msg, event): for i, j in format_key.items(): if i in msg: # ensure consistent formatting of time values - if j in ['delta', 'elapsed', 'timestamp']: + if j in ["delta", "elapsed", "timestamp"]: msg = msg.replace(i, "{%s:08.5f}" % j) else: msg = msg.replace(i, "{%s}" % j) @@ -68,13 +68,13 @@ def 
format_record(msg, event): def dump_event_files(event): - content = dict((k, v) for k, v in event.items() if k not in ['content']) - files = content['files'] + content = dict((k, v) for k, v in event.items() if k not in ["content"]) + files = content["files"] saved = [] for f in files: - fname = f['path'] + fname = f["path"] fn_local = os.path.basename(fname) - fcontent = base64.b64decode(f['content']).decode('ascii') + fcontent = base64.b64decode(f["content"]).decode("ascii") util.write_file(fn_local, fcontent) saved.append(fn_local) @@ -83,13 +83,13 @@ def dump_event_files(event): def event_name(event): if event: - return event.get('name') + return event.get("name") return None def event_type(event): if event: - return event.get('event_type') + return event.get("event_type") return None @@ -100,7 +100,7 @@ def event_parent(event): def event_timestamp(event): - return float(event.get('timestamp')) + return float(event.get("timestamp")) def event_datetime(event): @@ -117,41 +117,44 @@ def event_duration(start, finish): def event_record(start_time, start, finish): record = finish.copy() - record.update({ - 'delta': event_duration(start, finish), - 'elapsed': delta_seconds(start_time, event_datetime(start)), - 'indent': '|' + ' ' * (event_name(start).count('/') - 1) + '`->', - }) + record.update( + { + "delta": event_duration(start, finish), + "elapsed": delta_seconds(start_time, event_datetime(start)), + "indent": "|" + " " * (event_name(start).count("/") - 1) + "`->", + } + ) return record def total_time_record(total_time): - return 'Total Time: %3.5f seconds\n' % total_time + return "Total Time: %3.5f seconds\n" % total_time class SystemctlReader(object): - ''' + """ Class for dealing with all systemctl subp calls in a consistent manner. - ''' + """ + def __init__(self, property, parameter=None): self.epoch = None - self.args = ['/bin/systemctl', 'show'] + self.args = ["/bin/systemctl", "show"] if parameter: self.args.append(parameter) - self.args.extend(['-p', property]) + self.args.extend(["-p", property]) # Don't want the init of our object to break. Instead of throwing # an exception, set an error code that gets checked when data is # requested from the object self.failure = self.subp() def subp(self): - ''' + """ Make a subp call based on set args and handle errors by setting failure code :return: whether the subp call failed or not - ''' + """ try: value, err = subp.subp(self.args, capture=True) if err: @@ -162,41 +165,41 @@ class SystemctlReader(object): return systemctl_fail def parse_epoch_as_float(self): - ''' + """ If subp call succeeded, return the timestamp from subp as a float. :return: timestamp as a float - ''' + """ # subp has 2 ways to fail: it either fails and throws an exception, # or returns an error code. Raise an exception here in order to make # sure both scenarios throw exceptions if self.failure: - raise RuntimeError('Subprocess call to systemctl has failed, ' - 'returning error code ({})' - .format(self.failure)) + raise RuntimeError( + "Subprocess call to systemctl has failed, " + "returning error code ({})".format(self.failure) + ) # Output from systemctl show has the format Property=Value. # For example, UserspaceMonotonic=1929304 - timestamp = self.epoch.split('=')[1] + timestamp = self.epoch.split("=")[1] # Timestamps reported by systemctl are in microseconds, converting return float(timestamp) / 1000000 def dist_check_timestamp(): - ''' + """ Determine which init system a particular linux distro is using. 
Each init system (systemd, upstart, etc) has a different way of providing timestamps. :return: timestamps of kernelboot, kernelendboot, and cloud-initstart or TIMESTAMP_UNKNOWN if the timestamps cannot be retrieved. - ''' + """ if uses_systemd(): return gather_timestamps_using_systemd() # Use dmesg to get timestamps if the distro does not have systemd - if util.is_FreeBSD() or 'gentoo' in \ - util.system_info()['system'].lower(): + if util.is_FreeBSD() or "gentoo" in util.system_info()["system"].lower(): return gather_timestamps_using_dmesg() # this distro doesn't fit anything that is supported by cloud-init. just @@ -205,20 +208,20 @@ def dist_check_timestamp(): def gather_timestamps_using_dmesg(): - ''' + """ Gather timestamps that corresponds to kernel begin initialization, kernel finish initialization using dmesg as opposed to systemctl :return: the two timestamps plus a dummy timestamp to keep consistency with gather_timestamps_using_systemd - ''' + """ try: - data, _ = subp.subp(['dmesg'], capture=True) + data, _ = subp.subp(["dmesg"], capture=True) split_entries = data[0].splitlines() for i in split_entries: - if i.decode('UTF-8').find('user') != -1: - splitup = i.decode('UTF-8').split() - stripped = splitup[1].strip(']') + if i.decode("UTF-8").find("user") != -1: + splitup = i.decode("UTF-8").split() + stripped = splitup[1].strip("]") # kernel timestamp from dmesg is equal to 0, # with the userspace timestamp relative to it. @@ -228,8 +231,7 @@ def gather_timestamps_using_dmesg(): # systemd wont start cloud-init in this case, # so we cannot get that timestamp - return SUCCESS_CODE, kernel_start, kernel_end, \ - kernel_end + return SUCCESS_CODE, kernel_start, kernel_end, kernel_end except Exception: pass @@ -237,18 +239,20 @@ def gather_timestamps_using_dmesg(): def gather_timestamps_using_systemd(): - ''' + """ Gather timestamps that corresponds to kernel begin initialization, kernel finish initialization. and cloud-init systemd unit activation :return: the three timestamps - ''' + """ kernel_start = float(time.time()) - float(util.uptime()) try: - delta_k_end = SystemctlReader('UserspaceTimestampMonotonic')\ - .parse_epoch_as_float() - delta_ci_s = SystemctlReader('InactiveExitTimestampMonotonic', - 'cloud-init-local').parse_epoch_as_float() + delta_k_end = SystemctlReader( + "UserspaceTimestampMonotonic" + ).parse_epoch_as_float() + delta_ci_s = SystemctlReader( + "InactiveExitTimestampMonotonic", "cloud-init-local" + ).parse_epoch_as_float() base_time = kernel_start status = SUCCESS_CODE # lxc based containers do not set their monotonic zero point to be when @@ -262,12 +266,13 @@ def gather_timestamps_using_systemd(): # in containers when https://github.com/lxc/lxcfs/issues/292 # is fixed, util.uptime() should be used instead of stat on try: - file_stat = os.stat('/proc/1/cmdline') + file_stat = os.stat("/proc/1/cmdline") kernel_start = file_stat.st_atime except OSError as err: - raise RuntimeError('Could not determine container boot ' - 'time from /proc/1/cmdline. ({})' - .format(err)) from err + raise RuntimeError( + "Could not determine container boot " + "time from /proc/1/cmdline. 
({})".format(err) + ) from err status = CONTAINER_CODE else: status = FAIL_CODE @@ -283,10 +288,14 @@ def gather_timestamps_using_systemd(): return status, kernel_start, kernel_end, cloudinit_sysd -def generate_records(events, blame_sort=False, - print_format="(%n) %d seconds in %I%D", - dump_files=False, log_datafiles=False): - ''' +def generate_records( + events, + blame_sort=False, + print_format="(%n) %d seconds in %I%D", + dump_files=False, + log_datafiles=False, +): + """ Take in raw events and create parent-child dependencies between events in order to order events in chronological order. @@ -298,9 +307,9 @@ def generate_records(events, blame_sort=False, :param log_datafiles: whether or not to log events generated :return: boot records ordered chronologically - ''' + """ - sorted_events = sorted(events, key=lambda x: x['timestamp']) + sorted_events = sorted(events, key=lambda x: x["timestamp"]) records = [] start_time = None total_time = 0.0 @@ -316,8 +325,8 @@ def generate_records(events, blame_sort=False, except IndexError: next_evt = None - if event_type(event) == 'start': - if event.get('name') in stages_seen: + if event_type(event) == "start": + if event.get("name") in stages_seen: records.append(total_time_record(total_time)) boot_records.append(records) records = [] @@ -331,25 +340,28 @@ def generate_records(events, blame_sort=False, # see if we have a pair if event_name(event) == event_name(next_evt): - if event_type(next_evt) == 'finish': - records.append(format_record(print_format, - event_record(start_time, - event, - next_evt))) + if event_type(next_evt) == "finish": + records.append( + format_record( + print_format, + event_record(start_time, event, next_evt), + ) + ) else: # This is a parent event - records.append("Starting stage: %s" % event.get('name')) + records.append("Starting stage: %s" % event.get("name")) unprocessed.append(event) - stages_seen.append(event.get('name')) + stages_seen.append(event.get("name")) continue else: prev_evt = unprocessed.pop() if event_name(event) == event_name(prev_evt): record = event_record(start_time, prev_evt, event) - records.append(format_record("Finished stage: " - "(%n) %d seconds", - record) + "\n") - total_time += record.get('delta') + records.append( + format_record("Finished stage: (%n) %d seconds", record) + + "\n" + ) + total_time += record.get("delta") else: # not a match, put it back unprocessed.append(prev_evt) @@ -360,7 +372,7 @@ def generate_records(events, blame_sort=False, def show_events(events, print_format): - ''' + """ A passthrough method that makes it easier to call generate_records() :param events: JSONs from dump that represents events taken from logs @@ -368,18 +380,18 @@ def show_events(events, print_format): and time taken by the event in one line :return: boot records ordered chronologically - ''' + """ return generate_records(events, print_format=print_format) def load_events_infile(infile): - ''' + """ Takes in a log file, read it, and convert to json. :param infile: The Log file to be read :return: json version of logfile, raw file - ''' + """ data = infile.read() try: return json.loads(data), data diff --git a/cloudinit/apport.py b/cloudinit/apport.py index aadc638f..92068aa9 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -2,127 +2,143 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-'''Cloud-init apport interface''' +"""Cloud-init apport interface""" try: from apport.hookutils import ( - attach_file, attach_root_command_outputs, root_command_output) + attach_file, + attach_root_command_outputs, + root_command_output, + ) + has_apport = True except ImportError: has_apport = False KNOWN_CLOUD_NAMES = [ - 'AliYun', - 'AltCloud', - 'Amazon - Ec2', - 'Azure', - 'Bigstep', - 'Brightbox', - 'CloudSigma', - 'CloudStack', - 'DigitalOcean', - 'E24Cloud', - 'GCE - Google Compute Engine', - 'Exoscale', - 'Hetzner Cloud', - 'IBM - (aka SoftLayer or BlueMix)', - 'LXD', - 'MAAS', - 'NoCloud', - 'OpenNebula', - 'OpenStack', - 'Oracle', - 'OVF', - 'RbxCloud - (HyperOne, Rootbox, Rubikon)', - 'OpenTelekomCloud', - 'SAP Converged Cloud', - 'Scaleway', - 'SmartOS', - 'UpCloud', - 'VMware', - 'Vultr', - 'ZStack', - 'Other' + "AliYun", + "AltCloud", + "Amazon - Ec2", + "Azure", + "Bigstep", + "Brightbox", + "CloudSigma", + "CloudStack", + "DigitalOcean", + "E24Cloud", + "GCE - Google Compute Engine", + "Exoscale", + "Hetzner Cloud", + "IBM - (aka SoftLayer or BlueMix)", + "LXD", + "MAAS", + "NoCloud", + "OpenNebula", + "OpenStack", + "Oracle", + "OVF", + "RbxCloud - (HyperOne, Rootbox, Rubikon)", + "OpenTelekomCloud", + "SAP Converged Cloud", + "Scaleway", + "SmartOS", + "UpCloud", + "VMware", + "Vultr", + "ZStack", + "Other", ] # Potentially clear text collected logs -CLOUDINIT_LOG = '/var/log/cloud-init.log' -CLOUDINIT_OUTPUT_LOG = '/var/log/cloud-init-output.log' -USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional +CLOUDINIT_LOG = "/var/log/cloud-init.log" +CLOUDINIT_OUTPUT_LOG = "/var/log/cloud-init-output.log" +USER_DATA_FILE = "/var/lib/cloud/instance/user-data.txt" # Optional def attach_cloud_init_logs(report, ui=None): - '''Attach cloud-init logs and tarfile from 'cloud-init collect-logs'.''' - attach_root_command_outputs(report, { - 'cloud-init-log-warnings': - 'egrep -i "warn|error" /var/log/cloud-init.log', - 'cloud-init-output.log.txt': 'cat /var/log/cloud-init-output.log'}) + """Attach cloud-init logs and tarfile from 'cloud-init collect-logs'.""" + attach_root_command_outputs( + report, + { + "cloud-init-log-warnings": ( + 'egrep -i "warn|error" /var/log/cloud-init.log' + ), + "cloud-init-output.log.txt": "cat /var/log/cloud-init-output.log", + }, + ) root_command_output( - ['cloud-init', 'collect-logs', '-t', '/tmp/cloud-init-logs.tgz']) - attach_file(report, '/tmp/cloud-init-logs.tgz', 'logs.tgz') + ["cloud-init", "collect-logs", "-t", "/tmp/cloud-init-logs.tgz"] + ) + attach_file(report, "/tmp/cloud-init-logs.tgz", "logs.tgz") def attach_hwinfo(report, ui=None): - '''Optionally attach hardware info from lshw.''' + """Optionally attach hardware info from lshw.""" prompt = ( - 'Your device details (lshw) may be useful to developers when' - ' addressing this bug, but gathering it requires admin privileges.' - ' Would you like to include this info?') + "Your device details (lshw) may be useful to developers when" + " addressing this bug, but gathering it requires admin privileges." + " Would you like to include this info?" + ) if ui and ui.yesno(prompt): - attach_root_command_outputs(report, {'lshw.txt': 'lshw'}) + attach_root_command_outputs(report, {"lshw.txt": "lshw"}) def attach_cloud_info(report, ui=None): - '''Prompt for cloud details if available.''' + """Prompt for cloud details if available.""" if ui: - prompt = 'Is this machine running in a cloud environment?' + prompt = "Is this machine running in a cloud environment?" 
response = ui.yesno(prompt) if response is None: raise StopIteration # User cancelled if response: - prompt = ('Please select the cloud vendor or environment in which' - ' this instance is running') + prompt = ( + "Please select the cloud vendor or environment in which" + " this instance is running" + ) response = ui.choice(prompt, KNOWN_CLOUD_NAMES) if response: - report['CloudName'] = KNOWN_CLOUD_NAMES[response[0]] + report["CloudName"] = KNOWN_CLOUD_NAMES[response[0]] else: - report['CloudName'] = 'None' + report["CloudName"] = "None" def attach_user_data(report, ui=None): - '''Optionally provide user-data if desired.''' + """Optionally provide user-data if desired.""" if ui: prompt = ( - 'Your user-data or cloud-config file can optionally be provided' - ' from {0} and could be useful to developers when addressing this' - ' bug. Do you wish to attach user-data to this bug?'.format( - USER_DATA_FILE)) + "Your user-data or cloud-config file can optionally be provided" + " from {0} and could be useful to developers when addressing this" + " bug. Do you wish to attach user-data to this bug?".format( + USER_DATA_FILE + ) + ) response = ui.yesno(prompt) if response is None: raise StopIteration # User cancelled if response: - attach_file(report, USER_DATA_FILE, 'user_data.txt') + attach_file(report, USER_DATA_FILE, "user_data.txt") def add_bug_tags(report): - '''Add any appropriate tags to the bug.''' - if 'JournalErrors' in report.keys(): - errors = report['JournalErrors'] - if 'Breaking ordering cycle' in errors: - report['Tags'] = 'systemd-ordering' + """Add any appropriate tags to the bug.""" + if "JournalErrors" in report.keys(): + errors = report["JournalErrors"] + if "Breaking ordering cycle" in errors: + report["Tags"] = "systemd-ordering" def add_info(report, ui): - '''This is an entry point to run cloud-init's apport functionality. + """This is an entry point to run cloud-init's apport functionality. Distros which want apport support will have a cloud-init package-hook at /usr/share/apport/package-hooks/cloud-init.py which defines an add_info function and returns the result of cloudinit.apport.add_info(report, ui). - ''' + """ if not has_apport: raise RuntimeError( - 'No apport imports discovered. Apport functionality disabled') + "No apport imports discovered. 
Apport functionality disabled" + ) attach_cloud_init_logs(report, ui) attach_hwinfo(report, ui) attach_cloud_info(report, ui) @@ -130,4 +146,5 @@ def add_info(report, ui): add_bug_tags(report) return True + # vi: ts=4 expandtab diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py index 485ff92f..ae117fad 100644 --- a/cloudinit/atomic_helper.py +++ b/cloudinit/atomic_helper.py @@ -10,8 +10,9 @@ _DEF_PERMS = 0o644 LOG = logging.getLogger(__name__) -def write_file(filename, content, mode=_DEF_PERMS, - omode="wb", preserve_mode=False): +def write_file( + filename, content, mode=_DEF_PERMS, omode="wb", preserve_mode=False +): # open filename in mode 'omode', write content, set permissions to 'mode' if preserve_mode: @@ -23,12 +24,18 @@ def write_file(filename, content, mode=_DEF_PERMS, tf = None try: - tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename), - delete=False, mode=omode) + tf = tempfile.NamedTemporaryFile( + dir=os.path.dirname(filename), delete=False, mode=omode + ) LOG.debug( "Atomically writing to file %s (via temporary file %s) - %s: [%o]" " %d bytes/chars", - filename, tf.name, omode, mode, len(content)) + filename, + tf.name, + omode, + mode, + len(content), + ) tf.write(content) tf.close() os.chmod(tf.name, mode) @@ -42,7 +49,11 @@ def write_file(filename, content, mode=_DEF_PERMS, def write_json(filename, data, mode=_DEF_PERMS): # dump json representation of data to file filename. return write_file( - filename, json.dumps(data, indent=1, sort_keys=True) + "\n", - omode="w", mode=mode) + filename, + json.dumps(data, indent=1, sort_keys=True) + "\n", + omode="w", + mode=mode, + ) + # vi: ts=4 expandtab diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py index 7ae98e1c..91e48103 100644 --- a/cloudinit/cloud.py +++ b/cloudinit/cloud.py @@ -35,7 +35,8 @@ class Cloud(object): reporter = events.ReportEventStack( name="unnamed-cloud-reporter", description="unnamed-cloud-reporter", - reporting_enabled=False) + reporting_enabled=False, + ) self.reporter = reporter # If a 'user' manipulates logging or logging services @@ -56,8 +57,11 @@ class Cloud(object): def get_template_filename(self, name): fn = self.paths.template_tpl % (name) if not os.path.isfile(fn): - LOG.warning("No template found in %s for template named %s", - os.path.dirname(fn), name) + LOG.warning( + "No template found in %s for template named %s", + os.path.dirname(fn), + name, + ) return None return fn @@ -80,7 +84,8 @@ class Cloud(object): def get_hostname(self, fqdn=False, metadata_only=False): return self.datasource.get_hostname( - fqdn=fqdn, metadata_only=metadata_only) + fqdn=fqdn, metadata_only=metadata_only + ) def device_name_to_device(self, name): return self.datasource.device_name_to_device(name) @@ -94,4 +99,5 @@ class Cloud(object): def get_ipath(self, name=None): return self.paths.get_ipath(name) + # vi: ts=4 expandtab diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py index 3502dd56..0e1db118 100644 --- a/cloudinit/cmd/clean.py +++ b/cloudinit/cmd/clean.py @@ -10,9 +10,13 @@ import os import sys from cloudinit.stages import Init -from cloudinit.subp import (ProcessExecutionError, subp) +from cloudinit.subp import ProcessExecutionError, subp from cloudinit.util import ( - del_dir, del_file, get_config_logfiles, is_link, error + del_dir, + del_file, + error, + get_config_logfiles, + is_link, ) @@ -27,18 +31,35 @@ def get_parser(parser=None): """ if not parser: parser = argparse.ArgumentParser( - prog='clean', - description=('Remove logs and artifacts so cloud-init 
re-runs on ' - 'a clean system')) + prog="clean", + description=( + "Remove logs and artifacts so cloud-init re-runs on " + "a clean system" + ), + ) parser.add_argument( - '-l', '--logs', action='store_true', default=False, dest='remove_logs', - help='Remove cloud-init logs.') + "-l", + "--logs", + action="store_true", + default=False, + dest="remove_logs", + help="Remove cloud-init logs.", + ) parser.add_argument( - '-r', '--reboot', action='store_true', default=False, - help='Reboot system after logs are cleaned so cloud-init re-runs.') + "-r", + "--reboot", + action="store_true", + default=False, + help="Reboot system after logs are cleaned so cloud-init re-runs.", + ) parser.add_argument( - '-s', '--seed', action='store_true', default=False, dest='remove_seed', - help='Remove cloud-init seed directory /var/lib/cloud/seed.') + "-s", + "--seed", + action="store_true", + default=False, + dest="remove_seed", + help="Remove cloud-init seed directory /var/lib/cloud/seed.", + ) return parser @@ -59,8 +80,8 @@ def remove_artifacts(remove_logs, remove_seed=False): if not os.path.isdir(init.paths.cloud_dir): return 0 # Artifacts dir already cleaned - seed_path = os.path.join(init.paths.cloud_dir, 'seed') - for path in glob.glob('%s/*' % init.paths.cloud_dir): + seed_path = os.path.join(init.paths.cloud_dir, "seed") + for path in glob.glob("%s/*" % init.paths.cloud_dir): if path == seed_path and not remove_seed: continue try: @@ -69,7 +90,7 @@ def remove_artifacts(remove_logs, remove_seed=False): else: del_file(path) except OSError as e: - error('Could not remove {0}: {1}'.format(path, str(e))) + error("Could not remove {0}: {1}".format(path, str(e))) return 1 return 0 @@ -78,13 +99,15 @@ def handle_clean_args(name, args): """Handle calls to 'cloud-init clean' as a subcommand.""" exit_code = remove_artifacts(args.remove_logs, args.remove_seed) if exit_code == 0 and args.reboot: - cmd = ['shutdown', '-r', 'now'] + cmd = ["shutdown", "-r", "now"] try: subp(cmd, capture=False) except ProcessExecutionError as e: error( 'Could not reboot this system using "{0}": {1}'.format( - cmd, str(e))) + cmd, str(e) + ) + ) exit_code = 1 return exit_code @@ -92,10 +115,10 @@ def handle_clean_args(name, args): def main(): """Tool to collect and tar all cloud-init related logs.""" parser = get_parser() - sys.exit(handle_clean_args('clean', parser.parse_args())) + sys.exit(handle_clean_args("clean", parser.parse_args())) -if __name__ == '__main__': +if __name__ == "__main__": main() # vi: ts=4 expandtab diff --git a/cloudinit/cmd/cloud_id.py b/cloudinit/cmd/cloud_id.py index 0cdc9675..b92b03a8 100755 --- a/cloudinit/cmd/cloud_id.py +++ b/cloudinit/cmd/cloud_id.py @@ -6,13 +6,16 @@ import argparse import json import sys -from cloudinit.util import error from cloudinit.sources import ( - INSTANCE_JSON_FILE, METADATA_UNKNOWN, canonical_cloud_id) + INSTANCE_JSON_FILE, + METADATA_UNKNOWN, + canonical_cloud_id, +) +from cloudinit.util import error -DEFAULT_INSTANCE_JSON = '/run/cloud-init/%s' % INSTANCE_JSON_FILE +DEFAULT_INSTANCE_JSON = "/run/cloud-init/%s" % INSTANCE_JSON_FILE -NAME = 'cloud-id' +NAME = "cloud-id" def get_parser(parser=None): @@ -27,17 +30,30 @@ def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser( prog=NAME, - description='Report the canonical cloud-id for this instance') + description="Report the canonical cloud-id for this instance", + ) parser.add_argument( - '-j', '--json', action='store_true', default=False, - help='Report all standardized cloud-id information as json.') + 
"-j", + "--json", + action="store_true", + default=False, + help="Report all standardized cloud-id information as json.", + ) parser.add_argument( - '-l', '--long', action='store_true', default=False, - help='Report extended cloud-id information as tab-delimited string.') + "-l", + "--long", + action="store_true", + default=False, + help="Report extended cloud-id information as tab-delimited string.", + ) parser.add_argument( - '-i', '--instance-data', type=str, default=DEFAULT_INSTANCE_JSON, - help=('Path to instance-data.json file. Default is %s' % - DEFAULT_INSTANCE_JSON)) + "-i", + "--instance-data", + type=str, + default=DEFAULT_INSTANCE_JSON, + help="Path to instance-data.json file. Default is %s" + % DEFAULT_INSTANCE_JSON, + ) return parser @@ -53,24 +69,28 @@ def handle_args(name, args): except IOError: return error( "File not found '%s'. Provide a path to instance data json file" - ' using --instance-data' % args.instance_data) + " using --instance-data" % args.instance_data + ) except ValueError as e: return error( - "File '%s' is not valid json. %s" % (args.instance_data, e)) - v1 = instance_data.get('v1', {}) + "File '%s' is not valid json. %s" % (args.instance_data, e) + ) + v1 = instance_data.get("v1", {}) cloud_id = canonical_cloud_id( - v1.get('cloud_name', METADATA_UNKNOWN), - v1.get('region', METADATA_UNKNOWN), - v1.get('platform', METADATA_UNKNOWN)) + v1.get("cloud_name", METADATA_UNKNOWN), + v1.get("region", METADATA_UNKNOWN), + v1.get("platform", METADATA_UNKNOWN), + ) if args.json: - v1['cloud_id'] = cloud_id - response = json.dumps( # Pretty, sorted json - v1, indent=1, sort_keys=True, separators=(',', ': ')) + v1["cloud_id"] = cloud_id + response = json.dumps( # Pretty, sorted json + v1, indent=1, sort_keys=True, separators=(",", ": ") + ) elif args.long: - response = '%s\t%s' % (cloud_id, v1.get('region', METADATA_UNKNOWN)) + response = "%s\t%s" % (cloud_id, v1.get("region", METADATA_UNKNOWN)) else: response = cloud_id - sys.stdout.write('%s\n' % response) + sys.stdout.write("%s\n" % response) return 0 @@ -80,7 +100,7 @@ def main(): sys.exit(handle_args(NAME, parser.parse_args())) -if __name__ == '__main__': +if __name__ == "__main__": main() # vi: ts=4 expandtab diff --git a/cloudinit/cmd/devel/__init__.py b/cloudinit/cmd/devel/__init__.py index 3ae28b69..ead5f7a9 100644 --- a/cloudinit/cmd/devel/__init__.py +++ b/cloudinit/cmd/devel/__init__.py @@ -11,7 +11,7 @@ from cloudinit.stages import Init def addLogHandlerCLI(logger, log_level): """Add a commandline logging handler to emit messages to stderr.""" - formatter = logging.Formatter('%(levelname)s: %(message)s') + formatter = logging.Formatter("%(levelname)s: %(message)s") log.setupBasicLogging(log_level, formatter=formatter) return logger @@ -22,4 +22,5 @@ def read_cfg_paths(): init.read_cfg() return init.paths + # vi: ts=4 expandtab diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py index f6f36a00..a9be0379 100644 --- a/cloudinit/cmd/devel/hotplug_hook.py +++ b/cloudinit/cmd/devel/hotplug_hook.py @@ -6,20 +6,17 @@ import os import sys import time -from cloudinit import log -from cloudinit import reporting -from cloudinit import stages +from cloudinit import log, reporting, stages from cloudinit.event import EventScope, EventType from cloudinit.net import activators, read_sys_net_safe from cloudinit.net.network_state import parse_net_config_data from cloudinit.reporting import events -from cloudinit.stages import Init from cloudinit.sources import DataSource # noqa: F401 from 
cloudinit.sources import DataSourceNotFoundException - +from cloudinit.stages import Init LOG = log.getLogger(__name__) -NAME = 'hotplug-hook' +NAME = "hotplug-hook" def get_parser(parser=None): @@ -35,33 +32,38 @@ def get_parser(parser=None): parser.description = __doc__ parser.add_argument( - "-s", "--subsystem", required=True, + "-s", + "--subsystem", + required=True, help="subsystem to act on", - choices=['net'] + choices=["net"], ) subparsers = parser.add_subparsers( - title='Hotplug Action', - dest='hotplug_action' + title="Hotplug Action", dest="hotplug_action" ) subparsers.required = True subparsers.add_parser( - 'query', - help='query if hotplug is enabled for given subsystem' + "query", help="query if hotplug is enabled for given subsystem" ) parser_handle = subparsers.add_parser( - 'handle', help='handle the hotplug event') + "handle", help="handle the hotplug event" + ) parser_handle.add_argument( - "-d", "--devpath", required=True, + "-d", + "--devpath", + required=True, metavar="PATH", - help="sysfs path to hotplugged device" + help="sysfs path to hotplugged device", ) parser_handle.add_argument( - "-u", "--udevaction", required=True, + "-u", + "--udevaction", + required=True, help="action to take", - choices=['add', 'remove'] + choices=["add", "remove"], ) return parser @@ -90,27 +92,29 @@ class UeventHandler(abc.ABC): def detect_hotplugged_device(self): detect_presence = None - if self.action == 'add': + if self.action == "add": detect_presence = True - elif self.action == 'remove': + elif self.action == "remove": detect_presence = False else: - raise ValueError('Unknown action: %s' % self.action) + raise ValueError("Unknown action: %s" % self.action) if detect_presence != self.device_detected(): raise RuntimeError( - 'Failed to detect %s in updated metadata' % self.id) + "Failed to detect %s in updated metadata" % self.id + ) def success(self): return self.success_fn() def update_metadata(self): - result = self.datasource.update_metadata_if_supported([ - EventType.HOTPLUG]) + result = self.datasource.update_metadata_if_supported( + [EventType.HOTPLUG] + ) if not result: raise RuntimeError( - 'Datasource %s not updated for ' - 'event %s' % (self.datasource, EventType.HOTPLUG) + "Datasource %s not updated for event %s" + % (self.datasource, EventType.HOTPLUG) ) return result @@ -118,7 +122,7 @@ class UeventHandler(abc.ABC): class NetHandler(UeventHandler): def __init__(self, datasource, devpath, action, success_fn): # convert devpath to mac address - id = read_sys_net_safe(os.path.basename(devpath), 'address') + id = read_sys_net_safe(os.path.basename(devpath), "address") super().__init__(id, datasource, devpath, action, success_fn) def apply(self): @@ -128,14 +132,16 @@ class NetHandler(UeventHandler): ) interface_name = os.path.basename(self.devpath) activator = activators.select_activator() - if self.action == 'add': + if self.action == "add": if not activator.bring_up_interface(interface_name): raise RuntimeError( - 'Failed to bring up device: {}'.format(self.devpath)) - elif self.action == 'remove': + "Failed to bring up device: {}".format(self.devpath) + ) + elif self.action == "remove": if not activator.bring_down_interface(interface_name): raise RuntimeError( - 'Failed to bring down device: {}'.format(self.devpath)) + "Failed to bring down device: {}".format(self.devpath) + ) @property def config(self): @@ -144,15 +150,16 @@ class NetHandler(UeventHandler): def device_detected(self) -> bool: netstate = parse_net_config_data(self.config) found = [ - iface for iface in 
netstate.iter_interfaces() - if iface.get('mac_address') == self.id + iface + for iface in netstate.iter_interfaces() + if iface.get("mac_address") == self.id ] - LOG.debug('Ifaces with ID=%s : %s', self.id, found) + LOG.debug("Ifaces with ID=%s : %s", self.id, found) return len(found) > 0 SUBSYSTEM_PROPERTES_MAP = { - 'net': (NetHandler, EventScope.NETWORK), + "net": (NetHandler, EventScope.NETWORK), } @@ -161,66 +168,65 @@ def is_enabled(hotplug_init, subsystem): scope = SUBSYSTEM_PROPERTES_MAP[subsystem][1] except KeyError as e: raise Exception( - 'hotplug-hook: cannot handle events for subsystem: {}'.format( - subsystem) + "hotplug-hook: cannot handle events for subsystem: {}".format( + subsystem + ) ) from e return stages.update_event_enabled( datasource=hotplug_init.datasource, cfg=hotplug_init.cfg, event_source_type=EventType.HOTPLUG, - scope=scope + scope=scope, ) def initialize_datasource(hotplug_init, subsystem): - LOG.debug('Fetching datasource') + LOG.debug("Fetching datasource") datasource = hotplug_init.fetch(existing="trust") if not datasource.get_supported_events([EventType.HOTPLUG]): - LOG.debug('hotplug not supported for event of type %s', subsystem) + LOG.debug("hotplug not supported for event of type %s", subsystem) return if not is_enabled(hotplug_init, subsystem): - LOG.debug('hotplug not enabled for event of type %s', subsystem) + LOG.debug("hotplug not enabled for event of type %s", subsystem) return return datasource -def handle_hotplug( - hotplug_init: Init, devpath, subsystem, udevaction -): +def handle_hotplug(hotplug_init: Init, devpath, subsystem, udevaction): datasource = initialize_datasource(hotplug_init, subsystem) if not datasource: return handler_cls = SUBSYSTEM_PROPERTES_MAP[subsystem][0] - LOG.debug('Creating %s event handler', subsystem) + LOG.debug("Creating %s event handler", subsystem) event_handler = handler_cls( datasource=datasource, devpath=devpath, action=udevaction, - success_fn=hotplug_init._write_to_cache + success_fn=hotplug_init._write_to_cache, ) # type: UeventHandler wait_times = [1, 3, 5, 10, 30] for attempt, wait in enumerate(wait_times): LOG.debug( - 'subsystem=%s update attempt %s/%s', + "subsystem=%s update attempt %s/%s", subsystem, attempt, - len(wait_times) + len(wait_times), ) try: - LOG.debug('Refreshing metadata') + LOG.debug("Refreshing metadata") event_handler.update_metadata() - LOG.debug('Detecting device in updated metadata') + LOG.debug("Detecting device in updated metadata") event_handler.detect_hotplugged_device() - LOG.debug('Applying config change') + LOG.debug("Applying config change") event_handler.apply() - LOG.debug('Updating cache') + LOG.debug("Updating cache") event_handler.success() break except Exception as e: - LOG.debug('Exception while processing hotplug event. %s', e) + LOG.debug("Exception while processing hotplug event. 
%s", e) time.sleep(wait) last_exception = e else: @@ -238,31 +244,33 @@ def handle_args(name, args): hotplug_init.read_cfg() log.setupLogging(hotplug_init.cfg) - if 'reporting' in hotplug_init.cfg: - reporting.update_configuration(hotplug_init.cfg.get('reporting')) + if "reporting" in hotplug_init.cfg: + reporting.update_configuration(hotplug_init.cfg.get("reporting")) # Logging isn't going to be setup until now LOG.debug( - '%s called with the following arguments: {' - 'hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}', + "%s called with the following arguments: {" + "hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}", name, args.hotplug_action, args.subsystem, - args.udevaction if 'udevaction' in args else None, - args.devpath if 'devpath' in args else None, + args.udevaction if "udevaction" in args else None, + args.devpath if "devpath" in args else None, ) with hotplug_reporter: try: - if args.hotplug_action == 'query': + if args.hotplug_action == "query": try: datasource = initialize_datasource( - hotplug_init, args.subsystem) + hotplug_init, args.subsystem + ) except DataSourceNotFoundException: print( "Unable to determine hotplug state. No datasource " - "detected") + "detected" + ) sys.exit(1) - print('enabled' if datasource else 'disabled') + print("enabled" if datasource else "disabled") else: handle_hotplug( hotplug_init=hotplug_init, @@ -271,13 +279,13 @@ def handle_args(name, args): udevaction=args.udevaction, ) except Exception: - LOG.exception('Received fatal exception handling hotplug!') + LOG.exception("Received fatal exception handling hotplug!") raise - LOG.debug('Exiting hotplug handler') + LOG.debug("Exiting hotplug handler") reporting.flush_events() -if __name__ == '__main__': +if __name__ == "__main__": args = get_parser().parse_args() handle_args(NAME, args) diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py index 31ade73d..d54b809a 100644 --- a/cloudinit/cmd/devel/logs.py +++ b/cloudinit/cmd/devel/logs.py @@ -5,20 +5,19 @@ """Define 'collect-logs' utility and handler to include in cloud-init cmd.""" import argparse -from datetime import datetime import os import shutil import sys +from datetime import datetime from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE +from cloudinit.subp import ProcessExecutionError, subp from cloudinit.temp_utils import tempdir -from cloudinit.subp import (ProcessExecutionError, subp) -from cloudinit.util import (chdir, copy, ensure_dir, write_file) +from cloudinit.util import chdir, copy, ensure_dir, write_file - -CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log'] -CLOUDINIT_RUN_DIR = '/run/cloud-init' -USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional +CLOUDINIT_LOGS = ["/var/log/cloud-init.log", "/var/log/cloud-init-output.log"] +CLOUDINIT_RUN_DIR = "/run/cloud-init" +USER_DATA_FILE = "/var/lib/cloud/instance/user-data.txt" # Optional def get_parser(parser=None): @@ -32,26 +31,44 @@ def get_parser(parser=None): """ if not parser: parser = argparse.ArgumentParser( - prog='collect-logs', - description='Collect and tar all cloud-init debug info') - parser.add_argument('--verbose', '-v', action='count', default=0, - dest='verbosity', help="Be more verbose.") + prog="collect-logs", + description="Collect and tar all cloud-init debug info", + ) + parser.add_argument( + "--verbose", + "-v", + action="count", + default=0, + dest="verbosity", + help="Be more verbose.", + ) parser.add_argument( - "--tarfile", '-t', default='cloud-init.tar.gz', 
- help=('The tarfile to create containing all collected logs.' - ' Default: cloud-init.tar.gz')) + "--tarfile", + "-t", + default="cloud-init.tar.gz", + help=( + "The tarfile to create containing all collected logs." + " Default: cloud-init.tar.gz" + ), + ) parser.add_argument( - "--include-userdata", '-u', default=False, action='store_true', - dest='userdata', help=( - 'Optionally include user-data from {0} which could contain' - ' sensitive information.'.format(USER_DATA_FILE))) + "--include-userdata", + "-u", + default=False, + action="store_true", + dest="userdata", + help=( + "Optionally include user-data from {0} which could contain" + " sensitive information.".format(USER_DATA_FILE) + ), + ) return parser def _copytree_rundir_ignore_files(curdir, files): """Return a list of files to ignore for /run/cloud-init directory""" ignored_files = [ - 'hook-hotplug-cmd', # named pipe for hotplug + "hook-hotplug-cmd", # named pipe for hotplug ] if os.getuid() != 0: # Ignore root-permissioned files @@ -94,52 +111,67 @@ def collect_logs(tarfile, include_userdata, verbosity=0): if include_userdata and os.getuid() != 0: sys.stderr.write( "To include userdata, root user is required." - " Try sudo cloud-init collect-logs\n") + " Try sudo cloud-init collect-logs\n" + ) return 1 tarfile = os.path.abspath(tarfile) - date = datetime.utcnow().date().strftime('%Y-%m-%d') - log_dir = 'cloud-init-logs-{0}'.format(date) - with tempdir(dir='/tmp') as tmp_dir: + date = datetime.utcnow().date().strftime("%Y-%m-%d") + log_dir = "cloud-init-logs-{0}".format(date) + with tempdir(dir="/tmp") as tmp_dir: log_dir = os.path.join(tmp_dir, log_dir) version = _write_command_output_to_file( - ['cloud-init', '--version'], - os.path.join(log_dir, 'version'), - "cloud-init --version", verbosity) + ["cloud-init", "--version"], + os.path.join(log_dir, "version"), + "cloud-init --version", + verbosity, + ) dpkg_ver = _write_command_output_to_file( - ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'], - os.path.join(log_dir, 'dpkg-version'), - "dpkg version", verbosity) + ["dpkg-query", "--show", "-f=${Version}\n", "cloud-init"], + os.path.join(log_dir, "dpkg-version"), + "dpkg version", + verbosity, + ) if not version: version = dpkg_ver if dpkg_ver else "not-available" _debug("collected cloud-init version: %s\n" % version, 1, verbosity) _write_command_output_to_file( - ['dmesg'], os.path.join(log_dir, 'dmesg.txt'), - "dmesg output", verbosity) + ["dmesg"], + os.path.join(log_dir, "dmesg.txt"), + "dmesg output", + verbosity, + ) _write_command_output_to_file( - ['journalctl', '--boot=0', '-o', 'short-precise'], - os.path.join(log_dir, 'journal.txt'), - "systemd journal of current boot", verbosity) + ["journalctl", "--boot=0", "-o", "short-precise"], + os.path.join(log_dir, "journal.txt"), + "systemd journal of current boot", + verbosity, + ) for log in CLOUDINIT_LOGS: _collect_file(log, log_dir, verbosity) if include_userdata: _collect_file(USER_DATA_FILE, log_dir, verbosity) - run_dir = os.path.join(log_dir, 'run') + run_dir = os.path.join(log_dir, "run") ensure_dir(run_dir) if os.path.exists(CLOUDINIT_RUN_DIR): try: - shutil.copytree(CLOUDINIT_RUN_DIR, - os.path.join(run_dir, 'cloud-init'), - ignore=_copytree_rundir_ignore_files) + shutil.copytree( + CLOUDINIT_RUN_DIR, + os.path.join(run_dir, "cloud-init"), + ignore=_copytree_rundir_ignore_files, + ) except shutil.Error as e: sys.stderr.write("Failed collecting file(s) due to error:\n") - sys.stderr.write(str(e) + '\n') + sys.stderr.write(str(e) + "\n") 
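# Illustrative aside, not part of the patch: a minimal sketch of the
# shutil.copytree ignore= hook used above for /run/cloud-init. copytree
# calls the hook once per visited directory with (directory, entry
# names) and skips every name the hook returns. The destination path
# and hook name below are made up for the example.
import shutil

def _skip_named_pipes(curdir, entries):
    # Mirrors _copytree_rundir_ignore_files: drop the hotplug named
    # pipe, which is not a regular copyable file.
    return [name for name in entries if name == "hook-hotplug-cmd"]

shutil.copytree("/run/cloud-init", "/tmp/run-copy", ignore=_skip_named_pipes)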
_debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity) else: - _debug("directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, 1, - verbosity) + _debug( + "directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, + 1, + verbosity, + ) with chdir(tmp_dir): - subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')]) + subp(["tar", "czvf", tarfile, log_dir.replace(tmp_dir + "/", "")]) sys.stderr.write("Wrote %s\n" % tarfile) return 0 @@ -152,10 +184,10 @@ def handle_collect_logs_args(name, args): def main(): """Tool to collect and tar all cloud-init related logs.""" parser = get_parser() - return handle_collect_logs_args('collect-logs', parser.parse_args()) + return handle_collect_logs_args("collect-logs", parser.parse_args()) -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main()) # vi: ts=4 expandtab diff --git a/cloudinit/cmd/devel/make_mime.py b/cloudinit/cmd/devel/make_mime.py index 4e6a5778..a7493c74 100755 --- a/cloudinit/cmd/devel/make_mime.py +++ b/cloudinit/cmd/devel/make_mime.py @@ -9,19 +9,22 @@ from email.mime.text import MIMEText from cloudinit import log from cloudinit.handlers import INCLUSION_TYPES_MAP + from . import addLogHandlerCLI -NAME = 'make-mime' +NAME = "make-mime" LOG = log.getLogger(NAME) -EPILOG = ("Example: make-mime -a config.yaml:cloud-config " - "-a script.sh:x-shellscript > user-data") +EPILOG = ( + "Example: make-mime -a config.yaml:cloud-config " + "-a script.sh:x-shellscript > user-data" +) def file_content_type(text): - """ Return file content type by reading the first line of the input. """ + """Return file content type by reading the first line of the input.""" try: filename, content_type = text.split(":", 1) - return (open(filename, 'r'), filename, content_type.strip()) + return (open(filename, "r"), filename, content_type.strip()) except ValueError as e: raise argparse.ArgumentError( text, "Invalid value for %r" % (text) @@ -41,26 +44,43 @@ def get_parser(parser=None): # update the parser's doc and add an epilog to show an example parser.description = __doc__ parser.epilog = EPILOG - parser.add_argument("-a", "--attach", dest="files", type=file_content_type, - action='append', default=[], - metavar=":", - help=("attach the given file as the specified " - "content-type")) - parser.add_argument('-l', '--list-types', action='store_true', - default=False, - help='List support cloud-init content types.') - parser.add_argument('-f', '--force', action='store_true', - default=False, - help='Ignore unknown content-type warnings') + parser.add_argument( + "-a", + "--attach", + dest="files", + type=file_content_type, + action="append", + default=[], + metavar=":", + help="attach the given file as the specified content-type", + ) + parser.add_argument( + "-l", + "--list-types", + action="store_true", + default=False, + help="List support cloud-init content types.", + ) + parser.add_argument( + "-f", + "--force", + action="store_true", + default=False, + help="Ignore unknown content-type warnings", + ) return parser def get_content_types(strip_prefix=False): - """ Return a list of cloud-init supported content types. Optionally - strip out the leading 'text/' of the type if strip_prefix=True. + """Return a list of cloud-init supported content types. Optionally + strip out the leading 'text/' of the type if strip_prefix=True. 
""" - return sorted([ctype.replace("text/", "") if strip_prefix else ctype - for ctype in INCLUSION_TYPES_MAP.values()]) + return sorted( + [ + ctype.replace("text/", "") if strip_prefix else ctype + for ctype in INCLUSION_TYPES_MAP.values() + ] + ) def handle_args(name, args): @@ -82,14 +102,16 @@ def handle_args(name, args): for i, (fh, filename, format_type) in enumerate(args.files): contents = fh.read() sub_message = MIMEText(contents, format_type, sys.getdefaultencoding()) - sub_message.add_header('Content-Disposition', - 'attachment; filename="%s"' % (filename)) + sub_message.add_header( + "Content-Disposition", 'attachment; filename="%s"' % (filename) + ) content_type = sub_message.get_content_type().lower() if content_type not in get_content_types(): level = "WARNING" if args.force else "ERROR" - msg = (level + ": content type %r for attachment %s " - "may be incorrect!") % (content_type, i + 1) - sys.stderr.write(msg + '\n') + msg = ( + level + ": content type %r for attachment %s may be incorrect!" + ) % (content_type, i + 1) + sys.stderr.write(msg + "\n") errors.append(msg) sub_messages.append(sub_message) if len(errors) and not args.force: @@ -104,10 +126,10 @@ def handle_args(name, args): def main(): args = get_parser().parse_args() - return(handle_args(NAME, args)) + return handle_args(NAME, args) -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main()) diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py index f4a98e5e..18b1e7ff 100755 --- a/cloudinit/cmd/devel/net_convert.py +++ b/cloudinit/cmd/devel/net_convert.py @@ -6,15 +6,13 @@ import json import os import sys -from cloudinit.sources.helpers import openstack +from cloudinit import distros, log, safeyaml +from cloudinit.net import eni, netplan, network_state, networkd, sysconfig from cloudinit.sources import DataSourceAzure as azure from cloudinit.sources import DataSourceOVF as ovf +from cloudinit.sources.helpers import openstack -from cloudinit import distros, safeyaml -from cloudinit.net import eni, netplan, networkd, network_state, sysconfig -from cloudinit import log - -NAME = 'net-convert' +NAME = "net-convert" def get_parser(parser=None): @@ -27,33 +25,59 @@ def get_parser(parser=None): """ if not parser: parser = argparse.ArgumentParser(prog=NAME, description=__doc__) - parser.add_argument("-p", "--network-data", type=open, - metavar="PATH", required=True, - help="The network configuration to read") - parser.add_argument("-k", "--kind", - choices=['eni', 'network_data.json', 'yaml', - 'azure-imds', 'vmware-imc'], - required=True, - help="The format of the given network config") - parser.add_argument("-d", "--directory", - metavar="PATH", - help="directory to place output in", - required=True) - parser.add_argument("-D", "--distro", - choices=[item for sublist in - distros.OSFAMILIES.values() - for item in sublist], - required=True) - parser.add_argument("-m", "--mac", - metavar="name,mac", - action='append', - help="interface name to mac mapping") - parser.add_argument("--debug", action='store_true', - help='enable debug logging to stderr.') - parser.add_argument("-O", "--output-kind", - choices=['eni', 'netplan', 'networkd', 'sysconfig'], - required=True, - help="The network config format to emit") + parser.add_argument( + "-p", + "--network-data", + type=open, + metavar="PATH", + required=True, + help="The network configuration to read", + ) + parser.add_argument( + "-k", + "--kind", + choices=[ + "eni", + "network_data.json", + "yaml", + "azure-imds", + 
"vmware-imc", + ], + required=True, + help="The format of the given network config", + ) + parser.add_argument( + "-d", + "--directory", + metavar="PATH", + help="directory to place output in", + required=True, + ) + parser.add_argument( + "-D", + "--distro", + choices=[ + item for sublist in distros.OSFAMILIES.values() for item in sublist + ], + required=True, + ) + parser.add_argument( + "-m", + "--mac", + metavar="name,mac", + action="append", + help="interface name to mac mapping", + ) + parser.add_argument( + "--debug", action="store_true", help="enable debug logging to stderr." + ) + parser.add_argument( + "-O", + "--output-kind", + choices=["eni", "netplan", "networkd", "sysconfig"], + required=True, + help="The network config format to emit", + ) return parser @@ -81,59 +105,68 @@ def handle_args(name, args): pre_ns = eni.convert_eni_data(net_data) elif args.kind == "yaml": pre_ns = safeyaml.load(net_data) - if 'network' in pre_ns: - pre_ns = pre_ns.get('network') + if "network" in pre_ns: + pre_ns = pre_ns.get("network") if args.debug: - sys.stderr.write('\n'.join( - ["Input YAML", safeyaml.dumps(pre_ns), ""])) - elif args.kind == 'network_data.json': + sys.stderr.write( + "\n".join(["Input YAML", safeyaml.dumps(pre_ns), ""]) + ) + elif args.kind == "network_data.json": pre_ns = openstack.convert_net_json( - json.loads(net_data), known_macs=known_macs) - elif args.kind == 'azure-imds': + json.loads(net_data), known_macs=known_macs + ) + elif args.kind == "azure-imds": pre_ns = azure.parse_network_config(json.loads(net_data)) - elif args.kind == 'vmware-imc': + elif args.kind == "vmware-imc": config = ovf.Config(ovf.ConfigFile(args.network_data.name)) pre_ns = ovf.get_network_config_from_conf(config, False) ns = network_state.parse_net_config_data(pre_ns) if args.debug: - sys.stderr.write('\n'.join( - ["", "Internal State", safeyaml.dumps(ns), ""])) + sys.stderr.write( + "\n".join(["", "Internal State", safeyaml.dumps(ns), ""]) + ) distro_cls = distros.fetch(args.distro) distro = distro_cls(args.distro, {}, None) config = {} if args.output_kind == "eni": r_cls = eni.Renderer - config = distro.renderer_configs.get('eni') + config = distro.renderer_configs.get("eni") elif args.output_kind == "netplan": r_cls = netplan.Renderer - config = distro.renderer_configs.get('netplan') + config = distro.renderer_configs.get("netplan") # don't run netplan generate/apply - config['postcmds'] = False + config["postcmds"] = False # trim leading slash - config['netplan_path'] = config['netplan_path'][1:] + config["netplan_path"] = config["netplan_path"][1:] # enable some netplan features - config['features'] = ['dhcp-use-domains', 'ipv6-mtu'] + config["features"] = ["dhcp-use-domains", "ipv6-mtu"] elif args.output_kind == "networkd": r_cls = networkd.Renderer - config = distro.renderer_configs.get('networkd') + config = distro.renderer_configs.get("networkd") elif args.output_kind == "sysconfig": r_cls = sysconfig.Renderer - config = distro.renderer_configs.get('sysconfig') + config = distro.renderer_configs.get("sysconfig") else: raise RuntimeError("Invalid output_kind") r = r_cls(config=config) - sys.stderr.write(''.join([ - "Read input format '%s' from '%s'.\n" % ( - args.kind, args.network_data.name), - "Wrote output format '%s' to '%s'\n" % ( - args.output_kind, args.directory)]) + "\n") + sys.stderr.write( + "".join( + [ + "Read input format '%s' from '%s'.\n" + % (args.kind, args.network_data.name), + "Wrote output format '%s' to '%s'\n" + % (args.output_kind, args.directory), + ] + ) + + 
"\n" + ) r.render_network_state(network_state=ns, target=args.directory) -if __name__ == '__main__': +if __name__ == "__main__": args = get_parser().parse_args() handle_args(NAME, args) diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py index be304630..76b16c2e 100644 --- a/cloudinit/cmd/devel/parser.py +++ b/cloudinit/cmd/devel/parser.py @@ -5,33 +5,47 @@ """Define 'devel' subcommand argument parsers to include in cloud-init cmd.""" import argparse + from cloudinit.config import schema -from . import hotplug_hook -from . import net_convert -from . import render -from . import make_mime +from . import hotplug_hook, make_mime, net_convert, render def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser( - prog='cloudinit-devel', - description='Run development cloud-init tools') - subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand') + prog="cloudinit-devel", + description="Run development cloud-init tools", + ) + subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand") subparsers.required = True subcmds = [ - (hotplug_hook.NAME, hotplug_hook.__doc__, - hotplug_hook.get_parser, hotplug_hook.handle_args), - ('schema', 'Validate cloud-config files for document schema', - schema.get_parser, schema.handle_schema_args), - (net_convert.NAME, net_convert.__doc__, - net_convert.get_parser, net_convert.handle_args), - (render.NAME, render.__doc__, - render.get_parser, render.handle_args), - (make_mime.NAME, make_mime.__doc__, - make_mime.get_parser, make_mime.handle_args), + ( + hotplug_hook.NAME, + hotplug_hook.__doc__, + hotplug_hook.get_parser, + hotplug_hook.handle_args, + ), + ( + "schema", + "Validate cloud-config files for document schema", + schema.get_parser, + schema.handle_schema_args, + ), + ( + net_convert.NAME, + net_convert.__doc__, + net_convert.get_parser, + net_convert.handle_args, + ), + (render.NAME, render.__doc__, render.get_parser, render.handle_args), + ( + make_mime.NAME, + make_mime.__doc__, + make_mime.get_parser, + make_mime.handle_args, + ), ] for (subcmd, helpmsg, get_parser, handler) in subcmds: parser = subparsers.add_parser(subcmd, help=helpmsg) diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py index 1090aa16..2f9a22a8 100755 --- a/cloudinit/cmd/devel/render.py +++ b/cloudinit/cmd/devel/render.py @@ -6,12 +6,13 @@ import argparse import os import sys -from cloudinit.handlers.jinja_template import render_jinja_payload_from_file from cloudinit import log +from cloudinit.handlers.jinja_template import render_jinja_payload_from_file from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE + from . import addLogHandlerCLI, read_cfg_paths -NAME = 'render' +NAME = "render" LOG = log.getLogger(NAME) @@ -27,13 +28,24 @@ def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser(prog=NAME, description=__doc__) parser.add_argument( - 'user_data', type=str, help='Path to the user-data file to render') + "user_data", type=str, help="Path to the user-data file to render" + ) + parser.add_argument( + "-i", + "--instance-data", + type=str, + help=( + "Optional path to instance-data.json file. Defaults to" + " /run/cloud-init/instance-data.json" + ), + ) parser.add_argument( - '-i', '--instance-data', type=str, - help=('Optional path to instance-data.json file. 
Defaults to' - ' /run/cloud-init/instance-data.json')) - parser.add_argument('-d', '--debug', action='store_true', default=False, - help='Add verbose messages during template render') + "-d", + "--debug", + action="store_true", + default=False, + help="Add verbose messages during template render", + ) return parser @@ -54,34 +66,38 @@ def handle_args(name, args): redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE) if uid == 0: instance_data_fn = os.path.join( - paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE) + paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE + ) if not os.path.exists(instance_data_fn): LOG.warning( - 'Missing root-readable %s. Using redacted %s instead.', - instance_data_fn, redacted_data_fn + "Missing root-readable %s. Using redacted %s instead.", + instance_data_fn, + redacted_data_fn, ) instance_data_fn = redacted_data_fn else: instance_data_fn = redacted_data_fn if not os.path.exists(instance_data_fn): - LOG.error('Missing instance-data.json file: %s', instance_data_fn) + LOG.error("Missing instance-data.json file: %s", instance_data_fn) return 1 try: with open(args.user_data) as stream: user_data = stream.read() except IOError: - LOG.error('Missing user-data file: %s', args.user_data) + LOG.error("Missing user-data file: %s", args.user_data) return 1 try: rendered_payload = render_jinja_payload_from_file( - payload=user_data, payload_fn=args.user_data, + payload=user_data, + payload_fn=args.user_data, instance_data_file=instance_data_fn, - debug=True if args.debug else False) + debug=True if args.debug else False, + ) except RuntimeError as e: - LOG.error('Cannot render from instance data: %s', str(e)) + LOG.error("Cannot render from instance data: %s", str(e)) return 1 if not rendered_payload: - LOG.error('Unable to render user-data file: %s', args.user_data) + LOG.error("Unable to render user-data file: %s", args.user_data) return 1 sys.stdout.write(rendered_payload) return 0 @@ -89,10 +105,10 @@ def handle_args(name, args): def main(): args = get_parser().parse_args() - return(handle_args(NAME, args)) + return handle_args(NAME, args) -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main()) diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 63186d34..e67edbc3 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -19,6 +19,7 @@ import time import traceback from cloudinit import patcher + patcher.patch_logging() from cloudinit import log as logging @@ -34,8 +35,7 @@ from cloudinit import warnings from cloudinit import reporting from cloudinit.reporting import events -from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE, - CLOUD_CONFIG) +from cloudinit.settings import PER_INSTANCE, PER_ALWAYS, PER_ONCE, CLOUD_CONFIG from cloudinit import atomic_helper @@ -44,8 +44,10 @@ from cloudinit import dhclient_hook # Welcome message template -WELCOME_MSG_TPL = ("Cloud-init v. {version} running '{action}' at " - "{timestamp}. Up {uptime} seconds.") +WELCOME_MSG_TPL = ( + "Cloud-init v. {version} running '{action}' at " + "{timestamp}. Up {uptime} seconds." +) # Module section template MOD_SECTION_TPL = "cloud_%s_modules" @@ -53,9 +55,9 @@ MOD_SECTION_TPL = "cloud_%s_modules" # Frequency shortname to full name # (so users don't have to remember the full name...) 
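# Illustrative aside, not part of the patch: the mapping defined just
# below backs the 'single' subcommand's --frequency choices (see
# choices=list(FREQ_SHORT_NAMES.keys()) further on). Assumed lookup
# behaviour at the call site:
#
#     FREQ_SHORT_NAMES.get("always")  # -> PER_ALWAYS
#     FREQ_SHORT_NAMES.get("weekly")  # -> None, unknown short name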
FREQ_SHORT_NAMES = { - 'instance': PER_INSTANCE, - 'always': PER_ALWAYS, - 'once': PER_ONCE, + "instance": PER_INSTANCE, + "always": PER_ALWAYS, + "once": PER_ONCE, } LOG = logging.getLogger() @@ -63,21 +65,20 @@ LOG = logging.getLogger() # Used for when a logger may not be active # and we still want to print exceptions... -def print_exc(msg=''): +def print_exc(msg=""): if msg: sys.stderr.write("%s\n" % (msg)) - sys.stderr.write('-' * 60) + sys.stderr.write("-" * 60) sys.stderr.write("\n") traceback.print_exc(file=sys.stderr) - sys.stderr.write('-' * 60) + sys.stderr.write("-" * 60) sys.stderr.write("\n") def welcome(action, msg=None): if not msg: msg = welcome_format(action) - util.multi_log("%s\n" % (msg), - console=False, stderr=True, log=LOG) + util.multi_log("%s\n" % (msg), console=False, stderr=True, log=LOG) return msg @@ -86,7 +87,8 @@ def welcome_format(action): version=version.version_string(), uptime=util.uptime(), timestamp=util.time_rfc2822(), - action=action) + action=action, + ) def extract_fns(args): @@ -107,29 +109,31 @@ def run_module_section(mods, action_name, section): (which_ran, failures) = mods.run_section(full_section_name) total_attempted = len(which_ran) + len(failures) if total_attempted == 0: - msg = ("No '%s' modules to run" - " under section '%s'") % (action_name, full_section_name) + msg = "No '%s' modules to run under section '%s'" % ( + action_name, + full_section_name, + ) sys.stderr.write("%s\n" % (msg)) LOG.debug(msg) return [] else: - LOG.debug("Ran %s modules with %s failures", - len(which_ran), len(failures)) + LOG.debug( + "Ran %s modules with %s failures", len(which_ran), len(failures) + ) return failures def apply_reporting_cfg(cfg): - if cfg.get('reporting'): - reporting.update_configuration(cfg.get('reporting')) + if cfg.get("reporting"): + reporting.update_configuration(cfg.get("reporting")) -def parse_cmdline_url(cmdline, names=('cloud-config-url', 'url')): +def parse_cmdline_url(cmdline, names=("cloud-config-url", "url")): data = util.keyval_str_to_dict(cmdline) for key in names: if key in data: return key, data[key] - raise KeyError("No keys (%s) found in string '%s'" % - (cmdline, names)) + raise KeyError("No keys (%s) found in string '%s'" % (cmdline, names)) def attempt_cmdline_url(path, network=True, cmdline=None): @@ -163,51 +167,60 @@ def attempt_cmdline_url(path, network=True, cmdline=None): if path_is_local and os.path.exists(path): if network: - m = ("file '%s' existed, possibly from local stage download" - " of command line url '%s'. Not re-writing." % (path, url)) + m = ( + "file '%s' existed, possibly from local stage download" + " of command line url '%s'. Not re-writing." % (path, url) + ) level = logging.INFO if path_is_local: level = logging.DEBUG else: - m = ("file '%s' existed, possibly from previous boot download" - " of command line url '%s'. Not re-writing." % (path, url)) + m = ( + "file '%s' existed, possibly from previous boot download" + " of command line url '%s'. Not re-writing." 
% (path, url) + ) level = logging.WARN return (level, m) - kwargs = {'url': url, 'timeout': 10, 'retries': 2} + kwargs = {"url": url, "timeout": 10, "retries": 2} if network or path_is_local: level = logging.WARN - kwargs['sec_between'] = 1 + kwargs["sec_between"] = 1 else: level = logging.DEBUG - kwargs['sec_between'] = .1 + kwargs["sec_between"] = 0.1 data = None - header = b'#cloud-config' + header = b"#cloud-config" try: resp = url_helper.read_file_or_url(**kwargs) if resp.ok(): data = resp.contents if not resp.contents.startswith(header): - if cmdline_name == 'cloud-config-url': + if cmdline_name == "cloud-config-url": level = logging.WARN else: level = logging.INFO return ( level, - "contents of '%s' did not start with %s" % (url, header)) + "contents of '%s' did not start with %s" % (url, header), + ) else: - return (level, - "url '%s' returned code %s. Ignoring." % (url, resp.code)) + return ( + level, + "url '%s' returned code %s. Ignoring." % (url, resp.code), + ) except url_helper.UrlError as e: return (level, "retrieving url '%s' failed: %s" % (url, e)) util.write_file(path, data, mode=0o600) - return (logging.INFO, - "wrote cloud-config data from %s='%s' to %s" % - (cmdline_name, url, path)) + return ( + logging.INFO, + "wrote cloud-config data from %s='%s' to %s" + % (cmdline_name, url, path), + ) def purge_cache_on_python_version_change(init): @@ -216,31 +229,32 @@ def purge_cache_on_python_version_change(init): There could be changes not represented in our cache (obj.pkl) after we upgrade to a new version of python, so at that point clear the cache """ - current_python_version = '%d.%d' % ( - sys.version_info.major, sys.version_info.minor + current_python_version = "%d.%d" % ( + sys.version_info.major, + sys.version_info.minor, ) python_version_path = os.path.join( - init.paths.get_cpath('data'), 'python-version' + init.paths.get_cpath("data"), "python-version" ) if os.path.exists(python_version_path): cached_python_version = open(python_version_path).read() # The Python version has changed out from under us, anything that was # pickled previously is likely useless due to API changes. if cached_python_version != current_python_version: - LOG.debug('Python version change detected. Purging cache') + LOG.debug("Python version change detected. Purging cache") init.purge_cache(True) util.write_file(python_version_path, current_python_version) else: - if os.path.exists(init.paths.get_ipath_cur('obj_pkl')): + if os.path.exists(init.paths.get_ipath_cur("obj_pkl")): LOG.info( - 'Writing python-version file. ' - 'Cache compatibility status is currently unknown.' + "Writing python-version file. " + "Cache compatibility status is currently unknown." ) util.write_file(python_version_path, current_python_version) def _should_bring_up_interfaces(init, args): - if util.get_cfg_option_bool(init.cfg, 'disable_network_activation'): + if util.get_cfg_option_bool(init.cfg, "disable_network_activation"): return False return not args.local @@ -250,10 +264,14 @@ def main_init(name, args): if args.local: deps = [sources.DEP_FILESYSTEM] - early_logs = [attempt_cmdline_url( - path=os.path.join("%s.d" % CLOUD_CONFIG, - "91_kernel_cmdline_url.cfg"), - network=not args.local)] + early_logs = [ + attempt_cmdline_url( + path=os.path.join( + "%s.d" % CLOUD_CONFIG, "91_kernel_cmdline_url.cfg" + ), + network=not args.local, + ) + ] # Cloud-init 'init' stage is broken up into the following sub-stages # 1. 
Ensure that the init object fetches its config without errors @@ -289,8 +307,9 @@ def main_init(name, args): early_logs.append((logging.WARN, msg)) if args.debug: # Reset so that all the debug handlers are closed out - LOG.debug(("Logging being reset, this logger may no" - " longer be active shortly")) + LOG.debug( + "Logging being reset, this logger may no longer be active shortly" + ) logging.resetLogging() logging.setupLogging(init.cfg) apply_reporting_cfg(init.cfg) @@ -317,9 +336,11 @@ def main_init(name, args): if mode == sources.DSMODE_NETWORK: existing = "trust" sys.stderr.write("%s\n" % (netinfo.debug_info())) - LOG.debug(("Checking to see if files that we need already" - " exist from a previous run that would allow us" - " to stop early.")) + LOG.debug( + "Checking to see if files that we need already" + " exist from a previous run that would allow us" + " to stop early." + ) # no-net is written by upstart cloud-init-nonet when network failed # to come up stop_files = [ @@ -331,15 +352,18 @@ def main_init(name, args): existing_files.append(fn) if existing_files: - LOG.debug("[%s] Exiting. stop file %s existed", - mode, existing_files) + LOG.debug( + "[%s] Exiting. stop file %s existed", mode, existing_files + ) return (None, []) else: - LOG.debug("Execution continuing, no previous run detected that" - " would allow us to stop early.") + LOG.debug( + "Execution continuing, no previous run detected that" + " would allow us to stop early." + ) else: existing = "check" - mcfg = util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False) + mcfg = util.get_cfg_option_bool(init.cfg, "manual_cache_clean", False) if mcfg: LOG.debug("manual cache clean set from config") existing = "trust" @@ -360,8 +384,11 @@ def main_init(name, args): # if in network mode, and the datasource is local # then work was done at that stage. if mode == sources.DSMODE_NETWORK and init.datasource.dsmode != mode: - LOG.debug("[%s] Exiting. datasource %s in local mode", - mode, init.datasource) + LOG.debug( + "[%s] Exiting. datasource %s in local mode", + mode, + init.datasource, + ) return (None, []) except sources.DataSourceNotFoundException: # In the case of 'cloud-init init' without '--local' it is a bit @@ -371,8 +398,9 @@ def main_init(name, args): if mode == sources.DSMODE_LOCAL: LOG.debug("No local datasource found") else: - util.logexc(LOG, ("No instance datasource found!" - " Likely bad things to come!")) + util.logexc( + LOG, "No instance datasource found! Likely bad things to come!" + ) if not args.force: init.apply_network_config(bring_up=bring_up_interfaces) LOG.debug("[%s] Exiting without datasource", mode) @@ -381,46 +409,60 @@ def main_init(name, args): else: return (None, ["No instance datasource found."]) else: - LOG.debug("[%s] barreling on in force mode without datasource", - mode) + LOG.debug( + "[%s] barreling on in force mode without datasource", mode + ) _maybe_persist_instance_data(init) # Stage 6 iid = init.instancify() - LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s", - mode, name, iid, init.is_new_instance()) + LOG.debug( + "[%s] %s will now be targeting instance id: %s. new=%s", + mode, + name, + iid, + init.is_new_instance(), + ) if mode == sources.DSMODE_LOCAL: # Before network comes up, set any configured hostname to allow # dhcp clients to advertize this hostname to any DDNS services # LP: #1746455. 
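# Clarifying note, not part of the patch: the call below runs in the
# DSMODE_LOCAL pass, before networking, so any configured hostname is
# already in place when the DHCP client advertises it for dynamic DNS
# (LP: #1746455). On SetHostnameError, _maybe_set_hostname only logs
# the failure; the stage named by retry_stage simply attempts the same
# set-hostname step again once the datasource is fully available.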
- _maybe_set_hostname(init, stage='local', retry_stage='network') + _maybe_set_hostname(init, stage="local", retry_stage="network") init.apply_network_config(bring_up=bring_up_interfaces) if mode == sources.DSMODE_LOCAL: if init.datasource.dsmode != mode: - LOG.debug("[%s] Exiting. datasource %s not in local mode.", - mode, init.datasource) + LOG.debug( + "[%s] Exiting. datasource %s not in local mode.", + mode, + init.datasource, + ) return (init.datasource, []) else: - LOG.debug("[%s] %s is in local mode, will apply init modules now.", - mode, init.datasource) + LOG.debug( + "[%s] %s is in local mode, will apply init modules now.", + mode, + init.datasource, + ) # Give the datasource a chance to use network resources. # This is used on Azure to communicate with the fabric over network. init.setup_datasource() # update fully realizes user-data (pulling in #include if necessary) init.update() - _maybe_set_hostname(init, stage='init-net', retry_stage='modules:config') + _maybe_set_hostname(init, stage="init-net", retry_stage="modules:config") # Stage 7 try: # Attempt to consume the data per instance. # This may run user-data handlers and/or perform # url downloads and such as needed. - (ran, _results) = init.cloudify().run('consume_data', - init.consume_data, - args=[PER_INSTANCE], - freq=PER_INSTANCE) + (ran, _results) = init.cloudify().run( + "consume_data", + init.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE, + ) if not ran: # Just consume anything that is set to run per-always # if nothing ran in the per-instance code @@ -442,8 +484,7 @@ def main_init(name, args): errfmt_orig = errfmt (outfmt, errfmt) = util.get_output_cfg(mods.cfg, name) if outfmt_orig != outfmt or errfmt_orig != errfmt: - LOG.warning("Stdout, stderr changing to (%s, %s)", - outfmt, errfmt) + LOG.warning("Stdout, stderr changing to (%s, %s)", outfmt, errfmt) (outfmt, errfmt) = util.fixup_output(mods.cfg, name) except Exception: util.logexc(LOG, "Failed to re-adjust output redirection!") @@ -459,11 +500,11 @@ def main_init(name, args): def di_report_warn(datasource, cfg): - if 'di_report' not in cfg: + if "di_report" not in cfg: LOG.debug("no di_report found in config.") return - dicfg = cfg['di_report'] + dicfg = cfg["di_report"] if dicfg is None: # ds-identify may write 'di_report:\n #comment\n' # which reads as {'di_report': None} @@ -474,7 +515,7 @@ def di_report_warn(datasource, cfg): LOG.warning("di_report config not a dictionary: %s", dicfg) return - dslist = dicfg.get('datasource_list') + dslist = dicfg.get("datasource_list") if dslist is None: LOG.warning("no 'datasource_list' found in di_report.") return @@ -486,18 +527,26 @@ def di_report_warn(datasource, cfg): # where Name is the thing that shows up in datasource_list. 
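# Worked example, not part of the patch, assuming DS_PREFIX is
# "DataSource" (implied by the startswith() check below): for a
# datasource module "cloudinit.sources.DataSourceEc2",
#
#     "cloudinit.sources.DataSourceEc2".rpartition(".")[2]
#         -> "DataSourceEc2"
#     "DataSourceEc2"[len("DataSource"):]
#         -> "Ec2"   # the short form listed in datasource_list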
modname = datasource.__module__.rpartition(".")[2] if modname.startswith(sources.DS_PREFIX): - modname = modname[len(sources.DS_PREFIX):] + modname = modname[len(sources.DS_PREFIX) :] else: - LOG.warning("Datasource '%s' came from unexpected module '%s'.", - datasource, modname) + LOG.warning( + "Datasource '%s' came from unexpected module '%s'.", + datasource, + modname, + ) if modname in dslist: - LOG.debug("used datasource '%s' from '%s' was in di_report's list: %s", - datasource, modname, dslist) + LOG.debug( + "used datasource '%s' from '%s' was in di_report's list: %s", + datasource, + modname, + dslist, + ) return - warnings.show_warning('dsid_missing_source', cfg, - source=modname, dslist=str(dslist)) + warnings.show_warning( + "dsid_missing_source", cfg, source=modname, dslist=str(dslist) + ) def main_modules(action_name, args): @@ -521,8 +570,10 @@ def main_modules(action_name, args): init.fetch(existing="trust") except sources.DataSourceNotFoundException: # There was no datasource found, theres nothing to do - msg = ('Can not apply stage %s, no datasource found! Likely bad ' - 'things to come!' % name) + msg = ( + "Can not apply stage %s, no datasource found! Likely bad " + "things to come!" % name + ) util.logexc(LOG, msg) print_exc(msg) if not args.force: @@ -539,8 +590,9 @@ def main_modules(action_name, args): util.logexc(LOG, "Failed to setup output redirection!") if args.debug: # Reset so that all the debug handlers are closed out - LOG.debug(("Logging being reset, this logger may no" - " longer be active shortly")) + LOG.debug( + "Logging being reset, this logger may no longer be active shortly" + ) logging.resetLogging() logging.setupLogging(mods.cfg) apply_reporting_cfg(init.cfg) @@ -573,10 +625,12 @@ def main_single(name, args): # There was no datasource found, # that might be bad (or ok) depending on # the module being ran (so continue on) - util.logexc(LOG, ("Failed to fetch your datasource," - " likely bad things to come!")) - print_exc(("Failed to fetch your datasource," - " likely bad things to come!")) + util.logexc( + LOG, "Failed to fetch your datasource, likely bad things to come!" + ) + print_exc( + "Failed to fetch your datasource, likely bad things to come!" 
+ ) if not args.force: return 1 _maybe_persist_instance_data(init) @@ -598,8 +652,9 @@ def main_single(name, args): util.logexc(LOG, "Failed to setup output redirection!") if args.debug: # Reset so that all the debug handlers are closed out - LOG.debug(("Logging being reset, this logger may no" - " longer be active shortly")) + LOG.debug( + "Logging being reset, this logger may no longer be active shortly" + ) logging.resetLogging() logging.setupLogging(mods.cfg) apply_reporting_cfg(init.cfg) @@ -608,9 +663,7 @@ def main_single(name, args): welcome(name, msg=w_msg) # Stage 5 - (which_ran, failures) = mods.run_single(mod_name, - mod_args, - mod_freq) + (which_ran, failures) = mods.run_single(mod_name, mod_args, mod_freq) if failures: LOG.warning("Ran %s but it failed!", mod_name) return 1 @@ -633,7 +686,12 @@ def status_wrapper(name, args, data_d=None, link_d=None): result_path = os.path.join(data_d, "result.json") result_link = os.path.join(link_d, "result.json") - util.ensure_dirs((data_d, link_d,)) + util.ensure_dirs( + ( + data_d, + link_d, + ) + ) (_name, functor) = args.action @@ -647,14 +705,20 @@ def status_wrapper(name, args, data_d=None, link_d=None): else: raise ValueError("unknown name: %s" % name) - modes = ('init', 'init-local', 'modules-init', 'modules-config', - 'modules-final') + modes = ( + "init", + "init-local", + "modules-init", + "modules-config", + "modules-final", + ) if mode not in modes: raise ValueError( - "Invalid cloud init mode specified '{0}'".format(mode)) + "Invalid cloud init mode specified '{0}'".format(mode) + ) status = None - if mode == 'init-local': + if mode == "init-local": for f in (status_link, result_link, status_path, result_path): util.del_file(f) else: @@ -664,45 +728,46 @@ def status_wrapper(name, args, data_d=None, link_d=None): pass nullstatus = { - 'errors': [], - 'start': None, - 'finished': None, + "errors": [], + "start": None, + "finished": None, } if status is None: - status = {'v1': {}} - status['v1']['datasource'] = None + status = {"v1": {}} + status["v1"]["datasource"] = None for m in modes: - if m not in status['v1']: - status['v1'][m] = nullstatus.copy() + if m not in status["v1"]: + status["v1"][m] = nullstatus.copy() - v1 = status['v1'] - v1['stage'] = mode - v1[mode]['start'] = time.time() + v1 = status["v1"] + v1["stage"] = mode + v1[mode]["start"] = time.time() atomic_helper.write_json(status_path, status) - util.sym_link(os.path.relpath(status_path, link_d), status_link, - force=True) + util.sym_link( + os.path.relpath(status_path, link_d), status_link, force=True + ) try: ret = functor(name, args) - if mode in ('init', 'init-local'): + if mode in ("init", "init-local"): (datasource, errors) = ret if datasource is not None: - v1['datasource'] = str(datasource) + v1["datasource"] = str(datasource) else: errors = ret - v1[mode]['errors'] = [str(e) for e in errors] + v1[mode]["errors"] = [str(e) for e in errors] except Exception as e: util.logexc(LOG, "failed stage %s", mode) print_exc("failed run of stage %s" % mode) - v1[mode]['errors'] = [str(e)] + v1[mode]["errors"] = [str(e)] - v1[mode]['finished'] = time.time() - v1['stage'] = None + v1[mode]["finished"] = time.time() + v1["stage"] = None atomic_helper.write_json(status_path, status) @@ -710,23 +775,26 @@ def status_wrapper(name, args, data_d=None, link_d=None): # write the 'finished' file errors = [] for m in modes: - if v1[m]['errors']: - errors.extend(v1[m].get('errors', [])) + if v1[m]["errors"]: + errors.extend(v1[m].get("errors", [])) atomic_helper.write_json( - 
result_path, {'v1': {'datasource': v1['datasource'], - 'errors': errors}}) - util.sym_link(os.path.relpath(result_path, link_d), result_link, - force=True) + result_path, + {"v1": {"datasource": v1["datasource"], "errors": errors}}, + ) + util.sym_link( + os.path.relpath(result_path, link_d), result_link, force=True + ) - return len(v1[mode]['errors']) + return len(v1[mode]["errors"]) def _maybe_persist_instance_data(init): """Write instance-data.json file if absent and datasource is restored.""" if init.ds_restored: instance_data_file = os.path.join( - init.paths.run_dir, sources.INSTANCE_JSON_FILE) + init.paths.run_dir, sources.INSTANCE_JSON_FILE + ) if not os.path.exists(instance_data_file): init.datasource.persist_instance_data() @@ -739,18 +807,23 @@ def _maybe_set_hostname(init, stage, retry_stage): """ cloud = init.cloudify() (hostname, _fqdn) = util.get_hostname_fqdn( - init.cfg, cloud, metadata_only=True) + init.cfg, cloud, metadata_only=True + ) if hostname: # meta-data or user-data hostname content try: - cc_set_hostname.handle('set-hostname', init.cfg, cloud, LOG, None) + cc_set_hostname.handle("set-hostname", init.cfg, cloud, LOG, None) except cc_set_hostname.SetHostnameError as e: LOG.debug( - 'Failed setting hostname in %s stage. Will' - ' retry in %s stage. Error: %s.', stage, retry_stage, str(e)) + "Failed setting hostname in %s stage. Will" + " retry in %s stage. Error: %s.", + stage, + retry_stage, + str(e), + ) def main_features(name, args): - sys.stdout.write('\n'.join(sorted(version.FEATURES)) + '\n') + sys.stdout.write("\n".join(sorted(version.FEATURES)) + "\n") def main(sysv_args=None): @@ -760,129 +833,182 @@ def main(sysv_args=None): sysv_args = sysv_args[1:] # Top level args - parser.add_argument('--version', '-v', action='version', - version='%(prog)s ' + (version.version_string())) - parser.add_argument('--file', '-f', action='append', - dest='files', - help=('additional yaml configuration' - ' files to use'), - type=argparse.FileType('rb')) - parser.add_argument('--debug', '-d', action='store_true', - help=('show additional pre-action' - ' logging (default: %(default)s)'), - default=False) - parser.add_argument('--force', action='store_true', - help=('force running even if no datasource is' - ' found (use at your own risk)'), - dest='force', - default=False) + parser.add_argument( + "--version", + "-v", + action="version", + version="%(prog)s " + (version.version_string()), + ) + parser.add_argument( + "--file", + "-f", + action="append", + dest="files", + help="additional yaml configuration files to use", + type=argparse.FileType("rb"), + ) + parser.add_argument( + "--debug", + "-d", + action="store_true", + help="show additional pre-action logging (default: %(default)s)", + default=False, + ) + parser.add_argument( + "--force", + action="store_true", + help=( + "force running even if no datasource is" + " found (use at your own risk)" + ), + dest="force", + default=False, + ) parser.set_defaults(reporter=None) - subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand') + subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand") subparsers.required = True # Each action and its sub-options (if any) - parser_init = subparsers.add_parser('init', - help=('initializes cloud-init and' - ' performs initial modules')) - parser_init.add_argument("--local", '-l', action='store_true', - help="start in local mode (default: %(default)s)", - default=False) + parser_init = subparsers.add_parser( + "init", help="initializes cloud-init and 
performs initial modules" + ) + parser_init.add_argument( + "--local", + "-l", + action="store_true", + help="start in local mode (default: %(default)s)", + default=False, + ) # This is used so that we can know which action is selected + # the functor to use to run this subcommand - parser_init.set_defaults(action=('init', main_init)) + parser_init.set_defaults(action=("init", main_init)) # These settings are used for the 'config' and 'final' stages - parser_mod = subparsers.add_parser('modules', - help=('activates modules using ' - 'a given configuration key')) - parser_mod.add_argument("--mode", '-m', action='store', - help=("module configuration name " - "to use (default: %(default)s)"), - default='config', - choices=('init', 'config', 'final')) - parser_mod.set_defaults(action=('modules', main_modules)) + parser_mod = subparsers.add_parser( + "modules", help="activates modules using a given configuration key" + ) + parser_mod.add_argument( + "--mode", + "-m", + action="store", + help="module configuration name to use (default: %(default)s)", + default="config", + choices=("init", "config", "final"), + ) + parser_mod.set_defaults(action=("modules", main_modules)) # This subcommand allows you to run a single module - parser_single = subparsers.add_parser('single', - help=('run a single module ')) - parser_single.add_argument("--name", '-n', action="store", - help="module name to run", - required=True) - parser_single.add_argument("--frequency", action="store", - help=("frequency of the module"), - required=False, - choices=list(FREQ_SHORT_NAMES.keys())) - parser_single.add_argument("--report", action="store_true", - help="enable reporting", - required=False) - parser_single.add_argument("module_args", nargs="*", - metavar='argument', - help=('any additional arguments to' - ' pass to this module')) - parser_single.set_defaults(action=('single', main_single)) + parser_single = subparsers.add_parser( + "single", help="run a single module " + ) + parser_single.add_argument( + "--name", + "-n", + action="store", + help="module name to run", + required=True, + ) + parser_single.add_argument( + "--frequency", + action="store", + help="frequency of the module", + required=False, + choices=list(FREQ_SHORT_NAMES.keys()), + ) + parser_single.add_argument( + "--report", + action="store_true", + help="enable reporting", + required=False, + ) + parser_single.add_argument( + "module_args", + nargs="*", + metavar="argument", + help="any additional arguments to pass to this module", + ) + parser_single.set_defaults(action=("single", main_single)) parser_query = subparsers.add_parser( - 'query', - help='Query standardized instance metadata from the command line.') + "query", + help="Query standardized instance metadata from the command line.", + ) parser_dhclient = subparsers.add_parser( - dhclient_hook.NAME, help=dhclient_hook.__doc__) + dhclient_hook.NAME, help=dhclient_hook.__doc__ + ) dhclient_hook.get_parser(parser_dhclient) - parser_features = subparsers.add_parser('features', - help=('list defined features')) - parser_features.set_defaults(action=('features', main_features)) + parser_features = subparsers.add_parser( + "features", help="list defined features" + ) + parser_features.set_defaults(action=("features", main_features)) parser_analyze = subparsers.add_parser( - 'analyze', help='Devel tool: Analyze cloud-init logs and data') + "analyze", help="Devel tool: Analyze cloud-init logs and data" + ) - parser_devel = subparsers.add_parser( - 'devel', help='Run development tools') + parser_devel = 
subparsers.add_parser("devel", help="Run development tools") parser_collect_logs = subparsers.add_parser( - 'collect-logs', help='Collect and tar all cloud-init debug info') + "collect-logs", help="Collect and tar all cloud-init debug info" + ) parser_clean = subparsers.add_parser( - 'clean', help='Remove logs and artifacts so cloud-init can re-run.') + "clean", help="Remove logs and artifacts so cloud-init can re-run." + ) parser_status = subparsers.add_parser( - 'status', help='Report cloud-init status or wait on completion.') + "status", help="Report cloud-init status or wait on completion." + ) if sysv_args: # Only load subparsers if subcommand is specified to avoid load cost - if sysv_args[0] == 'analyze': + if sysv_args[0] == "analyze": from cloudinit.analyze.__main__ import get_parser as analyze_parser + # Construct analyze subcommand parser analyze_parser(parser_analyze) - elif sysv_args[0] == 'devel': + elif sysv_args[0] == "devel": from cloudinit.cmd.devel.parser import get_parser as devel_parser + # Construct devel subcommand parser devel_parser(parser_devel) - elif sysv_args[0] == 'collect-logs': + elif sysv_args[0] == "collect-logs": from cloudinit.cmd.devel.logs import ( - get_parser as logs_parser, handle_collect_logs_args) + get_parser as logs_parser, + handle_collect_logs_args, + ) + logs_parser(parser_collect_logs) parser_collect_logs.set_defaults( - action=('collect-logs', handle_collect_logs_args)) - elif sysv_args[0] == 'clean': + action=("collect-logs", handle_collect_logs_args) + ) + elif sysv_args[0] == "clean": from cloudinit.cmd.clean import ( - get_parser as clean_parser, handle_clean_args) + get_parser as clean_parser, + handle_clean_args, + ) + clean_parser(parser_clean) - parser_clean.set_defaults( - action=('clean', handle_clean_args)) - elif sysv_args[0] == 'query': + parser_clean.set_defaults(action=("clean", handle_clean_args)) + elif sysv_args[0] == "query": from cloudinit.cmd.query import ( - get_parser as query_parser, handle_args as handle_query_args) + get_parser as query_parser, + handle_args as handle_query_args, + ) + query_parser(parser_query) - parser_query.set_defaults( - action=('render', handle_query_args)) - elif sysv_args[0] == 'status': + parser_query.set_defaults(action=("render", handle_query_args)) + elif sysv_args[0] == "status": from cloudinit.cmd.status import ( - get_parser as status_parser, handle_status_args) + get_parser as status_parser, + handle_status_args, + ) + status_parser(parser_status) - parser_status.set_defaults( - action=('status', handle_status_args)) + parser_status.set_defaults(action=("status", handle_status_args)) args = parser.parse_args(args=sysv_args) @@ -906,14 +1032,20 @@ def main(sysv_args=None): if args.local: rname, rdesc = ("init-local", "searching for local datasources") else: - rname, rdesc = ("init-network", - "searching for network datasources") + rname, rdesc = ( + "init-network", + "searching for network datasources", + ) elif name == "modules": - rname, rdesc = ("modules-%s" % args.mode, - "running modules for %s" % args.mode) + rname, rdesc = ( + "modules-%s" % args.mode, + "running modules for %s" % args.mode, + ) elif name == "single": - rname, rdesc = ("single/%s" % args.name, - "running single module %s" % args.name) + rname, rdesc = ( + "single/%s" % args.name, + "running single module %s" % args.name, + ) report_on = args.report else: rname = name @@ -921,19 +1053,24 @@ def main(sysv_args=None): report_on = False args.reporter = events.ReportEventStack( - rname, rdesc, 
reporting_enabled=report_on) + rname, rdesc, reporting_enabled=report_on + ) with args.reporter: retval = util.log_time( - logfunc=LOG.debug, msg="cloud-init mode '%s'" % name, - get_uptime=True, func=functor, args=(name, args)) + logfunc=LOG.debug, + msg="cloud-init mode '%s'" % name, + get_uptime=True, + func=functor, + args=(name, args), + ) reporting.flush_events() return retval -if __name__ == '__main__': - if 'TZ' not in os.environ: - os.environ['TZ'] = ":/etc/localtime" +if __name__ == "__main__": + if "TZ" not in os.environ: + os.environ["TZ"] = ":/etc/localtime" return_value = main(sys.argv) if return_value: sys.exit(return_value) diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py index e53cd855..46f17699 100644 --- a/cloudinit/cmd/query.py +++ b/cloudinit/cmd/query.py @@ -14,22 +14,24 @@ output; if this fails, they are treated as binary. """ import argparse -from errno import EACCES import os import sys +from errno import EACCES +from cloudinit import log, util +from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths from cloudinit.handlers.jinja_template import ( convert_jinja_instance_data, get_jinja_variable_alias, - render_jinja_payload + render_jinja_payload, ) -from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths -from cloudinit import log from cloudinit.sources import ( - INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, REDACT_SENSITIVE_VALUE) -from cloudinit import util + INSTANCE_JSON_FILE, + INSTANCE_JSON_SENSITIVE_FILE, + REDACT_SENSITIVE_VALUE, +) -NAME = 'query' +NAME = "query" LOG = log.getLogger(NAME) @@ -43,41 +45,79 @@ def get_parser(parser=None): @returns: ArgumentParser with proper argument configuration. """ if not parser: - parser = argparse.ArgumentParser( - prog=NAME, description=__doc__) + parser = argparse.ArgumentParser(prog=NAME, description=__doc__) parser.add_argument( - '-d', '--debug', action='store_true', default=False, - help='Add verbose messages during template render') + "-d", + "--debug", + action="store_true", + default=False, + help="Add verbose messages during template render", + ) parser.add_argument( - '-i', '--instance-data', type=str, - help=('Path to instance-data.json file. Default is /run/cloud-init/%s' - % INSTANCE_JSON_FILE)) + "-i", + "--instance-data", + type=str, + help="Path to instance-data.json file. Default is /run/cloud-init/%s" + % INSTANCE_JSON_FILE, + ) parser.add_argument( - '-l', '--list-keys', action='store_true', default=False, - help=('List query keys available at the provided instance-data' - ' .')) + "-l", + "--list-keys", + action="store_true", + default=False, + help=( + "List query keys available at the provided instance-data" + " ." + ), + ) parser.add_argument( - '-u', '--user-data', type=str, - help=('Path to user-data file. Default is' - ' /var/lib/cloud/instance/user-data.txt')) + "-u", + "--user-data", + type=str, + help=( + "Path to user-data file. Default is" + " /var/lib/cloud/instance/user-data.txt" + ), + ) parser.add_argument( - '-v', '--vendor-data', type=str, - help=('Path to vendor-data file. Default is' - ' /var/lib/cloud/instance/vendor-data.txt')) + "-v", + "--vendor-data", + type=str, + help=( + "Path to vendor-data file. Default is" + " /var/lib/cloud/instance/vendor-data.txt" + ), + ) parser.add_argument( - 'varname', type=str, nargs='?', - help=('A dot-delimited specific variable to query from' - ' instance-data. For example: v1.local_hostname. If the' - ' value is not JSON serializable, it will be base64-encoded and' - ' will contain the prefix "ci-b64:". 
')) + "varname", + type=str, + nargs="?", + help=( + "A dot-delimited specific variable to query from" + " instance-data. For example: v1.local_hostname. If the" + " value is not JSON serializable, it will be base64-encoded and" + ' will contain the prefix "ci-b64:". ' + ), + ) parser.add_argument( - '-a', '--all', action='store_true', default=False, dest='dump_all', - help='Dump all available instance-data') + "-a", + "--all", + action="store_true", + default=False, + dest="dump_all", + help="Dump all available instance-data", + ) parser.add_argument( - '-f', '--format', type=str, dest='format', - help=('Optionally specify a custom output format string. Any' - ' instance-data variable can be specified between double-curly' - ' braces. For example -f "{{ v2.cloud_name }}"')) + "-f", + "--format", + type=str, + dest="format", + help=( + "Optionally specify a custom output format string. Any" + " instance-data variable can be specified between double-curly" + ' braces. For example -f "{{ v2.cloud_name }}"' + ), + ) return parser @@ -91,7 +131,7 @@ def load_userdata(ud_file_path): """ bdata = util.load_file(ud_file_path, decode=False) try: - return bdata.decode('utf-8') + return bdata.decode("utf-8") except UnicodeDecodeError: return util.decomp_gzip(bdata, quiet=False, decode=True) @@ -118,13 +158,15 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict: redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE) if uid == 0: sensitive_data_fn = os.path.join( - paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE) + paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE + ) if os.path.exists(sensitive_data_fn): instance_data_fn = sensitive_data_fn else: LOG.warning( - 'Missing root-readable %s. Using redacted %s instead.', - sensitive_data_fn, redacted_data_fn + "Missing root-readable %s. Using redacted %s instead.", + sensitive_data_fn, + redacted_data_fn, ) instance_data_fn = redacted_data_fn else: @@ -132,11 +174,11 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict: if user_data: user_data_fn = user_data else: - user_data_fn = os.path.join(paths.instance_link, 'user-data.txt') + user_data_fn = os.path.join(paths.instance_link, "user-data.txt") if vendor_data: vendor_data_fn = vendor_data else: - vendor_data_fn = os.path.join(paths.instance_link, 'vendor-data.txt') + vendor_data_fn = os.path.join(paths.instance_link, "vendor-data.txt") try: instance_json = util.load_file(instance_data_fn) @@ -144,24 +186,30 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict: if e.errno == EACCES: LOG.error("No read permission on '%s'. 
Try sudo", instance_data_fn) else: - LOG.error('Missing instance-data file: %s', instance_data_fn) + LOG.error("Missing instance-data file: %s", instance_data_fn) raise instance_data = util.load_json(instance_json) if uid != 0: - instance_data['userdata'] = ( - '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, user_data_fn)) - instance_data['vendordata'] = ( - '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, vendor_data_fn)) + instance_data["userdata"] = "<%s> file:%s" % ( + REDACT_SENSITIVE_VALUE, + user_data_fn, + ) + instance_data["vendordata"] = "<%s> file:%s" % ( + REDACT_SENSITIVE_VALUE, + vendor_data_fn, + ) else: - instance_data['userdata'] = load_userdata(user_data_fn) - instance_data['vendordata'] = load_userdata(vendor_data_fn) + instance_data["userdata"] = load_userdata(user_data_fn) + instance_data["vendordata"] = load_userdata(vendor_data_fn) return instance_data def _find_instance_data_leaf_by_varname_path( - jinja_vars_without_aliases: dict, jinja_vars_with_aliases: dict, - varname: str, list_keys: bool + jinja_vars_without_aliases: dict, + jinja_vars_with_aliases: dict, + varname: str, + list_keys: bool, ): """Return the value of the dot-delimited varname path in instance-data @@ -174,7 +222,7 @@ def _find_instance_data_leaf_by_varname_path( """ walked_key_path = "" response = jinja_vars_without_aliases - for key_path_part in varname.split('.'): + for key_path_part in varname.split("."): try: # Walk key path using complete aliases dict, yet response # should only contain jinja_without_aliases @@ -205,8 +253,9 @@ def handle_args(name, args): addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING) if not any([args.list_keys, args.varname, args.format, args.dump_all]): LOG.error( - 'Expected one of the options: --all, --format,' - ' --list-keys or varname') + "Expected one of the options: --all, --format," + " --list-keys or varname" + ) get_parser().print_help() return 1 try: @@ -216,11 +265,13 @@ def handle_args(name, args): except (IOError, OSError): return 1 if args.format: - payload = '## template: jinja\n{fmt}'.format(fmt=args.format) + payload = "## template: jinja\n{fmt}".format(fmt=args.format) rendered_payload = render_jinja_payload( - payload=payload, payload_fn='query commandline', + payload=payload, + payload_fn="query commandline", instance_data=instance_data, - debug=True if args.debug else False) + debug=True if args.debug else False, + ) if rendered_payload: print(rendered_payload) return 0 @@ -240,7 +291,7 @@ def handle_args(name, args): jinja_vars_without_aliases=response, jinja_vars_with_aliases=jinja_vars_with_aliases, varname=args.varname, - list_keys=args.list_keys + list_keys=args.list_keys, ) except (KeyError, ValueError) as e: LOG.error(e) @@ -248,11 +299,10 @@ def handle_args(name, args): if args.list_keys: if not isinstance(response, dict): LOG.error( - "--list-keys provided but '%s' is not a dict", - args.varname + "--list-keys provided but '%s' is not a dict", args.varname ) return 1 - response = '\n'.join(sorted(response.keys())) + response = "\n".join(sorted(response.keys())) if not isinstance(response, str): response = util.json_dumps(response) print(response) @@ -265,7 +315,7 @@ def main(): sys.exit(handle_args(NAME, parser.parse_args())) -if __name__ == '__main__': +if __name__ == "__main__": main() # vi: ts=4 expandtab diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py index ea79a85b..cff16c34 100644 --- a/cloudinit/cmd/status.py +++ b/cloudinit/cmd/status.py @@ -7,20 +7,20 @@ import argparse import os import sys -from time import 
gmtime, strftime, sleep +from time import gmtime, sleep, strftime from cloudinit.distros import uses_systemd from cloudinit.stages import Init from cloudinit.util import get_cmdline, load_file, load_json -CLOUDINIT_DISABLED_FILE = '/etc/cloud/cloud-init.disabled' +CLOUDINIT_DISABLED_FILE = "/etc/cloud/cloud-init.disabled" # customer visible status messages -STATUS_ENABLED_NOT_RUN = 'not run' -STATUS_RUNNING = 'running' -STATUS_DONE = 'done' -STATUS_ERROR = 'error' -STATUS_DISABLED = 'disabled' +STATUS_ENABLED_NOT_RUN = "not run" +STATUS_RUNNING = "running" +STATUS_DONE = "done" +STATUS_ERROR = "error" +STATUS_DISABLED = "disabled" def get_parser(parser=None): @@ -34,15 +34,25 @@ def get_parser(parser=None): """ if not parser: parser = argparse.ArgumentParser( - prog='status', - description='Report run status of cloud init') + prog="status", description="Report run status of cloud init" + ) parser.add_argument( - '-l', '--long', action='store_true', default=False, - help=('Report long format of statuses including run stage name and' - ' error messages')) + "-l", + "--long", + action="store_true", + default=False, + help=( + "Report long format of statuses including run stage name and" + " error messages" + ), + ) parser.add_argument( - '-w', '--wait', action='store_true', default=False, - help='Block waiting on cloud-init to complete') + "-w", + "--wait", + action="store_true", + default=False, + help="Block waiting on cloud-init to complete", + ) return parser @@ -55,18 +65,18 @@ def handle_status_args(name, args): status, status_detail, time = _get_status_details(init.paths) if args.wait: while status in (STATUS_ENABLED_NOT_RUN, STATUS_RUNNING): - sys.stdout.write('.') + sys.stdout.write(".") sys.stdout.flush() status, status_detail, time = _get_status_details(init.paths) sleep(0.25) - sys.stdout.write('\n') + sys.stdout.write("\n") if args.long: - print('status: {0}'.format(status)) + print("status: {0}".format(status)) if time: - print('time: {0}'.format(time)) - print('detail:\n{0}'.format(status_detail)) + print("time: {0}".format(time)) + print("detail:\n{0}".format(status_detail)) else: - print('status: {0}'.format(status)) + print("status: {0}".format(status)) return 1 if status == STATUS_ERROR else 0 @@ -81,20 +91,20 @@ def _is_cloudinit_disabled(disable_file, paths): is_disabled = False cmdline_parts = get_cmdline().split() if not uses_systemd(): - reason = 'Cloud-init enabled on sysvinit' - elif 'cloud-init=enabled' in cmdline_parts: - reason = 'Cloud-init enabled by kernel command line cloud-init=enabled' + reason = "Cloud-init enabled on sysvinit" + elif "cloud-init=enabled" in cmdline_parts: + reason = "Cloud-init enabled by kernel command line cloud-init=enabled" elif os.path.exists(disable_file): is_disabled = True - reason = 'Cloud-init disabled by {0}'.format(disable_file) - elif 'cloud-init=disabled' in cmdline_parts: + reason = "Cloud-init disabled by {0}".format(disable_file) + elif "cloud-init=disabled" in cmdline_parts: is_disabled = True - reason = 'Cloud-init disabled by kernel parameter cloud-init=disabled' - elif not os.path.exists(os.path.join(paths.run_dir, 'enabled')): + reason = "Cloud-init disabled by kernel parameter cloud-init=disabled" + elif not os.path.exists(os.path.join(paths.run_dir, "enabled")): is_disabled = True - reason = 'Cloud-init disabled by cloud-init-generator' + reason = "Cloud-init disabled by cloud-init-generator" else: - reason = 'Cloud-init enabled by systemd cloud-init-generator' + reason = "Cloud-init enabled by systemd 
cloud-init-generator" return (is_disabled, reason) @@ -106,34 +116,35 @@ def _get_status_details(paths): Values are obtained from parsing paths.run_dir/status.json. """ status = STATUS_ENABLED_NOT_RUN - status_detail = '' + status_detail = "" status_v1 = {} - status_file = os.path.join(paths.run_dir, 'status.json') - result_file = os.path.join(paths.run_dir, 'result.json') + status_file = os.path.join(paths.run_dir, "status.json") + result_file = os.path.join(paths.run_dir, "result.json") (is_disabled, reason) = _is_cloudinit_disabled( - CLOUDINIT_DISABLED_FILE, paths) + CLOUDINIT_DISABLED_FILE, paths + ) if is_disabled: status = STATUS_DISABLED status_detail = reason if os.path.exists(status_file): if not os.path.exists(result_file): status = STATUS_RUNNING - status_v1 = load_json(load_file(status_file)).get('v1', {}) + status_v1 = load_json(load_file(status_file)).get("v1", {}) errors = [] latest_event = 0 for key, value in sorted(status_v1.items()): - if key == 'stage': + if key == "stage": if value: status = STATUS_RUNNING - status_detail = 'Running in stage: {0}'.format(value) - elif key == 'datasource': + status_detail = "Running in stage: {0}".format(value) + elif key == "datasource": status_detail = value elif isinstance(value, dict): - errors.extend(value.get('errors', [])) - start = value.get('start') or 0 - finished = value.get('finished') or 0 + errors.extend(value.get("errors", [])) + start = value.get("start") or 0 + finished = value.get("finished") or 0 if finished == 0 and start != 0: status = STATUS_RUNNING event_time = max(start, finished) @@ -141,23 +152,23 @@ def _get_status_details(paths): latest_event = event_time if errors: status = STATUS_ERROR - status_detail = '\n'.join(errors) + status_detail = "\n".join(errors) elif status == STATUS_ENABLED_NOT_RUN and latest_event > 0: status = STATUS_DONE if latest_event: - time = strftime('%a, %d %b %Y %H:%M:%S %z', gmtime(latest_event)) + time = strftime("%a, %d %b %Y %H:%M:%S %z", gmtime(latest_event)) else: - time = '' + time = "" return status, status_detail, time def main(): """Tool to report status of cloud-init.""" parser = get_parser() - sys.exit(handle_status_args('status', parser.parse_args())) + sys.exit(handle_status_args("status", parser.parse_args())) -if __name__ == '__main__': +if __name__ == "__main__": main() # vi: ts=4 expandtab diff --git a/cloudinit/config/__init__.py b/cloudinit/config/__init__.py index 0ef9a748..ed124180 100644 --- a/cloudinit/config/__init__.py +++ b/cloudinit/config/__init__.py @@ -6,9 +6,8 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-from cloudinit.settings import (PER_INSTANCE, FREQUENCIES) - from cloudinit import log as logging +from cloudinit.settings import FREQUENCIES, PER_INSTANCE LOG = logging.getLogger(__name__) @@ -22,26 +21,27 @@ MOD_PREFIX = "cc_" def form_module_name(name): canon_name = name.replace("-", "_") if canon_name.lower().endswith(".py"): - canon_name = canon_name[0:(len(canon_name) - 3)] + canon_name = canon_name[0 : (len(canon_name) - 3)] canon_name = canon_name.strip() if not canon_name: return None if not canon_name.startswith(MOD_PREFIX): - canon_name = '%s%s' % (MOD_PREFIX, canon_name) + canon_name = "%s%s" % (MOD_PREFIX, canon_name) return canon_name def fixup_module(mod, def_freq=PER_INSTANCE): - if not hasattr(mod, 'frequency'): - setattr(mod, 'frequency', def_freq) + if not hasattr(mod, "frequency"): + setattr(mod, "frequency", def_freq) else: freq = mod.frequency if freq and freq not in FREQUENCIES: LOG.warning("Module %s has an unknown frequency %s", mod, freq) - if not hasattr(mod, 'distros'): - setattr(mod, 'distros', []) - if not hasattr(mod, 'osfamilies'): - setattr(mod, 'osfamilies', []) + if not hasattr(mod, "distros"): + setattr(mod, "distros", []) + if not hasattr(mod, "osfamilies"): + setattr(mod, "osfamilies", []) return mod + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_apk_configure.py b/cloudinit/config/cc_apk_configure.py index d227a58d..a615c814 100644 --- a/cloudinit/config/cc_apk_configure.py +++ b/cloudinit/config/cc_apk_configure.py @@ -9,9 +9,7 @@ from textwrap import dedent from cloudinit import log as logging -from cloudinit import temp_utils -from cloudinit import templater -from cloudinit import util +from cloudinit import temp_utils, templater, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE @@ -54,34 +52,41 @@ REPOSITORIES_TEMPLATE = """\ frequency = PER_INSTANCE -distros = ['alpine'] +distros = ["alpine"] meta = { - 'id': 'cc_apk_configure', - 'name': 'APK Configure', - 'title': 'Configure apk repositories file', - 'description': dedent("""\ + "id": "cc_apk_configure", + "name": "APK Configure", + "title": "Configure apk repositories file", + "description": dedent( + """\ This module handles configuration of the /etc/apk/repositories file. .. note:: To ensure that apk configuration is valid yaml, any strings containing special characters, especially ``:`` should be quoted. - """), - 'distros': distros, - 'examples': [ - dedent("""\ + """ + ), + "distros": distros, + "examples": [ + dedent( + """\ # Keep the existing /etc/apk/repositories file unaltered. apk_repos: preserve_repositories: true - """), - dedent("""\ + """ + ), + dedent( + """\ # Create repositories file for Alpine v3.12 main and community # using default mirror site. apk_repos: alpine_repo: community_enabled: true version: 'v3.12' - """), - dedent("""\ + """ + ), + dedent( + """\ # Create repositories file for Alpine Edge main, community, and # testing using a specified mirror site and also a local repo. 
apk_repos: @@ -91,21 +96,23 @@ meta = { testing_enabled: true version: 'edge' local_repo_base_url: 'https://my-local-server/local-alpine' - """), + """ + ), ], - 'frequency': frequency, + "frequency": frequency, } schema = { - 'type': 'object', - 'properties': { - 'apk_repos': { - 'type': 'object', - 'properties': { - 'preserve_repositories': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "type": "object", + "properties": { + "apk_repos": { + "type": "object", + "properties": { + "preserve_repositories": { + "type": "boolean", + "default": False, + "description": dedent( + """\ By default, cloud-init will generate a new repositories file ``/etc/apk/repositories`` based on any valid configuration settings specified within a apk_repos @@ -116,33 +123,41 @@ schema = { The ``preserve_repositories`` option overrides all other config keys that would alter ``/etc/apk/repositories``. - """) + """ + ), }, - 'alpine_repo': { - 'type': ['object', 'null'], - 'properties': { - 'base_url': { - 'type': 'string', - 'default': DEFAULT_MIRROR, - 'description': dedent("""\ + "alpine_repo": { + "type": ["object", "null"], + "properties": { + "base_url": { + "type": "string", + "default": DEFAULT_MIRROR, + "description": dedent( + """\ The base URL of an Alpine repository, or mirror, to download official packages from. If not specified then it defaults to ``{}`` - """.format(DEFAULT_MIRROR)) + """.format( + DEFAULT_MIRROR + ) + ), }, - 'community_enabled': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "community_enabled": { + "type": "boolean", + "default": False, + "description": dedent( + """\ Whether to add the Community repo to the repositories file. By default the Community repo is not included. - """) + """ + ), }, - 'testing_enabled': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "testing_enabled": { + "type": "boolean", + "default": False, + "description": dedent( + """\ Whether to add the Testing repo to the repositories file. By default the Testing repo is not included. It is only recommended @@ -151,32 +166,37 @@ schema = { installed from Testing may have dependancies that conflict with those in non-Edge Main or Community repos." - """) + """ + ), }, - 'version': { - 'type': 'string', - 'description': dedent("""\ + "version": { + "type": "string", + "description": dedent( + """\ The Alpine version to use (e.g. ``v3.12`` or ``edge``) - """) + """ + ), }, }, - 'required': ['version'], - 'minProperties': 1, - 'additionalProperties': False, + "required": ["version"], + "minProperties": 1, + "additionalProperties": False, }, - 'local_repo_base_url': { - 'type': 'string', - 'description': dedent("""\ + "local_repo_base_url": { + "type": "string", + "description": dedent( + """\ The base URL of an Alpine repository containing unofficial packages - """) - } + """ + ), + }, }, - 'minProperties': 1, # Either preserve_repositories or alpine_repo - 'additionalProperties': False, + "minProperties": 1, # Either preserve_repositories or alpine_repo + "additionalProperties": False, } - } + }, } __doc__ = get_meta_doc(meta, schema) @@ -195,38 +215,44 @@ def handle(name, cfg, cloud, log, _args): # If there is no "apk_repos" section in the configuration # then do nothing. 
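    # (Per the schema above, a minimal config that proceeds past the
    # checks below is e.g.
    #   {"apk_repos": {"alpine_repo": {"version": "v3.12"}}}.)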
- apk_section = cfg.get('apk_repos') + apk_section = cfg.get("apk_repos") if not apk_section: - LOG.debug(("Skipping module named %s," - " no 'apk_repos' section found"), name) + LOG.debug( + "Skipping module named %s, no 'apk_repos' section found", name + ) return validate_cloudconfig_schema(cfg, schema) # If "preserve_repositories" is explicitly set to True in # the configuration do nothing. - if util.get_cfg_option_bool(apk_section, 'preserve_repositories', False): - LOG.debug(("Skipping module named %s," - " 'preserve_repositories' is set"), name) + if util.get_cfg_option_bool(apk_section, "preserve_repositories", False): + LOG.debug( + "Skipping module named %s, 'preserve_repositories' is set", name + ) return # If there is no "alpine_repo" subsection of "apk_repos" present in the # configuration then do nothing, as at least "version" is required to # create valid repositories entries. - alpine_repo = apk_section.get('alpine_repo') + alpine_repo = apk_section.get("alpine_repo") if not alpine_repo: - LOG.debug(("Skipping module named %s," - " no 'alpine_repo' configuration found"), name) + LOG.debug( + "Skipping module named %s, no 'alpine_repo' configuration found", + name, + ) return # If there is no "version" value present in configuration then do nothing. - alpine_version = alpine_repo.get('version') + alpine_version = alpine_repo.get("version") if not alpine_version: - LOG.debug(("Skipping module named %s," - " 'version' not specified in alpine_repo"), name) + LOG.debug( + "Skipping module named %s, 'version' not specified in alpine_repo", + name, + ) return - local_repo = apk_section.get('local_repo_base_url', '') + local_repo = apk_section.get("local_repo_base_url", "") _write_repositories_file(alpine_repo, alpine_version, local_repo) @@ -240,22 +266,23 @@ def _write_repositories_file(alpine_repo, alpine_version, local_repo): @param local_repo: A string containing the base URL of a local repo. 
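        May be the empty string when no local repo was configured (see
        the default in handle() above), e.g.
        'https://my-local-server/local-alpine' otherwise.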
""" - repo_file = '/etc/apk/repositories' + repo_file = "/etc/apk/repositories" - alpine_baseurl = alpine_repo.get('base_url', DEFAULT_MIRROR) + alpine_baseurl = alpine_repo.get("base_url", DEFAULT_MIRROR) - params = {'alpine_baseurl': alpine_baseurl, - 'alpine_version': alpine_version, - 'community_enabled': alpine_repo.get('community_enabled'), - 'testing_enabled': alpine_repo.get('testing_enabled'), - 'local_repo': local_repo} + params = { + "alpine_baseurl": alpine_baseurl, + "alpine_version": alpine_version, + "community_enabled": alpine_repo.get("community_enabled"), + "testing_enabled": alpine_repo.get("testing_enabled"), + "local_repo": local_repo, + } - tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl") + tfile = temp_utils.mkstemp(prefix="template_name-", suffix=".tmpl") template_fn = tfile[1] # Filepath is second item in tuple util.write_file(template_fn, content=REPOSITORIES_TEMPLATE) - LOG.debug('Generating Alpine repository configuration file: %s', - repo_file) + LOG.debug("Generating Alpine repository configuration file: %s", repo_file) templater.render_to_file(template_fn, repo_file, params) # Clean up temporary template util.del_file(template_fn) diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 2e844c2c..b0728517 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -10,16 +10,14 @@ import glob import os -import re import pathlib +import re from textwrap import dedent -from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit import gpg from cloudinit import log as logging -from cloudinit import subp -from cloudinit import templater -from cloudinit import util +from cloudinit import subp, templater, util +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -27,59 +25,46 @@ LOG = logging.getLogger(__name__) # this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar') ADD_APT_REPO_MATCH = r"^[\w-]+:\w" -APT_LOCAL_KEYS = '/etc/apt/trusted.gpg' -APT_TRUSTED_GPG_DIR = '/etc/apt/trusted.gpg.d/' -CLOUD_INIT_GPG_DIR = '/etc/apt/cloud-init.gpg.d/' +APT_LOCAL_KEYS = "/etc/apt/trusted.gpg" +APT_TRUSTED_GPG_DIR = "/etc/apt/trusted.gpg.d/" +CLOUD_INIT_GPG_DIR = "/etc/apt/cloud-init.gpg.d/" frequency = PER_INSTANCE distros = ["ubuntu", "debian"] mirror_property = { - 'type': 'array', - 'items': { - 'type': 'object', - 'additionalProperties': False, - 'required': ['arches'], - 'properties': { - 'arches': { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'minItems': 1 - }, - 'uri': { - 'type': 'string', - 'format': 'uri' - }, - 'search': { - 'type': 'array', - 'items': { - 'type': 'string', - 'format': 'uri' - }, - 'minItems': 1 - }, - 'search_dns': { - 'type': 'boolean', + "type": "array", + "items": { + "type": "object", + "additionalProperties": False, + "required": ["arches"], + "properties": { + "arches": { + "type": "array", + "items": {"type": "string"}, + "minItems": 1, }, - 'keyid': { - 'type': 'string' + "uri": {"type": "string", "format": "uri"}, + "search": { + "type": "array", + "items": {"type": "string", "format": "uri"}, + "minItems": 1, }, - 'key': { - 'type': 'string' + "search_dns": { + "type": "boolean", }, - 'keyserver': { - 'type': 'string' - } - } - } + "keyid": {"type": "string"}, + "key": {"type": "string"}, + "keyserver": {"type": "string"}, + }, + }, } meta = { - 'id': 'cc_apt_configure', - 'name': 'Apt 
Configure', - 'title': 'Configure apt for the user', - 'description': dedent("""\ + "id": "cc_apt_configure", + "name": "Apt Configure", + "title": "Configure apt for the user", + "description": dedent( + """\ This module handles both configuration of apt options and adding source lists. There are configuration options such as ``apt_get_wrapper`` and ``apt_get_command`` that control how @@ -94,9 +79,12 @@ meta = { .. note:: For more information about apt configuration, see the - ``Additional apt configuration`` example."""), - 'distros': distros, - 'examples': [dedent("""\ + ``Additional apt configuration`` example.""" + ), + "distros": distros, + "examples": [ + dedent( + """\ apt: preserve_sources_list: false disable_suites: @@ -153,21 +141,24 @@ meta = { key: | ------BEGIN PGP PUBLIC KEY BLOCK------- - ------END PGP PUBLIC KEY BLOCK-------""")], - 'frequency': frequency, + ------END PGP PUBLIC KEY BLOCK-------""" + ) + ], + "frequency": frequency, } schema = { - 'type': 'object', - 'properties': { - 'apt': { - 'type': 'object', - 'additionalProperties': False, - 'properties': { - 'preserve_sources_list': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "type": "object", + "properties": { + "apt": { + "type": "object", + "additionalProperties": False, + "properties": { + "preserve_sources_list": { + "type": "boolean", + "default": False, + "description": dedent( + """\ By default, cloud-init will generate a new sources list in ``/etc/apt/sources.list.d`` based on any changes specified in cloud config. To disable this @@ -179,15 +170,15 @@ schema = { all other config keys that would alter ``sources.list`` or ``sources.list.d``, **except** for additional sources to be added - to ``sources.list.d``.""") + to ``sources.list.d``.""" + ), }, - 'disable_suites': { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'uniqueItems': True, - 'description': dedent("""\ + "disable_suites": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": True, + "description": dedent( + """\ Entries in the sources list can be disabled using ``disable_suites``, which takes a list of suites to be disabled. If the string ``$RELEASE`` is @@ -206,11 +197,13 @@ schema = { When a suite is disabled using ``disable_suites``, its entry in ``sources.list`` is not deleted; it - is just commented out.""") + is just commented out.""" + ), }, - 'primary': { + "primary": { **mirror_property, - 'description': dedent("""\ + "description": dedent( + """\ The primary and security archive mirrors can be specified using the ``primary`` and ``security`` keys, respectively. Both the @@ -264,27 +257,35 @@ schema = { ``http://archive.ubuntu.com/ubuntu``. - ``security`` => \ ``http://security.ubuntu.com/ubuntu`` - """) + """ + ), }, - 'security': { + "security": { **mirror_property, - 'description': dedent("""\ - Please refer to the primary config documentation""") + "description": dedent( + """\ + Please refer to the primary config documentation""" + ), }, - 'add_apt_repo_match': { - 'type': 'string', - 'default': ADD_APT_REPO_MATCH, - 'description': dedent("""\ + "add_apt_repo_match": { + "type": "string", + "default": ADD_APT_REPO_MATCH, + "description": dedent( + """\ All source entries in ``apt-sources`` that match regex in ``add_apt_repo_match`` will be added to the system using ``add-apt-repository``. 
If ``add_apt_repo_match`` is not specified, it - defaults to ``{}``""".format(ADD_APT_REPO_MATCH)) + defaults to ``{}``""".format( + ADD_APT_REPO_MATCH + ) + ), }, - 'debconf_selections': { - 'type': 'object', - 'items': {'type': 'string'}, - 'description': dedent("""\ + "debconf_selections": { + "type": "object", + "items": {"type": "string"}, + "description": dedent( + """\ Debconf additional configurations can be specified as a dictionary under the ``debconf_selections`` config key, with each key in the dict representing a @@ -308,11 +309,13 @@ schema = { For example: \ ``ippackage ippackage/ip string 127.0.01`` - """) + """ + ), }, - 'sources_list': { - 'type': 'string', - 'description': dedent("""\ + "sources_list": { + "type": "string", + "description": dedent( + """\ Specifies a custom template for rendering ``sources.list`` . If no ``sources_list`` template is given, cloud-init will use sane default. Within @@ -323,45 +326,55 @@ schema = { - ``$RELEASE`` - ``$PRIMARY`` - ``$SECURITY`` - - ``$KEY_FILE``""") + - ``$KEY_FILE``""" + ), }, - 'conf': { - 'type': 'string', - 'description': dedent("""\ + "conf": { + "type": "string", + "description": dedent( + """\ Specify configuration for apt, such as proxy configuration. This configuration is specified as a string. For multiline apt configuration, make sure - to follow yaml syntax.""") + to follow yaml syntax.""" + ), }, - 'https_proxy': { - 'type': 'string', - 'description': dedent("""\ + "https_proxy": { + "type": "string", + "description": dedent( + """\ More convenient way to specify https apt proxy. https proxy url is specified in the format - ``https://[[user][:pass]@]host[:port]/``.""") + ``https://[[user][:pass]@]host[:port]/``.""" + ), }, - 'http_proxy': { - 'type': 'string', - 'description': dedent("""\ + "http_proxy": { + "type": "string", + "description": dedent( + """\ More convenient way to specify http apt proxy. http proxy url is specified in the format - ``http://[[user][:pass]@]host[:port]/``.""") + ``http://[[user][:pass]@]host[:port]/``.""" + ), }, - 'proxy': { - 'type': 'string', - 'description': 'Alias for defining a http apt proxy.' + "proxy": { + "type": "string", + "description": "Alias for defining a http apt proxy.", }, - 'ftp_proxy': { - 'type': 'string', - 'description': dedent("""\ + "ftp_proxy": { + "type": "string", + "description": dedent( + """\ More convenient way to specify ftp apt proxy. 
ftp proxy url is specified in the format - ``ftp://[[user][:pass]@]host[:port]/``.""") + ``ftp://[[user][:pass]@]host[:port]/``.""" + ), }, - 'sources': { - 'type': 'object', - 'items': {'type': 'string'}, - 'description': dedent("""\ + "sources": { + "type": "object", + "items": {"type": "string"}, + "description": dedent( + """\ Source list entries can be specified as a dictionary under the ``sources`` config key, with each key in the dict representing a different source @@ -394,11 +407,12 @@ schema = { - ``$PRIMARY`` - ``$SECURITY`` - ``$RELEASE`` - - ``$KEY_FILE``""") - } - } + - ``$KEY_FILE``""" + ), + }, + }, } - } + }, } __doc__ = get_meta_doc(meta, schema) @@ -415,18 +429,22 @@ APT_PROXY_FN = "/etc/apt/apt.conf.d/90cloud-init-aptproxy" DEFAULT_KEYSERVER = "keyserver.ubuntu.com" # Default archive mirrors -PRIMARY_ARCH_MIRRORS = {"PRIMARY": "http://archive.ubuntu.com/ubuntu/", - "SECURITY": "http://security.ubuntu.com/ubuntu/"} -PORTS_MIRRORS = {"PRIMARY": "http://ports.ubuntu.com/ubuntu-ports", - "SECURITY": "http://ports.ubuntu.com/ubuntu-ports"} -PRIMARY_ARCHES = ['amd64', 'i386'] -PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el', 'riscv64'] +PRIMARY_ARCH_MIRRORS = { + "PRIMARY": "http://archive.ubuntu.com/ubuntu/", + "SECURITY": "http://security.ubuntu.com/ubuntu/", +} +PORTS_MIRRORS = { + "PRIMARY": "http://ports.ubuntu.com/ubuntu-ports", + "SECURITY": "http://ports.ubuntu.com/ubuntu-ports", +} +PRIMARY_ARCHES = ["amd64", "i386"] +PORTS_ARCHES = ["s390x", "arm64", "armhf", "powerpc", "ppc64el", "riscv64"] def get_default_mirrors(arch=None, target=None): """returns the default mirrors for the target. These depend on the - architecture, for more see: - https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports""" + architecture, for more see: + https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports""" if arch is None: arch = util.get_dpkg_architecture(target) if arch in PRIMARY_ARCHES: @@ -438,8 +456,8 @@ def get_default_mirrors(arch=None, target=None): def handle(name, ocfg, cloud, log, _): """process the config for apt_config. This can be called from - curthooks if a global apt config was provided or via the "apt" - standalone command.""" + curthooks if a global apt config was provided or via the "apt" + standalone command.""" # keeping code close to curtin codebase via entry handler target = None if log is not None: @@ -447,12 +465,14 @@ def handle(name, ocfg, cloud, log, _): LOG = log # feed back converted config, but only work on the subset under 'apt' ocfg = convert_to_v3_apt_format(ocfg) - cfg = ocfg.get('apt', {}) + cfg = ocfg.get("apt", {}) if not isinstance(cfg, dict): raise ValueError( "Expected dictionary for 'apt' config, found {config_type}".format( - config_type=type(cfg))) + config_type=type(cfg) + ) + ) validate_cloudconfig_schema(cfg, schema) apply_debconf_selections(cfg, target) @@ -463,7 +483,7 @@ def _should_configure_on_empty_apt(): # if no config was provided, should apt configuration be done? if util.system_is_snappy(): return False, "system is snappy." - if not (subp.which('apt-get') or subp.which('apt')): + if not (subp.which("apt-get") or subp.which("apt")): return False, "no apt commands." return True, "Apt is available." 
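For context: handle() above funnels any legacy apt_* keys through
convert_to_v3_apt_format() before validating the schema. A minimal sketch of
that conversion (not a verbatim excerpt from this patch; it assumes cloudinit
is importable and follows the mapoldkeys table further below):

    from cloudinit.config.cc_apt_configure import convert_to_v3_apt_format

    old_cfg = {
        "apt_proxy": "http://proxy.example.com:3128",  # hypothetical proxy
        "apt_preserve_sources_list": True,
    }
    new_cfg = convert_to_v3_apt_format(old_cfg)
    # The legacy keys are folded under a single "apt" key, so new_cfg["apt"]
    # is roughly {"proxy": "http://proxy.example.com:3128",
    #             "preserve_sources_list": True}
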
@@ -478,12 +498,12 @@ def apply_apt(cfg, cloud, target): LOG.debug("handling apt config: %s", cfg) - release = util.lsb_release(target=target)['codename'] + release = util.lsb_release(target=target)["codename"] arch = util.get_dpkg_architecture(target) mirrors = find_apt_mirror_info(cfg, cloud, arch=arch) LOG.debug("Apt Mirror info: %s", mirrors) - if util.is_false(cfg.get('preserve_sources_list', False)): + if util.is_false(cfg.get("preserve_sources_list", False)): add_mirror_keys(cfg, target) generate_sources_list(cfg, release, mirrors, cloud) rename_apt_lists(mirrors, target, arch) @@ -494,25 +514,34 @@ def apply_apt(cfg, cloud, target): LOG.exception("Failed to apply proxy or apt config info:") # Process 'apt_source -> sources {dict}' - if 'sources' in cfg: + if "sources" in cfg: params = mirrors - params['RELEASE'] = release - params['MIRROR'] = mirrors["MIRROR"] + params["RELEASE"] = release + params["MIRROR"] = mirrors["MIRROR"] matcher = None - matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH) + matchcfg = cfg.get("add_apt_repo_match", ADD_APT_REPO_MATCH) if matchcfg: matcher = re.compile(matchcfg).search - add_apt_sources(cfg['sources'], cloud, target=target, - template_params=params, aa_repo_match=matcher) + add_apt_sources( + cfg["sources"], + cloud, + target=target, + template_params=params, + aa_repo_match=matcher, + ) def debconf_set_selections(selections, target=None): - if not selections.endswith(b'\n'): - selections += b'\n' - subp.subp(['debconf-set-selections'], data=selections, target=target, - capture=True) + if not selections.endswith(b"\n"): + selections += b"\n" + subp.subp( + ["debconf-set-selections"], + data=selections, + target=target, + capture=True, + ) def dpkg_reconfigure(packages, target=None): @@ -532,12 +561,20 @@ def dpkg_reconfigure(packages, target=None): unhandled.append(pkg) if len(unhandled): - LOG.warning("The following packages were installed and preseeded, " - "but cannot be unconfigured: %s", unhandled) + LOG.warning( + "The following packages were installed and preseeded, " + "but cannot be unconfigured: %s", + unhandled, + ) if len(to_config): - subp.subp(['dpkg-reconfigure', '--frontend=noninteractive'] + - list(to_config), data=None, target=target, capture=True) + subp.subp( + ["dpkg-reconfigure", "--frontend=noninteractive"] + + list(to_config), + data=None, + target=target, + capture=True, + ) def apply_debconf_selections(cfg, target=None): @@ -546,13 +583,12 @@ def apply_debconf_selections(cfg, target=None): # set1: | # cloud-init cloud-init/datasources multiselect MAAS # set2: pkg pkg/value string bar - selsets = cfg.get('debconf_selections') + selsets = cfg.get("debconf_selections") if not selsets: LOG.debug("debconf_selections was not set in config") return - selections = '\n'.join( - [selsets[key] for key in sorted(selsets.keys())]) + selections = "\n".join([selsets[key] for key in sorted(selsets.keys())]) debconf_set_selections(selections.encode(), target=target) # get a complete list of packages listed in input @@ -579,7 +615,8 @@ def apply_debconf_selections(cfg, target=None): def clean_cloud_init(target): """clean out any local cloud-init config""" flist = glob.glob( - subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*")) + subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*") + ) LOG.debug("cleaning cloud-init config from: %s", flist) for dpkg_cfg in flist: @@ -588,18 +625,18 @@ def clean_cloud_init(target): def mirrorurl_to_apt_fileprefix(mirror): """mirrorurl_to_apt_fileprefix - Convert a mirror url to the 
file prefix used by apt on disk to - store cache information for that mirror. - To do so do: - - take off ???:// - - drop tailing / - - convert in string / to _""" + Convert a mirror url to the file prefix used by apt on disk to + store cache information for that mirror. + To do so do: + - take off ???:// + - drop tailing / + - convert in string / to _""" string = mirror if string.endswith("/"): string = string[0:-1] pos = string.find("://") if pos >= 0: - string = string[pos + 3:] + string = string[pos + 3 :] string = string.replace("/", "_") return string @@ -631,8 +668,8 @@ def rename_apt_lists(new_mirrors, target, arch): def mirror_to_placeholder(tmpl, mirror, placeholder): """mirror_to_placeholder - replace the specified mirror in a template with a placeholder string - Checks for existance of the expected mirror and warns if not found""" + replace the specified mirror in a template with a placeholder string + Checks for existance of the expected mirror and warns if not found""" if mirror not in tmpl: LOG.warning("Expected mirror '%s' not found in: %s", mirror, tmpl) return tmpl.replace(mirror, placeholder) @@ -640,13 +677,15 @@ def mirror_to_placeholder(tmpl, mirror, placeholder): def map_known_suites(suite): """there are a few default names which will be auto-extended. - This comes at the inability to use those names literally as suites, - but on the other hand increases readability of the cfg quite a lot""" - mapping = {'updates': '$RELEASE-updates', - 'backports': '$RELEASE-backports', - 'security': '$RELEASE-security', - 'proposed': '$RELEASE-proposed', - 'release': '$RELEASE'} + This comes at the inability to use those names literally as suites, + but on the other hand increases readability of the cfg quite a lot""" + mapping = { + "updates": "$RELEASE-updates", + "backports": "$RELEASE-backports", + "security": "$RELEASE-security", + "proposed": "$RELEASE-proposed", + "release": "$RELEASE", + } try: retsuite = mapping[suite] except KeyError: @@ -656,14 +695,14 @@ def map_known_suites(suite): def disable_suites(disabled, src, release): """reads the config for suites to be disabled and removes those - from the template""" + from the template""" if not disabled: return src retsrc = src for suite in disabled: suite = map_known_suites(suite) - releasesuite = templater.render_string(suite, {'RELEASE': release}) + releasesuite = templater.render_string(suite, {"RELEASE": release}) LOG.debug("Disabling suite %s as %s", suite, releasesuite) newsrc = "" @@ -685,7 +724,7 @@ def disable_suites(disabled, src, release): break if cols[pcol] == releasesuite: - line = '# suite disabled by cloud-init: %s' % line + line = "# suite disabled by cloud-init: %s" % line newsrc += line retsrc = newsrc @@ -694,36 +733,38 @@ def disable_suites(disabled, src, release): def add_mirror_keys(cfg, target): """Adds any keys included in the primary/security mirror clauses""" - for key in ('primary', 'security'): + for key in ("primary", "security"): for mirror in cfg.get(key, []): add_apt_key(mirror, target, file_name=key) def generate_sources_list(cfg, release, mirrors, cloud): """generate_sources_list - create a source.list file based on a custom or default template - by replacing mirrors and release in the template""" + create a source.list file based on a custom or default template + by replacing mirrors and release in the template""" aptsrc = "/etc/apt/sources.list" - params = {'RELEASE': release, 'codename': release} + params = {"RELEASE": release, "codename": release} for k in mirrors: params[k] = 
mirrors[k] params[k.lower()] = mirrors[k] - tmpl = cfg.get('sources_list', None) + tmpl = cfg.get("sources_list", None) if tmpl is None: LOG.info("No custom template provided, fall back to builtin") - template_fn = cloud.get_template_filename('sources.list.%s' % - (cloud.distro.name)) + template_fn = cloud.get_template_filename( + "sources.list.%s" % (cloud.distro.name) + ) if not template_fn: - template_fn = cloud.get_template_filename('sources.list') + template_fn = cloud.get_template_filename("sources.list") if not template_fn: - LOG.warning("No template found, " - "not rendering /etc/apt/sources.list") + LOG.warning( + "No template found, not rendering /etc/apt/sources.list" + ) return tmpl = util.load_file(template_fn) rendered = templater.render_string(tmpl, params) - disabled = disable_suites(cfg.get('disable_suites'), rendered, release) + disabled = disable_suites(cfg.get("disable_suites"), rendered, release) util.write_file(aptsrc, disabled, mode=0o644) @@ -735,7 +776,7 @@ def add_apt_key_raw(key, file_name, hardened=False, target=None): LOG.debug("Adding key:\n'%s'", key) try: name = pathlib.Path(file_name).stem - return apt_key('add', output_file=name, data=key, hardened=hardened) + return apt_key("add", output_file=name, data=key, hardened=hardened) except subp.ProcessExecutionError: LOG.exception("failed to add apt GPG Key to apt keyring") raise @@ -747,26 +788,26 @@ def add_apt_key(ent, target=None, hardened=False, file_name=None): Supports raw keys or keyid's The latter will as a first step fetched to get the raw key """ - if 'keyid' in ent and 'key' not in ent: + if "keyid" in ent and "key" not in ent: keyserver = DEFAULT_KEYSERVER - if 'keyserver' in ent: - keyserver = ent['keyserver'] + if "keyserver" in ent: + keyserver = ent["keyserver"] - ent['key'] = gpg.getkeybyid(ent['keyid'], keyserver) + ent["key"] = gpg.getkeybyid(ent["keyid"], keyserver) - if 'key' in ent: + if "key" in ent: return add_apt_key_raw( - ent['key'], - file_name or ent['filename'], - hardened=hardened) + ent["key"], file_name or ent["filename"], hardened=hardened + ) def update_packages(cloud): cloud.distro.update_package_sources() -def add_apt_sources(srcdict, cloud, target=None, template_params=None, - aa_repo_match=None): +def add_apt_sources( + srcdict, cloud, target=None, template_params=None, aa_repo_match=None +): """ install keys and repo source .list files defined in 'sources' @@ -795,33 +836,34 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None, template_params = {} if aa_repo_match is None: - raise ValueError('did not get a valid repo matcher') + raise ValueError("did not get a valid repo matcher") if not isinstance(srcdict, dict): - raise TypeError('unknown apt format: %s' % (srcdict)) + raise TypeError("unknown apt format: %s" % (srcdict)) for filename in srcdict: ent = srcdict[filename] LOG.debug("adding source/key '%s'", ent) - if 'filename' not in ent: - ent['filename'] = filename + if "filename" not in ent: + ent["filename"] = filename - if 'source' in ent and '$KEY_FILE' in ent['source']: + if "source" in ent and "$KEY_FILE" in ent["source"]: key_file = add_apt_key(ent, target, hardened=True) - template_params['KEY_FILE'] = key_file + template_params["KEY_FILE"] = key_file else: key_file = add_apt_key(ent, target) - if 'source' not in ent: + if "source" not in ent: continue - source = ent['source'] + source = ent["source"] source = templater.render_string(source, template_params) - if not ent['filename'].startswith("/"): - ent['filename'] = 
os.path.join("/etc/apt/sources.list.d/", - ent['filename']) - if not ent['filename'].endswith(".list"): - ent['filename'] += ".list" + if not ent["filename"].startswith("/"): + ent["filename"] = os.path.join( + "/etc/apt/sources.list.d/", ent["filename"] + ) + if not ent["filename"].endswith(".list"): + ent["filename"] += ".list" if aa_repo_match(source): try: @@ -831,7 +873,7 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None, raise continue - sourcefn = subp.target_path(target, ent['filename']) + sourcefn = subp.target_path(target, ent["filename"]) try: contents = "%s\n" % (source) util.write_file(sourcefn, contents, omode="a") @@ -850,14 +892,14 @@ def convert_v1_to_v2_apt_format(srclist): if isinstance(srclist, list): LOG.debug("apt config: convert V1 to V2 format (source list to dict)") for srcent in srclist: - if 'filename' not in srcent: + if "filename" not in srcent: # file collides for multiple !filename cases for compatibility # yet we need them all processed, so not same dictionary key - srcent['filename'] = "cloud_config_sources.list" + srcent["filename"] = "cloud_config_sources.list" key = util.rand_dict_key(srcdict, "cloud_config_sources.list") else: # all with filename use that as key (matching new format) - key = srcent['filename'] + key = srcent["filename"] srcdict[key] = srcent elif isinstance(srclist, dict): srcdict = srclist @@ -869,7 +911,7 @@ def convert_v1_to_v2_apt_format(srclist): def convert_key(oldcfg, aptcfg, oldkey, newkey): """convert an old key to the new one if the old one exists - returns true if a key was found and converted""" + returns true if a key was found and converted""" if oldcfg.get(oldkey, None) is not None: aptcfg[newkey] = oldcfg.get(oldkey) del oldcfg[oldkey] @@ -879,33 +921,37 @@ def convert_key(oldcfg, aptcfg, oldkey, newkey): def convert_mirror(oldcfg, aptcfg): """convert old apt_mirror keys into the new more advanced mirror spec""" - keymap = [('apt_mirror', 'uri'), - ('apt_mirror_search', 'search'), - ('apt_mirror_search_dns', 'search_dns')] + keymap = [ + ("apt_mirror", "uri"), + ("apt_mirror_search", "search"), + ("apt_mirror_search_dns", "search_dns"), + ] converted = False - newmcfg = {'arches': ['default']} + newmcfg = {"arches": ["default"]} for oldkey, newkey in keymap: if convert_key(oldcfg, newmcfg, oldkey, newkey): converted = True # only insert new style config if anything was converted if converted: - aptcfg['primary'] = [newmcfg] + aptcfg["primary"] = [newmcfg] def convert_v2_to_v3_apt_format(oldcfg): """convert old to new keys and adapt restructured mirror spec""" - mapoldkeys = {'apt_sources': 'sources', - 'apt_mirror': None, - 'apt_mirror_search': None, - 'apt_mirror_search_dns': None, - 'apt_proxy': 'proxy', - 'apt_http_proxy': 'http_proxy', - 'apt_ftp_proxy': 'https_proxy', - 'apt_https_proxy': 'ftp_proxy', - 'apt_preserve_sources_list': 'preserve_sources_list', - 'apt_custom_sources_list': 'sources_list', - 'add_apt_repo_match': 'add_apt_repo_match'} + mapoldkeys = { + "apt_sources": "sources", + "apt_mirror": None, + "apt_mirror_search": None, + "apt_mirror_search_dns": None, + "apt_proxy": "proxy", + "apt_http_proxy": "http_proxy", + "apt_ftp_proxy": "https_proxy", + "apt_https_proxy": "ftp_proxy", + "apt_preserve_sources_list": "preserve_sources_list", + "apt_custom_sources_list": "sources_list", + "add_apt_repo_match": "add_apt_repo_match", + } needtoconvert = [] for oldkey in mapoldkeys: if oldkey in oldcfg: @@ -917,11 +963,13 @@ def convert_v2_to_v3_apt_format(oldcfg): # no old config, so no 
new one to be created if not needtoconvert: return oldcfg - LOG.debug("apt config: convert V2 to V3 format for keys '%s'", - ", ".join(needtoconvert)) + LOG.debug( + "apt config: convert V2 to V3 format for keys '%s'", + ", ".join(needtoconvert), + ) # if old AND new config are provided, prefer the new one (LP #1616831) - newaptcfg = oldcfg.get('apt', None) + newaptcfg = oldcfg.get("apt", None) if newaptcfg is not None: LOG.debug("apt config: V1/2 and V3 format specified, preferring V3") for oldkey in needtoconvert: @@ -932,10 +980,11 @@ def convert_v2_to_v3_apt_format(oldcfg): # no simple mapping or no collision on this particular key continue if verify != newaptcfg[newkey]: - raise ValueError("Old and New apt format defined with unequal " - "values %s vs %s @ %s" % (verify, - newaptcfg[newkey], - oldkey)) + raise ValueError( + "Old and New apt format defined with unequal " + "values %s vs %s @ %s" + % (verify, newaptcfg[newkey], oldkey) + ) # return conf after clearing conflicting V1/2 keys return oldcfg @@ -955,17 +1004,17 @@ def convert_v2_to_v3_apt_format(oldcfg): raise ValueError("old apt key '%s' left after conversion" % oldkey) # insert new format into config and return full cfg with only v3 content - oldcfg['apt'] = aptcfg + oldcfg["apt"] = aptcfg return oldcfg def convert_to_v3_apt_format(cfg): """convert the old list based format to the new dict based one. After that - convert the old dict keys/format to v3 a.k.a 'new apt config'""" + convert the old dict keys/format to v3 a.k.a 'new apt config'""" # V1 -> V2, the apt_sources entry from list to dict - apt_sources = cfg.get('apt_sources', None) + apt_sources = cfg.get("apt_sources", None) if apt_sources is not None: - cfg['apt_sources'] = convert_v1_to_v2_apt_format(apt_sources) + cfg["apt_sources"] = convert_v1_to_v2_apt_format(apt_sources) # V2 -> V3, move all former globals under the "apt" key # Restructure into new key names and mirror hierarchy @@ -997,7 +1046,12 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud): if mydom: doms.append(".%s" % mydom) - doms.extend((".localdomain", "",)) + doms.extend( + ( + ".localdomain", + "", + ) + ) mirror_list = [] distro = cloud.distro.name @@ -1012,12 +1066,11 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud): def update_mirror_info(pmirror, smirror, arch, cloud): """sets security mirror to primary if not defined. - returns defaults if no mirrors are defined""" + returns defaults if no mirrors are defined""" if pmirror is not None: if smirror is None: smirror = pmirror - return {'PRIMARY': pmirror, - 'SECURITY': smirror} + return {"PRIMARY": pmirror, "SECURITY": smirror} # None specified at all, get default mirrors from cloud mirror_info = cloud.datasource.get_package_mirror_info() @@ -1026,8 +1079,8 @@ def update_mirror_info(pmirror, smirror, arch, cloud): # arbitrary key/value pairs including 'primary' and 'security' keys. # caller expects dict with PRIMARY and SECURITY. 
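        # (illustrative: a mirror_info of {'primary': 'http://p',
        #  'security': 'http://s'} gains 'PRIMARY': 'http://p' and
        #  'SECURITY': 'http://s' alongside the original keys)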
m = mirror_info.copy()
-        m['PRIMARY'] = m['primary']
-        m['SECURITY'] = m['security']
+        m["PRIMARY"] = m["primary"]
+        m["SECURITY"] = m["security"]

         return m

@@ -1037,7 +1090,7 @@ def update_mirror_info(pmirror, smirror, arch, cloud):

 def get_arch_mirrorconfig(cfg, mirrortype, arch):
     """out of a list of potential mirror configurations select
-       and return the one matching the architecture (or default)"""
+    and return the one matching the architecture (or default)"""
     # select the mirror specification (if-any)
     mirror_cfg_list = cfg.get(mirrortype, None)
     if mirror_cfg_list is None:
@@ -1056,8 +1109,8 @@ def get_arch_mirrorconfig(cfg, mirrortype, arch):

 def get_mirror(cfg, mirrortype, arch, cloud):
     """pass the three potential stages of mirror specification
-       returns None is neither of them found anything otherwise the first
-       hit is returned"""
+    returns None if neither of them found anything, otherwise the first
+    hit is returned"""
     mcfg = get_arch_mirrorconfig(cfg, mirrortype, arch)
     if mcfg is None:
         return None
@@ -1073,18 +1126,19 @@ def get_mirror(cfg, mirrortype, arch, cloud):
     # fallback to search_dns if specified
     if mirror is None:
         # list of mirrors to try to resolve
-        mirror = search_for_mirror_dns(mcfg.get("search_dns", None),
-                                       mirrortype, cfg, cloud)
+        mirror = search_for_mirror_dns(
+            mcfg.get("search_dns", None), mirrortype, cfg, cloud
+        )

     return mirror


 def find_apt_mirror_info(cfg, cloud, arch=None):
     """find_apt_mirror_info
-       find an apt_mirror given the cfg provided.
-       It can check for separate config of primary and security mirrors
-       If only primary is given security is assumed to be equal to primary
-       If the generic apt_mirror is given that is defining for both
+    find an apt_mirror given the cfg provided.
+    It can check for separate config of primary and security mirrors
+    If only primary is given, security is assumed to be equal to primary
+    If the generic apt_mirror is given, that is defining for both
     """

     if arch is None:
@@ -1105,32 +1159,35 @@ def find_apt_mirror_info(cfg, cloud, arch=None):

 def apply_apt_config(cfg, proxy_fname, config_fname):
     """apply_apt_config
-       Applies any apt*proxy config from if specified
+    Applies any apt*proxy config if specified
     """
     # Set up any apt proxy
-    cfgs = (('proxy', 'Acquire::http::Proxy "%s";'),
-            ('http_proxy', 'Acquire::http::Proxy "%s";'),
-            ('ftp_proxy', 'Acquire::ftp::Proxy "%s";'),
-            ('https_proxy', 'Acquire::https::Proxy "%s";'))
+    cfgs = (
+        ("proxy", 'Acquire::http::Proxy "%s";'),
+        ("http_proxy", 'Acquire::http::Proxy "%s";'),
+        ("ftp_proxy", 'Acquire::ftp::Proxy "%s";'),
+        ("https_proxy", 'Acquire::https::Proxy "%s";'),
+    )

     proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)]
     if len(proxies):
         LOG.debug("write apt proxy info to %s", proxy_fname)
-        util.write_file(proxy_fname, '\n'.join(proxies) + '\n')
+        util.write_file(proxy_fname, "\n".join(proxies) + "\n")
     elif os.path.isfile(proxy_fname):
         util.del_file(proxy_fname)
         LOG.debug("no apt proxy configured, removed %s", proxy_fname)

-    if cfg.get('conf', None):
+    if cfg.get("conf", None):
         LOG.debug("write apt config info to %s", config_fname)
-        util.write_file(config_fname, cfg.get('conf'))
+        util.write_file(config_fname, cfg.get("conf"))
     elif os.path.isfile(config_fname):
         util.del_file(config_fname)
         LOG.debug("no apt config configured, removed %s", config_fname)


-def apt_key(command, output_file=None, data=None, hardened=False,
-            human_output=True):
+def apt_key(
+    command, output_file=None, data=None, hardened=False, human_output=True
+):
     """apt-key replacement
commands implemented: 'add', 'list', 'finger' @@ -1153,32 +1210,36 @@ def apt_key(command, output_file=None, data=None, hardened=False, key_files = [APT_LOCAL_KEYS] if os.path.isfile(APT_LOCAL_KEYS) else [] for file in os.listdir(APT_TRUSTED_GPG_DIR): - if file.endswith('.gpg') or file.endswith('.asc'): + if file.endswith(".gpg") or file.endswith(".asc"): key_files.append(APT_TRUSTED_GPG_DIR + file) - return key_files if key_files else '' + return key_files if key_files else "" def apt_key_add(): """apt-key add returns filepath to new keyring, or '/dev/null' when an error occurs """ - file_name = '/dev/null' + file_name = "/dev/null" if not output_file: util.logexc( - LOG, 'Unknown filename, failed to add key: "{}"'.format(data)) + LOG, 'Unknown filename, failed to add key: "{}"'.format(data) + ) else: try: - key_dir = \ + key_dir = ( CLOUD_INIT_GPG_DIR if hardened else APT_TRUSTED_GPG_DIR + ) stdout = gpg.dearmor(data) - file_name = '{}{}.gpg'.format(key_dir, output_file) + file_name = "{}{}.gpg".format(key_dir, output_file) util.write_file(file_name, stdout) except subp.ProcessExecutionError: - util.logexc(LOG, 'Gpg error, failed to add key: {}'.format( - data)) + util.logexc( + LOG, "Gpg error, failed to add key: {}".format(data) + ) except UnicodeDecodeError: - util.logexc(LOG, 'Decode error, failed to add key: {}'.format( - data)) + util.logexc( + LOG, "Decode error, failed to add key: {}".format(data) + ) return file_name def apt_key_list(): @@ -1193,19 +1254,20 @@ def apt_key(command, output_file=None, data=None, hardened=False, key_list.append(gpg.list(key_file, human_output=human_output)) except subp.ProcessExecutionError as error: LOG.warning('Failed to list key "%s": %s', key_file, error) - return '\n'.join(key_list) + return "\n".join(key_list) - if command == 'add': + if command == "add": return apt_key_add() - elif command == 'finger' or command == 'list': + elif command == "finger" or command == "list": return apt_key_list() else: raise ValueError( - 'apt_key() commands add, list, and finger are currently supported') + "apt_key() commands add, list, and finger are currently supported" + ) CONFIG_CLEANERS = { - 'cloud-init': clean_cloud_init, + "cloud-init": clean_cloud_init, } # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py index aa186ce2..569849d1 100644 --- a/cloudinit/config/cc_apt_pipelining.py +++ b/cloudinit/config/cc_apt_pipelining.py @@ -29,17 +29,19 @@ not recommended. apt_pipelining: """ -from cloudinit.settings import PER_INSTANCE from cloudinit import util +from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE -distros = ['ubuntu', 'debian'] +distros = ["ubuntu", "debian"] DEFAULT_FILE = "/etc/apt/apt.conf.d/90cloud-init-pipelining" -APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n" - 'Acquire::http::Pipeline-Depth "%s";\n') +APT_PIPE_TPL = ( + "//Written by cloud-init per 'apt_pipelining'\n" + 'Acquire::http::Pipeline-Depth "%s";\n' +) # Acquire::http::Pipeline-Depth can be a value # from 0 to 5 indicating how many outstanding requests APT should send. 
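An aside on the cc_apt_pipelining hunks above and below: the module only ever renders APT_PIPE_TPL into /etc/apt/apt.conf.d/90cloud-init-pipelining. A stand-alone sketch of the value handling follows; render_snippet is a hypothetical helper, and the real handler logs a warning on unparseable values instead of raising:

# Hypothetical helper mirroring the branching in cc_apt_pipelining.
APT_PIPE_TPL = (
    "//Written by cloud-init per 'apt_pipelining'\n"
    'Acquire::http::Pipeline-Depth "%s";\n'
)

def render_snippet(value):
    s = str(value).lower().strip()
    if s == "false":
        return APT_PIPE_TPL % 0  # write an explicit depth of 0
    if s in ("none", "unchanged", "os"):
        return None  # leave the distro default untouched
    return APT_PIPE_TPL % int(s)  # a fixed depth such as 2

assert render_snippet("false").endswith('"0";\n')
assert render_snippet("os") is None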
@@ -49,7 +51,7 @@ APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n" def handle(_name, cfg, _cloud, log, _args): - apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", 'os') + apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", "os") apt_pipe_value_s = str(apt_pipe_value).lower().strip() if apt_pipe_value_s == "false": @@ -69,4 +71,5 @@ def write_apt_snippet(setting, log, f_name): util.write_file(f_name, file_contents) log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py index 06f7a26e..bff11a24 100644 --- a/cloudinit/config/cc_bootcmd.py +++ b/cloudinit/config/cc_bootcmd.py @@ -12,11 +12,9 @@ import os from textwrap import dedent +from cloudinit import subp, temp_utils, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_ALWAYS -from cloudinit import temp_utils -from cloudinit import subp -from cloudinit import util frequency = PER_ALWAYS @@ -26,13 +24,14 @@ frequency = PER_ALWAYS # configuration options before actually attempting to deploy with said # configuration. -distros = ['all'] +distros = ["all"] meta = { - 'id': 'cc_bootcmd', - 'name': 'Bootcmd', - 'title': 'Run arbitrary commands early in the boot process', - 'description': dedent("""\ + "id": "cc_bootcmd", + "name": "Bootcmd", + "title": "Run arbitrary commands early in the boot process", + "description": dedent( + """\ This module runs arbitrary commands very early in the boot process, only slightly after a boothook would run. This is very similar to a boothook, but more user friendly. The environment variable @@ -48,31 +47,37 @@ meta = { when writing files, do not use /tmp dir as it races with systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead. 
- """), - 'distros': distros, - 'examples': [dedent("""\ + """ + ), + "distros": distros, + "examples": [ + dedent( + """\ bootcmd: - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ] - """)], - 'frequency': PER_ALWAYS, + """ + ) + ], + "frequency": PER_ALWAYS, } schema = { - 'type': 'object', - 'properties': { - 'bootcmd': { - 'type': 'array', - 'items': { - 'oneOf': [ - {'type': 'array', 'items': {'type': 'string'}}, - {'type': 'string'}] + "type": "object", + "properties": { + "bootcmd": { + "type": "array", + "items": { + "oneOf": [ + {"type": "array", "items": {"type": "string"}}, + {"type": "string"}, + ] }, - 'additionalItems': False, # Reject items of non-string non-list - 'additionalProperties': False, - 'minItems': 1, + "additionalItems": False, # Reject items of non-string non-list + "additionalProperties": False, + "minItems": 1, } - } + }, } __doc__ = get_meta_doc(meta, schema) # Supplement python help() @@ -81,8 +86,9 @@ __doc__ = get_meta_doc(meta, schema) # Supplement python help() def handle(name, cfg, cloud, log, _args): if "bootcmd" not in cfg: - log.debug(("Skipping module named %s," - " no 'bootcmd' key in configuration"), name) + log.debug( + "Skipping module named %s, no 'bootcmd' key in configuration", name + ) return validate_cloudconfig_schema(cfg, schema) @@ -99,11 +105,12 @@ def handle(name, cfg, cloud, log, _args): env = os.environ.copy() iid = cloud.get_instance_id() if iid: - env['INSTANCE_ID'] = str(iid) - cmd = ['/bin/sh', tmpf.name] + env["INSTANCE_ID"] = str(iid) + cmd = ["/bin/sh", tmpf.name] subp.subp(cmd, env=env, capture=False) except Exception: util.logexc(log, "Failed to run bootcmd module %s", name) raise + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py index 9fdaeba1..53b6d0c8 100755 --- a/cloudinit/config/cc_byobu.py +++ b/cloudinit/config/cc_byobu.py @@ -38,11 +38,10 @@ Valid configuration options for this module are: byobu_by_default: """ +from cloudinit import subp, util from cloudinit.distros import ug_util -from cloudinit import subp -from cloudinit import util -distros = ['ubuntu', 'debian'] +distros = ["ubuntu", "debian"] def handle(name, cfg, cloud, log, args): @@ -58,8 +57,14 @@ def handle(name, cfg, cloud, log, args): if value == "user" or value == "system": value = "enable-%s" % value - valid = ("enable-user", "enable-system", "enable", - "disable-user", "disable-system", "disable") + valid = ( + "enable-user", + "enable-system", + "enable", + "disable-user", + "disable-system", + "disable", + ) if value not in valid: log.warning("Unknown value %s for byobu_by_default", value) @@ -81,13 +86,16 @@ def handle(name, cfg, cloud, log, args): (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) (user, _user_config) = ug_util.extract_default(users) if not user: - log.warning(("No default byobu user provided, " - "can not launch %s for the default user"), bl_inst) + log.warning( + "No default byobu user provided, " + "can not launch %s for the default user", + bl_inst, + ) else: - shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst) + shcmd += ' sudo -Hu "%s" byobu-launcher-%s' % (user, bl_inst) shcmd += " || X=$(($X+1)); " if mod_sys: - shcmd += "echo \"%s\" | debconf-set-selections" % dc_val + shcmd += 'echo "%s" | debconf-set-selections' % dc_val shcmd += " && dpkg-reconfigure byobu --frontend=noninteractive" shcmd += " || X=$(($X+1)); " @@ -96,4 +104,5 @@ def handle(name, cfg, cloud, log, args): 
log.debug("Setting byobu to %s", value) subp.subp(cmd, capture=False) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index bd7bead9..9de065ab 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -41,28 +41,27 @@ can be removed from the system with the configuration option import os -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util DEFAULT_CONFIG = { - 'ca_cert_path': '/usr/share/ca-certificates/', - 'ca_cert_filename': 'cloud-init-ca-certs.crt', - 'ca_cert_config': '/etc/ca-certificates.conf', - 'ca_cert_system_path': '/etc/ssl/certs/', - 'ca_cert_update_cmd': ['update-ca-certificates'] + "ca_cert_path": "/usr/share/ca-certificates/", + "ca_cert_filename": "cloud-init-ca-certs.crt", + "ca_cert_config": "/etc/ca-certificates.conf", + "ca_cert_system_path": "/etc/ssl/certs/", + "ca_cert_update_cmd": ["update-ca-certificates"], } DISTRO_OVERRIDES = { - 'rhel': { - 'ca_cert_path': '/usr/share/pki/ca-trust-source/', - 'ca_cert_filename': 'anchors/cloud-init-ca-certs.crt', - 'ca_cert_config': None, - 'ca_cert_system_path': '/etc/pki/ca-trust/', - 'ca_cert_update_cmd': ['update-ca-trust'] + "rhel": { + "ca_cert_path": "/usr/share/pki/ca-trust-source/", + "ca_cert_filename": "anchors/cloud-init-ca-certs.crt", + "ca_cert_config": None, + "ca_cert_system_path": "/etc/pki/ca-trust/", + "ca_cert_update_cmd": ["update-ca-trust"], } } -distros = ['alpine', 'debian', 'ubuntu', 'rhel'] +distros = ["alpine", "debian", "ubuntu", "rhel"] def _distro_ca_certs_configs(distro_name): @@ -72,8 +71,9 @@ def _distro_ca_certs_configs(distro_name): @returns: Dict of distro configurations for ca-cert. """ cfg = DISTRO_OVERRIDES.get(distro_name, DEFAULT_CONFIG) - cfg['ca_cert_full_path'] = os.path.join(cfg['ca_cert_path'], - cfg['ca_cert_filename']) + cfg["ca_cert_full_path"] = os.path.join( + cfg["ca_cert_path"], cfg["ca_cert_filename"] + ) return cfg @@ -83,7 +83,7 @@ def update_ca_certs(distro_cfg): @param distro_cfg: A hash providing _distro_ca_certs_configs function. """ - subp.subp(distro_cfg['ca_cert_update_cmd'], capture=False) + subp.subp(distro_cfg["ca_cert_update_cmd"], capture=False) def add_ca_certs(distro_cfg, certs): @@ -98,9 +98,9 @@ def add_ca_certs(distro_cfg, certs): return # First ensure they are strings... cert_file_contents = "\n".join([str(c) for c in certs]) - util.write_file(distro_cfg['ca_cert_full_path'], - cert_file_contents, - mode=0o644) + util.write_file( + distro_cfg["ca_cert_full_path"], cert_file_contents, mode=0o644 + ) update_cert_config(distro_cfg) @@ -110,23 +110,27 @@ def update_cert_config(distro_cfg): @param distro_cfg: A hash providing _distro_ca_certs_configs function. """ - if distro_cfg['ca_cert_config'] is None: + if distro_cfg["ca_cert_config"] is None: return - if os.stat(distro_cfg['ca_cert_config']).st_size == 0: + if os.stat(distro_cfg["ca_cert_config"]).st_size == 0: # If the CA_CERT_CONFIG file is empty (i.e. all existing # CA certs have been deleted) then simply output a single # line with the cloud-init cert filename. - out = "%s\n" % distro_cfg['ca_cert_filename'] + out = "%s\n" % distro_cfg["ca_cert_filename"] else: # Append cert filename to CA_CERT_CONFIG file. # We have to strip the content because blank lines in the file # causes subsequent entries to be ignored. 
(LP: #1077020) - orig = util.load_file(distro_cfg['ca_cert_config']) - cr_cont = '\n'.join([line for line in orig.splitlines() - if line != distro_cfg['ca_cert_filename']]) - out = "%s\n%s\n" % (cr_cont.rstrip(), - distro_cfg['ca_cert_filename']) - util.write_file(distro_cfg['ca_cert_config'], out, omode="wb") + orig = util.load_file(distro_cfg["ca_cert_config"]) + cr_cont = "\n".join( + [ + line + for line in orig.splitlines() + if line != distro_cfg["ca_cert_filename"] + ] + ) + out = "%s\n%s\n" % (cr_cont.rstrip(), distro_cfg["ca_cert_filename"]) + util.write_file(distro_cfg["ca_cert_config"], out, omode="wb") def remove_default_ca_certs(distro_name, distro_cfg): @@ -137,14 +141,15 @@ def remove_default_ca_certs(distro_name, distro_cfg): @param distro_name: String providing the distro class name. @param distro_cfg: A hash providing _distro_ca_certs_configs function. """ - util.delete_dir_contents(distro_cfg['ca_cert_path']) - util.delete_dir_contents(distro_cfg['ca_cert_system_path']) - util.write_file(distro_cfg['ca_cert_config'], "", mode=0o644) + util.delete_dir_contents(distro_cfg["ca_cert_path"]) + util.delete_dir_contents(distro_cfg["ca_cert_system_path"]) + util.write_file(distro_cfg["ca_cert_config"], "", mode=0o644) - if distro_name in ['debian', 'ubuntu']: + if distro_name in ["debian", "ubuntu"]: debconf_sel = ( - "ca-certificates ca-certificates/trust_new_crts " + "select no") - subp.subp(('debconf-set-selections', '-'), debconf_sel) + "ca-certificates ca-certificates/trust_new_crts " + "select no" + ) + subp.subp(("debconf-set-selections", "-"), debconf_sel) def handle(name, cfg, cloud, log, _args): @@ -159,11 +164,13 @@ def handle(name, cfg, cloud, log, _args): """ # If there isn't a ca-certs section in the configuration don't do anything if "ca-certs" not in cfg: - log.debug(("Skipping module named %s," - " no 'ca-certs' key in configuration"), name) + log.debug( + "Skipping module named %s, no 'ca-certs' key in configuration", + name, + ) return - ca_cert_cfg = cfg['ca-certs'] + ca_cert_cfg = cfg["ca-certs"] distro_cfg = _distro_ca_certs_configs(cloud.distro.name) # If there is a remove-defaults option set to true, remove the system @@ -183,4 +190,5 @@ def handle(name, cfg, cloud, log, _args): log.debug("Updating certificates") update_ca_certs(distro_cfg) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index ed734d1c..67889683 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -13,87 +13,91 @@ import json import os from textwrap import dedent -from cloudinit import subp +from cloudinit import subp, temp_utils, templater, url_helper, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema -from cloudinit import templater -from cloudinit import temp_utils -from cloudinit import url_helper -from cloudinit import util from cloudinit.settings import PER_ALWAYS - RUBY_VERSION_DEFAULT = "1.8" -CHEF_DIRS = tuple([ - '/etc/chef', - '/var/log/chef', - '/var/lib/chef', - '/var/cache/chef', - '/var/backups/chef', - '/var/run/chef', -]) -REQUIRED_CHEF_DIRS = tuple([ - '/etc/chef', -]) +CHEF_DIRS = tuple( + [ + "/etc/chef", + "/var/log/chef", + "/var/lib/chef", + "/var/cache/chef", + "/var/backups/chef", + "/var/run/chef", + ] +) +REQUIRED_CHEF_DIRS = tuple( + [ + "/etc/chef", + ] +) # Used if fetching chef from a omnibus style package OMNIBUS_URL = "https://www.chef.io/chef/install.sh" OMNIBUS_URL_RETRIES = 5 -CHEF_VALIDATION_PEM_PATH = '/etc/chef/validation.pem' 
-CHEF_ENCRYPTED_DATA_BAG_PATH = '/etc/chef/encrypted_data_bag_secret' -CHEF_ENVIRONMENT = '_default' -CHEF_FB_PATH = '/etc/chef/firstboot.json' +CHEF_VALIDATION_PEM_PATH = "/etc/chef/validation.pem" +CHEF_ENCRYPTED_DATA_BAG_PATH = "/etc/chef/encrypted_data_bag_secret" +CHEF_ENVIRONMENT = "_default" +CHEF_FB_PATH = "/etc/chef/firstboot.json" CHEF_RB_TPL_DEFAULTS = { # These are ruby symbols... - 'ssl_verify_mode': ':verify_none', - 'log_level': ':info', + "ssl_verify_mode": ":verify_none", + "log_level": ":info", # These are not symbols... - 'log_location': '/var/log/chef/client.log', - 'validation_key': CHEF_VALIDATION_PEM_PATH, - 'validation_cert': None, - 'client_key': '/etc/chef/client.pem', - 'json_attribs': CHEF_FB_PATH, - 'file_cache_path': '/var/cache/chef', - 'file_backup_path': '/var/backups/chef', - 'pid_file': '/var/run/chef/client.pid', - 'show_time': True, - 'encrypted_data_bag_secret': None, + "log_location": "/var/log/chef/client.log", + "validation_key": CHEF_VALIDATION_PEM_PATH, + "validation_cert": None, + "client_key": "/etc/chef/client.pem", + "json_attribs": CHEF_FB_PATH, + "file_cache_path": "/var/cache/chef", + "file_backup_path": "/var/backups/chef", + "pid_file": "/var/run/chef/client.pid", + "show_time": True, + "encrypted_data_bag_secret": None, } -CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time']) -CHEF_RB_TPL_PATH_KEYS = frozenset([ - 'log_location', - 'validation_key', - 'client_key', - 'file_cache_path', - 'json_attribs', - 'pid_file', - 'encrypted_data_bag_secret', -]) +CHEF_RB_TPL_BOOL_KEYS = frozenset(["show_time"]) +CHEF_RB_TPL_PATH_KEYS = frozenset( + [ + "log_location", + "validation_key", + "client_key", + "file_cache_path", + "json_attribs", + "pid_file", + "encrypted_data_bag_secret", + ] +) CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys()) CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS) CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_PATH_KEYS) -CHEF_RB_TPL_KEYS.extend([ - 'server_url', - 'node_name', - 'environment', - 'validation_name', - 'chef_license', -]) +CHEF_RB_TPL_KEYS.extend( + [ + "server_url", + "node_name", + "environment", + "validation_name", + "chef_license", + ] +) CHEF_RB_TPL_KEYS = frozenset(CHEF_RB_TPL_KEYS) -CHEF_RB_PATH = '/etc/chef/client.rb' -CHEF_EXEC_PATH = '/usr/bin/chef-client' -CHEF_EXEC_DEF_ARGS = tuple(['-d', '-i', '1800', '-s', '20']) +CHEF_RB_PATH = "/etc/chef/client.rb" +CHEF_EXEC_PATH = "/usr/bin/chef-client" +CHEF_EXEC_DEF_ARGS = tuple(["-d", "-i", "1800", "-s", "20"]) frequency = PER_ALWAYS distros = ["all"] meta = { - 'id': 'cc_chef', - 'name': 'Chef', - 'title': 'module that configures, starts and installs chef', - 'description': dedent("""\ + "id": "cc_chef", + "name": "Chef", + "title": "module that configures, starts and installs chef", + "description": dedent( + """\ This module enables chef to be installed (from packages, gems, or from omnibus). Before this occurs, chef configuration is written to disk (validation.pem, client.pem, firstboot.json, @@ -101,9 +105,12 @@ meta = { /var/log/chef and so-on). If configured, chef will be installed and started in either daemon or non-daemon mode. 
If run in non-daemon mode, post run actions are executed to do - finishing activities such as removing validation.pem."""), - 'distros': distros, - 'examples': [dedent(""" + finishing activities such as removing validation.pem.""" + ), + "distros": distros, + "examples": [ + dedent( + """ chef: directories: - /etc/chef @@ -124,180 +131,237 @@ meta = { omnibus_url_retries: 2 server_url: https://chef.yourorg.com:4000 ssl_verify_mode: :verify_peer - validation_name: yourorg-validator""")], - 'frequency': frequency, + validation_name: yourorg-validator""" + ) + ], + "frequency": frequency, } schema = { - 'type': 'object', - 'properties': { - 'chef': { - 'type': 'object', - 'additionalProperties': False, - 'properties': { - 'directories': { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'uniqueItems': True, - 'description': dedent("""\ + "type": "object", + "properties": { + "chef": { + "type": "object", + "additionalProperties": False, + "properties": { + "directories": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": True, + "description": dedent( + """\ Create the necessary directories for chef to run. By default, it creates the following directories: - {chef_dirs}""").format( + {chef_dirs}""" + ).format( chef_dirs="\n".join( [" - ``{}``".format(d) for d in CHEF_DIRS] ) - ) + ), }, - 'validation_cert': { - 'type': 'string', - 'description': dedent("""\ + "validation_cert": { + "type": "string", + "description": dedent( + """\ Optional string to be written to file validation_key. Special value ``system`` means set use existing file. - """) + """ + ), }, - 'validation_key': { - 'type': 'string', - 'default': CHEF_VALIDATION_PEM_PATH, - 'description': dedent("""\ + "validation_key": { + "type": "string", + "default": CHEF_VALIDATION_PEM_PATH, + "description": dedent( + """\ Optional path for validation_cert. default to - ``{}``.""".format(CHEF_VALIDATION_PEM_PATH)) + ``{}``.""".format( + CHEF_VALIDATION_PEM_PATH + ) + ), }, - 'firstboot_path': { - 'type': 'string', - 'default': CHEF_FB_PATH, - 'description': dedent("""\ + "firstboot_path": { + "type": "string", + "default": CHEF_FB_PATH, + "description": dedent( + """\ Path to write run_list and initial_attributes keys that should also be present in this configuration, defaults - to ``{}``.""".format(CHEF_FB_PATH)) + to ``{}``.""".format( + CHEF_FB_PATH + ) + ), }, - 'exec': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "exec": { + "type": "boolean", + "default": False, + "description": dedent( + """\ define if we should run or not run chef (defaults to false, unless a gem installed is requested where this - will then default to true).""") + will then default to true).""" + ), }, - 'client_key': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['client_key'], - 'description': dedent("""\ + "client_key": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["client_key"], + "description": dedent( + """\ Optional path for client_cert. default to - ``{}``.""".format(CHEF_RB_TPL_DEFAULTS['client_key'])) + ``{}``.""".format( + CHEF_RB_TPL_DEFAULTS["client_key"] + ) + ), }, - 'encrypted_data_bag_secret': { - 'type': 'string', - 'default': None, - 'description': dedent("""\ + "encrypted_data_bag_secret": { + "type": "string", + "default": None, + "description": dedent( + """\ Specifies the location of the secret key used by chef to encrypt data items. By default, this path is set to None, meaning that chef will have to look at the path ``{}`` for it. 
- """.format(CHEF_ENCRYPTED_DATA_BAG_PATH)) + """.format( + CHEF_ENCRYPTED_DATA_BAG_PATH + ) + ), }, - 'environment': { - 'type': 'string', - 'default': CHEF_ENVIRONMENT, - 'description': dedent("""\ + "environment": { + "type": "string", + "default": CHEF_ENVIRONMENT, + "description": dedent( + """\ Specifies which environment chef will use. By default, it will use the ``{}`` configuration. - """.format(CHEF_ENVIRONMENT)) + """.format( + CHEF_ENVIRONMENT + ) + ), }, - 'file_backup_path': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['file_backup_path'], - 'description': dedent("""\ + "file_backup_path": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["file_backup_path"], + "description": dedent( + """\ Specifies the location in which backup files are stored. By default, it uses the ``{}`` location.""".format( - CHEF_RB_TPL_DEFAULTS['file_backup_path'])) + CHEF_RB_TPL_DEFAULTS["file_backup_path"] + ) + ), }, - 'file_cache_path': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['file_cache_path'], - 'description': dedent("""\ + "file_cache_path": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["file_cache_path"], + "description": dedent( + """\ Specifies the location in which chef cache files will be saved. By default, it uses the ``{}`` location.""".format( - CHEF_RB_TPL_DEFAULTS['file_cache_path'])) + CHEF_RB_TPL_DEFAULTS["file_cache_path"] + ) + ), }, - 'json_attribs': { - 'type': 'string', - 'default': CHEF_FB_PATH, - 'description': dedent("""\ + "json_attribs": { + "type": "string", + "default": CHEF_FB_PATH, + "description": dedent( + """\ Specifies the location in which some chef json data is stored. By default, it uses the - ``{}`` location.""".format(CHEF_FB_PATH)) + ``{}`` location.""".format( + CHEF_FB_PATH + ) + ), }, - 'log_level': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['log_level'], - 'description': dedent("""\ + "log_level": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["log_level"], + "description": dedent( + """\ Defines the level of logging to be stored in the log file. By default this value is set to ``{}``. - """.format(CHEF_RB_TPL_DEFAULTS['log_level'])) + """.format( + CHEF_RB_TPL_DEFAULTS["log_level"] + ) + ), }, - 'log_location': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['log_location'], - 'description': dedent("""\ + "log_location": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["log_location"], + "description": dedent( + """\ Specifies the location of the chef lof file. By default, the location is specified at ``{}``.""".format( - CHEF_RB_TPL_DEFAULTS['log_location'])) + CHEF_RB_TPL_DEFAULTS["log_location"] + ) + ), }, - 'node_name': { - 'type': 'string', - 'description': dedent("""\ + "node_name": { + "type": "string", + "description": dedent( + """\ The name of the node to run. By default, we will - use th instance id as the node name.""") + use th instance id as the node name.""" + ), }, - 'omnibus_url': { - 'type': 'string', - 'default': OMNIBUS_URL, - 'description': dedent("""\ + "omnibus_url": { + "type": "string", + "default": OMNIBUS_URL, + "description": dedent( + """\ Omnibus URL if chef should be installed through Omnibus. 
By default, it uses the - ``{}``.""".format(OMNIBUS_URL)) + ``{}``.""".format( + OMNIBUS_URL + ) + ), }, - 'omnibus_url_retries': { - 'type': 'integer', - 'default': OMNIBUS_URL_RETRIES, - 'description': dedent("""\ + "omnibus_url_retries": { + "type": "integer", + "default": OMNIBUS_URL_RETRIES, + "description": dedent( + """\ The number of retries that will be attempted to reach - the Omnibus URL""") + the Omnibus URL""" + ), }, - 'omnibus_version': { - 'type': 'string', - 'description': dedent("""\ + "omnibus_version": { + "type": "string", + "description": dedent( + """\ Optional version string to require for omnibus - install.""") + install.""" + ), }, - 'pid_file': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['pid_file'], - 'description': dedent("""\ + "pid_file": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["pid_file"], + "description": dedent( + """\ The location in which a process identification number (pid) is saved. By default, it saves in the ``{}`` location.""".format( - CHEF_RB_TPL_DEFAULTS['pid_file'])) + CHEF_RB_TPL_DEFAULTS["pid_file"] + ) + ), }, - 'server_url': { - 'type': 'string', - 'description': 'The URL for the chef server' + "server_url": { + "type": "string", + "description": "The URL for the chef server", }, - 'show_time': { - 'type': 'boolean', - 'default': True, - 'description': 'Show time in chef logs' + "show_time": { + "type": "boolean", + "default": True, + "description": "Show time in chef logs", }, - 'ssl_verify_mode': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['ssl_verify_mode'], - 'description': dedent("""\ + "ssl_verify_mode": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["ssl_verify_mode"], + "description": dedent( + """\ Set the verify mode for HTTPS requests. We can have two possible values for this parameter: @@ -306,67 +370,76 @@ schema = { - ``:verify_peer``: Validate all SSL certificates. By default, the parameter is set as ``{}``. - """.format(CHEF_RB_TPL_DEFAULTS['ssl_verify_mode'])) + """.format( + CHEF_RB_TPL_DEFAULTS["ssl_verify_mode"] + ) + ), }, - 'validation_name': { - 'type': 'string', - 'description': dedent("""\ + "validation_name": { + "type": "string", + "description": dedent( + """\ The name of the chef-validator key that Chef Infra Client uses to access the Chef Infra Server during - the initial Chef Infra Client run.""") + the initial Chef Infra Client run.""" + ), }, - 'force_install': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "force_install": { + "type": "boolean", + "default": False, + "description": dedent( + """\ If set to ``True``, forces chef installation, even - if it is already installed.""") + if it is already installed.""" + ), }, - 'initial_attributes': { - 'type': 'object', - 'items': { - 'type': 'string' - }, - 'description': dedent("""\ + "initial_attributes": { + "type": "object", + "items": {"type": "string"}, + "description": dedent( + """\ Specify a list of initial attributes used by the - cookbooks.""") + cookbooks.""" + ), }, - 'install_type': { - 'type': 'string', - 'default': 'packages', - 'description': dedent("""\ + "install_type": { + "type": "string", + "default": "packages", + "description": dedent( + """\ The type of installation for chef. It can be one of the following values: - ``packages`` - ``gems`` - - ``omnibus``""") + - ``omnibus``""" + ), }, - 'run_list': { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'description': 'A run list for a first boot json.' 
+ "run_list": { + "type": "array", + "items": {"type": "string"}, + "description": "A run list for a first boot json.", }, "chef_license": { - 'type': 'string', - 'description': dedent("""\ + "type": "string", + "description": dedent( + """\ string that indicates if user accepts or not license - related to some of chef products""") - } - } + related to some of chef products""" + ), + }, + }, } - } + }, } __doc__ = get_meta_doc(meta, schema) def post_run_chef(chef_cfg, log): - delete_pem = util.get_cfg_option_bool(chef_cfg, - 'delete_validation_post_exec', - default=False) + delete_pem = util.get_cfg_option_bool( + chef_cfg, "delete_validation_post_exec", default=False + ) if delete_pem and os.path.isfile(CHEF_VALIDATION_PEM_PATH): os.unlink(CHEF_VALIDATION_PEM_PATH) @@ -389,16 +462,20 @@ def get_template_params(iid, chef_cfg, log): else: params[k] = util.get_cfg_option_str(chef_cfg, k) # These ones are overwritten to be exact values... - params.update({ - 'generated_by': util.make_header(), - 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name', - default=iid), - 'environment': util.get_cfg_option_str(chef_cfg, 'environment', - default='_default'), - # These two are mandatory... - 'server_url': chef_cfg['server_url'], - 'validation_name': chef_cfg['validation_name'], - }) + params.update( + { + "generated_by": util.make_header(), + "node_name": util.get_cfg_option_str( + chef_cfg, "node_name", default=iid + ), + "environment": util.get_cfg_option_str( + chef_cfg, "environment", default="_default" + ), + # These two are mandatory... + "server_url": chef_cfg["server_url"], + "validation_name": chef_cfg["validation_name"], + } + ) return params @@ -406,35 +483,38 @@ def handle(name, cfg, cloud, log, _args): """Handler method activated by cloud-init.""" # If there isn't a chef key in the configuration don't do anything - if 'chef' not in cfg: - log.debug(("Skipping module named %s," - " no 'chef' key in configuration"), name) + if "chef" not in cfg: + log.debug( + "Skipping module named %s, no 'chef' key in configuration", name + ) return validate_cloudconfig_schema(cfg, schema) - chef_cfg = cfg['chef'] + chef_cfg = cfg["chef"] # Ensure the chef directories we use exist - chef_dirs = util.get_cfg_option_list(chef_cfg, 'directories') + chef_dirs = util.get_cfg_option_list(chef_cfg, "directories") if not chef_dirs: chef_dirs = list(CHEF_DIRS) for d in itertools.chain(chef_dirs, REQUIRED_CHEF_DIRS): util.ensure_dir(d) - vkey_path = chef_cfg.get('validation_key', CHEF_VALIDATION_PEM_PATH) - vcert = chef_cfg.get('validation_cert') + vkey_path = chef_cfg.get("validation_key", CHEF_VALIDATION_PEM_PATH) + vcert = chef_cfg.get("validation_cert") # special value 'system' means do not overwrite the file # but still render the template to contain 'validation_key' if vcert: if vcert != "system": util.write_file(vkey_path, vcert) elif not os.path.isfile(vkey_path): - log.warning("chef validation_cert provided as 'system', but " - "validation_key path '%s' does not exist.", - vkey_path) + log.warning( + "chef validation_cert provided as 'system', but " + "validation_key path '%s' does not exist.", + vkey_path, + ) # Create the chef config from template - template_fn = cloud.get_template_filename('chef_client.rb') + template_fn = cloud.get_template_filename("chef_client.rb") if template_fn: iid = str(cloud.datasource.get_instance_id()) params = get_template_params(iid, chef_cfg, log) @@ -448,32 +528,33 @@ def handle(name, cfg, cloud, log, _args): util.ensure_dirs(param_paths) 
templater.render_to_file(template_fn, CHEF_RB_PATH, params) else: - log.warning("No template found, not rendering to %s", - CHEF_RB_PATH) + log.warning("No template found, not rendering to %s", CHEF_RB_PATH) # Set the firstboot json - fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path', - default=CHEF_FB_PATH) + fb_filename = util.get_cfg_option_str( + chef_cfg, "firstboot_path", default=CHEF_FB_PATH + ) if not fb_filename: log.info("First boot path empty, not writing first boot json file") else: initial_json = {} - if 'run_list' in chef_cfg: - initial_json['run_list'] = chef_cfg['run_list'] - if 'initial_attributes' in chef_cfg: - initial_attributes = chef_cfg['initial_attributes'] + if "run_list" in chef_cfg: + initial_json["run_list"] = chef_cfg["run_list"] + if "initial_attributes" in chef_cfg: + initial_attributes = chef_cfg["initial_attributes"] for k in list(initial_attributes.keys()): initial_json[k] = initial_attributes[k] util.write_file(fb_filename, json.dumps(initial_json)) # Try to install chef, if its not already installed... - force_install = util.get_cfg_option_bool(chef_cfg, - 'force_install', default=False) + force_install = util.get_cfg_option_bool( + chef_cfg, "force_install", default=False + ) installed = subp.is_exe(CHEF_EXEC_PATH) if not installed or force_install: run = install_chef(cloud, chef_cfg, log) elif installed: - run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False) + run = util.get_cfg_option_bool(chef_cfg, "exec", default=False) else: run = False if run: @@ -482,18 +563,21 @@ def handle(name, cfg, cloud, log, _args): def run_chef(chef_cfg, log): - log.debug('Running chef-client') + log.debug("Running chef-client") cmd = [CHEF_EXEC_PATH] - if 'exec_arguments' in chef_cfg: - cmd_args = chef_cfg['exec_arguments'] + if "exec_arguments" in chef_cfg: + cmd_args = chef_cfg["exec_arguments"] if isinstance(cmd_args, (list, tuple)): cmd.extend(cmd_args) elif isinstance(cmd_args, str): cmd.append(cmd_args) else: - log.warning("Unknown type %s provided for chef" - " 'exec_arguments' expected list, tuple," - " or string", type(cmd_args)) + log.warning( + "Unknown type %s provided for chef" + " 'exec_arguments' expected list, tuple," + " or string", + type(cmd_args), + ) cmd.extend(CHEF_EXEC_DEF_ARGS) else: cmd.extend(CHEF_EXEC_DEF_ARGS) @@ -507,16 +591,16 @@ def subp_blob_in_tempfile(blob, *args, **kwargs): The 'args' argument to subp will be updated with the full path to the filename as the first argument. 
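
    Illustrative call (not taken from the module):

        subp_blob_in_tempfile("#!/bin/sh\necho ran with $1\n",
                              args=["hello"], capture=True)

    writes the blob to <tmpdir>/subp_blob, makes it executable, and runs
    it with 'hello' as its first argument.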
""" - basename = kwargs.pop('basename', "subp_blob") + basename = kwargs.pop("basename", "subp_blob") - if len(args) == 0 and 'args' not in kwargs: + if len(args) == 0 and "args" not in kwargs: args = [tuple()] # Use tmpdir over tmpfile to avoid 'text file busy' on execute with temp_utils.tempdir(needs_exe=True) as tmpd: tmpf = os.path.join(tmpd, basename) - if 'args' in kwargs: - kwargs['args'] = [tmpf] + list(kwargs['args']) + if "args" in kwargs: + kwargs["args"] = [tmpf] + list(kwargs["args"]) else: args = list(args) args[0] = [tmpf] + args[0] @@ -543,36 +627,39 @@ def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None): if omnibus_version is None: args = [] else: - args = ['-v', omnibus_version] + args = ["-v", omnibus_version] content = url_helper.readurl(url=url, retries=retries).contents return subp_blob_in_tempfile( - blob=content, args=args, - basename='chef-omnibus-install', capture=False) + blob=content, args=args, basename="chef-omnibus-install", capture=False + ) def install_chef(cloud, chef_cfg, log): # If chef is not installed, we install chef based on 'install_type' - install_type = util.get_cfg_option_str(chef_cfg, 'install_type', - 'packages') - run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False) + install_type = util.get_cfg_option_str( + chef_cfg, "install_type", "packages" + ) + run = util.get_cfg_option_bool(chef_cfg, "exec", default=False) if install_type == "gems": # This will install and run the chef-client from gems - chef_version = util.get_cfg_option_str(chef_cfg, 'version', None) - ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version', - RUBY_VERSION_DEFAULT) + chef_version = util.get_cfg_option_str(chef_cfg, "version", None) + ruby_version = util.get_cfg_option_str( + chef_cfg, "ruby_version", RUBY_VERSION_DEFAULT + ) install_chef_from_gems(ruby_version, chef_version, cloud.distro) # Retain backwards compat, by preferring True instead of False # when not provided/overriden... 
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True) - elif install_type == 'packages': + run = util.get_cfg_option_bool(chef_cfg, "exec", default=True) + elif install_type == "packages": # This will install and run the chef-client from packages - cloud.distro.install_packages(('chef',)) - elif install_type == 'omnibus': + cloud.distro.install_packages(("chef",)) + elif install_type == "omnibus": omnibus_version = util.get_cfg_option_str(chef_cfg, "omnibus_version") install_chef_from_omnibus( url=util.get_cfg_option_str(chef_cfg, "omnibus_url"), retries=util.get_cfg_option_int(chef_cfg, "omnibus_url_retries"), - omnibus_version=omnibus_version) + omnibus_version=omnibus_version, + ) else: log.warning("Unknown chef install type '%s'", install_type) run = False @@ -581,25 +668,47 @@ def install_chef(cloud, chef_cfg, log): def get_ruby_packages(version): # return a list of packages needed to install ruby at version - pkgs = ['ruby%s' % version, 'ruby%s-dev' % version] + pkgs = ["ruby%s" % version, "ruby%s-dev" % version] if version == "1.8": - pkgs.extend(('libopenssl-ruby1.8', 'rubygems1.8')) + pkgs.extend(("libopenssl-ruby1.8", "rubygems1.8")) return pkgs def install_chef_from_gems(ruby_version, chef_version, distro): distro.install_packages(get_ruby_packages(ruby_version)) - if not os.path.exists('/usr/bin/gem'): - util.sym_link('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem') - if not os.path.exists('/usr/bin/ruby'): - util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby') + if not os.path.exists("/usr/bin/gem"): + util.sym_link("/usr/bin/gem%s" % ruby_version, "/usr/bin/gem") + if not os.path.exists("/usr/bin/ruby"): + util.sym_link("/usr/bin/ruby%s" % ruby_version, "/usr/bin/ruby") if chef_version: - subp.subp(['/usr/bin/gem', 'install', 'chef', - '-v %s' % chef_version, '--no-ri', - '--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False) + subp.subp( + [ + "/usr/bin/gem", + "install", + "chef", + "-v %s" % chef_version, + "--no-ri", + "--no-rdoc", + "--bindir", + "/usr/bin", + "-q", + ], + capture=False, + ) else: - subp.subp(['/usr/bin/gem', 'install', 'chef', - '--no-ri', '--no-rdoc', '--bindir', - '/usr/bin', '-q'], capture=False) + subp.subp( + [ + "/usr/bin/gem", + "install", + "chef", + "--no-ri", + "--no-rdoc", + "--bindir", + "/usr/bin", + "-q", + ], + capture=False, + ) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py index 4d5a6aa2..d09fc129 100644 --- a/cloudinit/config/cc_debug.py +++ b/cloudinit/config/cc_debug.py @@ -30,18 +30,16 @@ location that this cloud-init has been configured with when running. 
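Before the cc_debug hunks continue below: the module's output is just a banner header per section followed by a YAML dump. A rough stand-alone approximation (plain PyYAML stands in for cloud-init's safeyaml, and make_header mirrors the _make_header hunk that follows):

import yaml  # stand-in for cloudinit.safeyaml

def make_header(text):
    bar = "-" * 80
    return "%s\n%s\n%s\n" % (bar, text.center(80, " "), bar)

cfg = {"debug": {"verbose": True, "output": "/var/log/cloud-init-debug.log"}}
print(make_header("Config") + yaml.safe_dump(cfg, default_flow_style=False))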
import copy from io import StringIO -from cloudinit import type_utils -from cloudinit import util -from cloudinit import safeyaml +from cloudinit import safeyaml, type_utils, util -SKIP_KEYS = frozenset(['log_cfgs']) +SKIP_KEYS = frozenset(["log_cfgs"]) def _make_header(text): header = StringIO() header.write("-" * 80) header.write("\n") - header.write(text.center(80, ' ')) + header.write(text.center(80, " ")) header.write("\n") header.write("-" * 80) header.write("\n") @@ -56,17 +54,16 @@ def _dumps(obj): def handle(name, cfg, cloud, log, args): """Handler method activated by cloud-init.""" - verbose = util.get_cfg_by_path(cfg, ('debug', 'verbose'), default=True) + verbose = util.get_cfg_by_path(cfg, ("debug", "verbose"), default=True) if args: # if args are provided (from cmdline) then explicitly set verbose out_file = args[0] verbose = True else: - out_file = util.get_cfg_by_path(cfg, ('debug', 'output')) + out_file = util.get_cfg_by_path(cfg, ("debug", "output")) if not verbose: - log.debug(("Skipping module named %s," - " verbose printing disabled"), name) + log.debug("Skipping module named %s, verbose printing disabled", name) return # Clean out some keys that we just don't care about showing... dump_cfg = copy.deepcopy(cfg) @@ -85,8 +82,9 @@ def handle(name, cfg, cloud, log, args): to_print.write(_dumps(cloud.datasource.metadata)) to_print.write("\n") to_print.write(_make_header("Misc")) - to_print.write("Datasource: %s\n" % - (type_utils.obj_name(cloud.datasource))) + to_print.write( + "Datasource: %s\n" % (type_utils.obj_name(cloud.datasource)) + ) to_print.write("Distro: %s\n" % (type_utils.obj_name(cloud.distro))) to_print.write("Hostname: %s\n" % (cloud.get_hostname(True))) to_print.write("Instance ID: %s\n" % (cloud.get_instance_id())) @@ -102,4 +100,5 @@ def handle(name, cfg, cloud, log, args): else: util.multi_log("".join(content_to_file), console=True, stderr=False) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py index 61c769b3..5e528e81 100644 --- a/cloudinit/config/cc_disable_ec2_metadata.py +++ b/cloudinit/config/cc_disable_ec2_metadata.py @@ -26,32 +26,35 @@ by default. 
disable_ec2_metadata: """ -from cloudinit import subp -from cloudinit import util - +from cloudinit import subp, util from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS -REJECT_CMD_IF = ['route', 'add', '-host', '169.254.169.254', 'reject'] -REJECT_CMD_IP = ['ip', 'route', 'add', 'prohibit', '169.254.169.254'] +REJECT_CMD_IF = ["route", "add", "-host", "169.254.169.254", "reject"] +REJECT_CMD_IP = ["ip", "route", "add", "prohibit", "169.254.169.254"] def handle(name, cfg, _cloud, log, _args): disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False) if disabled: reject_cmd = None - if subp.which('ip'): + if subp.which("ip"): reject_cmd = REJECT_CMD_IP - elif subp.which('ifconfig'): + elif subp.which("ifconfig"): reject_cmd = REJECT_CMD_IF else: - log.error(('Neither "route" nor "ip" command found, unable to ' - 'manipulate routing table')) + log.error( + 'Neither "route" nor "ip" command found, unable to ' + "manipulate routing table" + ) return subp.subp(reject_cmd, capture=False) else: - log.debug(("Skipping module named %s," - " disabling the ec2 route not enabled"), name) + log.debug( + "Skipping module named %s, disabling the ec2 route not enabled", + name, + ) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index 440f05f1..4d527c7a 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -100,13 +100,13 @@ A label can be specified for the filesystem using replace_fs: """ -from cloudinit.settings import PER_INSTANCE -from cloudinit import util -from cloudinit import subp import logging import os import shlex +from cloudinit import subp, util +from cloudinit.settings import PER_INSTANCE + frequency = PER_INSTANCE # Define the commands to use @@ -118,7 +118,7 @@ BLKDEV_CMD = subp.which("blockdev") PARTPROBE_CMD = subp.which("partprobe") WIPEFS_CMD = subp.which("wipefs") -LANG_C_ENV = {'LANG': 'C'} +LANG_C_ENV = {"LANG": "C"} LOG = logging.getLogger(__name__) @@ -145,9 +145,12 @@ def handle(_name, cfg, cloud, log, _args): try: log.debug("Creating new partition table/disk") - util.log_time(logfunc=LOG.debug, - msg="Creating partition on %s" % disk, - func=mkpart, args=(disk, definition)) + util.log_time( + logfunc=LOG.debug, + msg="Creating partition on %s" % disk, + func=mkpart, + args=(disk, definition), + ) except Exception as e: util.logexc(LOG, "Failed partitioning operation\n%s" % e) @@ -162,10 +165,13 @@ def handle(_name, cfg, cloud, log, _args): try: log.debug("Creating new filesystem.") - device = definition.get('device') - util.log_time(logfunc=LOG.debug, - msg="Creating fs for %s" % device, - func=mkfs, args=(definition,)) + device = definition.get("device") + util.log_time( + logfunc=LOG.debug, + msg="Creating fs for %s" % device, + func=mkfs, + args=(definition,), + ) except Exception as e: util.logexc(LOG, "Failed during filesystem operation\n%s" % e) @@ -178,16 +184,22 @@ def update_disk_setup_devices(disk_setup, tformer): if transformed is None or transformed == origname: continue if transformed in disk_setup: - LOG.info("Replacing %s in disk_setup for translation of %s", - origname, transformed) + LOG.info( + "Replacing %s in disk_setup for translation of %s", + origname, + transformed, + ) del disk_setup[transformed] disk_setup[transformed] = disk_setup[origname] if isinstance(disk_setup[transformed], dict): - disk_setup[transformed]['_origname'] = origname + disk_setup[transformed]["_origname"] = origname del disk_setup[origname] - LOG.debug("updated 
disk_setup device entry '%s' to '%s'", - origname, transformed) + LOG.debug( + "updated disk_setup device entry '%s' to '%s'", + origname, + transformed, + ) def update_fs_setup_devices(disk_setup, tformer): @@ -198,7 +210,7 @@ def update_fs_setup_devices(disk_setup, tformer): LOG.warning("entry in disk_setup not a dict: %s", definition) continue - origname = definition.get('device') + origname = definition.get("device") if origname is None: continue @@ -208,19 +220,24 @@ def update_fs_setup_devices(disk_setup, tformer): tformed = tformer(dev) if tformed is not None: dev = tformed - LOG.debug("%s is mapped to disk=%s part=%s", - origname, tformed, part) - definition['_origname'] = origname - definition['device'] = tformed + LOG.debug( + "%s is mapped to disk=%s part=%s", origname, tformed, part + ) + definition["_origname"] = origname + definition["device"] = tformed if part: # In origname with .N, N overrides 'partition' key. - if 'partition' in definition: - LOG.warning("Partition '%s' from dotted device name '%s' " - "overrides 'partition' key in %s", part, origname, - definition) - definition['_partition'] = definition['partition'] - definition['partition'] = part + if "partition" in definition: + LOG.warning( + "Partition '%s' from dotted device name '%s' " + "overrides 'partition' key in %s", + part, + origname, + definition, + ) + definition["_partition"] = definition["partition"] + definition["partition"] = part def value_splitter(values, start=None): @@ -232,7 +249,7 @@ def value_splitter(values, start=None): if start: _values = _values[start:] - for key, value in [x.split('=') for x in _values]: + for key, value in [x.split("=") for x in _values]: yield key, value @@ -251,11 +268,16 @@ def enumerate_disk(device, nodeps=False): name: the device name, i.e. 
sda """ - lsblk_cmd = [LSBLK_CMD, '--pairs', '--output', 'NAME,TYPE,FSTYPE,LABEL', - device] + lsblk_cmd = [ + LSBLK_CMD, + "--pairs", + "--output", + "NAME,TYPE,FSTYPE,LABEL", + device, + ] if nodeps: - lsblk_cmd.append('--nodeps') + lsblk_cmd.append("--nodeps") info = None try: @@ -269,10 +291,10 @@ def enumerate_disk(device, nodeps=False): for part in parts: d = { - 'name': None, - 'type': None, - 'fstype': None, - 'label': None, + "name": None, + "type": None, + "fstype": None, + "label": None, } for key, value in value_splitter(part): @@ -303,9 +325,9 @@ def is_device_valid(name, partition=False): LOG.warning("Query against device %s failed", name) return False - if partition and d_type == 'part': + if partition and d_type == "part": return True - elif not partition and d_type == 'disk': + elif not partition and d_type == "disk": return True return False @@ -321,7 +343,7 @@ def check_fs(device): """ out, label, fs_type, uuid = None, None, None, None - blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device] + blkid_cmd = [BLKID_CMD, "-c", "/dev/null", device] try: out, _err = subp.subp(blkid_cmd, rcs=[0, 2]) except Exception as e: @@ -332,11 +354,11 @@ def check_fs(device): if out: if len(out.splitlines()) == 1: for key, value in value_splitter(out, start=1): - if key.lower() == 'label': + if key.lower() == "label": label = value - elif key.lower() == 'type': + elif key.lower() == "type": fs_type = value - elif key.lower() == 'uuid': + elif key.lower() == "uuid": uuid = value return label, fs_type, uuid @@ -350,8 +372,14 @@ def is_filesystem(device): return fs_type -def find_device_node(device, fs_type=None, label=None, valid_targets=None, - label_match=True, replace_fs=None): +def find_device_node( + device, + fs_type=None, + label=None, + valid_targets=None, + label_match=True, + replace_fs=None, +): """ Find a device that is either matches the spec, or the first @@ -366,31 +394,32 @@ def find_device_node(device, fs_type=None, label=None, valid_targets=None, label = "" if not valid_targets: - valid_targets = ['disk', 'part'] + valid_targets = ["disk", "part"] raw_device_used = False for d in enumerate_disk(device): - if d['fstype'] == replace_fs and label_match is False: + if d["fstype"] == replace_fs and label_match is False: # We found a device where we want to replace the FS - return ('/dev/%s' % d['name'], False) + return ("/dev/%s" % d["name"], False) - if (d['fstype'] == fs_type and - ((label_match and d['label'] == label) or not label_match)): + if d["fstype"] == fs_type and ( + (label_match and d["label"] == label) or not label_match + ): # If we find a matching device, we return that - return ('/dev/%s' % d['name'], True) + return ("/dev/%s" % d["name"], True) - if d['type'] in valid_targets: + if d["type"] in valid_targets: - if d['type'] != 'disk' or d['fstype']: + if d["type"] != "disk" or d["fstype"]: raw_device_used = True - if d['type'] == 'disk': + if d["type"] == "disk": # Skip the raw disk, its the default pass - elif not d['fstype']: - return ('/dev/%s' % d['name'], False) + elif not d["fstype"]: + return ("/dev/%s" % d["name"], False) if not raw_device_used: return (device, False) @@ -433,7 +462,7 @@ def get_dyn_func(*args): if len(args) < 2: raise Exception("Unable to determine dynamic funcation name") - func_name = (args[0] % args[1]) + func_name = args[0] % args[1] func_args = args[2:] try: @@ -448,8 +477,8 @@ def get_dyn_func(*args): def get_hdd_size(device): try: - size_in_bytes, _ = subp.subp([BLKDEV_CMD, '--getsize64', device]) - sector_size, _ = 
subp.subp([BLKDEV_CMD, '--getss', device]) + size_in_bytes, _ = subp.subp([BLKDEV_CMD, "--getsize64", device]) + sector_size, _ = subp.subp([BLKDEV_CMD, "--getss", device]) except Exception as e: raise Exception("Failed to get %s size\n%s" % (device, e)) from e @@ -481,13 +510,13 @@ def check_partition_mbr_layout(device, layout): if device in _line[0]: # We don't understand extended partitions yet - if _line[-1].lower() in ['extended', 'empty']: + if _line[-1].lower() in ["extended", "empty"]: continue # Find the partition types type_label = None for x in sorted(range(1, len(_line)), reverse=True): - if _line[x].isdigit() and _line[x] != '/': + if _line[x].isdigit() and _line[x] != "/": type_label = _line[x] break @@ -496,7 +525,7 @@ def check_partition_mbr_layout(device, layout): def check_partition_gpt_layout(device, layout): - prt_cmd = [SGDISK_CMD, '-p', device] + prt_cmd = [SGDISK_CMD, "-p", device] try: out, _err = subp.subp(prt_cmd, update_env=LANG_C_ENV) except Exception as e: @@ -522,7 +551,7 @@ def check_partition_gpt_layout(device, layout): # Number Start (sector) End (sector) Size Code Name # 1 2048 206847 100.0 MiB 0700 Microsoft basic data for line in out_lines: - if line.strip().startswith('Number'): + if line.strip().startswith("Number"): break codes = [line.strip().split()[5] for line in out_lines] @@ -545,10 +574,16 @@ def check_partition_layout(table_type, device, layout): function called check_partition_%s_layout """ found_layout = get_dyn_func( - "check_partition_%s_layout", table_type, device, layout) - - LOG.debug("called check_partition_%s_layout(%s, %s), returned: %s", - table_type, device, layout, found_layout) + "check_partition_%s_layout", table_type, device, layout + ) + + LOG.debug( + "called check_partition_%s_layout(%s, %s), returned: %s", + table_type, + device, + layout, + found_layout, + ) if isinstance(layout, bool): # if we are using auto partitioning, or "True" be happy # if a single partition exists. @@ -559,10 +594,12 @@ def check_partition_layout(table_type, device, layout): elif len(found_layout) == len(layout): # This just makes sure that the number of requested # partitions and the type labels are right - layout_types = [str(x[1]) if isinstance(x, (tuple, list)) else None - for x in layout] - LOG.debug("Layout types=%s. Found types=%s", - layout_types, found_layout) + layout_types = [ + str(x[1]) if isinstance(x, (tuple, list)) else None for x in layout + ] + LOG.debug( + "Layout types=%s. 
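# [Editor's illustration, not part of the patch] get_hdd_size() above
# reads the byte size and sector size from blockdev; the usable size in
# sectors (the return expression is elided in the hunk) is their
# quotient:
def sectors(size_in_bytes, sector_size):
    # e.g. a 10 GiB disk with 512-byte sectors -> 20971520 sectors
    return int(size_in_bytes) // int(sector_size)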
Found types=%s", layout_types, found_layout + ) for itype, ftype in zip(layout_types, found_layout): if itype is not None and str(ftype) != str(itype): return False @@ -588,8 +625,9 @@ def get_partition_mbr_layout(size, layout): # Create a single partition return "0," - if ((len(layout) == 0 and isinstance(layout, list)) or - not isinstance(layout, list)): + if (len(layout) == 0 and isinstance(layout, list)) or not isinstance( + layout, list + ): raise Exception("Partition layout is invalid") last_part_num = len(layout) @@ -617,8 +655,10 @@ def get_partition_mbr_layout(size, layout): sfdisk_definition = "\n".join(part_definition) if len(part_definition) > 4: - raise Exception("Calculated partition definition is too big\n%s" % - sfdisk_definition) + raise Exception( + "Calculated partition definition is too big\n%s" + % sfdisk_definition + ) return sfdisk_definition @@ -632,14 +672,15 @@ def get_partition_gpt_layout(size, layout): if isinstance(partition, list): if len(partition) != 2: raise Exception( - "Partition was incorrectly defined: %s" % partition) + "Partition was incorrectly defined: %s" % partition + ) percent, partition_type = partition else: percent = partition partition_type = None part_size = int(float(size) * (float(percent) / 100)) - partition_specs.append((partition_type, [0, '+{}'.format(part_size)])) + partition_specs.append((partition_type, [0, "+{}".format(part_size)])) # The last partition should use up all remaining space partition_specs[-1][-1][-1] = 0 @@ -649,7 +690,7 @@ def get_partition_gpt_layout(size, layout): def purge_disk_ptable(device): # wipe the first and last megabyte of a disk (or file) # gpt stores partition table both at front and at end. - null = '\0' + null = "\0" start_len = 1024 * 1024 end_len = 1024 * 1024 with open(device, "rb+") as fp: @@ -668,14 +709,14 @@ def purge_disk(device): # wipe any file systems first for d in enumerate_disk(device): - if d['type'] not in ["disk", "crypt"]: - wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']] + if d["type"] not in ["disk", "crypt"]: + wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d["name"]] try: - LOG.info("Purging filesystem on /dev/%s", d['name']) + LOG.info("Purging filesystem on /dev/%s", d["name"]) subp.subp(wipefs_cmd) except Exception as e: raise Exception( - "Failed FS purge of /dev/%s" % d['name'] + "Failed FS purge of /dev/%s" % d["name"] ) from e purge_disk_ptable(device) @@ -701,7 +742,7 @@ def read_parttbl(device): if PARTPROBE_CMD is not None: probe_cmd = [PARTPROBE_CMD, device] else: - probe_cmd = [BLKDEV_CMD, '--rereadpt', device] + probe_cmd = [BLKDEV_CMD, "--rereadpt", device] util.udevadm_settle() try: subp.subp(probe_cmd) @@ -730,17 +771,24 @@ def exec_mkpart_mbr(device, layout): def exec_mkpart_gpt(device, layout): try: - subp.subp([SGDISK_CMD, '-Z', device]) + subp.subp([SGDISK_CMD, "-Z", device]) for index, (partition_type, (start, end)) in enumerate(layout): index += 1 - subp.subp([SGDISK_CMD, - '-n', '{}:{}:{}'.format(index, start, end), device]) + subp.subp( + [ + SGDISK_CMD, + "-n", + "{}:{}:{}".format(index, start, end), + device, + ] + ) if partition_type is not None: # convert to a 4 char (or more) string right padded with 0 # 82 -> 8200. 
'Linux' -> 'Linux' pinput = str(partition_type).ljust(4, "0") subp.subp( - [SGDISK_CMD, '-t', '{}:{}'.format(index, pinput), device]) + [SGDISK_CMD, "-t", "{}:{}".format(index, pinput), device] + ) except Exception: LOG.warning("Failed to partition device %s", device) raise @@ -766,8 +814,10 @@ def assert_and_settle_device(device): if not os.path.exists(device): util.udevadm_settle() if not os.path.exists(device): - raise RuntimeError("Device %s did not exist and was not created " - "with a udevadm settle." % device) + raise RuntimeError( + "Device %s did not exist and was not created " + "with a udevadm settle." % device + ) # Whether or not the device existed above, it is possible that udev # events that would populate udev database (for reading by lsdname) have @@ -794,9 +844,9 @@ def mkpart(device, definition): device = os.path.realpath(device) LOG.debug("Checking values for %s definition", device) - overwrite = definition.get('overwrite', False) - layout = definition.get('layout', False) - table_type = definition.get('table_type', 'mbr') + overwrite = definition.get("overwrite", False) + layout = definition.get("layout", False) + table_type = definition.get("table_type", "mbr") # Check if the default device is a partition or not LOG.debug("Checking against default devices") @@ -809,7 +859,8 @@ def mkpart(device, definition): LOG.debug("Checking if device %s is a valid device", device) if not is_device_valid(device): raise Exception( - 'Device {device} is not a disk device!'.format(device=device)) + "Device {device} is not a disk device!".format(device=device) + ) # Remove the partition table entries if isinstance(layout, str) and layout.lower() == "remove": @@ -845,21 +896,21 @@ def lookup_force_flag(fs): A force flag might be -F or -F, this look it up """ flags = { - 'ext': '-F', - 'btrfs': '-f', - 'xfs': '-f', - 'reiserfs': '-f', - 'swap': '-f', + "ext": "-F", + "btrfs": "-f", + "xfs": "-f", + "reiserfs": "-f", + "swap": "-f", } - if 'ext' in fs.lower(): - fs = 'ext' + if "ext" in fs.lower(): + fs = "ext" if fs.lower() in flags: return flags[fs] LOG.warning("Force flag for %s is unknown.", fs) - return '' + return "" def mkfs(fs_cfg): @@ -883,14 +934,14 @@ def mkfs(fs_cfg): When 'cmd' is provided then no other parameter is required. 
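# [Editor's illustration, not part of the patch] the ljust() call above
# right-pads short MBR-style codes to sgdisk's four-character GPT type
# codes while leaving longer names alone:
str(82).ljust(4, "0")        # -> '8200' (Linux swap)
"Linux".ljust(4, "0")        # -> 'Linux' (already 4+ chars, unchanged)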
""" - label = fs_cfg.get('label') - device = fs_cfg.get('device') - partition = str(fs_cfg.get('partition', 'any')) - fs_type = fs_cfg.get('filesystem') - fs_cmd = fs_cfg.get('cmd', []) - fs_opts = fs_cfg.get('extra_opts', []) - fs_replace = fs_cfg.get('replace_fs', False) - overwrite = fs_cfg.get('overwrite', False) + label = fs_cfg.get("label") + device = fs_cfg.get("device") + partition = str(fs_cfg.get("partition", "any")) + fs_type = fs_cfg.get("filesystem") + fs_cmd = fs_cfg.get("cmd", []) + fs_opts = fs_cfg.get("extra_opts", []) + fs_replace = fs_cfg.get("replace_fs", False) + overwrite = fs_cfg.get("overwrite", False) # ensure that we get a real device rather than a symbolic link assert_and_settle_device(device) @@ -903,14 +954,19 @@ def mkfs(fs_cfg): # Handle manual definition of partition if partition.isdigit(): device = "%s%s" % (device, partition) - LOG.debug("Manual request of partition %s for %s", - partition, device) + LOG.debug( + "Manual request of partition %s for %s", partition, device + ) # Check to see if the fs already exists LOG.debug("Checking device %s", device) check_label, check_fstype, _ = check_fs(device) - LOG.debug("Device '%s' has check_label='%s' check_fstype=%s", - device, check_label, check_fstype) + LOG.debug( + "Device '%s' has check_label='%s' check_fstype=%s", + device, + check_label, + check_fstype, + ) if check_label == label and check_fstype == fs_type: LOG.debug("Existing file system found at %s", device) @@ -924,19 +980,23 @@ def mkfs(fs_cfg): else: LOG.debug("Device %s is cleared for formating", device) - elif partition and str(partition).lower() in ('auto', 'any'): + elif partition and str(partition).lower() in ("auto", "any"): # For auto devices, we match if the filesystem does exist odevice = device LOG.debug("Identifying device to create %s filesytem on", label) # any mean pick the first match on the device with matching fs_type label_match = True - if partition.lower() == 'any': + if partition.lower() == "any": label_match = False - device, reuse = find_device_node(device, fs_type=fs_type, label=label, - label_match=label_match, - replace_fs=fs_replace) + device, reuse = find_device_node( + device, + fs_type=fs_type, + label=label, + label_match=label_match, + replace_fs=fs_replace, + ) LOG.debug("Automatic device for %s identified as %s", odevice, device) if reuse: @@ -947,18 +1007,25 @@ def mkfs(fs_cfg): LOG.debug("Replacing file system on %s as instructed.", device) if not device: - LOG.debug("No device aviable that matches request. " - "Skipping fs creation for %s", fs_cfg) + LOG.debug( + "No device aviable that matches request. " + "Skipping fs creation for %s", + fs_cfg, + ) return - elif not partition or str(partition).lower() == 'none': + elif not partition or str(partition).lower() == "none": LOG.debug("Using the raw device to place filesystem %s on", label) else: LOG.debug("Error in device identification handling.") return - LOG.debug("File system type '%s' with label '%s' will be created on %s", - fs_type, label, device) + LOG.debug( + "File system type '%s' with label '%s' will be created on %s", + fs_type, + label, + device, + ) # Make sure the device is defined if not device: @@ -969,26 +1036,29 @@ def mkfs(fs_cfg): if not (fs_type or fs_cmd): raise Exception( "No way to create filesystem '{label}'. 
fs_type or fs_cmd " - "must be set.".format(label=label)) + "must be set.".format(label=label) + ) # Create the commands shell = False if fs_cmd: - fs_cmd = fs_cfg['cmd'] % { - 'label': label, - 'filesystem': fs_type, - 'device': device, + fs_cmd = fs_cfg["cmd"] % { + "label": label, + "filesystem": fs_type, + "device": device, } shell = True if overwrite: LOG.warning( "fs_setup:overwrite ignored because cmd was specified: %s", - fs_cmd) + fs_cmd, + ) if fs_opts: LOG.warning( "fs_setup:extra_opts ignored because cmd was specified: %s", - fs_cmd) + fs_cmd, + ) else: # Find the mkfs command mkfs_cmd = subp.which("mkfs.%s" % fs_type) @@ -996,8 +1066,11 @@ def mkfs(fs_cfg): mkfs_cmd = subp.which("mk%s" % fs_type) if not mkfs_cmd: - LOG.warning("Cannot create fstype '%s'. No mkfs.%s command", - fs_type, fs_type) + LOG.warning( + "Cannot create fstype '%s'. No mkfs.%s command", + fs_type, + fs_type, + ) return fs_cmd = [mkfs_cmd, device] @@ -1022,4 +1095,5 @@ def mkfs(fs_cfg): except Exception as e: raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) from e + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py index 40eee052..a928082b 100644 --- a/cloudinit/config/cc_emit_upstart.py +++ b/cloudinit/config/cc_emit_upstart.py @@ -24,12 +24,12 @@ user configuration should be required. import os from cloudinit import log as logging -from cloudinit.settings import PER_ALWAYS from cloudinit import subp +from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS -distros = ['ubuntu', 'debian'] +distros = ["ubuntu", "debian"] LOG = logging.getLogger(__name__) @@ -39,15 +39,18 @@ def is_upstart_system(): return False myenv = os.environ.copy() - if 'UPSTART_SESSION' in myenv: - del myenv['UPSTART_SESSION'] - check_cmd = ['initctl', 'version'] + if "UPSTART_SESSION" in myenv: + del myenv["UPSTART_SESSION"] + check_cmd = ["initctl", "version"] try: (out, _err) = subp.subp(check_cmd, env=myenv) - return 'upstart' in out + return "upstart" in out except subp.ProcessExecutionError as e: - LOG.debug("'%s' returned '%s', not using upstart", - ' '.join(check_cmd), e.exit_code) + LOG.debug( + "'%s' returned '%s', not using upstart", + " ".join(check_cmd), + e.exit_code, + ) return False @@ -56,7 +59,7 @@ def handle(name, _cfg, cloud, log, args): if not event_names: # Default to the 'cloud-config' # event for backwards compat. - event_names = ['cloud-config'] + event_names = ["cloud-config"] if not is_upstart_system(): log.debug("not upstart system, '%s' disabled", name) @@ -64,11 +67,12 @@ def handle(name, _cfg, cloud, log, args): cfgpath = cloud.paths.get_ipath_cur("cloud_config") for n in event_names: - cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath] + cmd = ["initctl", "emit", str(n), "CLOUD_CFG=%s" % cfgpath] try: subp.subp(cmd) except Exception as e: # TODO(harlowja), use log exception from utils?? 
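# [Editor's illustration, not part of the patch] is_upstart_system()
# above probes `initctl version` with UPSTART_SESSION scrubbed from the
# environment; the same probe using only the standard library:
import os
import subprocess

def looks_like_upstart():
    env = {k: v for k, v in os.environ.items() if k != "UPSTART_SESSION"}
    try:
        res = subprocess.run(["initctl", "version"], env=env, check=True,
                             capture_output=True, text=True)
    except (OSError, subprocess.CalledProcessError):
        return False
    return "upstart" in res.stdout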
log.warning("Emission of upstart event %s failed due to: %s", n, e) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py index 91f50e22..50a81744 100644 --- a/cloudinit/config/cc_fan.py +++ b/cloudinit/config/cc_fan.py @@ -38,60 +38,62 @@ If cloud-init sees a ``fan`` entry in cloud-config it will: """ from cloudinit import log as logging +from cloudinit import subp, util from cloudinit.settings import PER_INSTANCE -from cloudinit import subp -from cloudinit import util LOG = logging.getLogger(__name__) frequency = PER_INSTANCE BUILTIN_CFG = { - 'config': None, - 'config_path': '/etc/network/fan', + "config": None, + "config_path": "/etc/network/fan", } def stop_update_start(distro, service, config_file, content): try: - distro.manage_service('stop', service) + distro.manage_service("stop", service) stop_failed = False except subp.ProcessExecutionError as e: stop_failed = True LOG.warning("failed to stop %s: %s", service, e) - if not content.endswith('\n'): - content += '\n' + if not content.endswith("\n"): + content += "\n" util.write_file(config_file, content, omode="w") try: - distro.manage_service('start', service) + distro.manage_service("start", service) if stop_failed: LOG.warning("success: %s started", service) except subp.ProcessExecutionError as e: LOG.warning("failed to start %s: %s", service, e) - distro.manage_service('enable', service) + distro.manage_service("enable", service) def handle(name, cfg, cloud, log, args): - cfgin = cfg.get('fan') + cfgin = cfg.get("fan") if not cfgin: cfgin = {} mycfg = util.mergemanydict([cfgin, BUILTIN_CFG]) - if not mycfg.get('config'): + if not mycfg.get("config"): LOG.debug("%s: no 'fan' config entry. disabling", name) return - util.write_file(mycfg.get('config_path'), mycfg.get('config'), omode="w") + util.write_file(mycfg.get("config_path"), mycfg.get("config"), omode="w") distro = cloud.distro - if not subp.which('fanctl'): - distro.install_packages(['ubuntu-fan']) + if not subp.which("fanctl"): + distro.install_packages(["ubuntu-fan"]) stop_update_start( distro, - service='ubuntu-fan', config_file=mycfg.get('config_path'), - content=mycfg.get('config')) + service="ubuntu-fan", + config_file=mycfg.get("config_path"), + content=mycfg.get("config"), + ) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py index 4fa5297e..f443ccd8 100644 --- a/cloudinit/config/cc_final_message.py +++ b/cloudinit/config/cc_final_message.py @@ -31,10 +31,7 @@ specified as a jinja template with the following variables set: """ -from cloudinit import templater -from cloudinit import util -from cloudinit import version - +from cloudinit import templater, util, version from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS @@ -49,7 +46,7 @@ FINAL_MESSAGE_DEF = ( def handle(_name, cfg, cloud, log, args): - msg_in = '' + msg_in = "" if len(args) != 0: msg_in = str(args[0]) else: @@ -64,14 +61,18 @@ def handle(_name, cfg, cloud, log, args): cver = version.version_string() try: subs = { - 'uptime': uptime, - 'timestamp': ts, - 'version': cver, - 'datasource': str(cloud.datasource), + "uptime": uptime, + "timestamp": ts, + "version": cver, + "datasource": str(cloud.datasource), } subs.update(dict([(k.upper(), v) for k, v in subs.items()])) - util.multi_log("%s\n" % (templater.render_string(msg_in, subs)), - console=False, stderr=True, log=log) + util.multi_log( + "%s\n" % (templater.render_string(msg_in, subs)), + console=False, + stderr=True, + log=log, + ) 
except Exception: util.logexc(log, "Failed to render final message template") @@ -85,4 +86,5 @@ def handle(_name, cfg, cloud, log, args): if cloud.datasource.is_disconnected: log.warning("Used fallback datasource") + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_foo.py b/cloudinit/config/cc_foo.py index 924b967c..3c307153 100644 --- a/cloudinit/config/cc_foo.py +++ b/cloudinit/config/cc_foo.py @@ -53,4 +53,5 @@ frequency = PER_INSTANCE def handle(name, _cfg, _cloud, log, _args): log.debug("Hi from module %s", name) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 1ddc9dc7..43334caa 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -70,17 +70,15 @@ import re import stat from cloudinit import log as logging +from cloudinit import subp, temp_utils, util from cloudinit.settings import PER_ALWAYS -from cloudinit import subp -from cloudinit import temp_utils -from cloudinit import util frequency = PER_ALWAYS DEFAULT_CONFIG = { - 'mode': 'auto', - 'devices': ['/'], - 'ignore_growroot_disabled': False, + "mode": "auto", + "devices": ["/"], + "ignore_growroot_disabled": False, } @@ -131,7 +129,7 @@ class ResizeFailedException(Exception): class ResizeGrowPart(object): def available(self): myenv = os.environ.copy() - myenv['LANG'] = 'C' + myenv["LANG"] = "C" try: (out, _err) = subp.subp(["growpart", "--help"], env=myenv) @@ -144,7 +142,7 @@ class ResizeGrowPart(object): def resize(self, diskdev, partnum, partdev): myenv = os.environ.copy() - myenv['LANG'] = 'C' + myenv["LANG"] = "C" before = get_size(partdev) # growpart uses tmp dir to store intermediate states @@ -153,14 +151,19 @@ class ResizeGrowPart(object): growpart_tmp = os.path.join(tmpd, "growpart") if not os.path.exists(growpart_tmp): os.mkdir(growpart_tmp, 0o700) - myenv['TMPDIR'] = growpart_tmp + myenv["TMPDIR"] = growpart_tmp try: - subp.subp(["growpart", '--dry-run', diskdev, partnum], - env=myenv) + subp.subp( + ["growpart", "--dry-run", diskdev, partnum], env=myenv + ) except subp.ProcessExecutionError as e: if e.exit_code != 1: - util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)", - diskdev, partnum) + util.logexc( + LOG, + "Failed growpart --dry-run for (%s, %s)", + diskdev, + partnum, + ) raise ResizeFailedException(e) from e return (before, before) @@ -176,7 +179,7 @@ class ResizeGrowPart(object): class ResizeGpart(object): def available(self): myenv = os.environ.copy() - myenv['LANG'] = 'C' + myenv["LANG"] = "C" try: (_out, err) = subp.subp(["gpart", "help"], env=myenv, rcs=[0, 1]) @@ -234,11 +237,11 @@ def device_part_info(devpath): # the device, like /dev/vtbd0p2. if util.is_FreeBSD(): freebsd_part = "/dev/" + util.find_freebsd_part(devpath) - m = re.search('^(/dev/.+)p([0-9])$', freebsd_part) + m = re.search("^(/dev/.+)p([0-9])$", freebsd_part) return (m.group(1), m.group(2)) elif util.is_DragonFlyBSD(): dragonflybsd_part = "/dev/" + util.find_dragonflybsd_part(devpath) - m = re.search('^(/dev/.+)s([0-9])$', dragonflybsd_part) + m = re.search("^(/dev/.+)s([0-9])$", dragonflybsd_part) return (m.group(1), m.group(2)) if not os.path.exists(syspath): @@ -275,7 +278,7 @@ def devent2dev(devent): container = util.is_container() # Ensure the path is a block device. 
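# [Editor's illustration, not part of the patch] handle() above exposes
# every template variable in both cases, so $version and $VERSION work
# equally in the final_message template:
subs = {"uptime": "12.34", "timestamp": "Mon, 01 Jan 2024 00:00:00",
        "version": "21.4", "datasource": "DataSourceEc2"}
subs.update(dict((k.upper(), v) for k, v in subs.items()))
# subs now also carries 'UPTIME', 'TIMESTAMP', 'VERSION', 'DATASOURCE'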
- if (dev == "/dev/root" and not container): + if dev == "/dev/root" and not container: dev = util.rootdev_from_cmdline(util.get_cmdline()) if dev is None: if os.path.exists(dev): @@ -293,65 +296,102 @@ def resize_devices(resizer, devices): try: blockdev = devent2dev(devent) except ValueError as e: - info.append((devent, RESIZE.SKIPPED, - "unable to convert to device: %s" % e,)) + info.append( + ( + devent, + RESIZE.SKIPPED, + "unable to convert to device: %s" % e, + ) + ) continue try: statret = os.stat(blockdev) except OSError as e: - info.append((devent, RESIZE.SKIPPED, - "stat of '%s' failed: %s" % (blockdev, e),)) + info.append( + ( + devent, + RESIZE.SKIPPED, + "stat of '%s' failed: %s" % (blockdev, e), + ) + ) continue - if (not stat.S_ISBLK(statret.st_mode) and - not stat.S_ISCHR(statret.st_mode)): - info.append((devent, RESIZE.SKIPPED, - "device '%s' not a block device" % blockdev,)) + if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR( + statret.st_mode + ): + info.append( + ( + devent, + RESIZE.SKIPPED, + "device '%s' not a block device" % blockdev, + ) + ) continue try: (disk, ptnum) = device_part_info(blockdev) except (TypeError, ValueError) as e: - info.append((devent, RESIZE.SKIPPED, - "device_part_info(%s) failed: %s" % (blockdev, e),)) + info.append( + ( + devent, + RESIZE.SKIPPED, + "device_part_info(%s) failed: %s" % (blockdev, e), + ) + ) continue try: (old, new) = resizer.resize(disk, ptnum, blockdev) if old == new: - info.append((devent, RESIZE.NOCHANGE, - "no change necessary (%s, %s)" % (disk, ptnum),)) + info.append( + ( + devent, + RESIZE.NOCHANGE, + "no change necessary (%s, %s)" % (disk, ptnum), + ) + ) else: - info.append((devent, RESIZE.CHANGED, - "changed (%s, %s) from %s to %s" % - (disk, ptnum, old, new),)) + info.append( + ( + devent, + RESIZE.CHANGED, + "changed (%s, %s) from %s to %s" + % (disk, ptnum, old, new), + ) + ) except ResizeFailedException as e: - info.append((devent, RESIZE.FAILED, - "failed to resize: disk=%s, ptnum=%s: %s" % - (disk, ptnum, e),)) + info.append( + ( + devent, + RESIZE.FAILED, + "failed to resize: disk=%s, ptnum=%s: %s" + % (disk, ptnum, e), + ) + ) return info def handle(_name, cfg, _cloud, log, _args): - if 'growpart' not in cfg: - log.debug("No 'growpart' entry in cfg. Using default: %s" % - DEFAULT_CONFIG) - cfg['growpart'] = DEFAULT_CONFIG + if "growpart" not in cfg: + log.debug( + "No 'growpart' entry in cfg. 
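# [Editor's illustration, not part of the patch] on FreeBSD,
# device_part_info() above splits paths like /dev/vtbd0p2 into the disk
# and partition number with the regex shown:
import re

m = re.search("^(/dev/.+)p([0-9])$", "/dev/vtbd0p2")
disk, partnum = m.group(1), m.group(2)    # ('/dev/vtbd0', '2')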
Using default: %s" % DEFAULT_CONFIG + ) + cfg["growpart"] = DEFAULT_CONFIG - mycfg = cfg.get('growpart') + mycfg = cfg.get("growpart") if not isinstance(mycfg, dict): log.warning("'growpart' in config was not a dict") return - mode = mycfg.get('mode', "auto") + mode = mycfg.get("mode", "auto") if util.is_false(mode): log.debug("growpart disabled: mode=%s" % mode) return - if util.is_false(mycfg.get('ignore_growroot_disabled', False)): + if util.is_false(mycfg.get("ignore_growroot_disabled", False)): if os.path.isfile("/etc/growroot-disabled"): log.debug("growpart disabled: /etc/growroot-disabled exists") log.debug("use ignore_growroot_disabled to ignore") @@ -370,8 +410,12 @@ def handle(_name, cfg, _cloud, log, _args): raise e return - resized = util.log_time(logfunc=log.debug, msg="resize_devices", - func=resize_devices, args=(resizer, devices)) + resized = util.log_time( + logfunc=log.debug, + msg="resize_devices", + func=resize_devices, + args=(resizer, devices), + ) for (entry, action, msg) in resized: if action == RESIZE.CHANGED: log.info("'%s' resized: %s" % (entry, msg)) @@ -379,6 +423,6 @@ def handle(_name, cfg, _cloud, log, _args): log.debug("'%s' %s: %s" % (entry, action, msg)) -RESIZERS = (('growpart', ResizeGrowPart), ('gpart', ResizeGpart)) +RESIZERS = (("growpart", ResizeGrowPart), ("gpart", ResizeGpart)) # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py index eb03c664..ad7243d9 100644 --- a/cloudinit/config/cc_grub_dpkg.py +++ b/cloudinit/config/cc_grub_dpkg.py @@ -43,11 +43,10 @@ seeded with empty values, and install_devices_empty is set to true. import os -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util from cloudinit.subp import ProcessExecutionError -distros = ['ubuntu', 'debian'] +distros = ["ubuntu", "debian"] def fetch_idevs(log): @@ -60,8 +59,9 @@ def fetch_idevs(log): try: # get the root disk where the /boot directory resides. 
- disk = subp.subp(['grub-probe', '-t', 'disk', '/boot'], - capture=True)[0].strip() + disk = subp.subp(["grub-probe", "-t", "disk", "/boot"], capture=True)[ + 0 + ].strip() except ProcessExecutionError as e: # grub-common may not be installed, especially on containers # FileNotFoundError is a nested exception of ProcessExecutionError @@ -81,26 +81,30 @@ def fetch_idevs(log): if not disk or not os.path.exists(disk): # If we failed to detect a disk, we can return early - return '' + return "" try: # check if disk exists and use udevadm to fetch symlinks - devices = subp.subp( - ['udevadm', 'info', '--root', '--query=symlink', disk], - capture=True - )[0].strip().split() + devices = ( + subp.subp( + ["udevadm", "info", "--root", "--query=symlink", disk], + capture=True, + )[0] + .strip() + .split() + ) except Exception: util.logexc( log, "udevadm DEVLINKS symlink query failed for disk='%s'", disk ) - log.debug('considering these device symlinks: %s', ','.join(devices)) + log.debug("considering these device symlinks: %s", ",".join(devices)) # filter symlinks for /dev/disk/by-id entries - devices = [dev for dev in devices if 'disk/by-id' in dev] - log.debug('filtered to these disk/by-id symlinks: %s', ','.join(devices)) + devices = [dev for dev in devices if "disk/by-id" in dev] + log.debug("filtered to these disk/by-id symlinks: %s", ",".join(devices)) # select first device if there is one, else fall back to plain name idevs = sorted(devices)[0] if devices else disk - log.debug('selected %s', idevs) + log.debug("selected %s", idevs) return idevs @@ -111,14 +115,15 @@ def handle(name, cfg, _cloud, log, _args): if not mycfg: mycfg = {} - enabled = mycfg.get('enabled', True) + enabled = mycfg.get("enabled", True) if util.is_false(enabled): log.debug("%s disabled by config grub_dpkg/enabled=%s", name, enabled) return idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None) idevs_empty = util.get_cfg_option_str( - mycfg, "grub-pc/install_devices_empty", None) + mycfg, "grub-pc/install_devices_empty", None + ) if idevs is None: idevs = fetch_idevs(log) @@ -128,16 +133,21 @@ def handle(name, cfg, _cloud, log, _args): # now idevs and idevs_empty are set to determined values # or, those set by user - dconf_sel = (("grub-pc grub-pc/install_devices string %s\n" - "grub-pc grub-pc/install_devices_empty boolean %s\n") % - (idevs, idevs_empty)) + dconf_sel = ( + "grub-pc grub-pc/install_devices string %s\n" + "grub-pc grub-pc/install_devices_empty boolean %s\n" + % (idevs, idevs_empty) + ) - log.debug("Setting grub debconf-set-selections with '%s','%s'" % - (idevs, idevs_empty)) + log.debug( + "Setting grub debconf-set-selections with '%s','%s'" + % (idevs, idevs_empty) + ) try: - subp.subp(['debconf-set-selections'], dconf_sel) + subp.subp(["debconf-set-selections"], dconf_sel) except Exception: util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg") + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_install_hotplug.py b/cloudinit/config/cc_install_hotplug.py index 9b4075cc..952d9f13 100644 --- a/cloudinit/config/cc_install_hotplug.py +++ b/cloudinit/config/cc_install_hotplug.py @@ -3,15 +3,12 @@ import os from textwrap import dedent -from cloudinit import util -from cloudinit import subp -from cloudinit import stages +from cloudinit import stages, subp, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.distros import ALL_DISTROS -from cloudinit.event import EventType, EventScope +from cloudinit.event import EventScope, 
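# [Editor's illustration, not part of the patch] fetch_idevs() above
# prefers a stable /dev/disk/by-id symlink over the raw disk name; the
# filter-and-pick step reduces to (sample symlinks invented):
devices = ["/dev/disk/by-path/pci-0000:00:04.0",
           "/dev/disk/by-id/nvme-eui.0025388901000000"]
disk = "/dev/nvme0n1"
by_id = [dev for dev in devices if "disk/by-id" in dev]
idevs = sorted(by_id)[0] if by_id else disk
# -> '/dev/disk/by-id/nvme-eui.0025388901000000'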
EventType from cloudinit.settings import PER_INSTANCE - frequency = PER_INSTANCE distros = [ALL_DISTROS] @@ -19,7 +16,8 @@ meta = { "id": "cc_install_hotplug", "name": "Install Hotplug", "title": "Install hotplug if supported and enabled", - "description": dedent("""\ + "description": dedent( + """\ This module will install the udev rules to enable hotplug if supported by the datasource and enabled in the userdata. The udev rules will be installed as @@ -32,21 +30,26 @@ meta = { network configuration. Currently supported datasources: Openstack, EC2 - """), + """ + ), "distros": distros, "examples": [ - dedent("""\ + dedent( + """\ # Enable hotplug of network devices updates: network: when: ["hotplug"] - """), - dedent("""\ + """ + ), + dedent( + """\ # Enable network hotplug alongside boot event updates: network: when: ["boot", "hotplug"] - """), + """ + ), ], "frequency": frequency, } @@ -74,14 +77,14 @@ schema = { "boot-legacy", "boot", "hotplug", - ] - } + ], + }, } - } + }, } - } + }, } - } + }, } __doc__ = get_meta_doc(meta, schema) @@ -100,14 +103,15 @@ LABEL="cloudinit_end" def handle(_name, cfg, cloud, log, _args): validate_cloudconfig_schema(cfg, schema) network_hotplug_enabled = ( - 'updates' in cfg and - 'network' in cfg['updates'] and - 'when' in cfg['updates']['network'] and - 'hotplug' in cfg['updates']['network']['when'] + "updates" in cfg + and "network" in cfg["updates"] + and "when" in cfg["updates"]["network"] + and "hotplug" in cfg["updates"]["network"]["when"] ) hotplug_supported = EventType.HOTPLUG in ( - cloud.datasource.get_supported_events( - [EventType.HOTPLUG]).get(EventScope.NETWORK, set()) + cloud.datasource.get_supported_events([EventType.HOTPLUG]).get( + EventScope.NETWORK, set() + ) ) hotplug_enabled = stages.update_event_enabled( datasource=cloud.datasource, diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py index d72b5244..ab35e136 100644 --- a/cloudinit/config/cc_keys_to_console.py +++ b/cloudinit/config/cc_keys_to_console.py @@ -38,49 +38,53 @@ host keys are not written to console. 
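# [Editor's illustration, not part of the patch] the chained membership
# test in handle() above walks updates -> network -> when without
# raising on absent keys; for user data enabling hotplug:
cfg = {"updates": {"network": {"when": ["hotplug"]}}}
network_hotplug_enabled = (
    "updates" in cfg
    and "network" in cfg["updates"]
    and "when" in cfg["updates"]["network"]
    and "hotplug" in cfg["updates"]["network"]["when"]
)
# -> True; any missing level short-circuits to False instead of KeyError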
import os +from cloudinit import subp, util from cloudinit.settings import PER_INSTANCE -from cloudinit import subp -from cloudinit import util frequency = PER_INSTANCE # This is a tool that cloud init provides -HELPER_TOOL_TPL = '%s/cloud-init/write-ssh-key-fingerprints' +HELPER_TOOL_TPL = "%s/cloud-init/write-ssh-key-fingerprints" def _get_helper_tool_path(distro): try: base_lib = distro.usr_lib_exec except AttributeError: - base_lib = '/usr/lib' + base_lib = "/usr/lib" return HELPER_TOOL_TPL % base_lib def handle(name, cfg, cloud, log, _args): if util.is_false(cfg.get("ssh", {}).get("emit_keys_to_console", True)): - log.debug(("Skipping module named %s, " - "logging of SSH host keys disabled"), name) + log.debug( + "Skipping module named %s, logging of SSH host keys disabled", name + ) return helper_path = _get_helper_tool_path(cloud.distro) if not os.path.exists(helper_path): - log.warning(("Unable to activate module %s," - " helper tool not found at %s"), name, helper_path) + log.warning( + "Unable to activate module %s, helper tool not found at %s", + name, + helper_path, + ) return - fp_blacklist = util.get_cfg_option_list(cfg, - "ssh_fp_console_blacklist", []) - key_blacklist = util.get_cfg_option_list(cfg, - "ssh_key_console_blacklist", - ["ssh-dss"]) + fp_blacklist = util.get_cfg_option_list( + cfg, "ssh_fp_console_blacklist", [] + ) + key_blacklist = util.get_cfg_option_list( + cfg, "ssh_key_console_blacklist", ["ssh-dss"] + ) try: - cmd = [helper_path, ','.join(fp_blacklist), ','.join(key_blacklist)] + cmd = [helper_path, ",".join(fp_blacklist), ",".join(key_blacklist)] (stdout, _stderr) = subp.subp(cmd) - util.multi_log("%s\n" % (stdout.strip()), - stderr=False, console=True) + util.multi_log("%s\n" % (stdout.strip()), stderr=False, console=True) except Exception: log.warning("Writing keys to the system console failed!") raise + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index 299c4d01..03ebf411 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -60,10 +60,7 @@ from io import BytesIO from configobj import ConfigObj -from cloudinit import type_utils -from cloudinit import subp -from cloudinit import util - +from cloudinit import subp, type_utils, util from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE @@ -71,15 +68,15 @@ frequency = PER_INSTANCE LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf" LS_DEFAULT_FILE = "/etc/default/landscape-client" -distros = ['ubuntu'] +distros = ["ubuntu"] # defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2 LSC_BUILTIN_CFG = { - 'client': { - 'log_level': "info", - 'url': "https://landscape.canonical.com/message-system", - 'ping_url': "http://landscape.canonical.com/ping", - 'data_path': "/var/lib/landscape/client", + "client": { + "log_level": "info", + "url": "https://landscape.canonical.com/message-system", + "ping_url": "http://landscape.canonical.com/ping", + "data_path": "/var/lib/landscape/client", } } @@ -97,11 +94,13 @@ def handle(_name, cfg, cloud, log, _args): raise RuntimeError( "'landscape' key existed in config, but not a dictionary type," " is a {_type} instead".format( - _type=type_utils.obj_name(ls_cloudcfg))) + _type=type_utils.obj_name(ls_cloudcfg) + ) + ) if not ls_cloudcfg: return - cloud.distro.install_packages(('landscape-client',)) + cloud.distro.install_packages(("landscape-client",)) merge_data = [ LSC_BUILTIN_CFG, @@ -135,4 +134,5 @@ def merge_together(objs): 
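# [Editor's illustration, not part of the patch] handle() above hands
# both blacklists to the helper as comma-joined argv entries, so with
# the defaults the command is:
helper_path = "/usr/lib/cloud-init/write-ssh-key-fingerprints"
fp_blacklist = []                # ssh_fp_console_blacklist default
key_blacklist = ["ssh-dss"]      # ssh_key_console_blacklist default
cmd = [helper_path, ",".join(fp_blacklist), ",".join(key_blacklist)]
# -> ['/usr/lib/cloud-init/write-ssh-key-fingerprints', '', 'ssh-dss']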
cfg.merge(ConfigObj(obj)) return cfg + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py index 7fed9abd..487f58f7 100644 --- a/cloudinit/config/cc_locale.py +++ b/cloudinit/config/cc_locale.py @@ -14,45 +14,48 @@ from cloudinit import util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE - frequency = PER_INSTANCE -distros = ['all'] +distros = ["all"] meta = { - 'id': 'cc_locale', - 'name': 'Locale', - 'title': 'Set system locale', - 'description': dedent( + "id": "cc_locale", + "name": "Locale", + "title": "Set system locale", + "description": dedent( """\ Configure the system locale and apply it system wide. By default use the locale specified by the datasource.""" ), - 'distros': distros, - 'examples': [ - dedent("""\ + "distros": distros, + "examples": [ + dedent( + """\ # Set the locale to ar_AE locale: ar_AE - """), - dedent("""\ + """ + ), + dedent( + """\ # Set the locale to fr_CA in /etc/alternate_path/locale locale: fr_CA locale_configfile: /etc/alternate_path/locale - """), + """ + ), ], - 'frequency': frequency, + "frequency": frequency, } schema = { - 'type': 'object', - 'properties': { - 'locale': { - 'type': 'string', - 'description': ( + "type": "object", + "properties": { + "locale": { + "type": "string", + "description": ( "The locale to set as the system's locale (e.g. ar_PS)" ), }, - 'locale_configfile': { - 'type': 'string', - 'description': ( + "locale_configfile": { + "type": "string", + "description": ( "The file in which to write the locale configuration (defaults" " to the distro's default location)" ), @@ -70,8 +73,9 @@ def handle(name, cfg, cloud, log, args): locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale()) if util.is_false(locale): - log.debug("Skipping module named %s, disabled by config: %s", - name, locale) + log.debug( + "Skipping module named %s, disabled by config: %s", name, locale + ) return validate_cloudconfig_schema(cfg, schema) @@ -80,4 +84,5 @@ def handle(name, cfg, cloud, log, args): locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile") cloud.distro.apply_locale(locale, locale_cfgfile) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index 486037d9..13ddcbe9 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -47,12 +47,12 @@ lxd-bridge will be configured accordingly. domain: """ -from cloudinit import log as logging -from cloudinit import subp -from cloudinit import util import os -distros = ['ubuntu'] +from cloudinit import log as logging +from cloudinit import subp, util + +distros = ["ubuntu"] LOG = logging.getLogger(__name__) @@ -61,36 +61,42 @@ _DEFAULT_NETWORK_NAME = "lxdbr0" def handle(name, cfg, cloud, log, args): # Get config - lxd_cfg = cfg.get('lxd') + lxd_cfg = cfg.get("lxd") if not lxd_cfg: - log.debug("Skipping module named %s, not present or disabled by cfg", - name) + log.debug( + "Skipping module named %s, not present or disabled by cfg", name + ) return if not isinstance(lxd_cfg, dict): - log.warning("lxd config must be a dictionary. found a '%s'", - type(lxd_cfg)) + log.warning( + "lxd config must be a dictionary. found a '%s'", type(lxd_cfg) + ) return # Grab the configuration - init_cfg = lxd_cfg.get('init') + init_cfg = lxd_cfg.get("init") if not isinstance(init_cfg, dict): - log.warning("lxd/init config must be a dictionary. 
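# [Editor's illustration, not part of the patch] merge_together() above
# folds successive dict/ConfigObj inputs into one config, later entries
# overriding earlier ones; its body is partially elided in the hunk, so
# this is a reconstruction of its shape, not the verbatim function:
from configobj import ConfigObj   # third-party dependency used above

def merge_together(objs):
    cfg = ConfigObj({})
    for obj in objs:
        if not obj:
            continue
        cfg.merge(obj if isinstance(obj, ConfigObj) else ConfigObj(obj))
    return cfg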
found a '%s'", - type(init_cfg)) + log.warning( + "lxd/init config must be a dictionary. found a '%s'", + type(init_cfg), + ) init_cfg = {} - bridge_cfg = lxd_cfg.get('bridge', {}) + bridge_cfg = lxd_cfg.get("bridge", {}) if not isinstance(bridge_cfg, dict): - log.warning("lxd/bridge config must be a dictionary. found a '%s'", - type(bridge_cfg)) + log.warning( + "lxd/bridge config must be a dictionary. found a '%s'", + type(bridge_cfg), + ) bridge_cfg = {} # Install the needed packages packages = [] if not subp.which("lxd"): - packages.append('lxd') + packages.append("lxd") - if init_cfg.get("storage_backend") == "zfs" and not subp.which('zfs'): - packages.append('zfsutils-linux') + if init_cfg.get("storage_backend") == "zfs" and not subp.which("zfs"): + packages.append("zfsutils-linux") if len(packages): try: @@ -102,23 +108,30 @@ def handle(name, cfg, cloud, log, args): # Set up lxd if init config is given if init_cfg: init_keys = ( - 'network_address', 'network_port', 'storage_backend', - 'storage_create_device', 'storage_create_loop', - 'storage_pool', 'trust_password') - subp.subp(['lxd', 'waitready', '--timeout=300']) - cmd = ['lxd', 'init', '--auto'] + "network_address", + "network_port", + "storage_backend", + "storage_create_device", + "storage_create_loop", + "storage_pool", + "trust_password", + ) + subp.subp(["lxd", "waitready", "--timeout=300"]) + cmd = ["lxd", "init", "--auto"] for k in init_keys: if init_cfg.get(k): - cmd.extend(["--%s=%s" % - (k.replace('_', '-'), str(init_cfg[k]))]) + cmd.extend( + ["--%s=%s" % (k.replace("_", "-"), str(init_cfg[k]))] + ) subp.subp(cmd) # Set up lxd-bridge if bridge config is given dconf_comm = "debconf-communicate" if bridge_cfg: net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) - if os.path.exists("/etc/default/lxd-bridge") \ - and subp.which(dconf_comm): + if os.path.exists("/etc/default/lxd-bridge") and subp.which( + dconf_comm + ): # Bridge configured through packaging debconf = bridge_to_debconf(bridge_cfg) @@ -126,39 +139,47 @@ def handle(name, cfg, cloud, log, args): # Update debconf database try: log.debug("Setting lxd debconf via " + dconf_comm) - data = "\n".join(["set %s %s" % (k, v) - for k, v in debconf.items()]) + "\n" - subp.subp(['debconf-communicate'], data) + data = ( + "\n".join( + ["set %s %s" % (k, v) for k, v in debconf.items()] + ) + + "\n" + ) + subp.subp(["debconf-communicate"], data) except Exception: - util.logexc(log, "Failed to run '%s' for lxd with" % - dconf_comm) + util.logexc( + log, "Failed to run '%s' for lxd with" % dconf_comm + ) # Remove the existing configuration file (forces re-generation) util.del_file("/etc/default/lxd-bridge") # Run reconfigure log.debug("Running dpkg-reconfigure for lxd") - subp.subp(['dpkg-reconfigure', 'lxd', - '--frontend=noninteractive']) + subp.subp(["dpkg-reconfigure", "lxd", "--frontend=noninteractive"]) else: # Built-in LXD bridge support cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg) maybe_cleanup_default( - net_name=net_name, did_init=bool(init_cfg), - create=bool(cmd_create), attach=bool(cmd_attach)) + net_name=net_name, + did_init=bool(init_cfg), + create=bool(cmd_create), + attach=bool(cmd_attach), + ) if cmd_create: - log.debug("Creating lxd bridge: %s" % - " ".join(cmd_create)) + log.debug("Creating lxd bridge: %s" % " ".join(cmd_create)) _lxc(cmd_create) if cmd_attach: - log.debug("Setting up default lxd bridge: %s" % - " ".join(cmd_attach)) + log.debug( + "Setting up default lxd bridge: %s" % " ".join(cmd_attach) + ) _lxc(cmd_attach) elif bridge_cfg: 
raise RuntimeError( - "Unable to configure lxd bridge without %s." + dconf_comm) + "Unable to configure lxd bridge without %s." + dconf_comm + ) def bridge_to_debconf(bridge_cfg): @@ -180,33 +201,32 @@ def bridge_to_debconf(bridge_cfg): if bridge_cfg.get("ipv4_address"): debconf["lxd/bridge-ipv4"] = "true" - debconf["lxd/bridge-ipv4-address"] = \ - bridge_cfg.get("ipv4_address") - debconf["lxd/bridge-ipv4-netmask"] = \ - bridge_cfg.get("ipv4_netmask") - debconf["lxd/bridge-ipv4-dhcp-first"] = \ - bridge_cfg.get("ipv4_dhcp_first") - debconf["lxd/bridge-ipv4-dhcp-last"] = \ - bridge_cfg.get("ipv4_dhcp_last") - debconf["lxd/bridge-ipv4-dhcp-leases"] = \ - bridge_cfg.get("ipv4_dhcp_leases") - debconf["lxd/bridge-ipv4-nat"] = \ - bridge_cfg.get("ipv4_nat", "true") + debconf["lxd/bridge-ipv4-address"] = bridge_cfg.get("ipv4_address") + debconf["lxd/bridge-ipv4-netmask"] = bridge_cfg.get("ipv4_netmask") + debconf["lxd/bridge-ipv4-dhcp-first"] = bridge_cfg.get( + "ipv4_dhcp_first" + ) + debconf["lxd/bridge-ipv4-dhcp-last"] = bridge_cfg.get( + "ipv4_dhcp_last" + ) + debconf["lxd/bridge-ipv4-dhcp-leases"] = bridge_cfg.get( + "ipv4_dhcp_leases" + ) + debconf["lxd/bridge-ipv4-nat"] = bridge_cfg.get("ipv4_nat", "true") if bridge_cfg.get("ipv6_address"): debconf["lxd/bridge-ipv6"] = "true" - debconf["lxd/bridge-ipv6-address"] = \ - bridge_cfg.get("ipv6_address") - debconf["lxd/bridge-ipv6-netmask"] = \ - bridge_cfg.get("ipv6_netmask") - debconf["lxd/bridge-ipv6-nat"] = \ - bridge_cfg.get("ipv6_nat", "false") + debconf["lxd/bridge-ipv6-address"] = bridge_cfg.get("ipv6_address") + debconf["lxd/bridge-ipv6-netmask"] = bridge_cfg.get("ipv6_netmask") + debconf["lxd/bridge-ipv6-nat"] = bridge_cfg.get( + "ipv6_nat", "false" + ) if bridge_cfg.get("domain"): debconf["lxd/bridge-domain"] = bridge_cfg.get("domain") else: - raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) + raise Exception('invalid bridge mode "%s"' % bridge_cfg.get("mode")) return debconf @@ -217,37 +237,41 @@ def bridge_to_cmd(bridge_cfg): bridge_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) cmd_create = [] - cmd_attach = ["network", "attach-profile", bridge_name, - "default", "eth0"] + cmd_attach = ["network", "attach-profile", bridge_name, "default", "eth0"] if bridge_cfg.get("mode") == "existing": return None, cmd_attach if bridge_cfg.get("mode") != "new": - raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) + raise Exception('invalid bridge mode "%s"' % bridge_cfg.get("mode")) cmd_create = ["network", "create", bridge_name] if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"): - cmd_create.append("ipv4.address=%s/%s" % - (bridge_cfg.get("ipv4_address"), - bridge_cfg.get("ipv4_netmask"))) + cmd_create.append( + "ipv4.address=%s/%s" + % (bridge_cfg.get("ipv4_address"), bridge_cfg.get("ipv4_netmask")) + ) if bridge_cfg.get("ipv4_nat", "true") == "true": cmd_create.append("ipv4.nat=true") - if bridge_cfg.get("ipv4_dhcp_first") and \ - bridge_cfg.get("ipv4_dhcp_last"): - dhcp_range = "%s-%s" % (bridge_cfg.get("ipv4_dhcp_first"), - bridge_cfg.get("ipv4_dhcp_last")) + if bridge_cfg.get("ipv4_dhcp_first") and bridge_cfg.get( + "ipv4_dhcp_last" + ): + dhcp_range = "%s-%s" % ( + bridge_cfg.get("ipv4_dhcp_first"), + bridge_cfg.get("ipv4_dhcp_last"), + ) cmd_create.append("ipv4.dhcp.ranges=%s" % dhcp_range) else: cmd_create.append("ipv4.address=none") if bridge_cfg.get("ipv6_address") and bridge_cfg.get("ipv6_netmask"): - cmd_create.append("ipv6.address=%s/%s" % - 
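# [Editor's illustration, not part of the patch] for a bridge config
# carrying an IPv4 address, bridge_to_debconf() above emits debconf
# keys of this shape (sample values invented; only keys visible in the
# hunk are shown):
bridge_cfg = {"mode": "new", "ipv4_address": "10.0.8.1",
              "ipv4_netmask": "24", "ipv4_nat": "true"}
expected = {
    "lxd/bridge-ipv4": "true",
    "lxd/bridge-ipv4-address": "10.0.8.1",
    "lxd/bridge-ipv4-netmask": "24",
    "lxd/bridge-ipv4-nat": "true",
}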
(bridge_cfg.get("ipv6_address"), - bridge_cfg.get("ipv6_netmask"))) + cmd_create.append( + "ipv6.address=%s/%s" + % (bridge_cfg.get("ipv6_address"), bridge_cfg.get("ipv6_netmask")) + ) if bridge_cfg.get("ipv6_nat", "false") == "true": cmd_create.append("ipv6.nat=true") @@ -262,14 +286,17 @@ def bridge_to_cmd(bridge_cfg): def _lxc(cmd): - env = {'LC_ALL': 'C', - 'HOME': os.environ.get('HOME', '/root'), - 'USER': os.environ.get('USER', 'root')} - subp.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env) + env = { + "LC_ALL": "C", + "HOME": os.environ.get("HOME", "/root"), + "USER": os.environ.get("USER", "root"), + } + subp.subp(["lxc"] + list(cmd) + ["--force-local"], update_env=env) -def maybe_cleanup_default(net_name, did_init, create, attach, - profile="default", nic_name="eth0"): +def maybe_cleanup_default( + net_name, did_init, create, attach, profile="default", nic_name="eth0" +): """Newer versions of lxc (3.0.1+) create a lxdbr0 network when 'lxd init --auto' is run. Older versions did not. @@ -306,4 +333,5 @@ def maybe_cleanup_default(net_name, did_init, create, attach, raise e LOG.debug(msg, nic_name, profile, fail_assume_enoent) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py index 41ea4fc9..1b0158ec 100644 --- a/cloudinit/config/cc_mcollective.py +++ b/cloudinit/config/cc_mcollective.py @@ -56,18 +56,21 @@ import io from configobj import ConfigObj from cloudinit import log as logging -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem" PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem" -SERVER_CFG = '/etc/mcollective/server.cfg' +SERVER_CFG = "/etc/mcollective/server.cfg" LOG = logging.getLogger(__name__) -def configure(config, server_cfg=SERVER_CFG, - pubcert_file=PUBCERT_FILE, pricert_file=PRICERT_FILE): +def configure( + config, + server_cfg=SERVER_CFG, + pubcert_file=PUBCERT_FILE, + pricert_file=PRICERT_FILE, +): # Read server.cfg (if it exists) values from the # original file in order to be able to mix the rest up. 
try: @@ -77,20 +80,20 @@ def configure(config, server_cfg=SERVER_CFG, if e.errno != errno.ENOENT: raise else: - LOG.debug("Did not find file %s (starting with an empty" - " config)", server_cfg) + LOG.debug( + "Did not find file %s (starting with an empty config)", + server_cfg, + ) mcollective_config = ConfigObj() for (cfg_name, cfg) in config.items(): - if cfg_name == 'public-cert': + if cfg_name == "public-cert": util.write_file(pubcert_file, cfg, mode=0o644) - mcollective_config[ - 'plugin.ssl_server_public'] = pubcert_file - mcollective_config['securityprovider'] = 'ssl' - elif cfg_name == 'private-cert': + mcollective_config["plugin.ssl_server_public"] = pubcert_file + mcollective_config["securityprovider"] = "ssl" + elif cfg_name == "private-cert": util.write_file(pricert_file, cfg, mode=0o600) - mcollective_config[ - 'plugin.ssl_server_private'] = pricert_file - mcollective_config['securityprovider'] = 'ssl' + mcollective_config["plugin.ssl_server_private"] = pricert_file + mcollective_config["securityprovider"] = "ssl" else: if isinstance(cfg, str): # Just set it in the 'main' section @@ -126,21 +129,24 @@ def configure(config, server_cfg=SERVER_CFG, def handle(name, cfg, cloud, log, _args): # If there isn't a mcollective key in the configuration don't do anything - if 'mcollective' not in cfg: - log.debug(("Skipping module named %s, " - "no 'mcollective' key in configuration"), name) + if "mcollective" not in cfg: + log.debug( + "Skipping module named %s, no 'mcollective' key in configuration", + name, + ) return - mcollective_cfg = cfg['mcollective'] + mcollective_cfg = cfg["mcollective"] # Start by installing the mcollective package ... cloud.distro.install_packages(("mcollective",)) # ... and then update the mcollective configuration - if 'conf' in mcollective_cfg: - configure(config=mcollective_cfg['conf']) + if "conf" in mcollective_cfg: + configure(config=mcollective_cfg["conf"]) # restart mcollective to handle updated config - subp.subp(['service', 'mcollective', 'restart'], capture=False) + subp.subp(["service", "mcollective", "restart"], capture=False) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py index 79bcc27d..4fafb4af 100644 --- a/cloudinit/config/cc_migrator.py +++ b/cloudinit/config/cc_migrator.py @@ -29,16 +29,14 @@ false`` in config. 
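# [Editor's illustration, not part of the patch] configure() above
# layers new settings onto the existing server.cfg through ConfigObj;
# the same round-trip in miniature (configobj is the third-party
# dependency imported above):
import io
from configobj import ConfigObj

cfg = ConfigObj(io.BytesIO(b"securityprovider = psk"))
cfg["plugin.ssl_server_public"] = "/etc/mcollective/ssl/server-public.pem"
cfg["securityprovider"] = "ssl"
out = io.BytesIO()
cfg.write(out)       # serialized bytes, ready for util.write_file()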
import os import shutil -from cloudinit import helpers -from cloudinit import util - +from cloudinit import helpers, util from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS def _migrate_canon_sems(cloud): - paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem')) + paths = (cloud.paths.get_ipath("sem"), cloud.paths.get_cpath("sem")) am_adjusted = 0 for sem_path in paths: if not sem_path or not os.path.exists(sem_path): @@ -57,12 +55,12 @@ def _migrate_canon_sems(cloud): def _migrate_legacy_sems(cloud, log): legacy_adjust = { - 'apt-update-upgrade': [ - 'apt-configure', - 'package-update-upgrade-install', + "apt-update-upgrade": [ + "apt-configure", + "package-update-upgrade-install", ], } - paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem')) + paths = (cloud.paths.get_ipath("sem"), cloud.paths.get_cpath("sem")) for sem_path in paths: if not sem_path or not os.path.exists(sem_path): continue @@ -78,8 +76,9 @@ def _migrate_legacy_sems(cloud, log): util.del_file(os.path.join(sem_path, p)) (_name, freq) = os.path.splitext(p) for m in migrate_to: - log.debug("Migrating %s => %s with the same frequency", - p, m) + log.debug( + "Migrating %s => %s with the same frequency", p, m + ) with sem_helper.lock(m, freq): pass @@ -90,8 +89,10 @@ def handle(name, cfg, cloud, log, _args): log.debug("Skipping module named %s, migration disabled", name) return sems_moved = _migrate_canon_sems(cloud) - log.debug("Migrated %s semaphore files to there canonicalized names", - sems_moved) + log.debug( + "Migrated %s semaphore files to there canonicalized names", sems_moved + ) _migrate_legacy_sems(cloud, log) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index eeb008d2..ec2e46ff 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -62,15 +62,12 @@ swap file is created. 
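# [Editor's illustration, not part of the patch] the legacy table above
# fans one retired semaphore out to its replacements: a found
# 'apt-update-upgrade.once' is removed and a lock is taken for each
# successor at the same frequency, which os.path.splitext() recovers:
import os

(_name, freq) = os.path.splitext("apt-update-upgrade.once")
# -> ('apt-update-upgrade', '.once'); migrate to 'apt-configure' and
# 'package-update-upgrade-install' with that frequency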
maxsize: """ -from string import whitespace - import logging import os import re +from string import whitespace -from cloudinit import type_utils -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, type_utils, util # Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0 DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$" @@ -105,21 +102,25 @@ def is_network_device(name): def _get_nth_partition_for_device(device_path, partition_number): - potential_suffixes = [str(partition_number), 'p%s' % (partition_number,), - '-part%s' % (partition_number,)] + potential_suffixes = [ + str(partition_number), + "p%s" % (partition_number,), + "-part%s" % (partition_number,), + ] for suffix in potential_suffixes: - potential_partition_device = '%s%s' % (device_path, suffix) + potential_partition_device = "%s%s" % (device_path, suffix) if os.path.exists(potential_partition_device): return potential_partition_device return None def _is_block_device(device_path, partition_path=None): - device_name = os.path.realpath(device_path).split('/')[-1] - sys_path = os.path.join('/sys/block/', device_name) + device_name = os.path.realpath(device_path).split("/")[-1] + sys_path = os.path.join("/sys/block/", device_name) if partition_path is not None: sys_path = os.path.join( - sys_path, os.path.realpath(partition_path).split('/')[-1]) + sys_path, os.path.realpath(partition_path).split("/")[-1] + ) return os.path.exists(sys_path) @@ -159,8 +160,9 @@ def sanitize_devname(startname, transformer, log, aliases=None): if partition_number is None: partition_path = _get_nth_partition_for_device(device_path, 1) else: - partition_path = _get_nth_partition_for_device(device_path, - partition_number) + partition_path = _get_nth_partition_for_device( + device_path, partition_number + ) if partition_path is None: return None @@ -174,12 +176,12 @@ def sanitize_devname(startname, transformer, log, aliases=None): def suggested_swapsize(memsize=None, maxsize=None, fsys=None): # make a suggestion on the size of swap for this system. 
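# [Editor's illustration, not part of the patch] partition 1 of a block
# device can be named device1, devicep1 or device-part1 depending on
# the naming scheme, hence the suffix probe above:
device_path, n = "/dev/sdb", 1
candidates = ["%s%s" % (device_path, suffix)
              for suffix in (str(n), "p%s" % n, "-part%s" % n)]
# -> ['/dev/sdb1', '/dev/sdbp1', '/dev/sdb-part1']; the first candidate
# that exists on disk is returned.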
if memsize is None: - memsize = util.read_meminfo()['total'] + memsize = util.read_meminfo()["total"] GB = 2 ** 30 sugg_max = 8 * GB - info = {'avail': 'na', 'max_in': maxsize, 'mem': memsize} + info = {"avail": "na", "max_in": maxsize, "mem": memsize} if fsys is None and maxsize is None: # set max to 8GB default if no filesystem given @@ -187,18 +189,18 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None): elif fsys: statvfs = os.statvfs(fsys) avail = statvfs.f_frsize * statvfs.f_bfree - info['avail'] = avail + info["avail"] = avail if maxsize is None: # set to 25% of filesystem space maxsize = min(int(avail / 4), sugg_max) - elif maxsize > ((avail * .9)): + elif maxsize > ((avail * 0.9)): # set to 90% of available disk space - maxsize = int(avail * .9) + maxsize = int(avail * 0.9) elif maxsize is None: maxsize = sugg_max - info['max'] = maxsize + info["max"] = maxsize formulas = [ # < 1G: swap = double memory @@ -226,7 +228,7 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None): if size is not None: size = maxsize - info['size'] = size + info["size"] = size MB = 2 ** 20 pinfo = {} @@ -236,9 +238,14 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None): else: pinfo[k] = v - LOG.debug("suggest %s swap for %s memory with '%s'" - " disk given max=%s [max=%s]'", pinfo['size'], pinfo['mem'], - pinfo['avail'], pinfo['max_in'], pinfo['max']) + LOG.debug( + "suggest %s swap for %s memory with '%s' disk given max=%s [max=%s]'", + pinfo["size"], + pinfo["mem"], + pinfo["avail"], + pinfo["max_in"], + pinfo["max"], + ) return size @@ -248,14 +255,23 @@ def create_swapfile(fname: str, size: str) -> None: errmsg = "Failed to create swapfile '%s' of size %sMB via %s: %s" def create_swap(fname, size, method): - LOG.debug("Creating swapfile in '%s' on fstype '%s' using '%s'", - fname, fstype, method) + LOG.debug( + "Creating swapfile in '%s' on fstype '%s' using '%s'", + fname, + fstype, + method, + ) if method == "fallocate": - cmd = ['fallocate', '-l', '%sM' % size, fname] + cmd = ["fallocate", "-l", "%sM" % size, fname] elif method == "dd": - cmd = ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M', - 'count=%s' % size] + cmd = [ + "dd", + "if=/dev/zero", + "of=%s" % fname, + "bs=1M", + "count=%s" % size, + ] try: subp.subp(cmd, capture=True) @@ -269,8 +285,9 @@ def create_swapfile(fname: str, size: str) -> None: fstype = util.get_mount_info(swap_dir)[1] - if (fstype == "xfs" and - util.kernel_version() < (4, 18)) or fstype == "btrfs": + if ( + fstype == "xfs" and util.kernel_version() < (4, 18) + ) or fstype == "btrfs": create_swap(fname, size, "dd") else: try: @@ -282,7 +299,7 @@ def create_swapfile(fname: str, size: str) -> None: if os.path.exists(fname): util.chmod(fname, 0o600) try: - subp.subp(['mkswap', fname]) + subp.subp(["mkswap", fname]) except subp.ProcessExecutionError: util.del_file(fname) raise @@ -297,37 +314,42 @@ def setup_swapfile(fname, size=None, maxsize=None): swap_dir = os.path.dirname(fname) if str(size).lower() == "auto": try: - memsize = util.read_meminfo()['total'] + memsize = util.read_meminfo()["total"] except IOError: LOG.debug("Not creating swap: failed to read meminfo") return util.ensure_dir(swap_dir) - size = suggested_swapsize(fsys=swap_dir, maxsize=maxsize, - memsize=memsize) + size = suggested_swapsize( + fsys=swap_dir, maxsize=maxsize, memsize=memsize + ) mibsize = str(int(size / (2 ** 20))) if not size: LOG.debug("Not creating swap: suggested size was 0") return - util.log_time(LOG.debug, msg="Setting up swap file", 
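# [Editor's illustration, not part of the patch] create_swapfile() above
# falls back from fallocate to dd where the kernel cannot swap on files
# with unwritten extents:
def pick_method(fstype, kernel_version):
    # xfs before 4.18 and btrfs need fully written blocks -> dd
    if (fstype == "xfs" and kernel_version < (4, 18)) or fstype == "btrfs":
        return "dd"
    return "fallocate"

# pick_method("btrfs", (5, 4)) -> 'dd'
# pick_method("ext4", (5, 4))  -> 'fallocate'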
func=create_swapfile, - args=[fname, mibsize]) + util.log_time( + LOG.debug, + msg="Setting up swap file", + func=create_swapfile, + args=[fname, mibsize], + ) return fname def handle_swapcfg(swapcfg): """handle the swap config, calling setup_swap if necessary. - return None or (filename, size) + return None or (filename, size) """ if not isinstance(swapcfg, dict): LOG.warning("input for swap config was not a dict.") return None - fname = swapcfg.get('filename', '/swap.img') - size = swapcfg.get('size', 0) - maxsize = swapcfg.get('maxsize', None) + fname = swapcfg.get("filename", "/swap.img") + size = swapcfg.get("size", 0) + maxsize = swapcfg.get("maxsize", None) if not (size and fname): LOG.debug("no need to setup swap") @@ -335,8 +357,10 @@ def handle_swapcfg(swapcfg): if os.path.exists(fname): if not os.path.exists("/proc/swaps"): - LOG.debug("swap file %s exists, but no /proc/swaps exists, " - "being safe", fname) + LOG.debug( + "swap file %s exists, but no /proc/swaps exists, being safe", + fname, + ) return fname try: for line in util.load_file("/proc/swaps").splitlines(): @@ -345,8 +369,9 @@ def handle_swapcfg(swapcfg): return fname LOG.debug("swap file %s exists, but not in /proc/swaps", fname) except Exception: - LOG.warning("swap file %s exists. Error reading /proc/swaps", - fname) + LOG.warning( + "swap file %s exists. Error reading /proc/swaps", fname + ) return fname try: @@ -373,8 +398,10 @@ def handle(_name, cfg, cloud, log, _args): defvals = cfg.get("mount_default_fields", defvals) # these are our default set of mounts - defmnts = [["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"], - ["swap", "none", "swap", "sw", "0", "0"]] + defmnts = [ + ["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"], + ["swap", "none", "swap", "sw", "0", "0"], + ] cfgmnt = [] if "mounts" in cfg: @@ -404,13 +431,17 @@ def handle(_name, cfg, cloud, log, _args): for i in range(len(cfgmnt)): # skip something that wasn't a list if not isinstance(cfgmnt[i], list): - log.warning("Mount option %s not a list, got a %s instead", - (i + 1), type_utils.obj_name(cfgmnt[i])) + log.warning( + "Mount option %s not a list, got a %s instead", + (i + 1), + type_utils.obj_name(cfgmnt[i]), + ) continue start = str(cfgmnt[i][0]) - sanitized = sanitize_devname(start, cloud.device_name_to_device, log, - aliases=device_aliases) + sanitized = sanitize_devname( + start, cloud.device_name_to_device, log, aliases=device_aliases + ) if sanitized != start: log.debug("changed %s => %s" % (start, sanitized)) @@ -418,8 +449,11 @@ def handle(_name, cfg, cloud, log, _args): log.debug("Ignoring nonexistent named mount %s", start) continue elif sanitized in fstab_devs: - log.info("Device %s already defined in fstab: %s", - sanitized, fstab_devs[sanitized]) + log.info( + "Device %s already defined in fstab: %s", + sanitized, + fstab_devs[sanitized], + ) continue cfgmnt[i][0] = sanitized @@ -452,8 +486,9 @@ def handle(_name, cfg, cloud, log, _args): # entry has the same device name for defmnt in defmnts: start = defmnt[0] - sanitized = sanitize_devname(start, cloud.device_name_to_device, log, - aliases=device_aliases) + sanitized = sanitize_devname( + start, cloud.device_name_to_device, log, aliases=device_aliases + ) if sanitized != start: log.debug("changed default device %s => %s" % (start, sanitized)) @@ -461,8 +496,11 @@ def handle(_name, cfg, cloud, log, _args): log.debug("Ignoring nonexistent default named mount %s", start) continue elif sanitized in fstab_devs: - log.debug("Device %s already defined in fstab: %s", - 
sanitized, fstab_devs[sanitized]) + log.debug( + "Device %s already defined in fstab: %s", + sanitized, + fstab_devs[sanitized], + ) continue defmnt[0] = sanitized @@ -474,8 +512,7 @@ def handle(_name, cfg, cloud, log, _args): break if cfgmnt_has: - log.debug(("Not including %s, already" - " previously included"), start) + log.debug("Not including %s, already previously included", start) continue cfgmnt.append(defmnt) @@ -488,7 +525,7 @@ def handle(_name, cfg, cloud, log, _args): else: actlist.append(x) - swapret = handle_swapcfg(cfg.get('swap', {})) + swapret = handle_swapcfg(cfg.get("swap", {})) if swapret: actlist.append([swapret, "none", "swap", "sw", "0", "0"]) @@ -507,10 +544,11 @@ def handle(_name, cfg, cloud, log, _args): needswap = True if line[1].startswith("/"): dirs.append(line[1]) - cc_lines.append('\t'.join(line)) + cc_lines.append("\t".join(line)) - mount_points = [v['mountpoint'] for k, v in util.mounts().items() - if 'mountpoint' in v] + mount_points = [ + v["mountpoint"] for k, v in util.mounts().items() if "mountpoint" in v + ] for d in dirs: try: util.ensure_dir(d) @@ -525,11 +563,12 @@ def handle(_name, cfg, cloud, log, _args): sadds = [WS.sub(" ", n) for n in cc_lines] sdrops = [WS.sub(" ", n) for n in fstab_removed] - sops = (["- " + drop for drop in sdrops if drop not in sadds] + - ["+ " + add for add in sadds if add not in sdrops]) + sops = ["- " + drop for drop in sdrops if drop not in sadds] + [ + "+ " + add for add in sadds if add not in sdrops + ] fstab_lines.extend(cc_lines) - contents = "%s\n" % ('\n'.join(fstab_lines)) + contents = "%s\n" % "\n".join(fstab_lines) util.write_file(FSTAB_PATH, contents) activate_cmds = [] @@ -549,7 +588,7 @@ def handle(_name, cfg, cloud, log, _args): fmt = "Activating swap and mounts with: %s" for cmd in activate_cmds: - fmt = "Activate mounts: %s:" + ' '.join(cmd) + fmt = "Activate mounts: %s:" + " ".join(cmd) try: subp.subp(cmd) log.debug(fmt, "PASS") @@ -557,4 +596,5 @@ def handle(_name, cfg, cloud, log, _args): log.warning(fmt, "FAIL") util.logexc(log, fmt, "FAIL") + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index c55d5d86..a31da9bb 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -11,124 +11,132 @@ import os from textwrap import dedent from cloudinit import log as logging -from cloudinit import temp_utils -from cloudinit import templater -from cloudinit import type_utils -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, temp_utils, templater, type_utils, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) frequency = PER_INSTANCE -NTP_CONF = '/etc/ntp.conf' +NTP_CONF = "/etc/ntp.conf" NR_POOL_SERVERS = 4 -distros = ['almalinux', 'alpine', 'centos', 'cloudlinux', 'debian', - 'eurolinux', 'fedora', 'miraclelinux', 'openEuler', 'opensuse', - 'photon', 'rhel', 'rocky', 'sles', 'ubuntu', 'virtuozzo'] +distros = [ + "almalinux", + "alpine", + "centos", + "cloudlinux", + "debian", + "eurolinux", + "fedora", + "miraclelinux", + "openEuler", + "opensuse", + "photon", + "rhel", + "rocky", + "sles", + "ubuntu", + "virtuozzo", +] NTP_CLIENT_CONFIG = { - 'chrony': { - 'check_exe': 'chronyd', - 'confpath': '/etc/chrony.conf', - 'packages': ['chrony'], - 'service_name': 'chrony', - 'template_name': 'chrony.conf.{distro}', - 'template': None, + "chrony": { + "check_exe": "chronyd", + "confpath": "/etc/chrony.conf", + 
"packages": ["chrony"], + "service_name": "chrony", + "template_name": "chrony.conf.{distro}", + "template": None, }, - 'ntp': { - 'check_exe': 'ntpd', - 'confpath': NTP_CONF, - 'packages': ['ntp'], - 'service_name': 'ntp', - 'template_name': 'ntp.conf.{distro}', - 'template': None, + "ntp": { + "check_exe": "ntpd", + "confpath": NTP_CONF, + "packages": ["ntp"], + "service_name": "ntp", + "template_name": "ntp.conf.{distro}", + "template": None, }, - 'ntpdate': { - 'check_exe': 'ntpdate', - 'confpath': NTP_CONF, - 'packages': ['ntpdate'], - 'service_name': 'ntpdate', - 'template_name': 'ntp.conf.{distro}', - 'template': None, + "ntpdate": { + "check_exe": "ntpdate", + "confpath": NTP_CONF, + "packages": ["ntpdate"], + "service_name": "ntpdate", + "template_name": "ntp.conf.{distro}", + "template": None, }, - 'systemd-timesyncd': { - 'check_exe': '/lib/systemd/systemd-timesyncd', - 'confpath': '/etc/systemd/timesyncd.conf.d/cloud-init.conf', - 'packages': [], - 'service_name': 'systemd-timesyncd', - 'template_name': 'timesyncd.conf', - 'template': None, + "systemd-timesyncd": { + "check_exe": "/lib/systemd/systemd-timesyncd", + "confpath": "/etc/systemd/timesyncd.conf.d/cloud-init.conf", + "packages": [], + "service_name": "systemd-timesyncd", + "template_name": "timesyncd.conf", + "template": None, }, } # This is Distro-specific configuration overrides of the base config DISTRO_CLIENT_CONFIG = { - 'alpine': { - 'chrony': { - 'confpath': '/etc/chrony/chrony.conf', - 'service_name': 'chronyd', + "alpine": { + "chrony": { + "confpath": "/etc/chrony/chrony.conf", + "service_name": "chronyd", }, - 'ntp': { - 'confpath': '/etc/ntp.conf', - 'packages': [], - 'service_name': 'ntpd', + "ntp": { + "confpath": "/etc/ntp.conf", + "packages": [], + "service_name": "ntpd", }, }, - 'debian': { - 'chrony': { - 'confpath': '/etc/chrony/chrony.conf', + "debian": { + "chrony": { + "confpath": "/etc/chrony/chrony.conf", }, }, - 'opensuse': { - 'chrony': { - 'service_name': 'chronyd', + "opensuse": { + "chrony": { + "service_name": "chronyd", }, - 'ntp': { - 'confpath': '/etc/ntp.conf', - 'service_name': 'ntpd', + "ntp": { + "confpath": "/etc/ntp.conf", + "service_name": "ntpd", }, - 'systemd-timesyncd': { - 'check_exe': '/usr/lib/systemd/systemd-timesyncd', + "systemd-timesyncd": { + "check_exe": "/usr/lib/systemd/systemd-timesyncd", }, }, - 'photon': { - 'chrony': { - 'service_name': 'chronyd', + "photon": { + "chrony": { + "service_name": "chronyd", }, - 'ntp': { - 'service_name': 'ntpd', - 'confpath': '/etc/ntp.conf' - }, - 'systemd-timesyncd': { - 'check_exe': '/usr/lib/systemd/systemd-timesyncd', - 'confpath': '/etc/systemd/timesyncd.conf', + "ntp": {"service_name": "ntpd", "confpath": "/etc/ntp.conf"}, + "systemd-timesyncd": { + "check_exe": "/usr/lib/systemd/systemd-timesyncd", + "confpath": "/etc/systemd/timesyncd.conf", }, }, - 'rhel': { - 'ntp': { - 'service_name': 'ntpd', + "rhel": { + "ntp": { + "service_name": "ntpd", }, - 'chrony': { - 'service_name': 'chronyd', + "chrony": { + "service_name": "chronyd", }, }, - 'sles': { - 'chrony': { - 'service_name': 'chronyd', + "sles": { + "chrony": { + "service_name": "chronyd", }, - 'ntp': { - 'confpath': '/etc/ntp.conf', - 'service_name': 'ntpd', + "ntp": { + "confpath": "/etc/ntp.conf", + "service_name": "ntpd", }, - 'systemd-timesyncd': { - 'check_exe': '/usr/lib/systemd/systemd-timesyncd', + "systemd-timesyncd": { + "check_exe": "/usr/lib/systemd/systemd-timesyncd", }, }, - 'ubuntu': { - 'chrony': { - 'confpath': '/etc/chrony/chrony.conf', + "ubuntu": 
{ + "chrony": { + "confpath": "/etc/chrony/chrony.conf", }, }, } @@ -141,10 +149,11 @@ DISTRO_CLIENT_CONFIG = { # configuration. meta = { - 'id': 'cc_ntp', - 'name': 'NTP', - 'title': 'enable and configure ntp', - 'description': dedent("""\ + "id": "cc_ntp", + "name": "NTP", + "title": "enable and configure ntp", + "description": dedent( + """\ Handle ntp configuration. If ntp is not installed on the system and ntp configuration is specified, ntp will be installed. If there is a default ntp config file in the image or one is present in the @@ -152,16 +161,20 @@ meta = { appended to the filename before any changes are made. A list of ntp pools and ntp servers can be provided under the ``ntp`` config key. If no ntp ``servers`` or ``pools`` are provided, 4 pools will be used - in the format ``{0-3}.{distro}.pool.ntp.org``."""), - 'distros': distros, - 'examples': [ - dedent("""\ + in the format ``{0-3}.{distro}.pool.ntp.org``.""" + ), + "distros": distros, + "examples": [ + dedent( + """\ # Override ntp with chrony configuration on Ubuntu ntp: enabled: true ntp_client: chrony # Uses cloud-init default chrony configuration - """), - dedent("""\ + """ + ), + dedent( + """\ # Provide a custom ntp client configuration ntp: enabled: true @@ -188,120 +201,137 @@ meta = { servers: - ntp.server.local - ntp.ubuntu.com - - 192.168.23.2""")], - 'frequency': PER_INSTANCE, + - 192.168.23.2""" + ), + ], + "frequency": PER_INSTANCE, } schema = { - 'type': 'object', - 'properties': { - 'ntp': { - 'type': ['object', 'null'], - 'properties': { - 'pools': { - 'type': 'array', - 'items': { - 'type': 'string', - 'format': 'hostname' - }, - 'uniqueItems': True, - 'description': dedent("""\ + "type": "object", + "properties": { + "ntp": { + "type": ["object", "null"], + "properties": { + "pools": { + "type": "array", + "items": {"type": "string", "format": "hostname"}, + "uniqueItems": True, + "description": dedent( + """\ List of ntp pools. If both pools and servers are empty, 4 default pool servers will be provided of the format ``{0-3}.{distro}.pool.ntp.org``. NOTE: for Alpine Linux when using the Busybox NTP client this setting will be ignored due to the limited - functionality of Busybox's ntpd.""") + functionality of Busybox's ntpd.""" + ), }, - 'servers': { - 'type': 'array', - 'items': { - 'type': 'string', - 'format': 'hostname' - }, - 'uniqueItems': True, - 'description': dedent("""\ + "servers": { + "type": "array", + "items": {"type": "string", "format": "hostname"}, + "uniqueItems": True, + "description": dedent( + """\ List of ntp servers. If both pools and servers are empty, 4 default pool servers will be provided with - the format ``{0-3}.{distro}.pool.ntp.org``.""") + the format ``{0-3}.{distro}.pool.ntp.org``.""" + ), }, - 'ntp_client': { - 'type': 'string', - 'default': 'auto', - 'description': dedent("""\ + "ntp_client": { + "type": "string", + "default": "auto", + "description": dedent( + """\ Name of an NTP client to use to configure system NTP. When unprovided or 'auto' the default client preferred by the distribution will be used. The following built-in client names can be used to override existing configuration defaults: chrony, ntp, ntpdate, - systemd-timesyncd."""), + systemd-timesyncd.""" + ), }, - 'enabled': { - 'type': 'boolean', - 'default': True, - 'description': dedent("""\ + "enabled": { + "type": "boolean", + "default": True, + "description": dedent( + """\ Attempt to enable ntp clients if set to True. 
If set to False, ntp client will not be configured or - installed"""), + installed""" + ), }, - 'config': { - 'description': dedent("""\ + "config": { + "description": dedent( + """\ Configuration settings or overrides for the - ``ntp_client`` specified."""), - 'type': ['object'], - 'properties': { - 'confpath': { - 'type': 'string', - 'description': dedent("""\ + ``ntp_client`` specified.""" + ), + "type": ["object"], + "properties": { + "confpath": { + "type": "string", + "description": dedent( + """\ The path to where the ``ntp_client`` - configuration is written."""), + configuration is written.""" + ), }, - 'check_exe': { - 'type': 'string', - 'description': dedent("""\ + "check_exe": { + "type": "string", + "description": dedent( + """\ The executable name for the ``ntp_client``. For example, ntp service ``check_exe`` is - 'ntpd' because it runs the ntpd binary."""), + 'ntpd' because it runs the ntpd binary.""" + ), }, - 'packages': { - 'type': 'array', - 'items': { - 'type': 'string', + "packages": { + "type": "array", + "items": { + "type": "string", }, - 'uniqueItems': True, - 'description': dedent("""\ + "uniqueItems": True, + "description": dedent( + """\ List of packages needed to be installed for the - selected ``ntp_client``."""), + selected ``ntp_client``.""" + ), }, - 'service_name': { - 'type': 'string', - 'description': dedent("""\ + "service_name": { + "type": "string", + "description": dedent( + """\ The systemd or sysvinit service name used to start and stop the ``ntp_client`` - service."""), + service.""" + ), }, - 'template': { - 'type': 'string', - 'description': dedent("""\ + "template": { + "type": "string", + "description": dedent( + """\ Inline template allowing users to define their own ``ntp_client`` configuration template. The value must start with '## template:jinja' to enable use of templating support. - """), + """ + ), }, }, # Don't use REQUIRED_NTP_CONFIG_KEYS to allow for override # of builtin client values. 
- 'minProperties': 1, # If we have config, define something - 'additionalProperties': False + "minProperties": 1, # If we have config, define something + "additionalProperties": False, }, }, - 'additionalProperties': False + "additionalProperties": False, } - } + }, } -REQUIRED_NTP_CONFIG_KEYS = frozenset([ - 'check_exe', 'confpath', 'packages', 'service_name']) +REQUIRED_NTP_CONFIG_KEYS = frozenset( + ["check_exe", "confpath", "packages", "service_name"] +) __doc__ = get_meta_doc(meta, schema) # Supplement python help() @@ -334,21 +364,23 @@ def select_ntp_client(ntp_client, distro): distro_cfg = distro_ntp_client_configs(distro.name) # user specified client, return its config - if ntp_client and ntp_client != 'auto': - LOG.debug('Selected NTP client "%s" via user-data configuration', - ntp_client) + if ntp_client and ntp_client != "auto": + LOG.debug( + 'Selected NTP client "%s" via user-data configuration', ntp_client + ) return distro_cfg.get(ntp_client, {}) # default to auto if unset in distro - distro_ntp_client = distro.get_option('ntp_client', 'auto') + distro_ntp_client = distro.get_option("ntp_client", "auto") clientcfg = {} if distro_ntp_client == "auto": for client in distro.preferred_ntp_clients: cfg = distro_cfg.get(client) - if subp.which(cfg.get('check_exe')): - LOG.debug('Selected NTP client "%s", already installed', - client) + if subp.which(cfg.get("check_exe")): + LOG.debug( + 'Selected NTP client "%s", already installed', client + ) clientcfg = cfg break @@ -356,11 +388,14 @@ def select_ntp_client(ntp_client, distro): client = distro.preferred_ntp_clients[0] LOG.debug( 'Selected distro preferred NTP client "%s", not yet installed', - client) + client, + ) clientcfg = distro_cfg.get(client) else: - LOG.debug('Selected NTP client "%s" via distro system config', - distro_ntp_client) + LOG.debug( + 'Selected NTP client "%s" via distro system config', + distro_ntp_client, + ) clientcfg = distro_cfg.get(distro_ntp_client, {}) return clientcfg @@ -378,7 +413,7 @@ def install_ntp_client(install_func, packages=None, check_exe="ntpd"): if subp.which(check_exe): return if packages is None: - packages = ['ntp'] + packages = ["ntp"] install_func(packages) @@ -403,25 +438,34 @@ def generate_server_names(distro): names = [] pool_distro = distro - if distro == 'sles': + if distro == "sles": # For legal reasons x.pool.sles.ntp.org does not exist, # use the opensuse pool - pool_distro = 'opensuse' - elif distro == 'alpine' or distro == 'eurolinux': + pool_distro = "opensuse" + elif distro == "alpine" or distro == "eurolinux": # Alpine-specific pool (i.e. x.alpine.pool.ntp.org) does not exist # so use general x.pool.ntp.org instead. The same applies to EuroLinux - pool_distro = '' + pool_distro = "" for x in range(0, NR_POOL_SERVERS): - names.append(".".join( - [n for n in [str(x)] + [pool_distro] + ['pool.ntp.org'] if n])) + names.append( + ".".join( + [n for n in [str(x)] + [pool_distro] + ["pool.ntp.org"] if n] + ) + ) return names -def write_ntp_config_template(distro_name, service_name=None, servers=None, - pools=None, path=None, template_fn=None, - template=None): +def write_ntp_config_template( + distro_name, + service_name=None, + servers=None, + pools=None, + path=None, + template_fn=None, + template=None, +): """Render a ntp client configuration for the specified client. @param distro_name: string. The distro class name. 
@@ -444,27 +488,30 @@ def write_ntp_config_template(distro_name, service_name=None, servers=None, if not pools: pools = [] - if (len(servers) == 0 and distro_name == 'alpine' and - service_name == 'ntpd'): + if ( + len(servers) == 0 + and distro_name == "alpine" + and service_name == "ntpd" + ): # Alpine's Busybox ntpd only understands "servers" configuration # and not "pool" configuration. servers = generate_server_names(distro_name) - LOG.debug( - 'Adding distro default ntp servers: %s', ','.join(servers)) + LOG.debug("Adding distro default ntp servers: %s", ",".join(servers)) elif len(servers) == 0 and len(pools) == 0: pools = generate_server_names(distro_name) LOG.debug( - 'Adding distro default ntp pool servers: %s', ','.join(pools)) + "Adding distro default ntp pool servers: %s", ",".join(pools) + ) if not path: - raise ValueError('Invalid value for path parameter') + raise ValueError("Invalid value for path parameter") if not template_fn and not template: - raise ValueError('Not template_fn or template provided') + raise ValueError("Not template_fn or template provided") - params = {'servers': servers, 'pools': pools} + params = {"servers": servers, "pools": pools} if template: - tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl") + tfile = temp_utils.mkstemp(prefix="template_name-", suffix=".tmpl") template_fn = tfile[1] # filepath is second item in tuple util.write_file(template_fn, content=template) @@ -487,50 +534,62 @@ def supplemental_schema_validation(ntp_config): errors = [] missing = REQUIRED_NTP_CONFIG_KEYS.difference(set(ntp_config.keys())) if missing: - keys = ', '.join(sorted(missing)) + keys = ", ".join(sorted(missing)) errors.append( - 'Missing required ntp:config keys: {keys}'.format(keys=keys)) - elif not any([ntp_config.get('template'), - ntp_config.get('template_name')]): + "Missing required ntp:config keys: {keys}".format(keys=keys) + ) + elif not any( + [ntp_config.get("template"), ntp_config.get("template_name")] + ): errors.append( - 'Either ntp:config:template or ntp:config:template_name values' - ' are required') + "Either ntp:config:template or ntp:config:template_name values" + " are required" + ) for key, value in sorted(ntp_config.items()): - keypath = 'ntp:config:' + key - if key == 'confpath': + keypath = "ntp:config:" + key + if key == "confpath": if not all([value, isinstance(value, str)]): errors.append( - 'Expected a config file path {keypath}.' - ' Found ({value})'.format(keypath=keypath, value=value)) - elif key == 'packages': + "Expected a config file path {keypath}." + " Found ({value})".format(keypath=keypath, value=value) + ) + elif key == "packages": if not isinstance(value, list): errors.append( - 'Expected a list of required package names for {keypath}.' - ' Found ({value})'.format(keypath=keypath, value=value)) - elif key in ('template', 'template_name'): + "Expected a list of required package names for {keypath}." + " Found ({value})".format(keypath=keypath, value=value) + ) + elif key in ("template", "template_name"): if value is None: # Either template or template_name can be none continue if not isinstance(value, str): errors.append( - 'Expected a string type for {keypath}.' - ' Found ({value})'.format(keypath=keypath, value=value)) + "Expected a string type for {keypath}." + " Found ({value})".format(keypath=keypath, value=value) + ) elif not isinstance(value, str): errors.append( - 'Expected a string type for {keypath}.' 
- ' Found ({value})'.format(keypath=keypath, value=value)) + "Expected a string type for {keypath}. Found ({value})".format( + keypath=keypath, value=value + ) + ) if errors: - raise ValueError(r'Invalid ntp configuration:\n{errors}'.format( - errors='\n'.join(errors))) + raise ValueError( + r"Invalid ntp configuration:\n{errors}".format( + errors="\n".join(errors) + ) + ) def handle(name, cfg, cloud, log, _args): """Enable and configure ntp.""" - if 'ntp' not in cfg: + if "ntp" not in cfg: LOG.debug( - "Skipping module named %s, not present or disabled by cfg", name) + "Skipping module named %s, not present or disabled by cfg", name + ) return - ntp_cfg = cfg['ntp'] + ntp_cfg = cfg["ntp"] if ntp_cfg is None: ntp_cfg = {} # Allow empty config which will install the package @@ -538,52 +597,61 @@ def handle(name, cfg, cloud, log, _args): if not isinstance(ntp_cfg, (dict)): raise RuntimeError( "'ntp' key existed in config, but not a dictionary type," - " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))) + " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg)) + ) validate_cloudconfig_schema(cfg, schema) # Allow users to explicitly enable/disable - enabled = ntp_cfg.get('enabled', True) + enabled = ntp_cfg.get("enabled", True) if util.is_false(enabled): LOG.debug("Skipping module named %s, disabled by cfg", name) return # Select which client is going to be used and get the configuration - ntp_client_config = select_ntp_client(ntp_cfg.get('ntp_client'), - cloud.distro) + ntp_client_config = select_ntp_client( + ntp_cfg.get("ntp_client"), cloud.distro + ) # Allow user ntp config to override distro configurations ntp_client_config = util.mergemanydict( - [ntp_client_config, ntp_cfg.get('config', {})], reverse=True) + [ntp_client_config, ntp_cfg.get("config", {})], reverse=True + ) supplemental_schema_validation(ntp_client_config) - rename_ntp_conf(confpath=ntp_client_config.get('confpath')) + rename_ntp_conf(confpath=ntp_client_config.get("confpath")) template_fn = None - if not ntp_client_config.get('template'): - template_name = ( - ntp_client_config.get('template_name').replace('{distro}', - cloud.distro.name)) + if not ntp_client_config.get("template"): + template_name = ntp_client_config.get("template_name").replace( + "{distro}", cloud.distro.name + ) template_fn = cloud.get_template_filename(template_name) if not template_fn: - msg = ('No template found, not rendering %s' % - ntp_client_config.get('template_name')) + msg = ( + "No template found, not rendering %s" + % ntp_client_config.get("template_name") + ) raise RuntimeError(msg) - write_ntp_config_template(cloud.distro.name, - service_name=ntp_client_config.get( - 'service_name'), - servers=ntp_cfg.get('servers', []), - pools=ntp_cfg.get('pools', []), - path=ntp_client_config.get('confpath'), - template_fn=template_fn, - template=ntp_client_config.get('template')) - - install_ntp_client(cloud.distro.install_packages, - packages=ntp_client_config['packages'], - check_exe=ntp_client_config['check_exe']) + write_ntp_config_template( + cloud.distro.name, + service_name=ntp_client_config.get("service_name"), + servers=ntp_cfg.get("servers", []), + pools=ntp_cfg.get("pools", []), + path=ntp_client_config.get("confpath"), + template_fn=template_fn, + template=ntp_client_config.get("template"), + ) + + install_ntp_client( + cloud.distro.install_packages, + packages=ntp_client_config["packages"], + check_exe=ntp_client_config["check_exe"], + ) try: - cloud.distro.manage_service('reload', - 
ntp_client_config.get('service_name')) + cloud.distro.manage_service( + "reload", ntp_client_config.get("service_name") + ) except subp.ProcessExecutionError as e: LOG.exception("Failed to reload/start ntp service: %s", e) raise diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py index 036baf85..14cdfab8 100644 --- a/cloudinit/config/cc_package_update_upgrade_install.py +++ b/cloudinit/config/cc_package_update_upgrade_install.py @@ -43,8 +43,7 @@ import os import time from cloudinit import log as logging -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util REBOOT_FILE = "/var/run/reboot-required" REBOOT_CMD = ["/sbin/reboot"] @@ -68,17 +67,19 @@ def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2): log.debug("Rebooted, but still running after %s seconds", int(elapsed)) # If we got here, not good elapsed = time.time() - start - raise RuntimeError(("Reboot did not happen" - " after %s seconds!") % (int(elapsed))) + raise RuntimeError( + "Reboot did not happen after %s seconds!" % (int(elapsed)) + ) def handle(_name, cfg, cloud, log, _args): # Handle the old style + new config names - update = _multi_cfg_bool_get(cfg, 'apt_update', 'package_update') - upgrade = _multi_cfg_bool_get(cfg, 'package_upgrade', 'apt_upgrade') - reboot_if_required = _multi_cfg_bool_get(cfg, 'apt_reboot_if_required', - 'package_reboot_if_required') - pkglist = util.get_cfg_option_list(cfg, 'packages', []) + update = _multi_cfg_bool_get(cfg, "apt_update", "package_update") + upgrade = _multi_cfg_bool_get(cfg, "package_upgrade", "apt_upgrade") + reboot_if_required = _multi_cfg_bool_get( + cfg, "apt_reboot_if_required", "package_reboot_if_required" + ) + pkglist = util.get_cfg_option_list(cfg, "packages", []) errors = [] if update or len(pkglist) or upgrade: @@ -109,8 +110,9 @@ def handle(_name, cfg, cloud, log, _args): reboot_fn_exists = os.path.isfile(REBOOT_FILE) if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists: try: - log.warning("Rebooting after upgrade or install per " - "%s", REBOOT_FILE) + log.warning( + "Rebooting after upgrade or install per %s", REBOOT_FILE + ) # Flush the above warning + anything else out... logging.flushLoggers(log) _fire_reboot(log) @@ -119,8 +121,10 @@ def handle(_name, cfg, cloud, log, _args): errors.append(e) if len(errors): - log.warning("%s failed with exceptions, re-raising the last one", - len(errors)) + log.warning( + "%s failed with exceptions, re-raising the last one", len(errors) + ) raise errors[-1] + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index 733c3910..cc1fe53e 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -41,22 +41,19 @@ keys to post. 
Available keys are:
         tries: 10
 """
 
-from cloudinit import templater
-from cloudinit import url_helper
-from cloudinit import util
-
+from cloudinit import templater, url_helper, util
 from cloudinit.settings import PER_INSTANCE
 
 frequency = PER_INSTANCE
 
 POST_LIST_ALL = [
-    'pub_key_dsa',
-    'pub_key_rsa',
-    'pub_key_ecdsa',
-    'pub_key_ed25519',
-    'instance_id',
-    'hostname',
-    'fqdn'
+    "pub_key_dsa",
+    "pub_key_rsa",
+    "pub_key_ecdsa",
+    "pub_key_ed25519",
+    "instance_id",
+    "hostname",
+    "fqdn",
 ]
 
 
@@ -74,48 +71,58 @@ def handle(name, cfg, cloud, log, args):
     if len(args) != 0:
         ph_cfg = util.read_conf(args[0])
     else:
-        if 'phone_home' not in cfg:
-            log.debug(("Skipping module named %s, "
-                       "no 'phone_home' configuration found"), name)
+        if "phone_home" not in cfg:
+            log.debug(
+                "Skipping module named %s, "
+                "no 'phone_home' configuration found",
+                name,
+            )
             return
-        ph_cfg = cfg['phone_home']
-
-    if 'url' not in ph_cfg:
-        log.warning(("Skipping module named %s, "
-                     "no 'url' found in 'phone_home' configuration"), name)
+        ph_cfg = cfg["phone_home"]
+
+    if "url" not in ph_cfg:
+        log.warning(
+            "Skipping module named %s, "
+            "no 'url' found in 'phone_home' configuration",
+            name,
+        )
         return
 
-    url = ph_cfg['url']
-    post_list = ph_cfg.get('post', 'all')
-    tries = ph_cfg.get('tries')
+    url = ph_cfg["url"]
+    post_list = ph_cfg.get("post", "all")
+    tries = ph_cfg.get("tries")
     try:
         tries = int(tries)
     except Exception:
         tries = 10
-        util.logexc(log, "Configuration entry 'tries' is not an integer, "
-                    "using %s instead", tries)
+        util.logexc(
+            log,
+            "Configuration entry 'tries' is not an integer, using %s instead",
+            tries,
+        )
 
     if post_list == "all":
         post_list = POST_LIST_ALL
 
     all_keys = {}
-    all_keys['instance_id'] = cloud.get_instance_id()
-    all_keys['hostname'] = cloud.get_hostname()
-    all_keys['fqdn'] = cloud.get_hostname(fqdn=True)
+    all_keys["instance_id"] = cloud.get_instance_id()
+    all_keys["hostname"] = cloud.get_hostname()
+    all_keys["fqdn"] = cloud.get_hostname(fqdn=True)
 
     pubkeys = {
-        'pub_key_dsa': '/etc/ssh/ssh_host_dsa_key.pub',
-        'pub_key_rsa': '/etc/ssh/ssh_host_rsa_key.pub',
-        'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub',
-        'pub_key_ed25519': '/etc/ssh/ssh_host_ed25519_key.pub',
+        "pub_key_dsa": "/etc/ssh/ssh_host_dsa_key.pub",
+        "pub_key_rsa": "/etc/ssh/ssh_host_rsa_key.pub",
+        "pub_key_ecdsa": "/etc/ssh/ssh_host_ecdsa_key.pub",
+        "pub_key_ed25519": "/etc/ssh/ssh_host_ed25519_key.pub",
     }
 
     for (n, path) in pubkeys.items():
         try:
             all_keys[n] = util.load_file(path)
         except Exception:
-            util.logexc(log, "%s: failed to open, can not phone home that "
-                        "data!", path)
+            util.logexc(
+                log, "%s: failed to open, can not phone home that data!", path
+            )
 
     submit_keys = {}
     for k in post_list:
@@ -123,28 +130,37 @@ def handle(name, cfg, cloud, log, args):
             submit_keys[k] = all_keys[k]
         else:
             submit_keys[k] = None
-            log.warning(("Requested key %s from 'post'"
-                         " configuration list not available"), k)
+            log.warning(
+                "Requested key %s from 'post'"
+                " configuration list not available",
+                k,
+            )
 
     # Get them ready to be posted
     real_submit_keys = {}
     for (k, v) in submit_keys.items():
         if v is None:
-            real_submit_keys[k] = 'N/A'
+            real_submit_keys[k] = "N/A"
         else:
             real_submit_keys[k] = str(v)
 
     # In case the url is parameterized
     url_params = {
-        'INSTANCE_ID': all_keys['instance_id'],
+        "INSTANCE_ID": all_keys["instance_id"],
    }
     url = templater.render_string(url, url_params)
    try:
        url_helper.read_file_or_url(
-            url, data=real_submit_keys, retries=tries, sec_between=3,
-            
ssl_details=util.fetch_ssl_details(cloud.paths)) + url, + data=real_submit_keys, + retries=tries, + sec_between=3, + ssl_details=util.fetch_ssl_details(cloud.paths), + ) except Exception: - util.logexc(log, "Failed to post phone home data to %s in %s tries", - url, tries) + util.logexc( + log, "Failed to post phone home data to %s in %s tries", url, tries + ) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 5780a7e9..d4eb68c0 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -58,9 +58,8 @@ import re import subprocess import time +from cloudinit import subp, util from cloudinit.settings import PER_INSTANCE -from cloudinit import subp -from cloudinit import util frequency = PER_INSTANCE @@ -75,9 +74,9 @@ def givecmdline(pid): # PID COMM ARGS # 1 init /bin/init -- if util.is_FreeBSD(): - (output, _err) = subp.subp(['procstat', '-c', str(pid)]) + (output, _err) = subp.subp(["procstat", "-c", str(pid)]) line = output.splitlines()[1] - m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line) + m = re.search(r"\d+ (\w|\.|-)+\s+(/\w.+)", line) return m.group(2) else: return util.load_file("/proc/%s/cmdline" % pid) @@ -106,8 +105,9 @@ def check_condition(cond, log=None): return False else: if log: - log.warning(pre + "unexpected exit %s. " % ret + - "do not apply change.") + log.warning( + pre + "unexpected exit %s. " % ret + "do not apply change." + ) return False except Exception as e: if log: @@ -138,16 +138,24 @@ def handle(_name, cfg, cloud, log, _args): devnull_fp = open(os.devnull, "w") - log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args))) + log.debug("After pid %s ends, will execute: %s" % (mypid, " ".join(args))) - util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log, - condition, execmd, [args, devnull_fp]) + util.fork_cb( + run_after_pid_gone, + mypid, + cmdline, + timeout, + log, + condition, + execmd, + [args, devnull_fp], + ) def load_power_state(cfg, distro): # returns a tuple of shutdown_command, timeout # shutdown_command is None if no config found - pstate = cfg.get('power_state') + pstate = cfg.get("power_state") if pstate is None: return (None, None, None) @@ -155,22 +163,25 @@ def load_power_state(cfg, distro): if not isinstance(pstate, dict): raise TypeError("power_state is not a dict.") - modes_ok = ['halt', 'poweroff', 'reboot'] + modes_ok = ["halt", "poweroff", "reboot"] mode = pstate.get("mode") if mode not in distro.shutdown_options_map: raise TypeError( - "power_state[mode] required, must be one of: %s. found: '%s'." % - (','.join(modes_ok), mode)) + "power_state[mode] required, must be one of: %s. found: '%s'." + % (",".join(modes_ok), mode) + ) - args = distro.shutdown_command(mode=mode, - delay=pstate.get("delay", "now"), - message=pstate.get("message")) + args = distro.shutdown_command( + mode=mode, + delay=pstate.get("delay", "now"), + message=pstate.get("message"), + ) try: - timeout = float(pstate.get('timeout', 30.0)) + timeout = float(pstate.get("timeout", 30.0)) except ValueError as e: raise ValueError( - "failed to convert timeout '%s' to float." % pstate['timeout'] + "failed to convert timeout '%s' to float." 
% pstate["timeout"] ) from e condition = pstate.get("condition", True) @@ -186,8 +197,12 @@ def doexit(sysexit): def execmd(exe_args, output=None, data_in=None): ret = 1 try: - proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE, - stdout=output, stderr=subprocess.STDOUT) + proc = subprocess.Popen( + exe_args, + stdin=subprocess.PIPE, + stdout=output, + stderr=subprocess.STDOUT, + ) proc.communicate(data_in) ret = proc.returncode except Exception: @@ -230,7 +245,7 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args): except Exception as e: fatal("Unexpected Exception: %s" % e) - time.sleep(.25) + time.sleep(0.25) if not msg: fatal("Unexpected error in run_after_pid_gone") @@ -246,4 +261,5 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args): func(*args) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index dc20fc44..f51f49bc 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -108,23 +108,20 @@ key (by default the agent will execute with the ``--test`` flag). import os import socket -import yaml from io import StringIO -from cloudinit import helpers -from cloudinit import subp -from cloudinit import temp_utils -from cloudinit import util -from cloudinit import url_helper +import yaml -AIO_INSTALL_URL = 'https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh' # noqa: E501 -PUPPET_AGENT_DEFAULT_ARGS = ['--test'] +from cloudinit import helpers, subp, temp_utils, url_helper, util +AIO_INSTALL_URL = "https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh" # noqa: E501 +PUPPET_AGENT_DEFAULT_ARGS = ["--test"] -class PuppetConstants(object): - def __init__(self, puppet_conf_file, puppet_ssl_dir, - csr_attributes_path, log): +class PuppetConstants(object): + def __init__( + self, puppet_conf_file, puppet_ssl_dir, csr_attributes_path, log + ): self.conf_path = puppet_conf_file self.ssl_dir = puppet_ssl_dir self.ssl_cert_dir = os.path.join(puppet_ssl_dir, "certs") @@ -134,18 +131,27 @@ class PuppetConstants(object): def _autostart_puppet(log): # Set puppet to automatically start - if os.path.exists('/etc/default/puppet'): - subp.subp(['sed', '-i', - '-e', 's/^START=.*/START=yes/', - '/etc/default/puppet'], capture=False) - elif os.path.exists('/bin/systemctl'): - subp.subp(['/bin/systemctl', 'enable', 'puppet.service'], - capture=False) - elif os.path.exists('/sbin/chkconfig'): - subp.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False) + if os.path.exists("/etc/default/puppet"): + subp.subp( + [ + "sed", + "-i", + "-e", + "s/^START=.*/START=yes/", + "/etc/default/puppet", + ], + capture=False, + ) + elif os.path.exists("/bin/systemctl"): + subp.subp( + ["/bin/systemctl", "enable", "puppet.service"], capture=False + ) + elif os.path.exists("/sbin/chkconfig"): + subp.subp(["/sbin/chkconfig", "puppet", "on"], capture=False) else: - log.warning(("Sorry we do not know how to enable" - " puppet services on this system")) + log.warning( + "Sorry we do not know how to enable puppet services on this system" + ) def get_config_value(puppet_bin, setting): @@ -153,12 +159,13 @@ def get_config_value(puppet_bin, setting): :param puppet_bin: path to puppet binary :param setting: setting to query """ - out, _ = subp.subp([puppet_bin, 'config', 'print', setting]) + out, _ = subp.subp([puppet_bin, "config", "print", setting]) return out.rstrip() -def install_puppet_aio(url=AIO_INSTALL_URL, version=None, - collection=None, cleanup=True): 
+def install_puppet_aio( + url=AIO_INSTALL_URL, version=None, collection=None, cleanup=True +): """Install puppet-agent from the puppetlabs repositories using the one-shot shell script @@ -169,62 +176,70 @@ def install_puppet_aio(url=AIO_INSTALL_URL, version=None, """ args = [] if version is not None: - args = ['-v', version] + args = ["-v", version] if collection is not None: - args += ['-c', collection] + args += ["-c", collection] # Purge puppetlabs repos after installation if cleanup: - args += ['--cleanup'] + args += ["--cleanup"] content = url_helper.readurl(url=url, retries=5).contents # Use tmpdir over tmpfile to avoid 'text file busy' on execute with temp_utils.tempdir(needs_exe=True) as tmpd: - tmpf = os.path.join(tmpd, 'puppet-install') + tmpf = os.path.join(tmpd, "puppet-install") util.write_file(tmpf, content, mode=0o700) return subp.subp([tmpf] + args, capture=False) def handle(name, cfg, cloud, log, _args): # If there isn't a puppet key in the configuration don't do anything - if 'puppet' not in cfg: - log.debug(("Skipping module named %s," - " no 'puppet' configuration found"), name) + if "puppet" not in cfg: + log.debug( + "Skipping module named %s, no 'puppet' configuration found", name + ) return - puppet_cfg = cfg['puppet'] + puppet_cfg = cfg["puppet"] # Start by installing the puppet package if necessary... - install = util.get_cfg_option_bool(puppet_cfg, 'install', True) - version = util.get_cfg_option_str(puppet_cfg, 'version', None) - collection = util.get_cfg_option_str(puppet_cfg, 'collection', None) + install = util.get_cfg_option_bool(puppet_cfg, "install", True) + version = util.get_cfg_option_str(puppet_cfg, "version", None) + collection = util.get_cfg_option_str(puppet_cfg, "collection", None) install_type = util.get_cfg_option_str( - puppet_cfg, 'install_type', 'packages') - cleanup = util.get_cfg_option_bool(puppet_cfg, 'cleanup', True) - run = util.get_cfg_option_bool(puppet_cfg, 'exec', default=False) - start_puppetd = util.get_cfg_option_bool(puppet_cfg, - 'start_service', - default=True) + puppet_cfg, "install_type", "packages" + ) + cleanup = util.get_cfg_option_bool(puppet_cfg, "cleanup", True) + run = util.get_cfg_option_bool(puppet_cfg, "exec", default=False) + start_puppetd = util.get_cfg_option_bool( + puppet_cfg, "start_service", default=True + ) aio_install_url = util.get_cfg_option_str( - puppet_cfg, 'aio_install_url', default=AIO_INSTALL_URL) + puppet_cfg, "aio_install_url", default=AIO_INSTALL_URL + ) # AIO and distro packages use different paths - if install_type == 'aio': - puppet_user = 'root' - puppet_bin = '/opt/puppetlabs/bin/puppet' - puppet_package = 'puppet-agent' + if install_type == "aio": + puppet_user = "root" + puppet_bin = "/opt/puppetlabs/bin/puppet" + puppet_package = "puppet-agent" else: # default to 'packages' - puppet_user = 'puppet' - puppet_bin = 'puppet' - puppet_package = 'puppet' + puppet_user = "puppet" + puppet_bin = "puppet" + puppet_package = "puppet" package_name = util.get_cfg_option_str( - puppet_cfg, 'package_name', puppet_package) + puppet_cfg, "package_name", puppet_package + ) if not install and version: - log.warning(("Puppet install set to false but version supplied," - " doing nothing.")) + log.warning( + "Puppet install set to false but version supplied, doing nothing." 
+ ) elif install: - log.debug(("Attempting to install puppet %s from %s"), - version if version else 'latest', install_type) + log.debug( + "Attempting to install puppet %s from %s", + version if version else "latest", + install_type, + ) if install_type == "packages": cloud.distro.install_packages((package_name, version)) @@ -235,17 +250,21 @@ def handle(name, cfg, cloud, log, _args): run = False conf_file = util.get_cfg_option_str( - puppet_cfg, 'conf_file', get_config_value(puppet_bin, 'config')) + puppet_cfg, "conf_file", get_config_value(puppet_bin, "config") + ) ssl_dir = util.get_cfg_option_str( - puppet_cfg, 'ssl_dir', get_config_value(puppet_bin, 'ssldir')) + puppet_cfg, "ssl_dir", get_config_value(puppet_bin, "ssldir") + ) csr_attributes_path = util.get_cfg_option_str( - puppet_cfg, 'csr_attributes_path', - get_config_value(puppet_bin, 'csr_attributes')) + puppet_cfg, + "csr_attributes_path", + get_config_value(puppet_bin, "csr_attributes"), + ) p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log) # ... and then update the puppet configuration - if 'conf' in puppet_cfg: + if "conf" in puppet_cfg: # Add all sections from the conf object to puppet.conf contents = util.load_file(p_constants.conf_path) # Create object for reading puppet.conf values @@ -254,30 +273,31 @@ def handle(name, cfg, cloud, log, _args): # mix the rest up. First clean them up # (TODO(harlowja) is this really needed??) cleaned_lines = [i.lstrip() for i in contents.splitlines()] - cleaned_contents = '\n'.join(cleaned_lines) + cleaned_contents = "\n".join(cleaned_lines) # Move to puppet_config.read_file when dropping py2.7 puppet_config.read_file( - StringIO(cleaned_contents), - source=p_constants.conf_path) - for (cfg_name, cfg) in puppet_cfg['conf'].items(): + StringIO(cleaned_contents), source=p_constants.conf_path + ) + for (cfg_name, cfg) in puppet_cfg["conf"].items(): # Cert configuration is a special case # Dump the puppetserver ca certificate in the correct place - if cfg_name == 'ca_cert': + if cfg_name == "ca_cert": # Puppet ssl sub-directory isn't created yet # Create it with the proper permissions and ownership util.ensure_dir(p_constants.ssl_dir, 0o771) - util.chownbyname(p_constants.ssl_dir, puppet_user, 'root') + util.chownbyname(p_constants.ssl_dir, puppet_user, "root") util.ensure_dir(p_constants.ssl_cert_dir) - util.chownbyname(p_constants.ssl_cert_dir, puppet_user, 'root') + util.chownbyname(p_constants.ssl_cert_dir, puppet_user, "root") util.write_file(p_constants.ssl_cert_path, cfg) - util.chownbyname(p_constants.ssl_cert_path, - puppet_user, 'root') + util.chownbyname( + p_constants.ssl_cert_path, puppet_user, "root" + ) else: # Iterate through the config items, we'll use ConfigParser.set # to overwrite or create new items as needed for (o, v) in cfg.items(): - if o == 'certname': + if o == "certname": # Expand %f as the fqdn # TODO(harlowja) should this use the cloud fqdn?? 
v = v.replace("%f", socket.getfqdn()) @@ -288,14 +308,16 @@ def handle(name, cfg, cloud, log, _args): puppet_config.set(cfg_name, o, v) # We got all our config as wanted we'll rename # the previous puppet.conf and create our new one - util.rename(p_constants.conf_path, "%s.old" - % (p_constants.conf_path)) + util.rename( + p_constants.conf_path, "%s.old" % (p_constants.conf_path) + ) util.write_file(p_constants.conf_path, puppet_config.stringify()) - if 'csr_attributes' in puppet_cfg: - util.write_file(p_constants.csr_attributes_path, - yaml.dump(puppet_cfg['csr_attributes'], - default_flow_style=False)) + if "csr_attributes" in puppet_cfg: + util.write_file( + p_constants.csr_attributes_path, + yaml.dump(puppet_cfg["csr_attributes"], default_flow_style=False), + ) # Set it up so it autostarts if start_puppetd: @@ -303,18 +325,21 @@ def handle(name, cfg, cloud, log, _args): # Run the agent if needed if run: - log.debug('Running puppet-agent') - cmd = [puppet_bin, 'agent'] - if 'exec_args' in puppet_cfg: - cmd_args = puppet_cfg['exec_args'] + log.debug("Running puppet-agent") + cmd = [puppet_bin, "agent"] + if "exec_args" in puppet_cfg: + cmd_args = puppet_cfg["exec_args"] if isinstance(cmd_args, (list, tuple)): cmd.extend(cmd_args) elif isinstance(cmd_args, str): cmd.extend(cmd_args.split()) else: - log.warning("Unknown type %s provided for puppet" - " 'exec_args' expected list, tuple," - " or string", type(cmd_args)) + log.warning( + "Unknown type %s provided for puppet" + " 'exec_args' expected list, tuple," + " or string", + type(cmd_args), + ) cmd.extend(PUPPET_AGENT_DEFAULT_ARGS) else: cmd.extend(PUPPET_AGENT_DEFAULT_ARGS) @@ -322,6 +347,7 @@ def handle(name, cfg, cloud, log, _args): if start_puppetd: # Start puppetd - subp.subp(['service', 'puppet', 'start'], capture=False) + subp.subp(["service", "puppet", "start"], capture=False) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_refresh_rmc_and_interface.py b/cloudinit/config/cc_refresh_rmc_and_interface.py index d5e0ecb2..87be5348 100644 --- a/cloudinit/config/cc_refresh_rmc_and_interface.py +++ b/cloudinit/config/cc_refresh_rmc_and_interface.py @@ -34,20 +34,18 @@ This module handles """ +import errno + from cloudinit import log as logging +from cloudinit import netinfo, subp, util from cloudinit.settings import PER_ALWAYS -from cloudinit import util -from cloudinit import subp -from cloudinit import netinfo - -import errno frequency = PER_ALWAYS LOG = logging.getLogger(__name__) # Ensure that /opt/rsct/bin has been added to standard PATH of the # distro. The symlink to rmcctrl is /usr/sbin/rsct/bin/rmcctrl . -RMCCTRL = 'rmcctrl' +RMCCTRL = "rmcctrl" def handle(name, _cfg, _cloud, _log, _args): @@ -56,10 +54,11 @@ def handle(name, _cfg, _cloud, _log, _args): return LOG.debug( - 'Making the IPv6 up explicitly. ' - 'Ensuring IPv6 interface is not being handled by NetworkManager ' - 'and it is restarted to re-establish the communication with ' - 'the hypervisor') + "Making the IPv6 up explicitly. 
" + "Ensuring IPv6 interface is not being handled by NetworkManager " + "and it is restarted to re-establish the communication with " + "the hypervisor" + ) ifaces = find_ipv6_ifaces() @@ -80,7 +79,7 @@ def find_ipv6_ifaces(): ifaces = [] for iface, data in info.items(): if iface == "lo": - LOG.debug('Skipping localhost interface') + LOG.debug("Skipping localhost interface") if len(data.get("ipv4", [])) != 0: # skip this interface, as it has ipv4 addrs continue @@ -92,16 +91,16 @@ def refresh_ipv6(interface): # IPv6 interface is explicitly brought up, subsequent to which the # RMC services are restarted to re-establish the communication with # the hypervisor. - subp.subp(['ip', 'link', 'set', interface, 'down']) - subp.subp(['ip', 'link', 'set', interface, 'up']) + subp.subp(["ip", "link", "set", interface, "down"]) + subp.subp(["ip", "link", "set", interface, "up"]) def sysconfig_path(iface): - return '/etc/sysconfig/network-scripts/ifcfg-' + iface + return "/etc/sysconfig/network-scripts/ifcfg-" + iface def restart_network_manager(): - subp.subp(['systemctl', 'restart', 'NetworkManager']) + subp.subp(["systemctl", "restart", "NetworkManager"]) def disable_ipv6(iface_file): @@ -113,12 +112,11 @@ def disable_ipv6(iface_file): contents = util.load_file(iface_file) except IOError as e: if e.errno == errno.ENOENT: - LOG.debug("IPv6 interface file %s does not exist\n", - iface_file) + LOG.debug("IPv6 interface file %s does not exist\n", iface_file) else: raise e - if 'IPV6INIT' not in contents: + if "IPV6INIT" not in contents: LOG.debug("Interface file %s did not have IPV6INIT", iface_file) return @@ -135,11 +133,12 @@ def disable_ipv6(iface_file): def search(contents): # Search for any NM_CONTROLLED or IPV6 lines in IPv6 interface file. - return( - contents.startswith("IPV6ADDR") or - contents.startswith("IPADDR6") or - contents.startswith("IPV6INIT") or - contents.startswith("NM_CONTROLLED")) + return ( + contents.startswith("IPV6ADDR") + or contents.startswith("IPADDR6") + or contents.startswith("IPV6INIT") + or contents.startswith("NM_CONTROLLED") + ) def refresh_rmc(): @@ -152,8 +151,8 @@ def refresh_rmc(): # until the subsystem and all resource managers are stopped. # -s : start Resource Monitoring & Control subsystem. try: - subp.subp([RMCCTRL, '-z']) - subp.subp([RMCCTRL, '-s']) + subp.subp([RMCCTRL, "-z"]) + subp.subp([RMCCTRL, "-s"]) except Exception: - util.logexc(LOG, 'Failed to refresh the RMC subsystem.') + util.logexc(LOG, "Failed to refresh the RMC subsystem.") raise diff --git a/cloudinit/config/cc_reset_rmc.py b/cloudinit/config/cc_reset_rmc.py index 1cd72774..3b929903 100644 --- a/cloudinit/config/cc_reset_rmc.py +++ b/cloudinit/config/cc_reset_rmc.py @@ -39,9 +39,8 @@ Prerequisite of using this module is to install RSCT packages. import os from cloudinit import log as logging +from cloudinit import subp, util from cloudinit.settings import PER_INSTANCE -from cloudinit import util -from cloudinit import subp frequency = PER_INSTANCE @@ -49,34 +48,34 @@ frequency = PER_INSTANCE # The symlink for RMCCTRL and RECFGCT are # /usr/sbin/rsct/bin/rmcctrl and # /usr/sbin/rsct/install/bin/recfgct respectively. 
-RSCT_PATH = '/opt/rsct/install/bin'
-RMCCTRL = 'rmcctrl'
-RECFGCT = 'recfgct'
+RSCT_PATH = "/opt/rsct/install/bin"
+RMCCTRL = "rmcctrl"
+RECFGCT = "recfgct"
 
 LOG = logging.getLogger(__name__)
 
-NODE_ID_FILE = '/etc/ct_node_id'
+NODE_ID_FILE = "/etc/ct_node_id"
 
 
 def handle(name, _cfg, cloud, _log, _args):
     # Ensure the node id is generated only once, during the first boot
-    if cloud.datasource.platform_type == 'none':
-        LOG.debug('Skipping creation of new ct_node_id node')
+    if cloud.datasource.platform_type == "none":
+        LOG.debug("Skipping creation of new ct_node_id node")
         return
 
     if not os.path.isdir(RSCT_PATH):
         LOG.debug("module disabled, RSCT_PATH not present")
         return
 
-    orig_path = os.environ.get('PATH')
+    orig_path = os.environ.get("PATH")
+
     try:
         add_path(orig_path)
         reset_rmc()
     finally:
         if orig_path:
-            os.environ['PATH'] = orig_path
+            os.environ["PATH"] = orig_path
         else:
-            del os.environ['PATH']
+            del os.environ["PATH"]
 
 
 def reconfigure_rsct_subsystems():
@@ -88,17 +87,17 @@ def reconfigure_rsct_subsystems():
         LOG.debug(out.strip())
         return out
     except subp.ProcessExecutionError:
-        util.logexc(LOG, 'Failed to reconfigure the RSCT subsystems.')
+        util.logexc(LOG, "Failed to reconfigure the RSCT subsystems.")
         raise
 
 
 def get_node_id():
     try:
         fp = util.load_file(NODE_ID_FILE)
-        node_id = fp.split('\n')[0]
+        node_id = fp.split("\n")[0]
         return node_id
     except Exception:
-        util.logexc(LOG, 'Failed to get node ID from file %s.' % NODE_ID_FILE)
+        util.logexc(LOG, "Failed to get node ID from file %s." % NODE_ID_FILE)
         raise
 
 
@@ -107,25 +106,25 @@ def add_path(orig_path):
     # So that cloud-init automatically finds and
     # runs RECFGCT to create new node_id.
     suff = ":" + orig_path if orig_path else ""
-    os.environ['PATH'] = RSCT_PATH + suff
-    return os.environ['PATH']
+    os.environ["PATH"] = RSCT_PATH + suff
+    return os.environ["PATH"]
 
 
 def rmcctrl():
     # Stop the RMC subsystem and all resource managers so that we can make
     # some changes to it
     try:
-        return subp.subp([RMCCTRL, '-z'])
+        return subp.subp([RMCCTRL, "-z"])
     except Exception:
-        util.logexc(LOG, 'Failed to stop the RMC subsystem.')
+        util.logexc(LOG, "Failed to stop the RMC subsystem.")
         raise
 
 
 def reset_rmc():
-    LOG.debug('Attempting to reset RMC.')
+    LOG.debug("Attempting to reset RMC.")
 
     node_id_before = get_node_id()
-    LOG.debug('Node ID at beginning of module: %s', node_id_before)
+    LOG.debug("Node ID at beginning of module: %s", node_id_before)
 
     # Stop the RMC subsystem and all resource managers so that we can make
     # some changes to it
@@ -133,11 +132,11 @@ def reset_rmc():
     reconfigure_rsct_subsystems()
 
     node_id_after = get_node_id()
-    LOG.debug('Node ID at end of module: %s', node_id_after)
+    LOG.debug("Node ID at end of module: %s", node_id_after)
 
     # Check if new node ID is generated or not
     # by comparing old and new node ID
     if node_id_after == node_id_before:
-        msg = 'New node ID did not get generated.'
+        msg = "New node ID did not get generated."
         LOG.error(msg)
         raise Exception(msg)
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 00bb7ae7..b009c392 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -13,21 +13,21 @@ import os
 import stat
 from textwrap import dedent
 
+from cloudinit import subp, util
 from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
 from cloudinit.settings import PER_ALWAYS
-from cloudinit import subp
-from cloudinit import util
 
 NOBLOCK = "noblock"
 
 frequency = PER_ALWAYS
-distros = ['all']
+distros = ["all"]
 
 meta = {
-    'id': 'cc_resizefs',
-    'name': 'Resizefs',
-    'title': 'Resize filesystem',
-    'description': dedent("""\
+    "id": "cc_resizefs",
+    "name": "Resizefs",
+    "title": "Resize filesystem",
+    "description": dedent(
+        """\
         Resize a filesystem to use all available space on partition. This
         module is useful along with ``cc_growpart`` and will ensure that if
         the root partition has been resized the root filesystem will be
         resized along with it. By default, ``cc_resizefs`` will resize the
         root partition and will block the boot process while the resize
         command is running. Optionally, the resize operation can be performed
         in the background while cloud-init continues running modules. This
         can be enabled by setting ``resize_rootfs`` to ``true``. This module can be
-        disabled altogether by setting ``resize_rootfs`` to ``false``."""),
-    'distros': distros,
-    'examples': [
-        'resize_rootfs: false # disable root filesystem resize operation'],
-    'frequency': PER_ALWAYS,
+        disabled altogether by setting ``resize_rootfs`` to ``false``."""
+    ),
+    "distros": distros,
+    "examples": [
+        "resize_rootfs: false # disable root filesystem resize operation"
+    ],
+    "frequency": PER_ALWAYS,
 }
 
 schema = {
-    'type': 'object',
-    'properties': {
-        'resize_rootfs': {
-            'enum': [True, False, NOBLOCK],
-            'description': dedent("""\
-                Whether to resize the root partition. Default: 'true'""")
+    "type": "object",
+    "properties": {
+        "resize_rootfs": {
+            "enum": [True, False, NOBLOCK],
+            "description": dedent(
+                """\
+                Whether to resize the root partition. Default: 'true'"""
+            ),
         }
-    }
+    },
 }
 
 __doc__ = get_meta_doc(meta, schema)  # Supplement python help()
@@ -63,32 +67,38 @@ def _resize_btrfs(mount_point, devpth):
     # Use a subvolume that is not ro to trick the resize operation to do the
     # "right" thing. The use of ".snapshot" is specific to "snapper" a generic
     # solution would be to walk the subvolumes and find a rw mounted subvolume.
- if (not util.mount_is_read_write(mount_point) and - os.path.isdir("%s/.snapshots" % mount_point)): - return ('btrfs', 'filesystem', 'resize', 'max', - '%s/.snapshots' % mount_point) + if not util.mount_is_read_write(mount_point) and os.path.isdir( + "%s/.snapshots" % mount_point + ): + return ( + "btrfs", + "filesystem", + "resize", + "max", + "%s/.snapshots" % mount_point, + ) else: - return ('btrfs', 'filesystem', 'resize', 'max', mount_point) + return ("btrfs", "filesystem", "resize", "max", mount_point) def _resize_ext(mount_point, devpth): - return ('resize2fs', devpth) + return ("resize2fs", devpth) def _resize_xfs(mount_point, devpth): - return ('xfs_growfs', mount_point) + return ("xfs_growfs", mount_point) def _resize_ufs(mount_point, devpth): - return ('growfs', '-y', mount_point) + return ("growfs", "-y", mount_point) def _resize_zfs(mount_point, devpth): - return ('zpool', 'online', '-e', mount_point, devpth) + return ("zpool", "online", "-e", mount_point, devpth) def _resize_hammer2(mount_point, devpth): - return ('hammer2', 'growfs', mount_point) + return ("hammer2", "growfs", mount_point) def _can_skip_resize_ufs(mount_point, devpth): @@ -100,7 +110,7 @@ def _can_skip_resize_ufs(mount_point, devpth): # growfs exits with 1 for almost all cases up to this one. # This means we can't just use rcs=[0, 1] as subp parameter: try: - subp.subp(['growfs', '-N', devpth]) + subp.subp(["growfs", "-N", devpth]) except subp.ProcessExecutionError as e: if e.stderr.startswith(skip_start) and skip_contain in e.stderr: # This FS is already at the desired size @@ -114,17 +124,15 @@ def _can_skip_resize_ufs(mount_point, devpth): # for multiple filesystem types if possible, e.g. one command for # ext2, ext3 and ext4. RESIZE_FS_PREFIXES_CMDS = [ - ('btrfs', _resize_btrfs), - ('ext', _resize_ext), - ('xfs', _resize_xfs), - ('ufs', _resize_ufs), - ('zfs', _resize_zfs), - ('hammer2', _resize_hammer2), + ("btrfs", _resize_btrfs), + ("ext", _resize_ext), + ("xfs", _resize_xfs), + ("ufs", _resize_ufs), + ("zfs", _resize_zfs), + ("hammer2", _resize_hammer2), ] -RESIZE_FS_PRECHECK_CMDS = { - 'ufs': _can_skip_resize_ufs -} +RESIZE_FS_PRECHECK_CMDS = {"ufs": _can_skip_resize_ufs} def can_skip_resize(fs_type, resize_what, devpth): @@ -148,52 +156,66 @@ def maybe_get_writable_device_path(devpath, info, log): container = util.is_container() # Ensure the path is a block device. - if (devpath == "/dev/root" and not os.path.exists(devpath) and - not container): + if ( + devpath == "/dev/root" + and not os.path.exists(devpath) + and not container + ): devpath = util.rootdev_from_cmdline(util.get_cmdline()) if devpath is None: log.warning("Unable to find device '/dev/root'") return None log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath) - if devpath == 'overlayroot': + if devpath == "overlayroot": log.debug("Not attempting to resize devpath '%s': %s", devpath, info) return None # FreeBSD zpool can also just use gpt/