author     James Falcon <james.falcon@canonical.com>   2021-12-15 20:16:38 -0600
committer  GitHub <noreply@github.com>                 2021-12-15 19:16:38 -0700
commit     bae9b11da9ed7dd0b16fe5adeaf4774b7cc628cf (patch)
tree       1fbb3269fc87e39832e3286ef42eefd2b23fcd44 /tests
parent     2bcf4fa972fde686c2e3141c58e640640b44dd00 (diff)
Adopt Black and isort (SC-700) (#1157)
Applied Black and isort, fixed any linting issues, updated tox.ini
and CI.
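A tree-wide formatting change like this is normally enforced through pinned tool versions so that developers and CI produce identical output. Since tox.ini itself falls outside the 'tests' filter of this diff, the snippet below is only a minimal sketch of the kind of tox environment such a change adds; the environment name and version pins are assumptions, not taken from this commit:

    # hypothetical tox.ini additions (environment name and pins assumed)
    [testenv:do_format]
    deps =
        black==21.12b0
        isort==5.10.1
    commands =
        isort tests/ --profile black
        black tests/

A matching CI job would run the same tools with --check --diff so the build fails whenever a file is not formatted.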
Diffstat (limited to 'tests')
207 files changed, 28027 insertions, 20103 deletions
diff --git a/tests/integration_tests/__init__.py b/tests/integration_tests/__init__.py
index e1d4cd28..81f9b02f 100644
--- a/tests/integration_tests/__init__.py
+++ b/tests/integration_tests/__init__.py
@@ -7,6 +7,8 @@ def random_mac_address() -> str:
     The MAC address will have a 1 in its least significant bit, indicating it
     to be a locally administered address.
     """
-    return "02:00:00:%02x:%02x:%02x" % (random.randint(0, 255),
-                                        random.randint(0, 255),
-                                        random.randint(0, 255))
+    return "02:00:00:%02x:%02x:%02x" % (
+        random.randint(0, 255),
+        random.randint(0, 255),
+        random.randint(0, 255),
+    )
diff --git a/tests/integration_tests/bugs/test_gh570.py b/tests/integration_tests/bugs/test_gh570.py
index 534cfb9a..ddc74503 100644
--- a/tests/integration_tests/bugs/test_gh570.py
+++ b/tests/integration_tests/bugs/test_gh570.py
@@ -4,9 +4,10 @@ Test that we can add optional vendor-data to the seedfrom file in a
 NoCloud environment
 """
-from tests.integration_tests.instances import IntegrationInstance
 import pytest

+from tests.integration_tests.instances import IntegrationInstance
+
 VENDOR_DATA = """\
 #cloud-config
 runcmd:
@@ -19,7 +20,7 @@ runcmd:
 @pytest.mark.lxd_container
 @pytest.mark.lxd_vm
 def test_nocloud_seedfrom_vendordata(client: IntegrationInstance):
-    seed_dir = '/var/tmp/test_seed_dir'
+    seed_dir = "/var/tmp/test_seed_dir"
     result = client.execute(
         "mkdir {seed_dir} && "
         "touch {seed_dir}/user-data && "
@@ -30,10 +31,10 @@ def test_nocloud_seedfrom_vendordata(client: IntegrationInstance):
     assert result.return_code == 0

     client.write_to_file(
-        '{}/vendor-data'.format(seed_dir),
+        "{}/vendor-data".format(seed_dir),
         VENDOR_DATA,
     )
-    client.execute('cloud-init clean --logs')
+    client.execute("cloud-init clean --logs")
     client.restart()
-    assert client.execute('cloud-init status').ok
-    assert 'seeded_vendordata_test_file' in client.execute('ls /var/tmp')
+    assert client.execute("cloud-init status").ok
+    assert "seeded_vendordata_test_file" in client.execute("ls /var/tmp")
diff --git a/tests/integration_tests/bugs/test_gh626.py b/tests/integration_tests/bugs/test_gh626.py
index dba01b34..7c720143 100644
--- a/tests/integration_tests/bugs/test_gh626.py
+++ b/tests/integration_tests/bugs/test_gh626.py
@@ -11,7 +11,6 @@ from tests.integration_tests import random_mac_address
 from tests.integration_tests.clouds import ImageSpecification
 from tests.integration_tests.instances import IntegrationInstance

-
 MAC_ADDRESS = random_mac_address()
 NETWORK_CONFIG = """\
 version: 2
@@ -21,7 +20,9 @@ ethernets:
     wakeonlan: true
     match:
       macaddress: {}
-""".format(MAC_ADDRESS)
+""".format(
+    MAC_ADDRESS
+)

 EXPECTED_ENI_END = """\
 iface eth0 inet dhcp
@@ -31,17 +32,19 @@ iface eth0 inet dhcp
 @pytest.mark.sru_2020_11
 @pytest.mark.lxd_container
 @pytest.mark.lxd_vm
-@pytest.mark.lxd_config_dict({
-    'user.network-config': NETWORK_CONFIG,
-    "volatile.eth0.hwaddr": MAC_ADDRESS,
-})
+@pytest.mark.lxd_config_dict(
+    {
+        "user.network-config": NETWORK_CONFIG,
+        "volatile.eth0.hwaddr": MAC_ADDRESS,
+    }
+)
 def test_wakeonlan(client: IntegrationInstance):
-    if ImageSpecification.from_os_image().release == 'xenial':
-        eni = client.execute('cat /etc/network/interfaces.d/50-cloud-init.cfg')
+    if ImageSpecification.from_os_image().release == "xenial":
+        eni = client.execute("cat /etc/network/interfaces.d/50-cloud-init.cfg")
         assert eni.endswith(EXPECTED_ENI_END)
         return

-    netplan_cfg = client.execute('cat /etc/netplan/50-cloud-init.yaml')
+    netplan_cfg = client.execute("cat /etc/netplan/50-cloud-init.yaml")
     netplan_yaml = yaml.safe_load(netplan_cfg)
-    assert 'wakeonlan' in netplan_yaml['network']['ethernets']['eth0']
-    assert netplan_yaml['network']['ethernets']['eth0']['wakeonlan'] is True
+    assert "wakeonlan" in netplan_yaml["network"]["ethernets"]["eth0"]
+    assert netplan_yaml["network"]["ethernets"]["eth0"]["wakeonlan"] is True
diff --git a/tests/integration_tests/bugs/test_gh632.py b/tests/integration_tests/bugs/test_gh632.py
index f3702a2e..c7a897c6 100644
--- a/tests/integration_tests/bugs/test_gh632.py
+++ b/tests/integration_tests/bugs/test_gh632.py
@@ -14,18 +14,20 @@ from tests.integration_tests.util import verify_clean_log
 @pytest.mark.lxd_vm
 def test_datasource_rbx_no_stacktrace(client: IntegrationInstance):
     client.write_to_file(
-        '/etc/cloud/cloud.cfg.d/90_dpkg.cfg',
-        'datasource_list: [ RbxCloud, NoCloud ]\n',
+        "/etc/cloud/cloud.cfg.d/90_dpkg.cfg",
+        "datasource_list: [ RbxCloud, NoCloud ]\n",
     )
     client.write_to_file(
-        '/etc/cloud/ds-identify.cfg',
-        'policy: enabled\n',
+        "/etc/cloud/ds-identify.cfg",
+        "policy: enabled\n",
     )
-    client.execute('cloud-init clean --logs')
+    client.execute("cloud-init clean --logs")
     client.restart()

-    log = client.read_from_file('/var/log/cloud-init.log')
+    log = client.read_from_file("/var/log/cloud-init.log")
     verify_clean_log(log)
-    assert 'Failed to load metadata and userdata' not in log
-    assert ("Getting data from <class 'cloudinit.sources.DataSourceRbxCloud."
-            "DataSourceRbxCloud'> failed") not in log
+    assert "Failed to load metadata and userdata" not in log
+    assert (
+        "Getting data from <class 'cloudinit.sources.DataSourceRbxCloud."
+        "DataSourceRbxCloud'> failed" not in log
+    )
diff --git a/tests/integration_tests/bugs/test_gh668.py b/tests/integration_tests/bugs/test_gh668.py
index ce57052e..95edb48d 100644
--- a/tests/integration_tests/bugs/test_gh668.py
+++ b/tests/integration_tests/bugs/test_gh668.py
@@ -10,7 +10,6 @@ import pytest
 from tests.integration_tests import random_mac_address
 from tests.integration_tests.instances import IntegrationInstance

-
 DESTINATION_IP = "172.16.0.10"
 GATEWAY_IP = "10.0.0.100"
 MAC_ADDRESS = random_mac_address()
@@ -26,17 +25,21 @@ ethernets:
         via: {}
     match:
       macaddress: {}
-""".format(DESTINATION_IP, GATEWAY_IP, MAC_ADDRESS)
+""".format(
+    DESTINATION_IP, GATEWAY_IP, MAC_ADDRESS
+)

 EXPECTED_ROUTE = "{} via {}".format(DESTINATION_IP, GATEWAY_IP)


 @pytest.mark.lxd_container
 @pytest.mark.lxd_vm
-@pytest.mark.lxd_config_dict({
-    "user.network-config": NETWORK_CONFIG,
-    "volatile.eth0.hwaddr": MAC_ADDRESS,
-})
+@pytest.mark.lxd_config_dict(
+    {
+        "user.network-config": NETWORK_CONFIG,
+        "volatile.eth0.hwaddr": MAC_ADDRESS,
+    }
+)
 @pytest.mark.lxd_use_exec
 def test_static_route_to_host(client: IntegrationInstance):
     route = client.execute("ip route | grep {}".format(DESTINATION_IP))
diff --git a/tests/integration_tests/bugs/test_gh671.py b/tests/integration_tests/bugs/test_gh671.py
index 5e90cdda..15f204ee 100644
--- a/tests/integration_tests/bugs/test_gh671.py
+++ b/tests/integration_tests/bugs/test_gh671.py
@@ -11,13 +11,13 @@ import pytest

 from tests.integration_tests.clouds import IntegrationCloud

-OLD_PASSWORD = 'DoIM33tTheComplexityRequirements!??'
-NEW_PASSWORD = 'DoIM33tTheComplexityRequirementsNow!??'
+OLD_PASSWORD = "DoIM33tTheComplexityRequirements!??"
+NEW_PASSWORD = "DoIM33tTheComplexityRequirementsNow!??"
 def _check_password(instance, unhashed_password):
-    shadow_password = instance.execute('getent shadow ubuntu').split(':')[1]
-    salt = shadow_password.rsplit('$', 1)[0]
+    shadow_password = instance.execute("getent shadow ubuntu").split(":")[1]
+    salt = shadow_password.rsplit("$", 1)[0]
     hashed_password = crypt.crypt(unhashed_password, salt)
     assert shadow_password == hashed_password

@@ -26,29 +26,28 @@ def _check_password(instance, unhashed_password):
 @pytest.mark.sru_2020_11
 def test_update_default_password(setup_image, session_cloud: IntegrationCloud):
     os_profile = {
-        'os_profile': {
-            'admin_password': '',
-            'linux_configuration': {
-                'disable_password_authentication': False
-            }
+        "os_profile": {
+            "admin_password": "",
+            "linux_configuration": {"disable_password_authentication": False},
         }
     }
-    os_profile['os_profile']['admin_password'] = OLD_PASSWORD
-    instance1 = session_cloud.launch(launch_kwargs={'vm_params': os_profile})
+    os_profile["os_profile"]["admin_password"] = OLD_PASSWORD
+    instance1 = session_cloud.launch(launch_kwargs={"vm_params": os_profile})

     _check_password(instance1, OLD_PASSWORD)

     snapshot_id = instance1.cloud.cloud_instance.snapshot(
-        instance1.instance,
-        delete_provisioned_user=False
+        instance1.instance, delete_provisioned_user=False
     )

-    os_profile['os_profile']['admin_password'] = NEW_PASSWORD
+    os_profile["os_profile"]["admin_password"] = NEW_PASSWORD
     try:
-        with session_cloud.launch(launch_kwargs={
-            'image_id': snapshot_id,
-            'vm_params': os_profile,
-        }) as instance2:
+        with session_cloud.launch(
+            launch_kwargs={
+                "image_id": snapshot_id,
+                "vm_params": os_profile,
+            }
+        ) as instance2:
             _check_password(instance2, NEW_PASSWORD)
     finally:
         session_cloud.cloud_instance.delete_image(snapshot_id)
diff --git a/tests/integration_tests/bugs/test_gh868.py b/tests/integration_tests/bugs/test_gh868.py
index 1119d461..a62e8b36 100644
--- a/tests/integration_tests/bugs/test_gh868.py
+++ b/tests/integration_tests/bugs/test_gh868.py
@@ -4,7 +4,6 @@ import pytest
 from tests.integration_tests.instances import IntegrationInstance
 from tests.integration_tests.util import verify_clean_log

-
 USERDATA = """\
 #cloud-config
 chef:
@@ -24,5 +23,5 @@ chef:
 @pytest.mark.lxd_vm
 @pytest.mark.user_data(USERDATA)
 def test_chef_license(client: IntegrationInstance):
-    log = client.read_from_file('/var/log/cloud-init.log')
+    log = client.read_from_file("/var/log/cloud-init.log")
     verify_clean_log(log)
diff --git a/tests/integration_tests/bugs/test_lp1813396.py b/tests/integration_tests/bugs/test_lp1813396.py
index 27d41c2b..451a9972 100644
--- a/tests/integration_tests/bugs/test_lp1813396.py
+++ b/tests/integration_tests/bugs/test_lp1813396.py
@@ -8,7 +8,6 @@ import pytest
 from tests.integration_tests.instances import IntegrationInstance
 from tests.integration_tests.util import verify_ordered_items_in_text

-
 USER_DATA = """\
 #cloud-config
 apt:
@@ -23,7 +22,7 @@ apt:
 @pytest.mark.sru_2020_11
 @pytest.mark.user_data(USER_DATA)
 def test_gpg_no_tty(client: IntegrationInstance):
-    log = client.read_from_file('/var/log/cloud-init.log')
+    log = client.read_from_file("/var/log/cloud-init.log")
     to_verify = [
         "Running command ['gpg', '--no-tty', "
         "'--keyserver=keyserver.ubuntu.com', '--recv-keys', 'E4D304DF'] "
diff --git a/tests/integration_tests/bugs/test_lp1835584.py b/tests/integration_tests/bugs/test_lp1835584.py
index 732f2179..a800eab4 100644
--- a/tests/integration_tests/bugs/test_lp1835584.py
+++ b/tests/integration_tests/bugs/test_lp1835584.py
@@ -31,12 +31,9 @@ import re

 import pytest

-from tests.integration_tests.instances import IntegrationAzureInstance
-from tests.integration_tests.clouds import (
-    ImageSpecification, IntegrationCloud
-)
+from tests.integration_tests.clouds import ImageSpecification, IntegrationCloud
 from tests.integration_tests.conftest import get_validated_source
-
+from tests.integration_tests.instances import IntegrationAzureInstance

 IMG_AZURE_UBUNTU_PRO_FIPS_BIONIC = (
     "Canonical:0001-com-ubuntu-pro-bionic-fips:pro-fips-18_04:18.04.202010201"
 )
@@ -44,14 +41,12 @@ IMG_AZURE_UBUNTU_PRO_FIPS_BIONIC = (

 def _check_iid_insensitive_across_kernel_upgrade(
-    instance: IntegrationAzureInstance
+    instance: IntegrationAzureInstance,
 ):
     uuid = instance.read_from_file("/sys/class/dmi/id/product_uuid")
-    assert uuid.isupper(), (
-        "Expected uppercase UUID on Ubuntu FIPS image {}".format(
-            uuid
-        )
-    )
+    assert (
+        uuid.isupper()
+    ), "Expected uppercase UUID on Ubuntu FIPS image {}".format(uuid)
     orig_kernel = instance.execute("uname -r").strip()
     assert "azure-fips" in orig_kernel
     result = instance.execute("apt-get update")
@@ -80,7 +75,7 @@ def _check_iid_insensitive_across_kernel_upgrade(
 @pytest.mark.azure
 @pytest.mark.sru_next
 def test_azure_kernel_upgrade_case_insensitive_uuid(
-    session_cloud: IntegrationCloud
+    session_cloud: IntegrationCloud,
 ):
     cfg_image_spec = ImageSpecification.from_os_image()
     if (cfg_image_spec.os, cfg_image_spec.release) != ("ubuntu", "bionic"):
diff --git a/tests/integration_tests/bugs/test_lp1886531.py b/tests/integration_tests/bugs/test_lp1886531.py
index 6dd61222..d56ca320 100644
--- a/tests/integration_tests/bugs/test_lp1886531.py
+++ b/tests/integration_tests/bugs/test_lp1886531.py
@@ -13,7 +13,6 @@ import pytest

 from tests.integration_tests.util import verify_clean_log

-
 USER_DATA = """\
 #cloud-config
 bootcmd:
@@ -22,7 +21,6 @@ bootcmd:


 class TestLp1886531:
-
     @pytest.mark.user_data(USER_DATA)
     def test_lp1886531(self, client):
         log_content = client.read_from_file("/var/log/cloud-init.log")
diff --git a/tests/integration_tests/bugs/test_lp1897099.py b/tests/integration_tests/bugs/test_lp1897099.py
index 27c8927f..876a2887 100644
--- a/tests/integration_tests/bugs/test_lp1897099.py
+++ b/tests/integration_tests/bugs/test_lp1897099.py
@@ -7,7 +7,6 @@ https://bugs.launchpad.net/cloud-init/+bug/1897099

 import pytest

-
 USER_DATA = """\
 #cloud-config
 bootcmd:
@@ -21,11 +20,11 @@ swap:

 @pytest.mark.sru_2020_11
 @pytest.mark.user_data(USER_DATA)
-@pytest.mark.no_container('Containers cannot configure swap')
+@pytest.mark.no_container("Containers cannot configure swap")
 def test_fallocate_fallback(client):
-    log = client.read_from_file('/var/log/cloud-init.log')
-    assert '/swap.img' in client.execute('cat /proc/swaps')
-    assert '/swap.img' in client.execute('cat /etc/fstab')
-    assert 'fallocate swap creation failed, will attempt with dd' in log
+    log = client.read_from_file("/var/log/cloud-init.log")
+    assert "/swap.img" in client.execute("cat /proc/swaps")
+    assert "/swap.img" in client.execute("cat /etc/fstab")
+    assert "fallocate swap creation failed, will attempt with dd" in log
     assert "Running command ['dd', 'if=/dev/zero', 'of=/swap.img'" in log
-    assert 'SUCCESS: config-mounts ran successfully' in log
+    assert "SUCCESS: config-mounts ran successfully" in log
diff --git a/tests/integration_tests/bugs/test_lp1898997.py b/tests/integration_tests/bugs/test_lp1898997.py
index 909bc690..115bd34f 100644
--- a/tests/integration_tests/bugs/test_lp1898997.py
+++ b/tests/integration_tests/bugs/test_lp1898997.py
@@ -33,13 +33,17 @@ ethernets:
     match:
       macaddress: {}
   version: 2
-""".format(MAC_ADDRESS)
+""".format(
+    MAC_ADDRESS
+)


-@pytest.mark.lxd_config_dict({
-    "user.network-config": NETWORK_CONFIG,
-    "volatile.eth0.hwaddr": MAC_ADDRESS,
-})
+@pytest.mark.lxd_config_dict(
+    {
+        "user.network-config": NETWORK_CONFIG,
+        "volatile.eth0.hwaddr": MAC_ADDRESS,
+    }
+)
 @pytest.mark.lxd_vm
 @pytest.mark.lxd_use_exec
 @pytest.mark.not_bionic
diff --git a/tests/integration_tests/bugs/test_lp1900837.py b/tests/integration_tests/bugs/test_lp1900837.py
index fcc2b751..3df10883 100644
--- a/tests/integration_tests/bugs/test_lp1900837.py
+++ b/tests/integration_tests/bugs/test_lp1900837.py
@@ -23,7 +23,7 @@ class TestLogPermissionsNotResetOnReboot:

         # Reboot
         client.restart()
-        assert client.execute('cloud-init status').ok
+        assert client.execute("cloud-init status").ok

         # Check that permissions are not reset on reboot
         assert "600" == _get_log_perms(client)
diff --git a/tests/integration_tests/bugs/test_lp1901011.py b/tests/integration_tests/bugs/test_lp1901011.py
index 2b47f0a8..7de8bd77 100644
--- a/tests/integration_tests/bugs/test_lp1901011.py
+++ b/tests/integration_tests/bugs/test_lp1901011.py
@@ -10,12 +10,16 @@ from tests.integration_tests.clouds import IntegrationCloud


 @pytest.mark.azure
-@pytest.mark.parametrize('instance_type,is_ephemeral', [
-    ('Standard_DS1_v2', True),
-    ('Standard_D2s_v4', False),
-])
-def test_ephemeral(instance_type, is_ephemeral,
-                   session_cloud: IntegrationCloud, setup_image):
+@pytest.mark.parametrize(
+    "instance_type,is_ephemeral",
+    [
+        ("Standard_DS1_v2", True),
+        ("Standard_D2s_v4", False),
+    ],
+)
+def test_ephemeral(
+    instance_type, is_ephemeral, session_cloud: IntegrationCloud, setup_image
+):
     if is_ephemeral:
         expected_log = (
             "Ephemeral resource disk '/dev/disk/cloud/azure_resource' exists. "
@@ -29,30 +33,35 @@ def test_ephemeral(instance_type, is_ephemeral,
         )

     with session_cloud.launch(
-        launch_kwargs={'instance_type': instance_type}
+        launch_kwargs={"instance_type": instance_type}
     ) as client:
         # Verify log file
-        log = client.read_from_file('/var/log/cloud-init.log')
+        log = client.read_from_file("/var/log/cloud-init.log")
         assert expected_log in log

         # Verify devices
-        dev_links = client.execute('ls /dev/disk/cloud')
-        assert 'azure_root' in dev_links
-        assert 'azure_root-part1' in dev_links
+        dev_links = client.execute("ls /dev/disk/cloud")
+        assert "azure_root" in dev_links
+        assert "azure_root-part1" in dev_links
         if is_ephemeral:
-            assert 'azure_resource' in dev_links
-            assert 'azure_resource-part1' in dev_links
+            assert "azure_resource" in dev_links
+            assert "azure_resource-part1" in dev_links

         # Verify mounts
-        blks = client.execute('lsblk -pPo NAME,TYPE,MOUNTPOINT')
+        blks = client.execute("lsblk -pPo NAME,TYPE,MOUNTPOINT")
         root_device = client.execute(
-            'realpath /dev/disk/cloud/azure_root-part1'
+            "realpath /dev/disk/cloud/azure_root-part1"
+        )
+        assert (
+            'NAME="{}" TYPE="part" MOUNTPOINT="/"'.format(root_device) in blks
         )
-        assert 'NAME="{}" TYPE="part" MOUNTPOINT="/"'.format(
-            root_device) in blks
         if is_ephemeral:
             ephemeral_device = client.execute(
-                'realpath /dev/disk/cloud/azure_resource-part1'
+                "realpath /dev/disk/cloud/azure_resource-part1"
+            )
+            assert (
+                'NAME="{}" TYPE="part" MOUNTPOINT="/mnt"'.format(
+                    ephemeral_device
+                )
+                in blks
             )
-            assert 'NAME="{}" TYPE="part" MOUNTPOINT="/mnt"'.format(
-                ephemeral_device) in blks
diff --git a/tests/integration_tests/bugs/test_lp1910835.py b/tests/integration_tests/bugs/test_lp1910835.py
index 87f92d5e..ddd996f9 100644
--- a/tests/integration_tests/bugs/test_lp1910835.py
+++ b/tests/integration_tests/bugs/test_lp1910835.py
@@ -19,7 +19,6 @@ will match.
 """
 import pytest

-
 USER_DATA_TMPL = """\
 #cloud-config
 ssh_authorized_keys:
diff --git a/tests/integration_tests/bugs/test_lp1912844.py b/tests/integration_tests/bugs/test_lp1912844.py
index efafae50..55511ed2 100644
--- a/tests/integration_tests/bugs/test_lp1912844.py
+++ b/tests/integration_tests/bugs/test_lp1912844.py
@@ -51,7 +51,9 @@ vlans:
     id: 200
     link: ovs-br
     mtu: 1500
-""".format(MAC_ADDRESS)
+""".format(
+    MAC_ADDRESS
+)

 SETUP_USER_DATA = """\
diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py
index dee2adff..d4670bac 100644
--- a/tests/integration_tests/clouds.py
+++ b/tests/integration_tests/clouds.py
@@ -1,17 +1,17 @@
 # This file is part of cloud-init. See LICENSE file for license information.
-from abc import ABC, abstractmethod
 import datetime
 import logging
 import os.path
 import random
 import string
+from abc import ABC, abstractmethod
 from uuid import UUID

 from pycloudlib import (
     EC2,
     GCE,
-    Azure,
     OCI,
+    Azure,
     LXDContainer,
     LXDVirtualMachine,
     Openstack,
@@ -19,14 +19,15 @@ from pycloudlib import (
 from pycloudlib.lxd.instance import LXDInstance

 import cloudinit
-from cloudinit.subp import subp, ProcessExecutionError
+from cloudinit.subp import ProcessExecutionError, subp
 from tests.integration_tests import integration_settings
 from tests.integration_tests.instances import (
+    IntegrationAzureInstance,
     IntegrationEc2Instance,
     IntegrationGceInstance,
-    IntegrationAzureInstance,
-    IntegrationInstance,
-    IntegrationOciInstance,
+    IntegrationInstance,
     IntegrationLxdInstance,
+    IntegrationOciInstance,
 )
 from tests.integration_tests.util import emit_dots_on_travis

@@ -36,7 +37,7 @@ except ImportError:
     pass


-log = logging.getLogger('integration_testing')
+log = logging.getLogger("integration_testing")


 def _get_ubuntu_series() -> list:
@@ -149,41 +150,48 @@ class IntegrationCloud(ABC):
         pycloudlib_instance = self.cloud_instance.launch(**launch_kwargs)
         return pycloudlib_instance

-    def launch(self, user_data=None, launch_kwargs=None,
-               settings=integration_settings, **kwargs):
+    def launch(
+        self,
+        user_data=None,
+        launch_kwargs=None,
+        settings=integration_settings,
+        **kwargs
+    ):
         if launch_kwargs is None:
             launch_kwargs = {}
         if self.settings.EXISTING_INSTANCE_ID:
             log.info(
-                'Not launching instance due to EXISTING_INSTANCE_ID. '
-                'Instance id: %s', self.settings.EXISTING_INSTANCE_ID)
+                "Not launching instance due to EXISTING_INSTANCE_ID. "
+                "Instance id: %s",
+                self.settings.EXISTING_INSTANCE_ID,
+            )
             self.instance = self.cloud_instance.get_instance(
                 self.settings.EXISTING_INSTANCE_ID
             )
             return self.instance
         default_launch_kwargs = {
-            'image_id': self.image_id,
-            'user_data': user_data,
+            "image_id": self.image_id,
+            "user_data": user_data,
         }
         launch_kwargs = {**default_launch_kwargs, **launch_kwargs}
         log.info(
             "Launching instance with launch_kwargs:\n%s",
-            "\n".join("{}={}".format(*item) for item in launch_kwargs.items())
+            "\n".join("{}={}".format(*item) for item in launch_kwargs.items()),
         )

         with emit_dots_on_travis():
             pycloudlib_instance = self._perform_launch(launch_kwargs, **kwargs)
-        log.info('Launched instance: %s', pycloudlib_instance)
+        log.info("Launched instance: %s", pycloudlib_instance)
         instance = self.get_instance(pycloudlib_instance, settings)
-        if launch_kwargs.get('wait', True):
+        if launch_kwargs.get("wait", True):
             # If we aren't waiting, we can't rely on command execution here
             log.info(
-                'cloud-init version: %s',
-                instance.execute("cloud-init --version")
+                "cloud-init version: %s",
+                instance.execute("cloud-init --version"),
             )
             serial = instance.execute("grep serial /etc/cloud/build.info")
             if serial:
-                log.info('image serial: %s', serial.split()[1])
+                log.info("image serial: %s", serial.split()[1])
         return instance

     def get_instance(self, cloud_instance, settings=integration_settings):
@@ -199,66 +207,70 @@ class IntegrationCloud(ABC):
         if self.snapshot_id:
             if self.settings.KEEP_IMAGE:
                 log.info(
-                    'NOT deleting snapshot image created for this testrun '
-                    'because KEEP_IMAGE is True: %s', self.snapshot_id)
+                    "NOT deleting snapshot image created for this testrun "
+                    "because KEEP_IMAGE is True: %s",
+                    self.snapshot_id,
+                )
             else:
                 log.info(
-                    'Deleting snapshot image created for this testrun: %s',
-                    self.snapshot_id
+                    "Deleting snapshot image created for this testrun: %s",
+                    self.snapshot_id,
                 )
                 self.cloud_instance.delete_image(self.snapshot_id)


 class Ec2Cloud(IntegrationCloud):
-    datasource = 'ec2'
+    datasource = "ec2"
     integration_instance_cls = IntegrationEc2Instance

     def _get_cloud_instance(self):
-        return EC2(tag='ec2-integration-test')
+        return EC2(tag="ec2-integration-test")


 class GceCloud(IntegrationCloud):
-    datasource = 'gce'
+    datasource = "gce"
     integration_instance_cls = IntegrationGceInstance

     def _get_cloud_instance(self):
         return GCE(
-            tag='gce-integration-test',
+            tag="gce-integration-test",
         )


 class AzureCloud(IntegrationCloud):
-    datasource = 'azure'
+    datasource = "azure"
     integration_instance_cls = IntegrationAzureInstance

     def _get_cloud_instance(self):
-        return Azure(tag='azure-integration-test')
+        return Azure(tag="azure-integration-test")

     def destroy(self):
         if self.settings.KEEP_INSTANCE:
             log.info(
-                'NOT deleting resource group because KEEP_INSTANCE is true '
-                'and deleting resource group would also delete instance. '
-                'Instance and resource group must both be manually deleted.'
+                "NOT deleting resource group because KEEP_INSTANCE is true "
+                "and deleting resource group would also delete instance. "
+                "Instance and resource group must both be manually deleted."
             )
         else:
             self.cloud_instance.delete_resource_group()


 class OciCloud(IntegrationCloud):
-    datasource = 'oci'
+    datasource = "oci"
     integration_instance_cls = IntegrationOciInstance

     def _get_cloud_instance(self):
         if not integration_settings.ORACLE_AVAILABILITY_DOMAIN:
             raise Exception(
-                'ORACLE_AVAILABILITY_DOMAIN must be set to a valid '
-                'availability domain. If using the oracle CLI, '
-                'try `oci iam availability-domain list`'
+                "ORACLE_AVAILABILITY_DOMAIN must be set to a valid "
+                "availability domain. If using the oracle CLI, "
+                "try `oci iam availability-domain list`"
             )

         return OCI(
-            tag='oci-integration-test',
-            availability_domain=integration_settings.ORACLE_AVAILABILITY_DOMAIN
+            tag="oci-integration-test",
+            availability_domain=(
+                integration_settings.ORACLE_AVAILABILITY_DOMAIN,
+            ),
         )


@@ -277,38 +289,42 @@ class _LxdIntegrationCloud(IntegrationCloud):
     def _mount_source(instance: LXDInstance):
         cloudinit_path = cloudinit.__path__[0]
         mounts = [
-            (cloudinit_path, '/usr/lib/python3/dist-packages/cloudinit'),
-            (os.path.join(cloudinit_path, '..', 'config', 'cloud.cfg.d'),
-             '/etc/cloud/cloud.cfg.d'),
-            (os.path.join(cloudinit_path, '..', 'templates'),
-             '/etc/cloud/templates'),
+            (cloudinit_path, "/usr/lib/python3/dist-packages/cloudinit"),
+            (
+                os.path.join(cloudinit_path, "..", "config", "cloud.cfg.d"),
+                "/etc/cloud/cloud.cfg.d",
+            ),
+            (
+                os.path.join(cloudinit_path, "..", "templates"),
+                "/etc/cloud/templates",
+            ),
         ]
         for (n, (source_path, target_path)) in enumerate(mounts):
             format_variables = {
-                'name': instance.name,
-                'source_path': os.path.realpath(source_path),
-                'container_path': target_path,
-                'idx': n,
+                "name": instance.name,
+                "source_path": os.path.realpath(source_path),
+                "container_path": target_path,
+                "idx": n,
             }
             log.info(
-                'Mounting source %(source_path)s directly onto LXD'
-                ' container/VM named %(name)s at %(container_path)s',
-                format_variables
+                "Mounting source %(source_path)s directly onto LXD"
+                " container/VM named %(name)s at %(container_path)s",
+                format_variables,
             )
             command = (
-                'lxc config device add {name} host-cloud-init-{idx} disk '
-                'source={source_path} '
-                'path={container_path}'
+                "lxc config device add {name} host-cloud-init-{idx} disk "
+                "source={source_path} "
+                "path={container_path}"
             ).format(**format_variables)
             subp(command.split())

     def _perform_launch(self, launch_kwargs, **kwargs):
-        launch_kwargs['inst_type'] = launch_kwargs.pop('instance_type', None)
-        wait = launch_kwargs.pop('wait', True)
-        release = launch_kwargs.pop('image_id')
+        launch_kwargs["inst_type"] = launch_kwargs.pop("instance_type", None)
+        wait = launch_kwargs.pop("wait", True)
+        release = launch_kwargs.pop("image_id")
         try:
-            profile_list = launch_kwargs['profile_list']
+            profile_list = launch_kwargs["profile_list"]
         except KeyError:
             profile_list = self._get_or_set_profile_list(release)

@@ -317,52 +333,53 @@ class _LxdIntegrationCloud(IntegrationCloud):
             random.choices(string.ascii_lowercase + string.digits, k=8)
         )
         pycloudlib_instance = self.cloud_instance.init(
-            launch_kwargs.pop('name', default_name),
+            launch_kwargs.pop("name", default_name),
             release,
             profile_list=profile_list,
             **launch_kwargs
         )
-        if self.settings.CLOUD_INIT_SOURCE == 'IN_PLACE':
+        if self.settings.CLOUD_INIT_SOURCE == "IN_PLACE":
             self._mount_source(pycloudlib_instance)
-        if 'lxd_setup' in kwargs:
+        if "lxd_setup" in kwargs:
             log.info("Running callback specified by 'lxd_setup' mark")
-            kwargs['lxd_setup'](pycloudlib_instance)
+            kwargs["lxd_setup"](pycloudlib_instance)
         pycloudlib_instance.start(wait=wait)
         return pycloudlib_instance


 class LxdContainerCloud(_LxdIntegrationCloud):
-    datasource = 'lxd_container'
+    datasource = "lxd_container"
     pycloudlib_instance_cls = LXDContainer
-    instance_tag = 'lxd-container-integration-test'
+    instance_tag = "lxd-container-integration-test"


 class LxdVmCloud(_LxdIntegrationCloud):
-    datasource = 'lxd_vm'
+    datasource = "lxd_vm"
     pycloudlib_instance_cls = LXDVirtualMachine
-    instance_tag = 'lxd-vm-integration-test'
+    instance_tag = "lxd-vm-integration-test"
     _profile_list = None

     def _get_or_set_profile_list(self, release):
         if self._profile_list:
             return self._profile_list
         self._profile_list = self.cloud_instance.build_necessary_profiles(
-            release)
+            release
+        )
         return self._profile_list


 class OpenstackCloud(IntegrationCloud):
-    datasource = 'openstack'
+    datasource = "openstack"
     integration_instance_cls = IntegrationInstance

     def _get_cloud_instance(self):
         if not integration_settings.OPENSTACK_NETWORK:
             raise Exception(
-                'OPENSTACK_NETWORK must be set to a valid Openstack network. '
-                'If using the openstack CLI, try `openstack network list`'
+                "OPENSTACK_NETWORK must be set to a valid Openstack network. "
+                "If using the openstack CLI, try `openstack network list`"
             )
         return Openstack(
-            tag='openstack-integration-test',
+            tag="openstack-integration-test",
             network=integration_settings.OPENSTACK_NETWORK,
         )

@@ -372,9 +389,9 @@ class OpenstackCloud(IntegrationCloud):
             UUID(image.image_id)
         except ValueError as e:
             raise Exception(
-                'When using Openstack, `OS_IMAGE` MUST be specified with '
-                'a 36-character UUID image ID. Passing in a release name is '
-                'not valid here.\n'
-                'OS image id: {}'.format(image.image_id)
+                "When using Openstack, `OS_IMAGE` MUST be specified with "
+                "a 36-character UUID image ID. Passing in a release name is "
+                "not valid here.\n"
+                "OS image id: {}".format(image.image_id)
             ) from e
         return image.image_id
diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py
index 5eab5a45..b14b6ad0 100644
--- a/tests/integration_tests/conftest.py
+++ b/tests/integration_tests/conftest.py
@@ -2,12 +2,13 @@ import datetime
 import functools
 import logging
-import pytest
 import os
 import sys
-from tarfile import TarFile
 from contextlib import contextmanager
 from pathlib import Path
+from tarfile import TarFile
+
+import pytest

 from tests.integration_tests import integration_settings
 from tests.integration_tests.clouds import (
@@ -19,31 +20,30 @@ from tests.integration_tests.clouds import (
     LxdContainerCloud,
     LxdVmCloud,
     OciCloud,
-    _LxdIntegrationCloud,
     OpenstackCloud,
+    _LxdIntegrationCloud,
 )
 from tests.integration_tests.instances import (
     CloudInitSource,
     IntegrationInstance,
 )

-
-log = logging.getLogger('integration_testing')
+log = logging.getLogger("integration_testing")
 log.addHandler(logging.StreamHandler(sys.stdout))
 log.setLevel(logging.INFO)

 platforms = {
-    'ec2': Ec2Cloud,
-    'gce': GceCloud,
-    'azure': AzureCloud,
-    'oci': OciCloud,
-    'lxd_container': LxdContainerCloud,
-    'lxd_vm': LxdVmCloud,
-    'openstack': OpenstackCloud,
+    "ec2": Ec2Cloud,
+    "gce": GceCloud,
+    "azure": AzureCloud,
+    "oci": OciCloud,
+    "lxd_container": LxdContainerCloud,
+    "lxd_vm": LxdVmCloud,
+    "openstack": OpenstackCloud,
 }
 os_list = ["ubuntu"]

-session_start_time = datetime.datetime.now().strftime('%y%m%d%H%M%S')
+session_start_time = datetime.datetime.now().strftime("%y%m%d%H%M%S")

 XENIAL_LXD_VM_EXEC_MSG = """\
 The default xenial images do not support `exec` for LXD VMs.
@@ -69,14 +69,14 @@ def pytest_runtest_setup(item):
     test_marks = [mark.name for mark in item.iter_markers()]
     supported_platforms = set(all_platforms).intersection(test_marks)
     current_platform = integration_settings.PLATFORM
-    unsupported_message = 'Cannot run on platform {}'.format(current_platform)
-    if 'no_container' in test_marks:
-        if 'lxd_container' in test_marks:
+    unsupported_message = "Cannot run on platform {}".format(current_platform)
+    if "no_container" in test_marks:
+        if "lxd_container" in test_marks:
             raise Exception(
-                'lxd_container and no_container marks simultaneously set '
-                'on test'
+                "lxd_container and no_container marks simultaneously set "
+                "on test"
             )
-        if current_platform == 'lxd_container':
+        if current_platform == "lxd_container":
             pytest.skip(unsupported_message)
     if supported_platforms and current_platform not in supported_platforms:
         pytest.skip(unsupported_message)
@@ -86,8 +86,8 @@ def pytest_runtest_setup(item):
     supported_os_set = set(os_list).intersection(test_marks)
     if current_os and supported_os_set and current_os not in supported_os_set:
         pytest.skip("Cannot run on OS {}".format(current_os))
-    if 'unstable' in test_marks and not integration_settings.RUN_UNSTABLE:
-        pytest.skip('Test marked unstable. Manually remove mark to run it')
+    if "unstable" in test_marks and not integration_settings.RUN_UNSTABLE:
+        pytest.skip("Test marked unstable. Manually remove mark to run it")

     current_release = image.release
     if "not_{}".format(current_release) in test_marks:
@@ -101,7 +101,7 @@ def disable_subp_usage(request):
     pass


-@pytest.yield_fixture(scope='session')
+@pytest.yield_fixture(scope="session")
 def session_cloud():
     if integration_settings.PLATFORM not in platforms.keys():
         raise ValueError(
@@ -122,28 +122,30 @@ def session_cloud():

 def get_validated_source(
     session_cloud: IntegrationCloud,
-    source=integration_settings.CLOUD_INIT_SOURCE
+    source=integration_settings.CLOUD_INIT_SOURCE,
 ) -> CloudInitSource:
-    if source == 'NONE':
+    if source == "NONE":
         return CloudInitSource.NONE
-    elif source == 'IN_PLACE':
-        if session_cloud.datasource not in ['lxd_container', 'lxd_vm']:
+    elif source == "IN_PLACE":
+        if session_cloud.datasource not in ["lxd_container", "lxd_vm"]:
             raise ValueError(
-                'IN_PLACE as CLOUD_INIT_SOURCE only works for LXD')
+                "IN_PLACE as CLOUD_INIT_SOURCE only works for LXD"
+            )
         return CloudInitSource.IN_PLACE
-    elif source == 'PROPOSED':
+    elif source == "PROPOSED":
         return CloudInitSource.PROPOSED
-    elif source.startswith('ppa:'):
+    elif source.startswith("ppa:"):
         return CloudInitSource.PPA
     elif os.path.isfile(str(source)):
         return CloudInitSource.DEB_PACKAGE
     elif source == "UPGRADE":
         return CloudInitSource.UPGRADE
     raise ValueError(
-        'Invalid value for CLOUD_INIT_SOURCE setting: {}'.format(source))
+        "Invalid value for CLOUD_INIT_SOURCE setting: {}".format(source)
+    )


-@pytest.fixture(scope='session')
+@pytest.fixture(scope="session")
 def setup_image(session_cloud: IntegrationCloud):
     """Setup the target environment with the correct version of cloud-init.

@@ -153,17 +155,18 @@ def setup_image(session_cloud: IntegrationCloud):
     source = get_validated_source(session_cloud)
     if not source.installs_new_version():
         return
-    log.info('Setting up environment for %s', session_cloud.datasource)
+    log.info("Setting up environment for %s", session_cloud.datasource)
     client = session_cloud.launch()
     client.install_new_cloud_init(source)
     # Even if we're keeping instances, we don't want to keep this
     # one around as it was just for image creation
     client.destroy()
-    log.info('Done with environment setup')
+    log.info("Done with environment setup")


-def _collect_logs(instance: IntegrationInstance, node_id: str,
-                  test_failed: bool):
+def _collect_logs(
+    instance: IntegrationInstance, node_id: str, test_failed: bool
+):
     """Collect logs from remote instance.
     Args:
@@ -172,36 +175,43 @@ def _collect_logs(instance: IntegrationInstance, node_id: str,
             tests/integration_tests/test_example.py::TestExample.test_example
         test_failed: If test failed or not
     """
-    if any([
-        integration_settings.COLLECT_LOGS == 'NEVER',
-        integration_settings.COLLECT_LOGS == 'ON_ERROR' and not test_failed
-    ]):
+    if any(
+        [
+            integration_settings.COLLECT_LOGS == "NEVER",
+            integration_settings.COLLECT_LOGS == "ON_ERROR"
+            and not test_failed,
+        ]
+    ):
         return
     instance.execute(
-        'cloud-init collect-logs -u -t /var/tmp/cloud-init.tar.gz')
+        "cloud-init collect-logs -u -t /var/tmp/cloud-init.tar.gz"
+    )
     node_id_path = Path(
-        node_id
-        .replace('.py', '')  # Having a directory with '.py' would be weird
-        .replace('::', os.path.sep)  # Turn classes/tests into paths
-        .replace('[', '-')  # For parametrized names
-        .replace(']', '')  # For parameterized names
+        node_id.replace(
+            ".py", ""
+        )  # Having a directory with '.py' would be weird
+        .replace("::", os.path.sep)  # Turn classes/tests into paths
+        .replace("[", "-")  # For parametrized names
+        .replace("]", "")  # For parameterized names
+    )
+    log_dir = (
+        Path(integration_settings.LOCAL_LOG_PATH)
+        / session_start_time
+        / node_id_path
     )
-    log_dir = Path(
-        integration_settings.LOCAL_LOG_PATH
-    ) / session_start_time / node_id_path
     log.info("Writing logs to %s", log_dir)

     if not log_dir.exists():
         log_dir.mkdir(parents=True)

     # Add a symlink to the latest log output directory
-    last_symlink = Path(integration_settings.LOCAL_LOG_PATH) / 'last'
+    last_symlink = Path(integration_settings.LOCAL_LOG_PATH) / "last"
     if os.path.islink(last_symlink):
         os.unlink(last_symlink)
     os.symlink(log_dir.parent, last_symlink)

-    tarball_path = log_dir / 'cloud-init.tar.gz'
-    instance.pull_file('/var/tmp/cloud-init.tar.gz', tarball_path)
+    tarball_path = log_dir / "cloud-init.tar.gz"
+    instance.pull_file("/var/tmp/cloud-init.tar.gz", tarball_path)

     tarball = TarFile.open(str(tarball_path))
     tarball.extractall(path=str(log_dir))
@@ -218,12 +228,12 @@ def _client(request, fixture_utils, session_cloud: IntegrationCloud):
     getter = functools.partial(
         fixture_utils.closest_marker_first_arg_or, request, default=None
     )
-    user_data = getter('user_data')
-    name = getter('instance_name')
-    lxd_config_dict = getter('lxd_config_dict')
-    lxd_setup = getter('lxd_setup')
+    user_data = getter("user_data")
+    name = getter("instance_name")
+    lxd_config_dict = getter("lxd_config_dict")
+    lxd_setup = getter("lxd_setup")
     lxd_use_exec = fixture_utils.closest_marker_args_or(
-        request, 'lxd_use_exec', None
+        request, "lxd_use_exec", None
     )

     launch_kwargs = {}
@@ -250,8 +260,8 @@ def _client(request, fixture_utils, session_cloud: IntegrationCloud):
     local_launch_kwargs = {}
     if lxd_setup is not None:
         if not isinstance(session_cloud, _LxdIntegrationCloud):
-            pytest.skip('lxd_setup requires LXD')
-        local_launch_kwargs['lxd_setup'] = lxd_setup
+            pytest.skip("lxd_setup requires LXD")
+        local_launch_kwargs["lxd_setup"] = lxd_setup

     with session_cloud.launch(
         user_data=user_data, launch_kwargs=launch_kwargs, **local_launch_kwargs
@@ -273,14 +283,14 @@ def client(request, fixture_utils, session_cloud, setup_image):
         yield client


-@pytest.yield_fixture(scope='module')
+@pytest.yield_fixture(scope="module")
 def module_client(request, fixture_utils, session_cloud, setup_image):
     """Provide a client that runs once per module."""
     with _client(request, fixture_utils, session_cloud) as client:
         yield client


-@pytest.yield_fixture(scope='class')
+@pytest.yield_fixture(scope="class")
 def class_client(request, fixture_utils, session_cloud, setup_image):
     """Provide a client that runs once per class."""
     with _client(request, fixture_utils, session_cloud) as client:
diff --git a/tests/integration_tests/datasources/test_lxd_discovery.py b/tests/integration_tests/datasources/test_lxd_discovery.py
index 3f05e906..da010813 100644
--- a/tests/integration_tests/datasources/test_lxd_discovery.py
+++ b/tests/integration_tests/datasources/test_lxd_discovery.py
@@ -1,4 +1,5 @@
 import json
+
 import pytest
 import yaml

@@ -9,10 +10,10 @@ from tests.integration_tests.util import verify_clean_log

 def _customize_envionment(client: IntegrationInstance):
     client.write_to_file(
-        '/etc/cloud/cloud.cfg.d/99-detect-lxd.cfg',
-        'datasource_list: [LXD]\n',
+        "/etc/cloud/cloud.cfg.d/99-detect-lxd.cfg",
+        "datasource_list: [LXD]\n",
     )
-    client.execute('cloud-init clean --logs')
+    client.execute("cloud-init clean --logs")
     client.restart()


@@ -25,40 +26,44 @@ def test_lxd_datasource_discovery(client: IntegrationInstance):
     """Test that DataSourceLXD is detected instead of NoCloud."""
     _customize_envionment(client)
     nic_dev = "enp5s0" if client.settings.PLATFORM == "lxd_vm" else "eth0"
-    result = client.execute('cloud-init status --long')
+    result = client.execute("cloud-init status --long")
     if not result.ok:
-        raise AssertionError('cloud-init failed:\n%s', result.stderr)
+        raise AssertionError("cloud-init failed:\n%s", result.stderr)
     if "DataSourceLXD" not in result.stdout:
         raise AssertionError(
-            'cloud-init did not discover DataSourceLXD', result.stdout
+            "cloud-init did not discover DataSourceLXD", result.stdout
         )
-    netplan_yaml = client.execute('cat /etc/netplan/50-cloud-init.yaml')
+    netplan_yaml = client.execute("cat /etc/netplan/50-cloud-init.yaml")
     netplan_cfg = yaml.safe_load(netplan_yaml)
     assert {
-        'network': {'ethernets': {nic_dev: {'dhcp4': True}}, 'version': 2}
+        "network": {"ethernets": {nic_dev: {"dhcp4": True}}, "version": 2}
     } == netplan_cfg
-    log = client.read_from_file('/var/log/cloud-init.log')
+    log = client.read_from_file("/var/log/cloud-init.log")
     verify_clean_log(log)
-    result = client.execute('cloud-id')
+    result = client.execute("cloud-id")
     if result.stdout != "lxd":
         raise AssertionError(
             "cloud-id didn't report lxd. Result: %s", result.stdout
         )
     # Validate config instance data represented
-    data = json.loads(client.read_from_file(
-        '/run/cloud-init/instance-data.json')
+    data = json.loads(
+        client.read_from_file("/run/cloud-init/instance-data.json")
     )
     v1 = data["v1"]
     ds_cfg = data["ds"]
     assert "lxd" == v1["platform"]
     assert "LXD socket API v. 1.0 (/dev/lxd/sock)" == v1["subplatform"]
-    ds_cfg = json.loads(client.execute('cloud-init query ds').stdout)
+    ds_cfg = json.loads(client.execute("cloud-init query ds").stdout)
     assert ["_doc", "_metadata_api_version", "config", "meta-data"] == sorted(
         list(ds_cfg.keys())
     )
     if (
-        client.settings.PLATFORM == "lxd_vm" and
-        ImageSpecification.from_os_image().release in ("xenial", "bionic")
+        client.settings.PLATFORM == "lxd_vm"
+        and ImageSpecification.from_os_image().release
+        in (
+            "xenial",
+            "bionic",
+        )
     ):
         # pycloudlib injects user.vendor_data for lxd_vm on bionic and xenial
         # to start the lxd-agent.
@@ -74,17 +79,13 @@ def test_lxd_datasource_discovery(client: IntegrationInstance):
     assert {"public-keys": v1["public_ssh_keys"][0]} == (
         yaml.safe_load(ds_cfg["config"]["user.meta-data"])
     )
-    assert (
-        "#cloud-config\ninstance-id" in ds_cfg["meta-data"]
-    )
+    assert "#cloud-config\ninstance-id" in ds_cfg["meta-data"]
     # Assert NoCloud seed data is still present in cloud image metadata
     # This will start failing if we redact metadata templates from
     # https://cloud-images.ubuntu.com/daily/server/jammy/current/\
     # jammy-server-cloudimg-amd64-lxd.tar.xz
     nocloud_metadata = yaml.safe_load(
-        client.read_from_file(
-            "/var/lib/cloud/seed/nocloud-net/meta-data"
-        )
+        client.read_from_file("/var/lib/cloud/seed/nocloud-net/meta-data")
    )
     assert client.instance.name == nocloud_metadata["instance-id"]
     assert (
diff --git a/tests/integration_tests/datasources/test_network_dependency.py b/tests/integration_tests/datasources/test_network_dependency.py
index 24e71f9d..32ac7053 100644
--- a/tests/integration_tests/datasources/test_network_dependency.py
+++ b/tests/integration_tests/datasources/test_network_dependency.py
@@ -6,10 +6,10 @@ from tests.integration_tests.instances import IntegrationInstance
 def _customize_envionment(client: IntegrationInstance):
     # Insert our "disable_network_activation" file here
     client.write_to_file(
-        '/etc/cloud/cloud.cfg.d/99-disable-network-activation.cfg',
-        'disable_network_activation: true\n',
+        "/etc/cloud/cloud.cfg.d/99-disable-network-activation.cfg",
+        "disable_network_activation: true\n",
     )
-    client.execute('cloud-init clean --logs')
+    client.execute("cloud-init clean --logs")
     client.restart()


@@ -20,13 +20,14 @@ def _customize_envionment(client: IntegrationInstance):
 def test_network_activation_disabled(client: IntegrationInstance):
     """Test that the network is not activated during init mode."""
     _customize_envionment(client)
-    result = client.execute('systemctl status google-guest-agent.service')
+    result = client.execute("systemctl status google-guest-agent.service")
     if not result.ok:
         raise AssertionError(
-            'google-guest-agent is not active:\n%s', result.stdout)
-    log = client.read_from_file('/var/log/cloud-init.log')
+            "google-guest-agent is not active:\n%s", result.stdout
+        )
+    log = client.read_from_file("/var/log/cloud-init.log")

     assert "Running command ['netplan', 'apply']" not in log
-    assert 'Not bringing up newly configured network interfaces' in log
-    assert 'Bringing up newly configured network interfaces' not in log
+    assert "Not bringing up newly configured network interfaces" in log
+    assert "Bringing up newly configured network interfaces" not in log
diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py
index 8f66bf43..793f729e 100644
--- a/tests/integration_tests/instances.py
+++ b/tests/integration_tests/instances.py
@@ -1,8 +1,8 @@
 # This file is part of cloud-init. See LICENSE file for license information.
-from enum import Enum
 import logging
 import os
 import uuid
+from enum import Enum
 from tempfile import NamedTemporaryFile

 from pycloudlib.instance import BaseInstance
@@ -13,20 +13,21 @@ from tests.integration_tests.util import retry

 try:
     from typing import TYPE_CHECKING
+
     if TYPE_CHECKING:
         from tests.integration_tests.clouds import (  # noqa: F401
-            IntegrationCloud
+            IntegrationCloud,
         )
 except ImportError:
     pass


-log = logging.getLogger('integration_testing')
+log = logging.getLogger("integration_testing")


 def _get_tmp_path():
     tmp_filename = str(uuid.uuid4())
-    return '/var/tmp/{}.tmp'.format(tmp_filename)
+    return "/var/tmp/{}.tmp".format(tmp_filename)


 class CloudInitSource(Enum):
@@ -37,6 +38,7 @@ class CloudInitSource(Enum):
     explanation of these values.
     If the value set there can't be parsed into one of these values, an
     exception will be raised
     """
+
     NONE = 1
     IN_PLACE = 2
     PROPOSED = 3
@@ -51,8 +53,12 @@ class CloudInitSource(Enum):


 class IntegrationInstance:
-    def __init__(self, cloud: 'IntegrationCloud', instance: BaseInstance,
-                 settings=integration_settings):
+    def __init__(
+        self,
+        cloud: "IntegrationCloud",
+        instance: BaseInstance,
+        settings=integration_settings,
+    ):
         self.cloud = cloud
         self.instance = instance
         self.settings = settings
@@ -69,41 +75,44 @@ class IntegrationInstance:
         self.instance.restart()

     def execute(self, command, *, use_sudo=True) -> Result:
-        if self.instance.username == 'root' and use_sudo is False:
-            raise Exception('Root user cannot run unprivileged')
+        if self.instance.username == "root" and use_sudo is False:
+            raise Exception("Root user cannot run unprivileged")
         return self.instance.execute(command, use_sudo=use_sudo)

     def pull_file(self, remote_path, local_path):
         # First copy to a temporary directory because of permissions issues
         tmp_path = _get_tmp_path()
-        self.instance.execute('cp {} {}'.format(str(remote_path), tmp_path))
+        self.instance.execute("cp {} {}".format(str(remote_path), tmp_path))
         self.instance.pull_file(tmp_path, str(local_path))

     def push_file(self, local_path, remote_path):
         # First push to a temporary directory because of permissions issues
         tmp_path = _get_tmp_path()
         self.instance.push_file(str(local_path), tmp_path)
-        self.execute('mv {} {}'.format(tmp_path, str(remote_path)))
+        self.execute("mv {} {}".format(tmp_path, str(remote_path)))

     def read_from_file(self, remote_path) -> str:
-        result = self.execute('cat {}'.format(remote_path))
+        result = self.execute("cat {}".format(remote_path))
         if result.failed:
             # TODO: Raise here whatever pycloudlib raises when it has
             # a consistent error response
             raise IOError(
-                'Failed reading remote file via cat: {}\n'
-                'Return code: {}\n'
-                'Stderr: {}\n'
-                'Stdout: {}'.format(
-                    remote_path, result.return_code,
-                    result.stderr, result.stdout)
+                "Failed reading remote file via cat: {}\n"
+                "Return code: {}\n"
+                "Stderr: {}\n"
+                "Stdout: {}".format(
+                    remote_path,
+                    result.return_code,
+                    result.stderr,
+                    result.stdout,
+                )
             )
         return result.stdout

     def write_to_file(self, remote_path, contents: str):
         # Writes file locally and then pushes it rather
         # than writing the file directly on the instance
-        with NamedTemporaryFile('w', delete=False) as tmp_file:
+        with NamedTemporaryFile("w", delete=False) as tmp_file:
             tmp_file.write(contents)

         try:
@@ -113,7 +122,7 @@ class IntegrationInstance:

     def snapshot(self):
         image_id = self.cloud.snapshot(self.instance)
-        log.info('Created new image: %s', image_id)
+        log.info("Created new image: %s", image_id)
         return image_id

     def install_new_cloud_init(
@@ -133,10 +142,11 @@ class IntegrationInstance:
         else:
             raise Exception(
                 "Specified to install {} which isn't supported here".format(
-                    source)
+                    source
+                )
             )
-        version = self.execute('cloud-init -v').split()[-1]
-        log.info('Installed cloud-init version: %s', version)
+        version = self.execute("cloud-init -v").split()[-1]
+        log.info("Installed cloud-init version: %s", version)
         if clean:
             self.instance.clean()
         if take_snapshot:
@@ -149,38 +159,39 @@ class IntegrationInstance:

     @retry(tries=30, delay=1)
     def install_proposed_image(self):
-        log.info('Installing proposed image')
+        log.info("Installing proposed image")
         assert self.execute(
             'echo deb "http://archive.ubuntu.com/ubuntu '
             '$(lsb_release -sc)-proposed main" >> '
-            '/etc/apt/sources.list.d/proposed.list'
+            "/etc/apt/sources.list.d/proposed.list"
         ).ok
-        assert self.execute('apt-get update -q').ok
-        assert self.execute('apt-get install -qy cloud-init').ok
+        assert self.execute("apt-get update -q").ok
+        assert self.execute("apt-get install -qy cloud-init").ok

     @retry(tries=30, delay=1)
     def install_ppa(self):
-        log.info('Installing PPA')
-        assert self.execute('add-apt-repository {} -y'.format(
-            self.settings.CLOUD_INIT_SOURCE)
+        log.info("Installing PPA")
+        assert self.execute(
+            "add-apt-repository {} -y".format(self.settings.CLOUD_INIT_SOURCE)
         ).ok
-        assert self.execute('apt-get update -q').ok
-        assert self.execute('apt-get install -qy cloud-init').ok
+        assert self.execute("apt-get update -q").ok
+        assert self.execute("apt-get install -qy cloud-init").ok

     @retry(tries=30, delay=1)
     def install_deb(self):
-        log.info('Installing deb package')
+        log.info("Installing deb package")
         deb_path = integration_settings.CLOUD_INIT_SOURCE
         deb_name = os.path.basename(deb_path)
-        remote_path = '/var/tmp/{}'.format(deb_name)
+        remote_path = "/var/tmp/{}".format(deb_name)
         self.push_file(
             local_path=integration_settings.CLOUD_INIT_SOURCE,
-            remote_path=remote_path)
-        assert self.execute('dpkg -i {path}'.format(path=remote_path)).ok
+            remote_path=remote_path,
+        )
+        assert self.execute("dpkg -i {path}".format(path=remote_path)).ok

     @retry(tries=30, delay=1)
     def upgrade_cloud_init(self):
-        log.info('Upgrading cloud-init to latest version in archive')
+        log.info("Upgrading cloud-init to latest version in archive")
         assert self.execute("apt-get update -q").ok
         assert self.execute("apt-get install -qy cloud-init").ok
diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py
index e4a790c2..641ce297 100644
--- a/tests/integration_tests/integration_settings.py
+++ b/tests/integration_tests/integration_settings.py
@@ -1,6 +1,5 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 import os
-
 from distutils.util import strtobool

 ##################################################################
@@ -22,7 +21,7 @@ RUN_UNSTABLE = False
 # gce
 # oci
 # openstack
-PLATFORM = 'lxd_container'
+PLATFORM = "lxd_container"

 # The cloud-specific instance type to run. E.g., a1.medium on AWS
 # If the pycloudlib instance provides a default, this can be left None
@@ -34,7 +33,7 @@ INSTANCE_TYPE = None
 # <image_id>[::<os>[::<release>]].  If given, os and release should describe
 # the image specified by image_id.  (Ubuntu releases are converted to this
 # format internally; in this case, to "focal::ubuntu::focal".)
-OS_IMAGE = 'focal'
+OS_IMAGE = "focal"

 # Populate if you want to use a pre-launched instance instead of
 # creating a new one. The exact contents will be platform dependent
@@ -66,7 +65,7 @@ EXISTING_INSTANCE_ID = None
 # Install from a PPA. It MUST start with 'ppa:'
 # <file path>
 #   A path to a valid package to be uploaded and installed
-CLOUD_INIT_SOURCE = 'NONE'
+CLOUD_INIT_SOURCE = "NONE"

 # Before an instance is torn down, we run `cloud-init collect-logs`
 # and transfer them locally. These settings specify when to collect these
@@ -75,8 +74,8 @@ CLOUD_INIT_SOURCE = 'NONE'
 # 'ALWAYS'
 # 'ON_ERROR'
 # 'NEVER'
-COLLECT_LOGS = 'ON_ERROR'
-LOCAL_LOG_PATH = '/tmp/cloud_init_test_logs'
+COLLECT_LOGS = "ON_ERROR"
+LOCAL_LOG_PATH = "/tmp/cloud_init_test_logs"

 ##################################################################
 # SSH KEY SETTINGS
@@ -124,7 +123,7 @@ except ImportError:
 current_settings = [var for var in locals() if var.isupper()]
 for setting in current_settings:
     env_setting = os.getenv(
-        'CLOUD_INIT_{}'.format(setting), globals()[setting]
+        "CLOUD_INIT_{}".format(setting), globals()[setting]
     )
     if isinstance(env_setting, str):
         try:
diff --git a/tests/integration_tests/modules/test_apt.py b/tests/integration_tests/modules/test_apt.py
index f5f6c813..48f398d1 100644
--- a/tests/integration_tests/modules/test_apt.py
+++ b/tests/integration_tests/modules/test_apt.py
@@ -3,12 +3,11 @@ import re

 import pytest

-from cloudinit.config import cc_apt_configure
 from cloudinit import gpg
+from cloudinit.config import cc_apt_configure
 from tests.integration_tests.clouds import ImageSpecification
 from tests.integration_tests.instances import IntegrationInstance

-
 USER_DATA = """\
 #cloud-config
 apt:
@@ -104,14 +103,15 @@ class TestApt:
         """Return all keys in /etc/apt/trusted.gpg.d/ and /etc/apt/trusted.gpg
         in human readable format. Mimics the output of apt-key finger
         """
-        list_cmd = ' '.join(gpg.GPG_LIST) + ' '
+        list_cmd = " ".join(gpg.GPG_LIST) + " "
         keys = class_client.execute(list_cmd + cc_apt_configure.APT_LOCAL_KEYS)
         print(keys)
         files = class_client.execute(
-            'ls ' + cc_apt_configure.APT_TRUSTED_GPG_DIR)
+            "ls " + cc_apt_configure.APT_TRUSTED_GPG_DIR
+        )
         for file in files.split():
             path = cc_apt_configure.APT_TRUSTED_GPG_DIR + file
-            keys += class_client.execute(list_cmd + path) or ''
+            keys += class_client.execute(list_cmd + path) or ""
         return keys

     def test_sources_list(self, class_client: IntegrationInstance):
@@ -124,8 +124,8 @@ class TestApt:
         (This is ported from
         `tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml`.)
""" - sources_list = class_client.read_from_file('/etc/apt/sources.list') - assert 6 == len(sources_list.rstrip().split('\n')) + sources_list = class_client.read_from_file("/etc/apt/sources.list") + assert 6 == len(sources_list.rstrip().split("\n")) for expected_re in EXPECTED_REGEXES: assert re.search(expected_re, sources_list) is not None @@ -136,7 +136,7 @@ class TestApt: Ported from tests/cloud_tests/testcases/modules/apt_configure_conf.py """ apt_config = class_client.read_from_file( - '/etc/apt/apt.conf.d/94cloud-init-config' + "/etc/apt/apt.conf.d/94cloud-init-config" ) assert 'Assume-Yes "true";' in apt_config assert 'Fix-Broken "true";' in apt_config @@ -149,40 +149,43 @@ class TestApt: """ release = ImageSpecification.from_os_image().release ppa_path_contents = class_client.read_from_file( - '/etc/apt/sources.list.d/' - 'simplestreams-dev-ubuntu-trunk-{}.list'.format(release) + "/etc/apt/sources.list.d/" + "simplestreams-dev-ubuntu-trunk-{}.list".format(release) ) assert ( - 'http://ppa.launchpad.net/simplestreams-dev/trunk/ubuntu' - ) in ppa_path_contents + "http://ppa.launchpad.net/simplestreams-dev/trunk/ubuntu" + in ppa_path_contents + ) assert TEST_PPA_KEY in self.get_keys(class_client) def test_signed_by(self, class_client: IntegrationInstance): - """Test the apt signed-by functionality. - """ + """Test the apt signed-by functionality.""" release = ImageSpecification.from_os_image().release source = ( "deb [signed-by=/etc/apt/cloud-init.gpg.d/test_signed_by.gpg] " "http://ppa.launchpad.net/juju/stable/ubuntu" - " {} main".format(release)) + " {} main".format(release) + ) path_contents = class_client.read_from_file( - '/etc/apt/sources.list.d/test_signed_by.list') + "/etc/apt/sources.list.d/test_signed_by.list" + ) assert path_contents == source key = class_client.execute( - 'gpg --no-default-keyring --with-fingerprint --list-keys ' - '--keyring /etc/apt/cloud-init.gpg.d/test_signed_by.gpg') + "gpg --no-default-keyring --with-fingerprint --list-keys " + "--keyring /etc/apt/cloud-init.gpg.d/test_signed_by.gpg" + ) assert TEST_SIGNED_BY_KEY in key def test_bad_key(self, class_client: IntegrationInstance): - """Test the apt signed-by functionality. - """ + """Test the apt signed-by functionality.""" with pytest.raises(OSError): class_client.read_from_file( - '/etc/apt/trusted.list.d/test_bad_key.gpg') + "/etc/apt/trusted.list.d/test_bad_key.gpg" + ) def test_key(self, class_client: IntegrationInstance): """Test the apt key functionality. 
@@ -191,12 +194,13 @@ class TestApt:
         tests/cloud_tests/testcases/modules/apt_configure_sources_key.py
         """
         test_archive_contents = class_client.read_from_file(
-            '/etc/apt/sources.list.d/test_key.list'
+            "/etc/apt/sources.list.d/test_key.list"
         )

         assert (
-            'http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu'
-        ) in test_archive_contents
+            "http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu"
+            in test_archive_contents
+        )
         assert TEST_KEY in self.get_keys(class_client)

     def test_keyserver(self, class_client: IntegrationInstance):
@@ -206,12 +210,13 @@ class TestApt:
         tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py
         """
         test_keyserver_contents = class_client.read_from_file(
-            '/etc/apt/sources.list.d/test_keyserver.list'
+            "/etc/apt/sources.list.d/test_keyserver.list"
         )

         assert (
-            'http://ppa.launchpad.net/canonical-kernel-team/ppa/ubuntu'
-        ) in test_keyserver_contents
+            "http://ppa.launchpad.net/canonical-kernel-team/ppa/ubuntu"
+            in test_keyserver_contents
+        )

         assert TEST_KEYSERVER_KEY in self.get_keys(class_client)

@@ -221,7 +226,7 @@ class TestApt:
         Ported from tests/cloud_tests/testcases/modules/apt_pipelining_os.py
         """
         conf_exists = class_client.execute(
-            'test -f /etc/apt/apt.conf.d/90cloud-init-pipelining'
+            "test -f /etc/apt/apt.conf.d/90cloud-init-pipelining"
         ).ok
         assert conf_exists is False

@@ -237,7 +242,7 @@ apt:
       - arches:
         - default
 """
-DEFAULT_DATA = _DEFAULT_DATA.format(uri='')
+DEFAULT_DATA = _DEFAULT_DATA.format(uri="")


 @pytest.mark.ubuntu
@@ -249,9 +254,9 @@ class TestDefaults:

         When no uri is provided.
         """
-        zone = class_client.execute('cloud-init query v1.availability_zone')
-        sources_list = class_client.read_from_file('/etc/apt/sources.list')
-        assert '{}.clouds.archive.ubuntu.com'.format(zone) in sources_list
+        zone = class_client.execute("cloud-init query v1.availability_zone")
+        sources_list = class_client.read_from_file("/etc/apt/sources.list")
+        assert "{}.clouds.archive.ubuntu.com".format(zone) in sources_list

     def test_security(self, class_client: IntegrationInstance):
         """Test apt default security sources.
@@ -259,12 +264,12 @@ class TestDefaults:
         Ported from
         tests/cloud_tests/testcases/modules/apt_configure_security.py
         """
-        sources_list = class_client.read_from_file('/etc/apt/sources.list')
+        sources_list = class_client.read_from_file("/etc/apt/sources.list")

         # 3 lines from main, universe, and multiverse
-        assert 3 == sources_list.count('deb http://security.ubuntu.com/ubuntu')
+        assert 3 == sources_list.count("deb http://security.ubuntu.com/ubuntu")
         assert 3 == sources_list.count(
-            '# deb-src http://security.ubuntu.com/ubuntu'
+            "# deb-src http://security.ubuntu.com/ubuntu"
         )


@@ -280,10 +285,10 @@ def test_default_primary_with_uri(client: IntegrationInstance):
     Ported from
     tests/cloud_tests/testcases/modules/apt_configure_primary.py
     """
-    sources_list = client.read_from_file('/etc/apt/sources.list')
-    assert 'archive.ubuntu.com' not in sources_list
+    sources_list = client.read_from_file("/etc/apt/sources.list")
+    assert "archive.ubuntu.com" not in sources_list

-    assert 'something.random.invalid' in sources_list
+    assert "something.random.invalid" in sources_list


 DISABLED_DATA = """\
@@ -310,7 +315,7 @@ class TestDisabled:
         sources_list = class_client.execute(
             "cat /etc/apt/sources.list | grep -v '^#'"
         ).strip()
-        assert '' == sources_list
+        assert "" == sources_list

     def test_disable_apt_pipelining(self, class_client: IntegrationInstance):
         """Test disabling of apt pipelining.
@@ -319,7 +324,7 @@ class TestDisabled: tests/cloud_tests/testcases/modules/apt_pipelining_disable.py """ conf = class_client.read_from_file( - '/etc/apt/apt.conf.d/90cloud-init-pipelining' + "/etc/apt/apt.conf.d/90cloud-init-pipelining" ) assert 'Acquire::http::Pipeline-Depth "0";' in conf @@ -338,8 +343,7 @@ apt: @pytest.mark.user_data(APT_PROXY_DATA) def test_apt_proxy(client: IntegrationInstance): """Test the apt proxy data gets written correctly.""" - out = client.read_from_file( - '/etc/apt/apt.conf.d/90cloud-init-aptproxy') + out = client.read_from_file("/etc/apt/apt.conf.d/90cloud-init-aptproxy") assert 'Acquire::http::Proxy "http://proxy.internal:3128";' in out assert 'Acquire::http::Proxy "http://squid.internal:3128";' in out assert 'Acquire::ftp::Proxy "ftp://squid.internal:3128";' in out diff --git a/tests/integration_tests/modules/test_ca_certs.py b/tests/integration_tests/modules/test_ca_certs.py index 89c01a9c..d514fc62 100644 --- a/tests/integration_tests/modules/test_ca_certs.py +++ b/tests/integration_tests/modules/test_ca_certs.py @@ -10,7 +10,6 @@ import os.path import pytest - USER_DATA = """\ #cloud-config ca-certs: diff --git a/tests/integration_tests/modules/test_cli.py b/tests/integration_tests/modules/test_cli.py index 3f41b34d..97bfe52d 100644 --- a/tests/integration_tests/modules/test_cli.py +++ b/tests/integration_tests/modules/test_cli.py @@ -7,7 +7,6 @@ import pytest from tests.integration_tests.instances import IntegrationInstance - VALID_USER_DATA = """\ #cloud-config runcmd: @@ -27,9 +26,9 @@ def test_valid_userdata(client: IntegrationInstance): PR #575 """ - result = client.execute('cloud-init devel schema --system') + result = client.execute("cloud-init devel schema --system") assert result.ok - assert 'Valid cloud-config: system userdata' == result.stdout.strip() + assert "Valid cloud-config: system userdata" == result.stdout.strip() @pytest.mark.sru_2020_11 @@ -39,7 +38,7 @@ def test_invalid_userdata(client: IntegrationInstance): PR #575 """ - result = client.execute('cloud-init devel schema --system') + result = client.execute("cloud-init devel schema --system") assert not result.ok - assert 'Cloud config schema errors' in result.stderr + assert "Cloud config schema errors" in result.stderr assert 'needs to begin with "#cloud-config"' in result.stderr diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py index 26a8397d..c88f40d3 100644 --- a/tests/integration_tests/modules/test_combined.py +++ b/tests/integration_tests/modules/test_combined.py @@ -6,9 +6,10 @@ the same instance launch. Most independent module coherence tests can go here. """ import json -import pytest import re +import pytest + from tests.integration_tests.clouds import ImageSpecification from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.util import ( @@ -76,7 +77,7 @@ class TestCombined: Also tests LP 1511485: final_message is silent. """ client = class_client - log = client.read_from_file('/var/log/cloud-init.log') + log = client.read_from_file("/var/log/cloud-init.log") expected = ( "This is my final message!\n" r"\d+\.\d+.*\n" @@ -94,10 +95,10 @@ class TestCombined: configuring the archives. 
""" client = class_client - log = client.read_from_file('/var/log/cloud-init.log') - assert 'W: Failed to fetch' not in log - assert 'W: Some index files failed to download' not in log - assert 'E: Unable to locate package ntp' not in log + log = client.read_from_file("/var/log/cloud-init.log") + assert "W: Failed to fetch" not in log + assert "W: Some index files failed to download" not in log + assert "E: Unable to locate package ntp" not in log def test_byobu(self, class_client: IntegrationInstance): """Test byobu configured as enabled by default.""" @@ -107,22 +108,18 @@ class TestCombined: def test_configured_locale(self, class_client: IntegrationInstance): """Test locale can be configured correctly.""" client = class_client - default_locale = client.read_from_file('/etc/default/locale') - assert 'LANG=en_GB.UTF-8' in default_locale + default_locale = client.read_from_file("/etc/default/locale") + assert "LANG=en_GB.UTF-8" in default_locale - locale_a = client.execute('locale -a') - verify_ordered_items_in_text([ - 'en_GB.utf8', - 'en_US.utf8' - ], locale_a) + locale_a = client.execute("locale -a") + verify_ordered_items_in_text(["en_GB.utf8", "en_US.utf8"], locale_a) locale_gen = client.execute( "cat /etc/locale.gen | grep -v '^#' | uniq" ) - verify_ordered_items_in_text([ - 'en_GB.UTF-8', - 'en_US.UTF-8' - ], locale_gen) + verify_ordered_items_in_text( + ["en_GB.UTF-8", "en_US.UTF-8"], locale_gen + ) def test_random_seed_data(self, class_client: IntegrationInstance): """Integration test for the random seed module. @@ -141,12 +138,12 @@ class TestCombined: def test_rsyslog(self, class_client: IntegrationInstance): """Test rsyslog is configured correctly.""" client = class_client - assert 'My test log' in client.read_from_file('/var/tmp/rsyslog.log') + assert "My test log" in client.read_from_file("/var/tmp/rsyslog.log") def test_runcmd(self, class_client: IntegrationInstance): """Test runcmd works as expected""" client = class_client - assert 'hello world' == client.read_from_file('/var/tmp/runcmd_output') + assert "hello world" == client.read_from_file("/var/tmp/runcmd_output") @retry(tries=30, delay=1) def test_ssh_import_id(self, class_client: IntegrationInstance): @@ -160,11 +157,10 @@ class TestCombined: /home/ubuntu; this will need modification to run on other OSes. """ client = class_client - ssh_output = client.read_from_file( - "/home/ubuntu/.ssh/authorized_keys") + ssh_output = client.read_from_file("/home/ubuntu/.ssh/authorized_keys") - assert '# ssh-import-id gh:powersj' in ssh_output - assert '# ssh-import-id lp:smoser' in ssh_output + assert "# ssh-import-id gh:powersj" in ssh_output + assert "# ssh-import-id lp:smoser" in ssh_output def test_snap(self, class_client: IntegrationInstance): """Integration test for the snap module. 
@@ -185,21 +181,22 @@ class TestCombined: """ client = class_client timezone_output = client.execute( - 'date "+%Z" --date="Thu, 03 Nov 2016 00:47:00 -0400"') + 'date "+%Z" --date="Thu, 03 Nov 2016 00:47:00 -0400"' + ) assert timezone_output.strip() == "HDT" def test_no_problems(self, class_client: IntegrationInstance): """Test no errors, warnings, or tracebacks""" client = class_client - status_file = client.read_from_file('/run/cloud-init/status.json') - status_json = json.loads(status_file)['v1'] - for stage in ('init', 'init-local', 'modules-config', 'modules-final'): - assert status_json[stage]['errors'] == [] - result_file = client.read_from_file('/run/cloud-init/result.json') - result_json = json.loads(result_file)['v1'] - assert result_json['errors'] == [] - - log = client.read_from_file('/var/log/cloud-init.log') + status_file = client.read_from_file("/run/cloud-init/status.json") + status_json = json.loads(status_file)["v1"] + for stage in ("init", "init-local", "modules-config", "modules-final"): + assert status_json[stage]["errors"] == [] + result_file = client.read_from_file("/run/cloud-init/result.json") + result_json = json.loads(result_file)["v1"] + assert result_json["errors"] == [] + + log = client.read_from_file("/var/log/cloud-init.log") verify_clean_log(log) def test_correct_datasource_detected( @@ -228,73 +225,81 @@ class TestCombined: ) def _check_common_metadata(self, data): - assert data['base64_encoded_keys'] == [] - assert data['merged_cfg'] == 'redacted for non-root user' + assert data["base64_encoded_keys"] == [] + assert data["merged_cfg"] == "redacted for non-root user" image_spec = ImageSpecification.from_os_image() - assert data['sys_info']['dist'][0] == image_spec.os + assert data["sys_info"]["dist"][0] == image_spec.os - v1_data = data['v1'] - assert re.match(r'\d\.\d+\.\d+-\d+', v1_data['kernel_release']) - assert v1_data['variant'] == image_spec.os - assert v1_data['distro'] == image_spec.os - assert v1_data['distro_release'] == image_spec.release - assert v1_data['machine'] == 'x86_64' - assert re.match(r'3.\d\.\d', v1_data['python_version']) + v1_data = data["v1"] + assert re.match(r"\d\.\d+\.\d+-\d+", v1_data["kernel_release"]) + assert v1_data["variant"] == image_spec.os + assert v1_data["distro"] == image_spec.os + assert v1_data["distro_release"] == image_spec.release + assert v1_data["machine"] == "x86_64" + assert re.match(r"3.\d\.\d", v1_data["python_version"]) @pytest.mark.lxd_container def test_instance_json_lxd(self, class_client: IntegrationInstance): client = class_client instance_json_file = client.read_from_file( - '/run/cloud-init/instance-data.json') + "/run/cloud-init/instance-data.json" + ) data = json.loads(instance_json_file) self._check_common_metadata(data) - v1_data = data['v1'] - assert v1_data['cloud_name'] == 'unknown' - assert v1_data['platform'] == 'lxd' - assert v1_data['subplatform'] == ( - 'seed-dir (/var/lib/cloud/seed/nocloud-net)') - assert v1_data['availability_zone'] is None - assert v1_data['instance_id'] == client.instance.name - assert v1_data['local_hostname'] == client.instance.name - assert v1_data['region'] is None + v1_data = data["v1"] + assert v1_data["cloud_name"] == "unknown" + assert v1_data["platform"] == "lxd" + assert ( + v1_data["subplatform"] + == "seed-dir (/var/lib/cloud/seed/nocloud-net)" + ) + assert v1_data["availability_zone"] is None + assert v1_data["instance_id"] == client.instance.name + assert v1_data["local_hostname"] == client.instance.name + assert v1_data["region"] is None 
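    # Aside: the test_instance_json_* methods in this region all parse the
    # same file that `cloud-init query` reads. A hedged by-hand equivalent
    # of the LXD assertion, run inside an instance, might be:
    #
    #     import json
    #
    #     with open("/run/cloud-init/instance-data.json") as f:
    #         v1 = json.load(f)["v1"]
    #     assert v1["platform"] == "lxd"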
@pytest.mark.lxd_vm def test_instance_json_lxd_vm(self, class_client: IntegrationInstance): client = class_client instance_json_file = client.read_from_file( - '/run/cloud-init/instance-data.json') + "/run/cloud-init/instance-data.json" + ) data = json.loads(instance_json_file) self._check_common_metadata(data) - v1_data = data['v1'] - assert v1_data['cloud_name'] == 'unknown' - assert v1_data['platform'] == 'lxd' - assert any([ - '/var/lib/cloud/seed/nocloud-net' in v1_data['subplatform'], - '/dev/sr0' in v1_data['subplatform'] - ]) - assert v1_data['availability_zone'] is None - assert v1_data['instance_id'] == client.instance.name - assert v1_data['local_hostname'] == client.instance.name - assert v1_data['region'] is None + v1_data = data["v1"] + assert v1_data["cloud_name"] == "unknown" + assert v1_data["platform"] == "lxd" + assert any( + [ + "/var/lib/cloud/seed/nocloud-net" in v1_data["subplatform"], + "/dev/sr0" in v1_data["subplatform"], + ] + ) + assert v1_data["availability_zone"] is None + assert v1_data["instance_id"] == client.instance.name + assert v1_data["local_hostname"] == client.instance.name + assert v1_data["region"] is None @pytest.mark.ec2 def test_instance_json_ec2(self, class_client: IntegrationInstance): client = class_client instance_json_file = client.read_from_file( - '/run/cloud-init/instance-data.json') + "/run/cloud-init/instance-data.json" + ) data = json.loads(instance_json_file) - v1_data = data['v1'] - assert v1_data['cloud_name'] == 'aws' - assert v1_data['platform'] == 'ec2' - assert v1_data['subplatform'].startswith('metadata') - assert v1_data[ - 'availability_zone'] == client.instance.availability_zone - assert v1_data['instance_id'] == client.instance.name - assert v1_data['local_hostname'].startswith('ip-') - assert v1_data['region'] == client.cloud.cloud_instance.region + v1_data = data["v1"] + assert v1_data["cloud_name"] == "aws" + assert v1_data["platform"] == "ec2" + assert v1_data["subplatform"].startswith("metadata") + assert ( + v1_data["availability_zone"] == client.instance.availability_zone + ) + assert v1_data["instance_id"] == client.instance.name + assert v1_data["local_hostname"].startswith("ip-") + assert v1_data["region"] == client.cloud.cloud_instance.region @pytest.mark.gce def test_instance_json_gce(self, class_client: IntegrationInstance): diff --git a/tests/integration_tests/modules/test_command_output.py b/tests/integration_tests/modules/test_command_output.py index 8429873f..96525cac 100644 --- a/tests/integration_tests/modules/test_command_output.py +++ b/tests/integration_tests/modules/test_command_output.py @@ -8,7 +8,6 @@ import pytest from tests.integration_tests.instances import IntegrationInstance - USER_DATA = """\ #cloud-config output: { all: "| tee -a /var/log/cloud-init-test-output" } @@ -18,5 +17,5 @@ final_message: "should be last line in cloud-init-test-output file" @pytest.mark.user_data(USER_DATA) def test_runcmd(client: IntegrationInstance): - log = client.read_from_file('/var/log/cloud-init-test-output') - assert 'should be last line in cloud-init-test-output file' in log + log = client.read_from_file("/var/log/cloud-init-test-output") + assert "should be last line in cloud-init-test-output file" in log diff --git a/tests/integration_tests/modules/test_disk_setup.py b/tests/integration_tests/modules/test_disk_setup.py index 9c9edc46..22277331 100644 --- a/tests/integration_tests/modules/test_disk_setup.py +++ b/tests/integration_tests/modules/test_disk_setup.py @@ -1,25 +1,29 @@ import json import os 
-import pytest from uuid import uuid4 + +import pytest from pycloudlib.lxd.instance import LXDInstance from cloudinit.subp import subp from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.util import verify_clean_log -DISK_PATH = '/tmp/test_disk_setup_{}'.format(uuid4()) +DISK_PATH = "/tmp/test_disk_setup_{}".format(uuid4()) def setup_and_mount_lxd_disk(instance: LXDInstance): - subp('lxc config device add {} test-disk-setup-disk disk source={}'.format( - instance.name, DISK_PATH).split()) + subp( + "lxc config device add {} test-disk-setup-disk disk source={}".format( + instance.name, DISK_PATH + ).split() + ) @pytest.yield_fixture def create_disk(): # 640k should be enough for anybody - subp('dd if=/dev/zero of={} bs=1k count=640'.format(DISK_PATH).split()) + subp("dd if=/dev/zero of={} bs=1k count=640".format(DISK_PATH).split()) yield os.remove(DISK_PATH) @@ -54,21 +58,21 @@ class TestDeviceAliases: """Test devices aliases work on disk setup/mount""" def test_device_alias(self, create_disk, client: IntegrationInstance): - log = client.read_from_file('/var/log/cloud-init.log') + log = client.read_from_file("/var/log/cloud-init.log") assert ( - "updated disk_setup device entry 'my_alias' to '/dev/sdb'" - ) in log - assert 'changed my_alias.1 => /dev/sdb1' in log - assert 'changed my_alias.2 => /dev/sdb2' in log + "updated disk_setup device entry 'my_alias' to '/dev/sdb'" in log + ) + assert "changed my_alias.1 => /dev/sdb1" in log + assert "changed my_alias.2 => /dev/sdb2" in log verify_clean_log(log) - lsblk = json.loads(client.execute('lsblk --json')) - sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0] - assert len(sdb['children']) == 2 - assert sdb['children'][0]['name'] == 'sdb1' - assert sdb['children'][0]['mountpoint'] == '/mnt1' - assert sdb['children'][1]['name'] == 'sdb2' - assert sdb['children'][1]['mountpoint'] == '/mnt2' + lsblk = json.loads(client.execute("lsblk --json")) + sdb = [x for x in lsblk["blockdevices"] if x["name"] == "sdb"][0] + assert len(sdb["children"]) == 2 + assert sdb["children"][0]["name"] == "sdb1" + assert sdb["children"][0]["mountpoint"] == "/mnt1" + assert sdb["children"][1]["name"] == "sdb2" + assert sdb["children"][1]["mountpoint"] == "/mnt2" PARTPROBE_USERDATA = """\ @@ -121,13 +125,13 @@ class TestPartProbeAvailability: def _verify_first_disk_setup(self, client, log): verify_clean_log(log) - lsblk = json.loads(client.execute('lsblk --json')) - sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0] - assert len(sdb['children']) == 2 - assert sdb['children'][0]['name'] == 'sdb1' - assert sdb['children'][0]['mountpoint'] == '/mnt1' - assert sdb['children'][1]['name'] == 'sdb2' - assert sdb['children'][1]['mountpoint'] == '/mnt2' + lsblk = json.loads(client.execute("lsblk --json")) + sdb = [x for x in lsblk["blockdevices"] if x["name"] == "sdb"][0] + assert len(sdb["children"]) == 2 + assert sdb["children"][0]["name"] == "sdb1" + assert sdb["children"][0]["mountpoint"] == "/mnt1" + assert sdb["children"][1]["name"] == "sdb2" + assert sdb["children"][1]["mountpoint"] == "/mnt2" # Not bionic or xenial because the LXD agent gets in the way of us # changing the userdata @@ -148,13 +152,13 @@ class TestPartProbeAvailability: with a warning and a traceback. When partprobe is in use, everything should work successfully. 
""" - log = client.read_from_file('/var/log/cloud-init.log') + log = client.read_from_file("/var/log/cloud-init.log") self._verify_first_disk_setup(client, log) # Update our userdata and cloud.cfg to mount then perform new disk # setup client.write_to_file( - '/var/lib/cloud/seed/nocloud-net/user-data', + "/var/lib/cloud/seed/nocloud-net/user-data", UPDATED_PARTPROBE_USERDATA, ) client.execute( @@ -162,17 +166,17 @@ class TestPartProbeAvailability: "/etc/cloud/cloud.cfg" ) - client.execute('cloud-init clean --logs') + client.execute("cloud-init clean --logs") client.restart() # Assert new setup works as expected verify_clean_log(log) - lsblk = json.loads(client.execute('lsblk --json')) - sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0] - assert len(sdb['children']) == 1 - assert sdb['children'][0]['name'] == 'sdb1' - assert sdb['children'][0]['mountpoint'] == '/mnt3' + lsblk = json.loads(client.execute("lsblk --json")) + sdb = [x for x in lsblk["blockdevices"] if x["name"] == "sdb"][0] + assert len(sdb["children"]) == 1 + assert sdb["children"][0]["name"] == "sdb1" + assert sdb["children"][0]["mountpoint"] == "/mnt3" def test_disk_setup_no_partprobe( self, create_disk, client: IntegrationInstance @@ -180,11 +184,11 @@ class TestPartProbeAvailability: """Ensure disk setup still works as expected without partprobe.""" # We can't do this part in a bootcmd because the path has already # been found by the time we get to the bootcmd - client.execute('rm $(which partprobe)') - client.execute('cloud-init clean --logs') + client.execute("rm $(which partprobe)") + client.execute("cloud-init clean --logs") client.restart() - log = client.read_from_file('/var/log/cloud-init.log') + log = client.read_from_file("/var/log/cloud-init.log") self._verify_first_disk_setup(client, log) - assert 'partprobe' not in log + assert "partprobe" not in log diff --git a/tests/integration_tests/modules/test_growpart.py b/tests/integration_tests/modules/test_growpart.py index af1e3a15..67251817 100644 --- a/tests/integration_tests/modules/test_growpart.py +++ b/tests/integration_tests/modules/test_growpart.py @@ -1,22 +1,26 @@ +import json import os -import pytest import pathlib -import json from uuid import uuid4 + +import pytest from pycloudlib.lxd.instance import LXDInstance from cloudinit.subp import subp from tests.integration_tests.instances import IntegrationInstance -DISK_PATH = '/tmp/test_disk_setup_{}'.format(uuid4()) +DISK_PATH = "/tmp/test_disk_setup_{}".format(uuid4()) def setup_and_mount_lxd_disk(instance: LXDInstance): - subp('lxc config device add {} test-disk-setup-disk disk source={}'.format( - instance.name, DISK_PATH).split()) + subp( + "lxc config device add {} test-disk-setup-disk disk source={}".format( + instance.name, DISK_PATH + ).split() + ) -@pytest.fixture(scope='class', autouse=True) +@pytest.fixture(scope="class", autouse=True) def create_disk(): """Create 16M sparse file""" pathlib.Path(DISK_PATH).touch() @@ -50,13 +54,15 @@ class TestGrowPart: """Test growpart""" def test_grow_part(self, client: IntegrationInstance): - """Verify """ - log = client.read_from_file('/var/log/cloud-init.log') - assert ("cc_growpart.py[INFO]: '/dev/sdb1' resized:" - " changed (/dev/sdb, 1) from") in log - - lsblk = json.loads(client.execute('lsblk --json')) - sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0] - assert len(sdb['children']) == 1 - assert sdb['children'][0]['name'] == 'sdb1' - assert sdb['size'] == '16M' + """Verify""" + log = 
client.read_from_file("/var/log/cloud-init.log") + assert ( + "cc_growpart.py[INFO]: '/dev/sdb1' resized:" + " changed (/dev/sdb, 1) from" in log + ) + + lsblk = json.loads(client.execute("lsblk --json")) + sdb = [x for x in lsblk["blockdevices"] if x["name"] == "sdb"][0] + assert len(sdb["children"]) == 1 + assert sdb["children"][0]["name"] == "sdb1" + assert sdb["size"] == "16M" diff --git a/tests/integration_tests/modules/test_hotplug.py b/tests/integration_tests/modules/test_hotplug.py index f5abc86f..0bad761e 100644 --- a/tests/integration_tests/modules/test_hotplug.py +++ b/tests/integration_tests/modules/test_hotplug.py @@ -1,8 +1,9 @@ -import pytest import time -import yaml from collections import namedtuple +import pytest +import yaml + from tests.integration_tests.instances import IntegrationInstance USER_DATA = """\ @@ -12,28 +13,28 @@ updates: when: ['hotplug'] """ -ip_addr = namedtuple('ip_addr', 'interface state ip4 ip6') +ip_addr = namedtuple("ip_addr", "interface state ip4 ip6") def _wait_till_hotplug_complete(client, expected_runs=1): for _ in range(60): - log = client.read_from_file('/var/log/cloud-init.log') - if log.count('Exiting hotplug handler') == expected_runs: + log = client.read_from_file("/var/log/cloud-init.log") + if log.count("Exiting hotplug handler") == expected_runs: return log time.sleep(1) - raise Exception('Waiting for hotplug handler failed') + raise Exception("Waiting for hotplug handler failed") def _get_ip_addr(client): ips = [] - lines = client.execute('ip --brief addr').split('\n') + lines = client.execute("ip --brief addr").split("\n") for line in lines: attributes = line.split() interface, state = attributes[0], attributes[1] ip4_cidr = attributes[2] if len(attributes) > 2 else None ip6_cidr = attributes[3] if len(attributes) > 3 else None - ip4 = ip4_cidr.split('/')[0] if ip4_cidr else None - ip6 = ip6_cidr.split('/')[0] if ip6_cidr else None + ip4 = ip4_cidr.split("/")[0] if ip4_cidr else None + ip6 = ip6_cidr.split("/")[0] if ip6_cidr else None ip = ip_addr(interface, state, ip4, ip6) ips.append(ip) return ips @@ -47,10 +48,10 @@ def _get_ip_addr(client): @pytest.mark.user_data(USER_DATA) def test_hotplug_add_remove(client: IntegrationInstance): ips_before = _get_ip_addr(client) - log = client.read_from_file('/var/log/cloud-init.log') - assert 'Exiting hotplug handler' not in log + log = client.read_from_file("/var/log/cloud-init.log") + assert "Exiting hotplug handler" not in log assert client.execute( - 'test -f /etc/udev/rules.d/10-cloud-init-hook-hotplug.rules' + "test -f /etc/udev/rules.d/10-cloud-init-hook-hotplug.rules" ).ok # Add new NIC @@ -62,11 +63,11 @@ def test_hotplug_add_remove(client: IntegrationInstance): assert len(ips_after_add) == len(ips_before) + 1 assert added_ip not in [ip.ip4 for ip in ips_before] assert added_ip in [ip.ip4 for ip in ips_after_add] - assert new_addition.state == 'UP' + assert new_addition.state == "UP" - netplan_cfg = client.read_from_file('/etc/netplan/50-cloud-init.yaml') + netplan_cfg = client.read_from_file("/etc/netplan/50-cloud-init.yaml") config = yaml.safe_load(netplan_cfg) - assert new_addition.interface in config['network']['ethernets'] + assert new_addition.interface in config["network"]["ethernets"] # Remove new NIC client.instance.remove_network_interface(added_ip) @@ -75,37 +76,37 @@ def test_hotplug_add_remove(client: IntegrationInstance): assert len(ips_after_remove) == len(ips_before) assert added_ip not in [ip.ip4 for ip in ips_after_remove] - netplan_cfg = 
client.read_from_file('/etc/netplan/50-cloud-init.yaml') + netplan_cfg = client.read_from_file("/etc/netplan/50-cloud-init.yaml") config = yaml.safe_load(netplan_cfg) - assert new_addition.interface not in config['network']['ethernets'] + assert new_addition.interface not in config["network"]["ethernets"] - assert 'enabled' == client.execute( - 'cloud-init devel hotplug-hook -s net query' + assert "enabled" == client.execute( + "cloud-init devel hotplug-hook -s net query" ) @pytest.mark.openstack def test_no_hotplug_in_userdata(client: IntegrationInstance): ips_before = _get_ip_addr(client) - log = client.read_from_file('/var/log/cloud-init.log') - assert 'Exiting hotplug handler' not in log + log = client.read_from_file("/var/log/cloud-init.log") + assert "Exiting hotplug handler" not in log assert client.execute( - 'test -f /etc/udev/rules.d/10-cloud-init-hook-hotplug.rules' + "test -f /etc/udev/rules.d/10-cloud-init-hook-hotplug.rules" ).failed # Add new NIC client.instance.add_network_interface() - log = client.read_from_file('/var/log/cloud-init.log') - assert 'hotplug-hook' not in log + log = client.read_from_file("/var/log/cloud-init.log") + assert "hotplug-hook" not in log ips_after_add = _get_ip_addr(client) if len(ips_after_add) == len(ips_before) + 1: # We can see the device, but it should not have been brought up new_ip = [ip for ip in ips_after_add if ip not in ips_before][0] - assert new_ip.state == 'DOWN' + assert new_ip.state == "DOWN" else: assert len(ips_after_add) == len(ips_before) - assert 'disabled' == client.execute( - 'cloud-init devel hotplug-hook -s net query' + assert "disabled" == client.execute( + "cloud-init devel hotplug-hook -s net query" ) diff --git a/tests/integration_tests/modules/test_jinja_templating.py b/tests/integration_tests/modules/test_jinja_templating.py index fe8eff1a..7788c6f0 100644 --- a/tests/integration_tests/modules/test_jinja_templating.py +++ b/tests/integration_tests/modules/test_jinja_templating.py @@ -4,7 +4,6 @@ import pytest from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.util import verify_ordered_items_in_text - USER_DATA = """\ ## template: jinja #cloud-config @@ -23,12 +22,12 @@ def test_runcmd_with_variable_substitution(client: IntegrationInstance): we can also substitute variables from instance-data-sensitive LP: #1931392. 
""" - hostname = client.execute('hostname').stdout.strip() + hostname = client.execute("hostname").stdout.strip() expected = [ hostname, - ('Merged cloud-init system config from /etc/cloud/cloud.cfg and ' - '/etc/cloud/cloud.cfg.d/'), - hostname + "Merged cloud-init system config from /etc/cloud/cloud.cfg and " + "/etc/cloud/cloud.cfg.d/", + hostname, ] - output = client.read_from_file('/var/tmp/runcmd_output') + output = client.read_from_file("/var/tmp/runcmd_output") verify_ordered_items_in_text(expected, output) diff --git a/tests/integration_tests/modules/test_keys_to_console.py b/tests/integration_tests/modules/test_keys_to_console.py index e79db3c7..50899982 100644 --- a/tests/integration_tests/modules/test_keys_to_console.py +++ b/tests/integration_tests/modules/test_keys_to_console.py @@ -36,6 +36,7 @@ users: @pytest.mark.user_data(BLACKLIST_USER_DATA) class TestKeysToConsoleBlacklist: """Test that the blacklist options work as expected.""" + @pytest.mark.parametrize("key_type", ["DSA", "ECDSA"]) def test_excluded_keys(self, class_client, key_type): syslog = class_client.read_from_file("/var/log/syslog") @@ -55,6 +56,7 @@ class TestAllKeysToConsoleBlacklist: """Test that when key blacklist contains all key types that no header/footer are output. """ + def test_header_excluded(self, class_client): syslog = class_client.read_from_file("/var/log/syslog") assert "BEGIN SSH HOST KEY FINGERPRINTS" not in syslog @@ -67,6 +69,7 @@ class TestAllKeysToConsoleBlacklist: @pytest.mark.user_data(DISABLED_USER_DATA) class TestKeysToConsoleDisabled: """Test that output can be fully disabled.""" + @pytest.mark.parametrize("key_type", ["DSA", "ECDSA", "ED25519", "RSA"]) def test_keys_excluded(self, class_client, key_type): syslog = class_client.read_from_file("/var/log/syslog") @@ -90,7 +93,7 @@ class TestKeysToConsoleEnabled: """Test that output can be enabled disabled.""" def test_duplicate_messaging_console_log(self, class_client): - class_client.execute('cloud-init status --wait --long').ok + class_client.execute("cloud-init status --wait --long").ok try: console_log = class_client.instance.console_log() except NotImplementedError: @@ -98,13 +101,13 @@ class TestKeysToConsoleEnabled: # log pytest.skip("NotImplementedError when requesting console log") return - if console_log.lower() == 'no console output': + if console_log.lower() == "no console output": # This test retries because we might not have the full console log # on the first fetch. However, if we have no console output # at all, we don't want to keep retrying as that would trigger # another 5 minute wait on the pycloudlib side, which could # leave us waiting for a couple hours - pytest.fail('no console output') + pytest.fail("no console output") return msg = "no authorized SSH keys fingerprints found for user barfoo." 
assert 1 == console_log.count(msg) diff --git a/tests/integration_tests/modules/test_lxd_bridge.py b/tests/integration_tests/modules/test_lxd_bridge.py index 65dce3c7..2cb3f4f3 100644 --- a/tests/integration_tests/modules/test_lxd_bridge.py +++ b/tests/integration_tests/modules/test_lxd_bridge.py @@ -8,7 +8,6 @@ import yaml from tests.integration_tests.util import verify_clean_log - USER_DATA = """\ #cloud-config lxd: @@ -29,7 +28,6 @@ lxd: @pytest.mark.no_container @pytest.mark.user_data(USER_DATA) class TestLxdBridge: - @pytest.mark.parametrize("binary_name", ["lxc", "lxd"]) def test_binaries_installed(self, class_client, binary_name): """Check that the expected LXD binaries are installed""" diff --git a/tests/integration_tests/modules/test_ntp_servers.py b/tests/integration_tests/modules/test_ntp_servers.py index c777a641..fc62e63b 100644 --- a/tests/integration_tests/modules/test_ntp_servers.py +++ b/tests/integration_tests/modules/test_ntp_servers.py @@ -9,8 +9,8 @@ and ``tests/cloud_tests/testcases/modules/ntp_chrony.yaml``) """ import re -import yaml import pytest +import yaml from tests.integration_tests.instances import IntegrationInstance @@ -33,13 +33,13 @@ EXPECTED_POOLS = yaml.safe_load(USER_DATA)["ntp"]["pools"] @pytest.mark.user_data(USER_DATA) class TestNtpServers: - def test_ntp_installed(self, class_client: IntegrationInstance): """Test that `ntpd --version` succeeds, indicating installation.""" assert class_client.execute("ntpd --version").ok - def test_dist_config_file_is_empty(self, - class_client: IntegrationInstance): + def test_dist_config_file_is_empty( + self, class_client: IntegrationInstance + ): """Test that the distributed config file is empty. (This test is skipped on all currently supported Ubuntu releases, so @@ -56,13 +56,13 @@ class TestNtpServers: assert re.search( r"^server {} iburst".format(expected_server), ntp_conf, - re.MULTILINE + re.MULTILINE, ) for expected_pool in EXPECTED_POOLS: assert re.search( r"^pool {} iburst".format(expected_pool), ntp_conf, - re.MULTILINE + re.MULTILINE, ) def test_ntpq_servers(self, class_client: IntegrationInstance): @@ -84,12 +84,12 @@ ntp: @pytest.mark.user_data(CHRONY_DATA) def test_chrony(client: IntegrationInstance): - if client.execute('test -f /etc/chrony.conf').ok: - chrony_conf = '/etc/chrony.conf' + if client.execute("test -f /etc/chrony.conf").ok: + chrony_conf = "/etc/chrony.conf" else: - chrony_conf = '/etc/chrony/chrony.conf' + chrony_conf = "/etc/chrony/chrony.conf" contents = client.read_from_file(chrony_conf) - assert 'server 172.16.15.14' in contents + assert "server 172.16.15.14" in contents TIMESYNCD_DATA = """\ @@ -105,9 +105,9 @@ ntp: @pytest.mark.user_data(TIMESYNCD_DATA) def test_timesyncd(client: IntegrationInstance): contents = client.read_from_file( - '/etc/systemd/timesyncd.conf.d/cloud-init.conf' + "/etc/systemd/timesyncd.conf.d/cloud-init.conf" ) - assert 'NTP=172.16.15.14' in contents + assert "NTP=172.16.15.14" in contents EMPTY_NTP = """\ @@ -121,8 +121,8 @@ ntp: @pytest.mark.user_data(EMPTY_NTP) def test_empty_ntp(client: IntegrationInstance): - assert client.execute('ntpd --version').ok - assert client.execute('test -f /etc/ntp.conf.dist').failed - assert 'pool.ntp.org iburst' in client.execute( + assert client.execute("ntpd --version").ok + assert client.execute("test -f /etc/ntp.conf.dist").failed + assert "pool.ntp.org iburst" in client.execute( 'grep -v "^#" /etc/ntp.conf' ) diff --git a/tests/integration_tests/modules/test_package_update_upgrade_install.py 
b/tests/integration_tests/modules/test_package_update_upgrade_install.py index 28d741bc..d668d81c 100644 --- a/tests/integration_tests/modules/test_package_update_upgrade_install.py +++ b/tests/integration_tests/modules/test_package_update_upgrade_install.py @@ -13,8 +13,8 @@ NOTE: the testcase for this looks for the command in history.log as """ import re -import pytest +import pytest USER_DATA = """\ #cloud-config @@ -29,7 +29,6 @@ package_upgrade: true @pytest.mark.ubuntu @pytest.mark.user_data(USER_DATA) class TestPackageUpdateUpgradeInstall: - def assert_package_installed(self, pkg_out, name, version=None): """Check dpkg-query --show output for matching package name. @@ -38,7 +37,8 @@ class TestPackageUpdateUpgradeInstall: version. """ pkg_match = re.search( - "^%s\t(?P<version>.*)$" % name, pkg_out, re.MULTILINE) + "^%s\t(?P<version>.*)$" % name, pkg_out, re.MULTILINE + ) if pkg_match: installed_version = pkg_match.group("version") if not version: @@ -46,8 +46,10 @@ class TestPackageUpdateUpgradeInstall: if installed_version.startswith(version): return # Success raise AssertionError( - "Expected package version %s-%s not found. Found %s" % - name, version, installed_version) + "Expected package version %s-%s not found. Found %s" % name, + version, + installed_version, + ) raise AssertionError("Package not installed: %s" % name) def test_new_packages_are_installed(self, class_client): @@ -58,11 +60,13 @@ class TestPackageUpdateUpgradeInstall: def test_packages_were_updated(self, class_client): out = class_client.execute( - "grep ^Commandline: /var/log/apt/history.log") + "grep ^Commandline: /var/log/apt/history.log" + ) assert ( "Commandline: /usr/bin/apt-get --option=Dpkg::Options" "::=--force-confold --option=Dpkg::options::=--force-unsafe-io " - "--assume-yes --quiet install sl tree") in out + "--assume-yes --quiet install sl tree" in out + ) def test_packages_were_upgraded(self, class_client): """Test cloud-init-output for install & upgrade stuff.""" diff --git a/tests/integration_tests/modules/test_persistence.py b/tests/integration_tests/modules/test_persistence.py index 00fdeaea..33527e1e 100644 --- a/tests/integration_tests/modules/test_persistence.py +++ b/tests/integration_tests/modules/test_persistence.py @@ -10,21 +10,23 @@ from tests.integration_tests.util import ( verify_ordered_items_in_text, ) - -PICKLE_PATH = Path('/var/lib/cloud/instance/obj.pkl') -TEST_PICKLE = ASSETS_DIR / 'trusty_with_mime.pkl' +PICKLE_PATH = Path("/var/lib/cloud/instance/obj.pkl") +TEST_PICKLE = ASSETS_DIR / "trusty_with_mime.pkl" @pytest.mark.lxd_container def test_log_message_on_missing_version_file(client: IntegrationInstance): client.push_file(TEST_PICKLE, PICKLE_PATH) client.restart() - assert client.execute('cloud-init status --wait').ok - log = client.read_from_file('/var/log/cloud-init.log') - verify_ordered_items_in_text([ - "Unable to unpickle datasource: 'MIMEMultipart' object has no " - "attribute 'policy'. Ignoring current cache.", - 'no cache found', - 'Searching for local data source', - 'SUCCESS: found local data from DataSourceNoCloud' - ], log) + assert client.execute("cloud-init status --wait").ok + log = client.read_from_file("/var/log/cloud-init.log") + verify_ordered_items_in_text( + [ + "Unable to unpickle datasource: 'MIMEMultipart' object has no " + "attribute 'policy'. 
Ignoring current cache.", + "no cache found", + "Searching for local data source", + "SUCCESS: found local data from DataSourceNoCloud", + ], + log, + ) diff --git a/tests/integration_tests/modules/test_power_state_change.py b/tests/integration_tests/modules/test_power_state_change.py index 5f3a32ac..a629029d 100644 --- a/tests/integration_tests/modules/test_power_state_change.py +++ b/tests/integration_tests/modules/test_power_state_change.py @@ -30,7 +30,7 @@ def _detect_reboot(instance: IntegrationInstance): instance.instance.wait() for _ in range(600): try: - log = instance.read_from_file('/var/log/cloud-init.log') + log = instance.read_from_file("/var/log/cloud-init.log") boot_count = log.count("running 'init-local'") if boot_count == 1: instance.instance.wait() @@ -40,11 +40,11 @@ def _detect_reboot(instance: IntegrationInstance): pass time.sleep(1) else: - raise Exception('Could not detect reboot') + raise Exception("Could not detect reboot") def _can_connect(instance): - return instance.execute('true').ok + return instance.execute("true").ok # This test is marked unstable because even though it should be able to @@ -55,36 +55,44 @@ def _can_connect(instance): @pytest.mark.ubuntu @pytest.mark.lxd_container class TestPowerChange: - @pytest.mark.parametrize('mode,delay,timeout,expected', [ - ('poweroff', 'now', '10', 'will execute: shutdown -P now msg'), - ('reboot', 'now', '0', 'will execute: shutdown -r now msg'), - ('halt', '+1', '0', 'will execute: shutdown -H +1 msg'), - ]) - def test_poweroff(self, session_cloud: IntegrationCloud, - mode, delay, timeout, expected): + @pytest.mark.parametrize( + "mode,delay,timeout,expected", + [ + ("poweroff", "now", "10", "will execute: shutdown -P now msg"), + ("reboot", "now", "0", "will execute: shutdown -r now msg"), + ("halt", "+1", "0", "will execute: shutdown -H +1 msg"), + ], + ) + def test_poweroff( + self, session_cloud: IntegrationCloud, mode, delay, timeout, expected + ): with session_cloud.launch( user_data=USER_DATA.format( - delay=delay, mode=mode, timeout=timeout, condition='true'), - launch_kwargs={'wait': False}, + delay=delay, mode=mode, timeout=timeout, condition="true" + ), + launch_kwargs={"wait": False}, ) as instance: - if mode == 'reboot': + if mode == "reboot": _detect_reboot(instance) else: instance.instance.wait_for_stop() instance.instance.start(wait=True) - log = instance.read_from_file('/var/log/cloud-init.log') + log = instance.read_from_file("/var/log/cloud-init.log") assert _can_connect(instance) lines_to_check = [ - 'Running module power-state-change', + "Running module power-state-change", expected, "running 'init-local'", - 'config-power-state-change already ran', + "config-power-state-change already ran", ] verify_ordered_items_in_text(lines_to_check, log) - @pytest.mark.user_data(USER_DATA.format(delay='0', mode='poweroff', - timeout='0', condition='false')) + @pytest.mark.user_data( + USER_DATA.format( + delay="0", mode="poweroff", timeout="0", condition="false" + ) + ) def test_poweroff_false_condition(self, client: IntegrationInstance): - log = client.read_from_file('/var/log/cloud-init.log') + log = client.read_from_file("/var/log/cloud-init.log") assert _can_connect(client) - assert 'Condition was false. Will not perform state change' in log + assert "Condition was false. 
Will not perform state change" in log diff --git a/tests/integration_tests/modules/test_puppet.py b/tests/integration_tests/modules/test_puppet.py index f40a6ca3..1bd9cee4 100644 --- a/tests/integration_tests/modules/test_puppet.py +++ b/tests/integration_tests/modules/test_puppet.py @@ -15,9 +15,9 @@ puppet: @pytest.mark.user_data(SERVICE_DATA) def test_puppet_service(client: IntegrationInstance): """Basic test that puppet gets installed and runs.""" - log = client.read_from_file('/var/log/cloud-init.log') + log = client.read_from_file("/var/log/cloud-init.log") verify_clean_log(log) - assert client.execute('systemctl is-active puppet').ok + assert client.execute("systemctl is-active puppet").ok assert "Running command ['puppet', 'agent'" not in log @@ -35,5 +35,5 @@ puppet: @pytest.mark.user_data(EXEC_DATA) def test_pupet_exec(client: IntegrationInstance): """Basic test that puppet gets installed and runs.""" - log = client.read_from_file('/var/log/cloud-init.log') + log = client.read_from_file("/var/log/cloud-init.log") assert "Running command ['puppet', 'agent', '--noop']" in log diff --git a/tests/integration_tests/modules/test_set_hostname.py b/tests/integration_tests/modules/test_set_hostname.py index e7f7f6b6..ae0aeae9 100644 --- a/tests/integration_tests/modules/test_set_hostname.py +++ b/tests/integration_tests/modules/test_set_hostname.py @@ -11,7 +11,6 @@ after the system is boot. import pytest - USER_DATA_HOSTNAME = """\ #cloud-config hostname: cloudinit2 @@ -34,7 +33,6 @@ fqdn: cloudinit2.test.io @pytest.mark.ci class TestHostname: - @pytest.mark.user_data(USER_DATA_HOSTNAME) def test_hostname(self, client): hostname_output = client.execute("hostname") @@ -59,6 +57,8 @@ class TestHostname: assert "cloudinit2.i9n.cloud-init.io" in fqdn_output.strip() host_output = client.execute("grep ^127 /etc/hosts") - assert '127.0.1.1 {} {}'.format( - fqdn_output, hostname_output) in host_output - assert '127.0.0.1 localhost' in host_output + assert ( + "127.0.1.1 {} {}".format(fqdn_output, hostname_output) + in host_output + ) + assert "127.0.0.1 localhost" in host_output diff --git a/tests/integration_tests/modules/test_set_password.py b/tests/integration_tests/modules/test_set_password.py index ac9db19d..e0f8b692 100644 --- a/tests/integration_tests/modules/test_set_password.py +++ b/tests/integration_tests/modules/test_set_password.py @@ -15,7 +15,6 @@ import yaml from tests.integration_tests.util import retry - COMMON_USER_DATA = """\ #cloud-config ssh_pwauth: yes @@ -42,7 +41,9 @@ Uh69tP4GSrGW5XKHxMLiKowJgm/" lock_passwd: false """ -LIST_USER_DATA = COMMON_USER_DATA + """ +LIST_USER_DATA = ( + COMMON_USER_DATA + + """ chpasswd: list: - tom:mypassword123! @@ -50,8 +51,11 @@ chpasswd: - harry:RANDOM - mikey:$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89 """ +) -STRING_USER_DATA = COMMON_USER_DATA + """ +STRING_USER_DATA = ( + COMMON_USER_DATA + + """ chpasswd: list: | tom:mypassword123! @@ -59,6 +63,7 @@ chpasswd: harry:RANDOM mikey:$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89 """ +) USERS_DICTS = yaml.safe_load(COMMON_USER_DATA)["users"] USERS_PASSWD_VALUES = { @@ -141,13 +146,13 @@ class Mixin: # log pytest.skip("NotImplementedError when requesting console log") return - if console_log.lower() == 'no console output': + if console_log.lower() == "no console output": # This test retries because we might not have the full console log # on the first fetch. 
However, if we have no console output # at all, we don't want to keep retrying as that would trigger # another 5 minute wait on the pycloudlib side, which could # leave us waiting for a couple hours - pytest.fail('no console output') + pytest.fail("no console output") return assert "dick:" in console_log assert "harry:" in console_log diff --git a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py index cf14d0b0..89b49576 100644 --- a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py +++ b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py @@ -14,7 +14,6 @@ import pytest from tests.integration_tests.util import retry - USER_DATA_SSH_AUTHKEY_DISABLE = """\ #cloud-config no_ssh_fingerprints: true @@ -32,13 +31,13 @@ ssh_authorized_keys: @pytest.mark.ci class TestSshAuthkeyFingerprints: - @pytest.mark.user_data(USER_DATA_SSH_AUTHKEY_DISABLE) def test_ssh_authkey_fingerprints_disable(self, client): cloudinit_output = client.read_from_file("/var/log/cloud-init.log") assert ( "Skipping module named ssh-authkey-fingerprints, " - "logging of SSH fingerprints disabled") in cloudinit_output + "logging of SSH fingerprints disabled" in cloudinit_output + ) # retry decorator here because it can take some time to be reflected # in syslog @@ -47,7 +46,7 @@ class TestSshAuthkeyFingerprints: def test_ssh_authkey_fingerprints_enable(self, client): syslog_output = client.read_from_file("/var/log/syslog") - assert re.search(r'256 SHA256:.*(ECDSA)', syslog_output) is not None - assert re.search(r'256 SHA256:.*(ED25519)', syslog_output) is not None - assert re.search(r'1024 SHA256:.*(DSA)', syslog_output) is None - assert re.search(r'2048 SHA256:.*(RSA)', syslog_output) is None + assert re.search(r"256 SHA256:.*(ECDSA)", syslog_output) is not None + assert re.search(r"256 SHA256:.*(ED25519)", syslog_output) is not None + assert re.search(r"1024 SHA256:.*(DSA)", syslog_output) is None + assert re.search(r"2048 SHA256:.*(RSA)", syslog_output) is None diff --git a/tests/integration_tests/modules/test_ssh_generate.py b/tests/integration_tests/modules/test_ssh_generate.py index 60c36982..1dd0adf1 100644 --- a/tests/integration_tests/modules/test_ssh_generate.py +++ b/tests/integration_tests/modules/test_ssh_generate.py @@ -10,7 +10,6 @@ keys were created. 
import pytest - USER_DATA = """\ #cloud-config ssh_genkeytypes: @@ -23,28 +22,27 @@ authkey_hash: sha512 @pytest.mark.ci @pytest.mark.user_data(USER_DATA) class TestSshKeysGenerate: - @pytest.mark.parametrize( - "ssh_key_path", ( + "ssh_key_path", + ( "/etc/ssh/ssh_host_dsa_key.pub", "/etc/ssh/ssh_host_dsa_key", "/etc/ssh/ssh_host_rsa_key.pub", "/etc/ssh/ssh_host_rsa_key", - ) + ), ) def test_ssh_keys_not_generated(self, ssh_key_path, class_client): - out = class_client.execute( - "test -e {}".format(ssh_key_path) - ) + out = class_client.execute("test -e {}".format(ssh_key_path)) assert out.failed @pytest.mark.parametrize( - "ssh_key_path", ( + "ssh_key_path", + ( "/etc/ssh/ssh_host_ecdsa_key.pub", "/etc/ssh/ssh_host_ecdsa_key", "/etc/ssh/ssh_host_ed25519_key.pub", "/etc/ssh/ssh_host_ed25519_key", - ) + ), ) def test_ssh_keys_generated(self, ssh_key_path, class_client): out = class_client.read_from_file(ssh_key_path) diff --git a/tests/integration_tests/modules/test_ssh_keys_provided.py b/tests/integration_tests/modules/test_ssh_keys_provided.py index 6aae96ae..b79f18eb 100644 --- a/tests/integration_tests/modules/test_ssh_keys_provided.py +++ b/tests/integration_tests/modules/test_ssh_keys_provided.py @@ -9,7 +9,6 @@ system. import pytest - USER_DATA = """\ #cloud-config disable_root: false @@ -82,44 +81,33 @@ ssh_keys: @pytest.mark.ci @pytest.mark.user_data(USER_DATA) class TestSshKeysProvided: - @pytest.mark.parametrize( "config_path,expected_out", ( ( "/etc/ssh/ssh_host_dsa_key.pub", - ( - "AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4R" - "ZS8cNM4ZpeuE5UB/Nnr6OSU/nmbO8LuM" - ), + "AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4R" + "ZS8cNM4ZpeuE5UB/Nnr6OSU/nmbO8LuM", ), ( "/etc/ssh/ssh_host_dsa_key", - ( - "MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXr" - "hOVAfzZ6+jklP" - ), + "MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXr" + "hOVAfzZ6+jklP", ), ( "/etc/ssh/ssh_host_rsa_key.pub", - ( - "AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgT" - "LnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4" - ), + "AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgT" + "LnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4", ), ( "/etc/ssh/ssh_host_rsa_key", - ( - "4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2un" - "RQvLZpMRdywBm" - ), + "4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2un" + "RQvLZpMRdywBm", ), ( "/etc/ssh/ssh_host_rsa_key-cert.pub", - ( - "AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgMpg" - "BP4Phn3L8I7Vqh7lmHKcOfIokEvSEbHDw83Y3JloAAAAD" - ), + "AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgMpg" + "BP4Phn3L8I7Vqh7lmHKcOfIokEvSEbHDw83Y3JloAAAAD", ), ( "/etc/ssh/sshd_config", @@ -127,33 +115,25 @@ class TestSshKeysProvided: ), ( "/etc/ssh/ssh_host_ecdsa_key.pub", - ( - "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAAB" - "BBFsS5Tvky/IC/dXhE/afxxU" - ), + "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAAB" + "BBFsS5Tvky/IC/dXhE/afxxU", ), ( "/etc/ssh/ssh_host_ecdsa_key", - ( - "AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY" - "5mpZqxgX4vcgb" - ), + "AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY" + "5mpZqxgX4vcgb", ), ( "/etc/ssh/ssh_host_ed25519_key.pub", - ( - "AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6" - "G15dqjQ2XkNVOEnb5" - ), + "AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6" + "G15dqjQ2XkNVOEnb5", ), ( "/etc/ssh/ssh_host_ed25519_key", - ( - "XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNT" - "OhteXao0Nl5DVThJ2+Q" - ), + "XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNT" + "OhteXao0Nl5DVThJ2+Q", ), - ) + ), ) def 
test_ssh_provided_keys(self, config_path, expected_out, class_client): out = class_client.read_from_file(config_path).strip() diff --git a/tests/integration_tests/modules/test_ssh_keysfile.py b/tests/integration_tests/modules/test_ssh_keysfile.py index b39454e6..8330a1ce 100644 --- a/tests/integration_tests/modules/test_ssh_keysfile.py +++ b/tests/integration_tests/modules/test_ssh_keysfile.py @@ -1,15 +1,16 @@ +from io import StringIO + import paramiko import pytest -from io import StringIO from paramiko.ssh_exception import SSHException from tests.integration_tests.clouds import ImageSpecification from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.util import get_test_rsa_keypair -TEST_USER1_KEYS = get_test_rsa_keypair('test1') -TEST_USER2_KEYS = get_test_rsa_keypair('test2') -TEST_DEFAULT_KEYS = get_test_rsa_keypair('test3') +TEST_USER1_KEYS = get_test_rsa_keypair("test1") +TEST_USER2_KEYS = get_test_rsa_keypair("test2") +TEST_DEFAULT_KEYS = get_test_rsa_keypair("test3") _USERDATA = """\ #cloud-config @@ -26,7 +27,7 @@ users: ssh_authorized_keys: - {user2} """.format( - bootcmd='{bootcmd}', + bootcmd="{bootcmd}", default=TEST_DEFAULT_KEYS.public_key, user1=TEST_USER1_KEYS.public_key, user2=TEST_USER2_KEYS.public_key, @@ -37,9 +38,9 @@ def common_verify(client, expected_keys): for user, filename, keys in expected_keys: # Ensure key is in the key file contents = client.read_from_file(filename) - if user in ['ubuntu', 'root']: - lines = contents.split('\n') - if user == 'root': + if user in ["ubuntu", "root"]: + lines = contents.split("\n") + if user == "root": # Our personal public key gets added by pycloudlib in # addition to the default `ssh_authorized_keys` assert len(lines) == 2 @@ -54,8 +55,9 @@ def common_verify(client, expected_keys): # Ensure we can actually connect ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - paramiko_key = paramiko.RSAKey.from_private_key(StringIO( - keys.private_key)) + paramiko_key = paramiko.RSAKey.from_private_key( + StringIO(keys.private_key) + ) # Will fail with AuthenticationException if # we cannot connect @@ -71,8 +73,11 @@ def common_verify(client, expected_keys): other_users = [u[0] for u in expected_keys if u[2] != keys] for other_user in other_users: with pytest.raises(SSHException): - print('trying to connect as {} with key from {}'.format( - other_user, user)) + print( + "trying to connect as {} with key from {}".format( + other_user, user + ) + ) ssh.connect( client.instance.ip, username=other_user, @@ -83,37 +88,38 @@ def common_verify(client, expected_keys): # Ensure we haven't messed with any /home permissions # See LP: #1940233 - home_dir = '/home/{}'.format(user) + home_dir = "/home/{}".format(user) # Home permissions aren't consistent between releases. On ubuntu # this can change to 750 once focal is unsupported. 
if ImageSpecification.from_os_image().release in ("bionic", "focal"): - home_perms = '755' + home_perms = "755" else: - home_perms = '750' - if user == 'root': - home_dir = '/root' - home_perms = '700' - assert '{} {}'.format(user, home_perms) == client.execute( + home_perms = "750" + if user == "root": + home_dir = "/root" + home_perms = "700" + assert "{} {}".format(user, home_perms) == client.execute( 'stat -c "%U %a" {}'.format(home_dir) ) if client.execute("test -d {}/.ssh".format(home_dir)).ok: - assert '{} 700'.format(user) == client.execute( + assert "{} 700".format(user) == client.execute( 'stat -c "%U %a" {}/.ssh'.format(home_dir) ) - assert '{} 600'.format(user) == client.execute( + assert "{} 600".format(user) == client.execute( 'stat -c "%U %a" {}'.format(filename) ) # Also ensure ssh-keygen works as expected - client.execute('mkdir {}/.ssh'.format(home_dir)) + client.execute("mkdir {}/.ssh".format(home_dir)) assert client.execute( "ssh-keygen -b 2048 -t rsa -f {}/.ssh/id_rsa -q -N ''".format( - home_dir) + home_dir + ) ).ok - assert client.execute('test -f {}/.ssh/id_rsa'.format(home_dir)) - assert client.execute('test -f {}/.ssh/id_rsa.pub'.format(home_dir)) + assert client.execute("test -f {}/.ssh/id_rsa".format(home_dir)) + assert client.execute("test -f {}/.ssh/id_rsa.pub".format(home_dir)) - assert 'root 755' == client.execute('stat -c "%U %a" /home') + assert "root 755" == client.execute('stat -c "%U %a" /home') DEFAULT_KEYS_USERDATA = _USERDATA.format(bootcmd='""') @@ -123,75 +129,96 @@ DEFAULT_KEYS_USERDATA = _USERDATA.format(bootcmd='""') @pytest.mark.user_data(DEFAULT_KEYS_USERDATA) def test_authorized_keys_default(client: IntegrationInstance): expected_keys = [ - ('test_user1', '/home/test_user1/.ssh/authorized_keys', - TEST_USER1_KEYS), - ('test_user2', '/home/test_user2/.ssh/authorized_keys', - TEST_USER2_KEYS), - ('ubuntu', '/home/ubuntu/.ssh/authorized_keys', - TEST_DEFAULT_KEYS), - ('root', '/root/.ssh/authorized_keys', TEST_DEFAULT_KEYS), + ( + "test_user1", + "/home/test_user1/.ssh/authorized_keys", + TEST_USER1_KEYS, + ), + ( + "test_user2", + "/home/test_user2/.ssh/authorized_keys", + TEST_USER2_KEYS, + ), + ("ubuntu", "/home/ubuntu/.ssh/authorized_keys", TEST_DEFAULT_KEYS), + ("root", "/root/.ssh/authorized_keys", TEST_DEFAULT_KEYS), ] common_verify(client, expected_keys) -AUTHORIZED_KEYS2_USERDATA = _USERDATA.format(bootcmd=( - "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile " - "/etc/ssh/authorized_keys %h/.ssh/authorized_keys2;' " - "/etc/ssh/sshd_config")) +AUTHORIZED_KEYS2_USERDATA = _USERDATA.format( + bootcmd=( + "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile " + "/etc/ssh/authorized_keys %h/.ssh/authorized_keys2;' " + "/etc/ssh/sshd_config" + ) +) @pytest.mark.ubuntu @pytest.mark.user_data(AUTHORIZED_KEYS2_USERDATA) def test_authorized_keys2(client: IntegrationInstance): expected_keys = [ - ('test_user1', '/home/test_user1/.ssh/authorized_keys2', - TEST_USER1_KEYS), - ('test_user2', '/home/test_user2/.ssh/authorized_keys2', - TEST_USER2_KEYS), - ('ubuntu', '/home/ubuntu/.ssh/authorized_keys2', - TEST_DEFAULT_KEYS), - ('root', '/root/.ssh/authorized_keys2', TEST_DEFAULT_KEYS), + ( + "test_user1", + "/home/test_user1/.ssh/authorized_keys2", + TEST_USER1_KEYS, + ), + ( + "test_user2", + "/home/test_user2/.ssh/authorized_keys2", + TEST_USER2_KEYS, + ), + ("ubuntu", "/home/ubuntu/.ssh/authorized_keys2", TEST_DEFAULT_KEYS), + ("root", "/root/.ssh/authorized_keys2", TEST_DEFAULT_KEYS), ] common_verify(client, expected_keys) -NESTED_KEYS_USERDATA 
= _USERDATA.format(bootcmd=( - "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile " - "/etc/ssh/authorized_keys %h/foo/bar/ssh/keys;' " - "/etc/ssh/sshd_config")) +NESTED_KEYS_USERDATA = _USERDATA.format( + bootcmd=( + "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile " + "/etc/ssh/authorized_keys %h/foo/bar/ssh/keys;' " + "/etc/ssh/sshd_config" + ) +) @pytest.mark.ubuntu @pytest.mark.user_data(NESTED_KEYS_USERDATA) def test_nested_keys(client: IntegrationInstance): expected_keys = [ - ('test_user1', '/home/test_user1/foo/bar/ssh/keys', - TEST_USER1_KEYS), - ('test_user2', '/home/test_user2/foo/bar/ssh/keys', - TEST_USER2_KEYS), - ('ubuntu', '/home/ubuntu/foo/bar/ssh/keys', - TEST_DEFAULT_KEYS), - ('root', '/root/foo/bar/ssh/keys', TEST_DEFAULT_KEYS), + ("test_user1", "/home/test_user1/foo/bar/ssh/keys", TEST_USER1_KEYS), + ("test_user2", "/home/test_user2/foo/bar/ssh/keys", TEST_USER2_KEYS), + ("ubuntu", "/home/ubuntu/foo/bar/ssh/keys", TEST_DEFAULT_KEYS), + ("root", "/root/foo/bar/ssh/keys", TEST_DEFAULT_KEYS), ] common_verify(client, expected_keys) -EXTERNAL_KEYS_USERDATA = _USERDATA.format(bootcmd=( - "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile " - "/etc/ssh/authorized_keys /etc/ssh/authorized_keys/%u/keys;' " - "/etc/ssh/sshd_config")) +EXTERNAL_KEYS_USERDATA = _USERDATA.format( + bootcmd=( + "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile " + "/etc/ssh/authorized_keys /etc/ssh/authorized_keys/%u/keys;' " + "/etc/ssh/sshd_config" + ) +) @pytest.mark.ubuntu @pytest.mark.user_data(EXTERNAL_KEYS_USERDATA) def test_external_keys(client: IntegrationInstance): expected_keys = [ - ('test_user1', '/etc/ssh/authorized_keys/test_user1/keys', - TEST_USER1_KEYS), - ('test_user2', '/etc/ssh/authorized_keys/test_user2/keys', - TEST_USER2_KEYS), - ('ubuntu', '/etc/ssh/authorized_keys/ubuntu/keys', - TEST_DEFAULT_KEYS), - ('root', '/etc/ssh/authorized_keys/root/keys', TEST_DEFAULT_KEYS), + ( + "test_user1", + "/etc/ssh/authorized_keys/test_user1/keys", + TEST_USER1_KEYS, + ), + ( + "test_user2", + "/etc/ssh/authorized_keys/test_user2/keys", + TEST_USER2_KEYS, + ), + ("ubuntu", "/etc/ssh/authorized_keys/ubuntu/keys", TEST_DEFAULT_KEYS), + ("root", "/etc/ssh/authorized_keys/root/keys", TEST_DEFAULT_KEYS), ] common_verify(client, expected_keys) diff --git a/tests/integration_tests/modules/test_user_events.py b/tests/integration_tests/modules/test_user_events.py index fffa0746..e4a4241f 100644 --- a/tests/integration_tests/modules/test_user_events.py +++ b/tests/integration_tests/modules/test_user_events.py @@ -3,8 +3,9 @@ This is currently limited to applying network config on BOOT events. 
""" -import pytest import re + +import pytest import yaml from tests.integration_tests.instances import IntegrationInstance @@ -13,16 +14,16 @@ from tests.integration_tests.instances import IntegrationInstance def _add_dummy_bridge_to_netplan(client: IntegrationInstance): # Update netplan configuration to ensure it doesn't change on reboot netplan = yaml.safe_load( - client.execute('cat /etc/netplan/50-cloud-init.yaml') + client.execute("cat /etc/netplan/50-cloud-init.yaml") ) # Just a dummy bridge to do nothing try: - netplan['network']['bridges']['dummy0'] = {'dhcp4': False} + netplan["network"]["bridges"]["dummy0"] = {"dhcp4": False} except KeyError: - netplan['network']['bridges'] = {'dummy0': {'dhcp4': False}} + netplan["network"]["bridges"] = {"dummy0": {"dhcp4": False}} dumped_netplan = yaml.dump(netplan) - client.write_to_file('/etc/netplan/50-cloud-init.yaml', dumped_netplan) + client.write_to_file("/etc/netplan/50-cloud-init.yaml", dumped_netplan) @pytest.mark.lxd_container @@ -32,19 +33,19 @@ def _add_dummy_bridge_to_netplan(client: IntegrationInstance): @pytest.mark.oci @pytest.mark.openstack def test_boot_event_disabled_by_default(client: IntegrationInstance): - log = client.read_from_file('/var/log/cloud-init.log') - if 'network config is disabled' in log: + log = client.read_from_file("/var/log/cloud-init.log") + if "network config is disabled" in log: pytest.skip("network config disabled. Test doesn't apply") - assert 'Applying network configuration' in log - assert 'dummy0' not in client.execute('ls /sys/class/net') + assert "Applying network configuration" in log + assert "dummy0" not in client.execute("ls /sys/class/net") _add_dummy_bridge_to_netplan(client) - client.execute('rm /var/log/cloud-init.log') + client.execute("rm /var/log/cloud-init.log") client.restart() - log2 = client.read_from_file('/var/log/cloud-init.log') + log2 = client.read_from_file("/var/log/cloud-init.log") - if 'cache invalid in datasource' in log2: + if "cache invalid in datasource" in log2: # Invalid cache will get cleared, meaning we'll create a new # "instance" and apply networking config, so events aren't # really relevant here @@ -53,8 +54,9 @@ def test_boot_event_disabled_by_default(client: IntegrationInstance): # We attempt to apply network config twice on every boot. # Ensure neither time works. assert 2 == len( - re.findall(r"Event Denied: scopes=\['network'\] EventType=boot[^-]", - log2) + re.findall( + r"Event Denied: scopes=\['network'\] EventType=boot[^-]", log2 + ) ) assert 2 == log2.count( "Event Denied: scopes=['network'] EventType=boot-legacy" @@ -64,30 +66,30 @@ def test_boot_event_disabled_by_default(client: IntegrationInstance): " nor datasource network update allowed" ) - assert 'dummy0' in client.execute('ls /sys/class/net') + assert "dummy0" in client.execute("ls /sys/class/net") def _test_network_config_applied_on_reboot(client: IntegrationInstance): - log = client.read_from_file('/var/log/cloud-init.log') - if 'network config is disabled' in log: + log = client.read_from_file("/var/log/cloud-init.log") + if "network config is disabled" in log: pytest.skip("network config disabled. 
Test doesn't apply") - assert 'Applying network configuration' in log - assert 'dummy0' not in client.execute('ls /sys/class/net') + assert "Applying network configuration" in log + assert "dummy0" not in client.execute("ls /sys/class/net") _add_dummy_bridge_to_netplan(client) client.execute('echo "" > /var/log/cloud-init.log') client.restart() - log = client.read_from_file('/var/log/cloud-init.log') - if 'cache invalid in datasource' in log: + log = client.read_from_file("/var/log/cloud-init.log") + if "cache invalid in datasource" in log: # Invalid cache will get cleared, meaning we'll create a new # "instance" and apply networking config, so events aren't # really relevant here pytest.skip("Test only valid for existing instances") - assert 'Event Allowed: scope=network EventType=boot' in log - assert 'Applying network configuration' in log - assert 'dummy0' not in client.execute('ls /sys/class/net') + assert "Event Allowed: scope=network EventType=boot" in log + assert "Applying network configuration" in log + assert "dummy0" not in client.execute("ls /sys/class/net") @pytest.mark.azure diff --git a/tests/integration_tests/modules/test_users_groups.py b/tests/integration_tests/modules/test_users_groups.py index bcb17b7f..3d1358ce 100644 --- a/tests/integration_tests/modules/test_users_groups.py +++ b/tests/integration_tests/modules/test_users_groups.py @@ -11,7 +11,6 @@ import pytest from tests.integration_tests.clouds import ImageSpecification from tests.integration_tests.instances import IntegrationInstance - USER_DATA = """\ #cloud-config # Add groups to the system @@ -84,7 +83,9 @@ class TestUsersGroups: assert re.search(regex, result.stdout) is not None, ( "'getent {}' resulted in '{}', " "but expected to match regex {}".format( - ' '.join(getent_args), result.stdout, regex)) + " ".join(getent_args), result.stdout, regex + ) + ) def test_user_root_in_secret(self, class_client): """Test root user is in 'secret' group.""" @@ -105,19 +106,21 @@ def test_sudoers_includedir(client: IntegrationInstance): https://github.com/canonical/cloud-init/pull/783 """ if ImageSpecification.from_os_image().release in [ - 'xenial', 'bionic', 'focal' + "xenial", + "bionic", + "focal", ]: raise pytest.skip( - 'Test requires version of sudo installed on groovy and later' + "Test requires version of sudo installed on groovy and later" ) client.execute("sed -i 's/#include/@include/g' /etc/sudoers") - sudoers = client.read_from_file('/etc/sudoers') - if '@includedir /etc/sudoers.d' not in sudoers: + sudoers = client.read_from_file("/etc/sudoers") + if "@includedir /etc/sudoers.d" not in sudoers: client.execute("echo '@includedir /etc/sudoers.d' >> /etc/sudoers") client.instance.clean() client.restart() - sudoers = client.read_from_file('/etc/sudoers') + sudoers = client.read_from_file("/etc/sudoers") - assert '#includedir' not in sudoers - assert sudoers.count('includedir /etc/sudoers.d') == 1 + assert "#includedir" not in sudoers + assert sudoers.count("includedir /etc/sudoers.d") == 1 diff --git a/tests/integration_tests/modules/test_version_change.py b/tests/integration_tests/modules/test_version_change.py index f28079d4..3168cd60 100644 --- a/tests/integration_tests/modules/test_version_change.py +++ b/tests/integration_tests/modules/test_version_change.py @@ -5,39 +5,40 @@ import pytest from tests.integration_tests.instances import IntegrationInstance from tests.integration_tests.util import ASSETS_DIR, verify_clean_log - -PICKLE_PATH = Path('/var/lib/cloud/instance/obj.pkl') -TEST_PICKLE = ASSETS_DIR 
/ 'test_version_change.pkl' +PICKLE_PATH = Path("/var/lib/cloud/instance/obj.pkl") +TEST_PICKLE = ASSETS_DIR / "test_version_change.pkl" def _assert_no_pickle_problems(log): - assert 'Failed loading pickled blob' not in log + assert "Failed loading pickled blob" not in log verify_clean_log(log) def test_reboot_without_version_change(client: IntegrationInstance): - log = client.read_from_file('/var/log/cloud-init.log') - assert 'Python version change detected' not in log - assert 'Cache compatibility status is currently unknown.' not in log + log = client.read_from_file("/var/log/cloud-init.log") + assert "Python version change detected" not in log + assert "Cache compatibility status is currently unknown." not in log _assert_no_pickle_problems(log) client.restart() - log = client.read_from_file('/var/log/cloud-init.log') - assert 'Python version change detected' not in log - assert 'Could not determine Python version used to write cache' not in log + log = client.read_from_file("/var/log/cloud-init.log") + assert "Python version change detected" not in log + assert "Could not determine Python version used to write cache" not in log _assert_no_pickle_problems(log) # Now ensure that loading a bad pickle gives us problems client.push_file(TEST_PICKLE, PICKLE_PATH) client.restart() - log = client.read_from_file('/var/log/cloud-init.log') + log = client.read_from_file("/var/log/cloud-init.log") # no cache found is an "expected" upgrade error, and # "Failed" means we're unable to load the pickle - assert any([ - 'Failed loading pickled blob from {}'.format(PICKLE_PATH) in log, - 'no cache found' in log - ]) + assert any( + [ + "Failed loading pickled blob from {}".format(PICKLE_PATH) in log, + "no cache found" in log, + ] + ) @pytest.mark.ec2 @@ -54,8 +55,8 @@ def test_cache_purged_on_version_change(client: IntegrationInstance): client.push_file(TEST_PICKLE, PICKLE_PATH) client.execute("echo '1.0' > /var/lib/cloud/data/python-version") client.restart() - log = client.read_from_file('/var/log/cloud-init.log') - assert 'Python version change detected. Purging cache' in log + log = client.read_from_file("/var/log/cloud-init.log") + assert "Python version change detected. Purging cache" in log _assert_no_pickle_problems(log) @@ -65,11 +66,11 @@ def test_log_message_on_missing_version_file(client: IntegrationInstance): client.execute("rm /var/lib/cloud/data/python-version") client.execute("rm /var/log/cloud-init.log") client.restart() - log = client.read_from_file('/var/log/cloud-init.log') - if 'no cache found' not in log: + log = client.read_from_file("/var/log/cloud-init.log") + if "no cache found" not in log: # We don't expect the python version file to exist if we have no # pre-existing cache assert ( - 'Writing python-version file. ' - 'Cache compatibility status is currently unknown.' - ) in log + "Writing python-version file. " + "Cache compatibility status is currently unknown." in log + ) diff --git a/tests/integration_tests/modules/test_write_files.py b/tests/integration_tests/modules/test_write_files.py index 1d532fac..1eb7e945 100644 --- a/tests/integration_tests/modules/test_write_files.py +++ b/tests/integration_tests/modules/test_write_files.py @@ -7,8 +7,8 @@ and then checks if those files were created during boot. 
``tests/cloud_tests/testcases/modules/write_files.yaml``.)""" import base64 -import pytest +import pytest ASCII_TEXT = "ASCII text" B64_CONTENT = base64.b64encode(ASCII_TEXT.encode("utf-8")) @@ -50,25 +50,30 @@ write_files: defer: true owner: 'myuser' permissions: '0644' -""".format(B64_CONTENT.decode("ascii")) +""".format( + B64_CONTENT.decode("ascii") +) @pytest.mark.ci @pytest.mark.user_data(USER_DATA) class TestWriteFiles: - @pytest.mark.parametrize( - "cmd,expected_out", ( + "cmd,expected_out", + ( ("file /root/file_b64", ASCII_TEXT), ("md5sum </root/file_binary", "3801184b97bb8c6e63fa0e1eae2920d7"), - ("sha256sum </root/file_binary", ( + ( + "sha256sum </root/file_binary", "2c791c4037ea5bd7e928d6a87380f8ba" - "7a803cd83d5e4f269e28f5090f0f2c9a" - )), - ("file /root/file_gzip", - "POSIX shell script, ASCII text executable"), + "7a803cd83d5e4f269e28f5090f0f2c9a", + ), + ( + "file /root/file_gzip", + "POSIX shell script, ASCII text executable", + ), ("file /root/file_text", ASCII_TEXT), - ) + ), ) def test_write_files(self, cmd, expected_out, class_client): out = class_client.execute(cmd) @@ -82,6 +87,7 @@ class TestWriteFiles: """ out = class_client.read_from_file("/home/testuser/my-file") assert "echo 'hello world!'" == out - assert class_client.execute( - 'stat -c "%U %a" /home/testuser/my-file' - ) == 'myuser 644' + assert ( + class_client.execute('stat -c "%U %a" /home/testuser/my-file') + == "myuser 644" + ) diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py index 0ba4754c..e53ea998 100644 --- a/tests/integration_tests/test_upgrade.py +++ b/tests/integration_tests/test_upgrade.py @@ -1,14 +1,14 @@ import json import logging import os + import pytest from tests.integration_tests.clouds import ImageSpecification, IntegrationCloud from tests.integration_tests.conftest import get_validated_source from tests.integration_tests.util import verify_clean_log - -LOG = logging.getLogger('integration_testing.test_upgrade') +LOG = logging.getLogger("integration_testing.test_upgrade") LOG_TEMPLATE = """\n\ === `systemd-analyze` before: @@ -46,8 +46,10 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud): if not source.installs_new_version(): pytest.skip(UNSUPPORTED_INSTALL_METHOD_MSG.format(source)) return # type checking doesn't understand that skip raises - if (ImageSpecification.from_os_image().release == 'bionic' and - session_cloud.settings.PLATFORM == 'lxd_vm'): + if ( + ImageSpecification.from_os_image().release == "bionic" + and session_cloud.settings.PLATFORM == "lxd_vm" + ): # The issues that we see on Bionic VMs don't appear anywhere # else, including when calling KVM directly. It likely has to # do with the extra lxd-agent setup happening on bionic. 
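[Aside, not part of the recorded patch.] The hunk above is typical of this commit: Black rewraps any expression that overflows the line limit, placing each operand of a split boolean condition on its own line with the operator leading, and its "magic trailing comma" keeps calls and literals exploded one element per line once a trailing comma is present. A minimal before/after sketch of both rules (variable names and values are illustrative, not taken from the diff):

release = "bionic"
platform = "lxd_vm"

# Before: a long condition wrapped by hand with a continuation indent
if (release == "bionic" and
        platform == "lxd_vm"):
    print("matched")

# After: once the line exceeds the configured length, Black splits it
# one operand per line, operator leading (as in the hunk above)
if (
    release == "bionic"
    and platform == "lxd_vm"
):
    print("matched")

# Magic trailing comma: the comma after the last element pins the dict
# in exploded form even if it would fit on one line
launch_kwargs = {
    "image_id": "released-image",  # illustrative value
}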
@@ -57,32 +59,34 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud): return launch_kwargs = { - 'image_id': session_cloud.released_image_id, + "image_id": session_cloud.released_image_id, } with session_cloud.launch( - launch_kwargs=launch_kwargs, user_data=USER_DATA, + launch_kwargs=launch_kwargs, + user_data=USER_DATA, ) as instance: # get pre values - pre_hostname = instance.execute('hostname') - pre_cloud_id = instance.execute('cloud-id') - pre_result = instance.execute('cat /run/cloud-init/result.json') - pre_network = instance.execute('cat /etc/netplan/50-cloud-init.yaml') - pre_systemd_analyze = instance.execute('systemd-analyze') - pre_systemd_blame = instance.execute('systemd-analyze blame') - pre_cloud_analyze = instance.execute('cloud-init analyze show') - pre_cloud_blame = instance.execute('cloud-init analyze blame') + pre_hostname = instance.execute("hostname") + pre_cloud_id = instance.execute("cloud-id") + pre_result = instance.execute("cat /run/cloud-init/result.json") + pre_network = instance.execute("cat /etc/netplan/50-cloud-init.yaml") + pre_systemd_analyze = instance.execute("systemd-analyze") + pre_systemd_blame = instance.execute("systemd-analyze blame") + pre_cloud_analyze = instance.execute("cloud-init analyze show") + pre_cloud_blame = instance.execute("cloud-init analyze blame") # Ensure no issues pre-upgrade - log = instance.read_from_file('/var/log/cloud-init.log') - assert not json.loads(pre_result)['v1']['errors'] + log = instance.read_from_file("/var/log/cloud-init.log") + assert not json.loads(pre_result)["v1"]["errors"] try: verify_clean_log(log) except AssertionError: LOG.warning( - 'There were errors/warnings/tracebacks pre-upgrade. ' - 'Any failures may be due to pre-upgrade problem') + "There were errors/warnings/tracebacks pre-upgrade. " + "Any failures may be due to pre-upgrade problem" + ) # Upgrade instance.install_new_cloud_init(source, take_snapshot=False) @@ -91,27 +95,27 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud): # have broken across re-constitution of a cached datasource. Some # platforms invalidate their datasource cache on reboot, so we run # it here to ensure we get a dirty run. 
- assert instance.execute('cloud-init init').ok + assert instance.execute("cloud-init init").ok # Reboot - instance.execute('hostname something-else') + instance.execute("hostname something-else") instance.restart() - assert instance.execute('cloud-init status --wait --long').ok + assert instance.execute("cloud-init status --wait --long").ok # get post values - post_hostname = instance.execute('hostname') - post_cloud_id = instance.execute('cloud-id') - post_result = instance.execute('cat /run/cloud-init/result.json') - post_network = instance.execute('cat /etc/netplan/50-cloud-init.yaml') - post_systemd_analyze = instance.execute('systemd-analyze') - post_systemd_blame = instance.execute('systemd-analyze blame') - post_cloud_analyze = instance.execute('cloud-init analyze show') - post_cloud_blame = instance.execute('cloud-init analyze blame') + post_hostname = instance.execute("hostname") + post_cloud_id = instance.execute("cloud-id") + post_result = instance.execute("cat /run/cloud-init/result.json") + post_network = instance.execute("cat /etc/netplan/50-cloud-init.yaml") + post_systemd_analyze = instance.execute("systemd-analyze") + post_systemd_blame = instance.execute("systemd-analyze blame") + post_cloud_analyze = instance.execute("cloud-init analyze show") + post_cloud_blame = instance.execute("cloud-init analyze blame") # Ensure no issues post-upgrade - assert not json.loads(pre_result)['v1']['errors'] + assert not json.loads(pre_result)["v1"]["errors"] - log = instance.read_from_file('/var/log/cloud-init.log') + log = instance.read_from_file("/var/log/cloud-init.log") verify_clean_log(log) # Ensure important things stayed the same @@ -120,36 +124,46 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud): try: assert pre_result == post_result except AssertionError: - if instance.settings.PLATFORM == 'azure': + if instance.settings.PLATFORM == "azure": pre_json = json.loads(pre_result) post_json = json.loads(post_result) - assert pre_json['v1']['datasource'].startswith( - 'DataSourceAzure') - assert post_json['v1']['datasource'].startswith( - 'DataSourceAzure') + assert pre_json["v1"]["datasource"].startswith( + "DataSourceAzure" + ) + assert post_json["v1"]["datasource"].startswith( + "DataSourceAzure" + ) assert pre_network == post_network # Calculate and log all the boot numbers pre_analyze_totals = [ - x for x in pre_cloud_analyze.splitlines() - if x.startswith('Finished stage') or x.startswith('Total Time') + x + for x in pre_cloud_analyze.splitlines() + if x.startswith("Finished stage") or x.startswith("Total Time") ] post_analyze_totals = [ - x for x in post_cloud_analyze.splitlines() - if x.startswith('Finished stage') or x.startswith('Total Time') + x + for x in post_cloud_analyze.splitlines() + if x.startswith("Finished stage") or x.startswith("Total Time") ] # pylint: disable=logging-format-interpolation - LOG.info(LOG_TEMPLATE.format( - pre_systemd_analyze=pre_systemd_analyze, - post_systemd_analyze=post_systemd_analyze, - pre_systemd_blame='\n'.join(pre_systemd_blame.splitlines()[:10]), - post_systemd_blame='\n'.join(post_systemd_blame.splitlines()[:10]), - pre_analyze_totals='\n'.join(pre_analyze_totals), - post_analyze_totals='\n'.join(post_analyze_totals), - pre_cloud_blame='\n'.join(pre_cloud_blame.splitlines()[:10]), - post_cloud_blame='\n'.join(post_cloud_blame.splitlines()[:10]), - )) + LOG.info( + LOG_TEMPLATE.format( + pre_systemd_analyze=pre_systemd_analyze, + post_systemd_analyze=post_systemd_analyze, + pre_systemd_blame="\n".join( + 
pre_systemd_blame.splitlines()[:10] + ), + post_systemd_blame="\n".join( + post_systemd_blame.splitlines()[:10] + ), + pre_analyze_totals="\n".join(pre_analyze_totals), + post_analyze_totals="\n".join(post_analyze_totals), + pre_cloud_blame="\n".join(pre_cloud_blame.splitlines()[:10]), + post_cloud_blame="\n".join(post_cloud_blame.splitlines()[:10]), + ) + ) @pytest.mark.ci @@ -157,18 +171,18 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud): def test_subsequent_boot_of_upgraded_package(session_cloud: IntegrationCloud): source = get_validated_source(session_cloud) if not source.installs_new_version(): - if os.environ.get('TRAVIS'): + if os.environ.get("TRAVIS"): # If this isn't running on CI, we should know pytest.fail(UNSUPPORTED_INSTALL_METHOD_MSG.format(source)) else: pytest.skip(UNSUPPORTED_INSTALL_METHOD_MSG.format(source)) return # type checking doesn't understand that skip raises - launch_kwargs = {'image_id': session_cloud.released_image_id} + launch_kwargs = {"image_id": session_cloud.released_image_id} with session_cloud.launch(launch_kwargs=launch_kwargs) as instance: instance.install_new_cloud_init( source, take_snapshot=False, clean=False ) instance.restart() - assert instance.execute('cloud-init status --wait --long').ok + assert instance.execute("cloud-init status --wait --long").ok diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py index e40d80fe..31fe69c0 100644 --- a/tests/integration_tests/util.py +++ b/tests/integration_tests/util.py @@ -3,16 +3,15 @@ import logging import multiprocessing import os import time -from contextlib import contextmanager from collections import namedtuple +from contextlib import contextmanager from pathlib import Path +log = logging.getLogger("integration_testing") +key_pair = namedtuple("key_pair", "public_key private_key") -log = logging.getLogger('integration_testing') -key_pair = namedtuple('key_pair', 'public_key private_key') - -ASSETS_DIR = Path('tests/integration_tests/assets') -KEY_PATH = ASSETS_DIR / 'keys' +ASSETS_DIR = Path("tests/integration_tests/assets") +KEY_PATH = ASSETS_DIR / "keys" def verify_ordered_items_in_text(to_verify: list, text: str): @@ -30,26 +29,27 @@ def verify_ordered_items_in_text(to_verify: list, text: str): def verify_clean_log(log): """Assert no unexpected tracebacks or warnings in logs""" - warning_count = log.count('WARN') + warning_count = log.count("WARN") expected_warnings = 0 - traceback_count = log.count('Traceback') + traceback_count = log.count("Traceback") expected_tracebacks = 0 warning_texts = [ # Consistently on all Azure launches: # azure.py[WARNING]: No lease found; using default endpoint - 'No lease found; using default endpoint' + "No lease found; using default endpoint" ] traceback_texts = [] - if 'oracle' in log: + if "oracle" in log: # LP: #1842752 - lease_exists_text = 'Stderr: RTNETLINK answers: File exists' + lease_exists_text = "Stderr: RTNETLINK answers: File exists" warning_texts.append(lease_exists_text) traceback_texts.append(lease_exists_text) # LP: #1833446 fetch_error_text = ( - 'UrlError: 404 Client Error: Not Found for url: ' - 'http://169.254.169.254/latest/meta-data/') + "UrlError: 404 Client Error: Not Found for url: " + "http://169.254.169.254/latest/meta-data/" + ) warning_texts.append(fetch_error_text) traceback_texts.append(fetch_error_text) # Oracle has a file in /etc/cloud/cloud.cfg.d that contains @@ -59,7 +59,7 @@ def verify_clean_log(log): # ssh_redirect_user: true # This can trigger a warning about opc having 
no public key warning_texts.append( - 'Unable to disable SSH logins for opc given ssh_redirect_user' + "Unable to disable SSH logins for opc given ssh_redirect_user" ) for warning_text in warning_texts: @@ -82,7 +82,7 @@ def emit_dots_on_travis(): It should be wrapped selectively around operations that are known to take a long time. """ - if os.environ.get('TRAVIS') != "true": + if os.environ.get("TRAVIS") != "true": # If we aren't on Travis, don't do anything. yield return @@ -100,9 +100,9 @@ def emit_dots_on_travis(): dot_process.terminate() -def get_test_rsa_keypair(key_name: str = 'test1') -> key_pair: - private_key_path = KEY_PATH / 'id_rsa.{}'.format(key_name) - public_key_path = KEY_PATH / 'id_rsa.{}.pub'.format(key_name) +def get_test_rsa_keypair(key_name: str = "test1") -> key_pair: + private_key_path = KEY_PATH / "id_rsa.{}".format(key_name) + public_key_path = KEY_PATH / "id_rsa.{}.pub".format(key_name) with public_key_path.open() as public_file: public_key = public_file.read() with private_key_path.open() as private_file: @@ -121,6 +121,7 @@ def retry(*, tries: int = 30, delay: int = 1): def try_something_that_may_not_be_ready(): ... """ + def _retry(func): @functools.wraps(func) def wrapper(*args, **kwargs): @@ -135,5 +136,7 @@ def retry(*, tries: int = 30, delay: int = 1): else: if last_error: raise last_error + return wrapper + return _retry diff --git a/tests/unittests/__init__.py b/tests/unittests/__init__.py index d89ed443..657cb399 100644 --- a/tests/unittests/__init__.py +++ b/tests/unittests/__init__.py @@ -4,6 +4,7 @@ try: # For test cases, avoid the following UserWarning to stderr: # You don't have the C version of NameMapper installed ... from Cheetah import NameMapper as _nm + _nm.C_VERSION = True except ImportError: pass diff --git a/tests/unittests/analyze/test_boot.py b/tests/unittests/analyze/test_boot.py index fd878b44..68db69ec 100644 --- a/tests/unittests/analyze/test_boot.py +++ b/tests/unittests/analyze/test_boot.py @@ -1,89 +1,93 @@ import os -from cloudinit.analyze.__main__ import (analyze_boot, get_parser) + +from cloudinit.analyze.__main__ import analyze_boot, get_parser +from cloudinit.analyze.show import ( + CONTAINER_CODE, + FAIL_CODE, + SystemctlReader, + dist_check_timestamp, +) from tests.unittests.helpers import CiTestCase, mock -from cloudinit.analyze.show import dist_check_timestamp, SystemctlReader, \ - FAIL_CODE, CONTAINER_CODE err_code = (FAIL_CODE, -1, -1, -1) class TestDistroChecker(CiTestCase): - def test_blank_distro(self): self.assertEqual(err_code, dist_check_timestamp()) - @mock.patch('cloudinit.util.is_FreeBSD', return_value=True) + @mock.patch("cloudinit.util.is_FreeBSD", return_value=True) def test_freebsd_gentoo_cant_find(self, m_is_FreeBSD): self.assertEqual(err_code, dist_check_timestamp()) - @mock.patch('cloudinit.subp.subp', return_value=(0, 1)) + @mock.patch("cloudinit.subp.subp", return_value=(0, 1)) def test_subp_fails(self, m_subp): self.assertEqual(err_code, dist_check_timestamp()) class TestSystemCtlReader(CiTestCase): - def test_systemctl_invalid_property(self): - reader = SystemctlReader('dummyProperty') + reader = SystemctlReader("dummyProperty") with self.assertRaises(RuntimeError): reader.parse_epoch_as_float() def test_systemctl_invalid_parameter(self): - reader = SystemctlReader('dummyProperty', 'dummyParameter') + reader = SystemctlReader("dummyProperty", "dummyParameter") with self.assertRaises(RuntimeError): reader.parse_epoch_as_float() - @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None)) + 
@mock.patch("cloudinit.subp.subp", return_value=("U=1000000", None)) def test_systemctl_works_correctly_threshold(self, m_subp): - reader = SystemctlReader('dummyProperty', 'dummyParameter') + reader = SystemctlReader("dummyProperty", "dummyParameter") self.assertEqual(1.0, reader.parse_epoch_as_float()) thresh = 1.0 - reader.parse_epoch_as_float() self.assertTrue(thresh < 1e-6) self.assertTrue(thresh > (-1 * 1e-6)) - @mock.patch('cloudinit.subp.subp', return_value=('U=0', None)) + @mock.patch("cloudinit.subp.subp", return_value=("U=0", None)) def test_systemctl_succeed_zero(self, m_subp): - reader = SystemctlReader('dummyProperty', 'dummyParameter') + reader = SystemctlReader("dummyProperty", "dummyParameter") self.assertEqual(0.0, reader.parse_epoch_as_float()) - @mock.patch('cloudinit.subp.subp', return_value=('U=1', None)) + @mock.patch("cloudinit.subp.subp", return_value=("U=1", None)) def test_systemctl_succeed_distinct(self, m_subp): - reader = SystemctlReader('dummyProperty', 'dummyParameter') + reader = SystemctlReader("dummyProperty", "dummyParameter") val1 = reader.parse_epoch_as_float() - m_subp.return_value = ('U=2', None) - reader2 = SystemctlReader('dummyProperty', 'dummyParameter') + m_subp.return_value = ("U=2", None) + reader2 = SystemctlReader("dummyProperty", "dummyParameter") val2 = reader2.parse_epoch_as_float() self.assertNotEqual(val1, val2) - @mock.patch('cloudinit.subp.subp', return_value=('100', None)) + @mock.patch("cloudinit.subp.subp", return_value=("100", None)) def test_systemctl_epoch_not_splittable(self, m_subp): - reader = SystemctlReader('dummyProperty', 'dummyParameter') + reader = SystemctlReader("dummyProperty", "dummyParameter") with self.assertRaises(IndexError): reader.parse_epoch_as_float() - @mock.patch('cloudinit.subp.subp', return_value=('U=foobar', None)) + @mock.patch("cloudinit.subp.subp", return_value=("U=foobar", None)) def test_systemctl_cannot_convert_epoch_to_float(self, m_subp): - reader = SystemctlReader('dummyProperty', 'dummyParameter') + reader = SystemctlReader("dummyProperty", "dummyParameter") with self.assertRaises(ValueError): reader.parse_epoch_as_float() class TestAnalyzeBoot(CiTestCase): - def set_up_dummy_file_ci(self, path, log_path): - infh = open(path, 'w+') - infh.write('2019-07-08 17:40:49,601 - util.py[DEBUG]: Cloud-init v. ' - '19.1-1-gbaa47854-0ubuntu1~18.04.1 running \'init-local\' ' - 'at Mon, 08 Jul 2019 17:40:49 +0000. Up 18.84 seconds.') + infh = open(path, "w+") + infh.write( + "2019-07-08 17:40:49,601 - util.py[DEBUG]: Cloud-init v. " + "19.1-1-gbaa47854-0ubuntu1~18.04.1 running 'init-local' " + "at Mon, 08 Jul 2019 17:40:49 +0000. Up 18.84 seconds." 
+ ) infh.close() - outfh = open(log_path, 'w+') + outfh = open(log_path, "w+") outfh.close() def set_up_dummy_file(self, path, log_path): - infh = open(path, 'w+') - infh.write('dummy data') + infh = open(path, "w+") + infh.write("dummy data") infh.close() - outfh = open(log_path, 'w+') + outfh = open(log_path, "w+") outfh.close() def remove_dummy_file(self, path, log_path): @@ -92,46 +96,47 @@ class TestAnalyzeBoot(CiTestCase): if os.path.isfile(log_path): os.remove(log_path) - @mock.patch('cloudinit.analyze.show.dist_check_timestamp', - return_value=err_code) + @mock.patch( + "cloudinit.analyze.show.dist_check_timestamp", return_value=err_code + ) def test_boot_invalid_distro(self, m_dist_check_timestamp): path = os.path.dirname(os.path.abspath(__file__)) - log_path = path + '/boot-test.log' - path += '/dummy.log' + log_path = path + "/boot-test.log" + path += "/dummy.log" self.set_up_dummy_file(path, log_path) parser = get_parser() - args = parser.parse_args(args=['boot', '-i', path, '-o', - log_path]) - name_default = '' + args = parser.parse_args(args=["boot", "-i", path, "-o", log_path]) + name_default = "" analyze_boot(name_default, args) # now args have been tested, go into outfile and make sure error # message is in the outfile - outfh = open(args.outfile, 'r') + outfh = open(args.outfile, "r") data = outfh.read() - err_string = 'Your Linux distro or container does not support this ' \ - 'functionality.\nYou must be running a Kernel ' \ - 'Telemetry supported distro.\nPlease check ' \ - 'https://cloudinit.readthedocs.io/en/latest/topics' \ - '/analyze.html for more information on supported ' \ - 'distros.\n' + err_string = ( + "Your Linux distro or container does not support this " + "functionality.\nYou must be running a Kernel " + "Telemetry supported distro.\nPlease check " + "https://cloudinit.readthedocs.io/en/latest/topics" + "/analyze.html for more information on supported " + "distros.\n" + ) self.remove_dummy_file(path, log_path) self.assertEqual(err_string, data) @mock.patch("cloudinit.util.is_container", return_value=True) - @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None)) + @mock.patch("cloudinit.subp.subp", return_value=("U=1000000", None)) def test_container_no_ci_log_line(self, m_is_container, m_subp): path = os.path.dirname(os.path.abspath(__file__)) - log_path = path + '/boot-test.log' - path += '/dummy.log' + log_path = path + "/boot-test.log" + path += "/dummy.log" self.set_up_dummy_file(path, log_path) parser = get_parser() - args = parser.parse_args(args=['boot', '-i', path, '-o', - log_path]) - name_default = '' + args = parser.parse_args(args=["boot", "-i", path, "-o", log_path]) + name_default = "" finish_code = analyze_boot(name_default, args) @@ -139,22 +144,30 @@ class TestAnalyzeBoot(CiTestCase): self.assertEqual(FAIL_CODE, finish_code) @mock.patch("cloudinit.util.is_container", return_value=True) - @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None)) - @mock.patch('cloudinit.analyze.__main__._get_events', return_value=[{ - 'name': 'init-local', 'description': 'starting search', 'timestamp': - 100000}]) - @mock.patch('cloudinit.analyze.show.dist_check_timestamp', - return_value=(CONTAINER_CODE, 1, 1, 1)) + @mock.patch("cloudinit.subp.subp", return_value=("U=1000000", None)) + @mock.patch( + "cloudinit.analyze.__main__._get_events", + return_value=[ + { + "name": "init-local", + "description": "starting search", + "timestamp": 100000, + } + ], + ) + @mock.patch( + "cloudinit.analyze.show.dist_check_timestamp", + 
return_value=(CONTAINER_CODE, 1, 1, 1), + ) def test_container_ci_log_line(self, m_is_container, m_subp, m_get, m_g): path = os.path.dirname(os.path.abspath(__file__)) - log_path = path + '/boot-test.log' - path += '/dummy.log' + log_path = path + "/boot-test.log" + path += "/dummy.log" self.set_up_dummy_file_ci(path, log_path) parser = get_parser() - args = parser.parse_args(args=['boot', '-i', path, '-o', - log_path]) - name_default = '' + args = parser.parse_args(args=["boot", "-i", path, "-o", log_path]) + name_default = "" finish_code = analyze_boot(name_default, args) self.remove_dummy_file(path, log_path) diff --git a/tests/unittests/analyze/test_dump.py b/tests/unittests/analyze/test_dump.py index e3683bbf..56bbf97f 100644 --- a/tests/unittests/analyze/test_dump.py +++ b/tests/unittests/analyze/test_dump.py @@ -4,50 +4,54 @@ from datetime import datetime from textwrap import dedent from cloudinit.analyze.dump import ( - dump_events, parse_ci_logline, parse_timestamp) -from cloudinit.util import write_file + dump_events, + parse_ci_logline, + parse_timestamp, +) from cloudinit.subp import which +from cloudinit.util import write_file from tests.unittests.helpers import CiTestCase, mock, skipIf class TestParseTimestamp(CiTestCase): - def test_parse_timestamp_handles_cloud_init_default_format(self): """Logs with cloud-init detailed formats will be properly parsed.""" - trusty_fmt = '%Y-%m-%d %H:%M:%S,%f' - trusty_stamp = '2016-09-12 14:39:20,839' + trusty_fmt = "%Y-%m-%d %H:%M:%S,%f" + trusty_stamp = "2016-09-12 14:39:20,839" dt = datetime.strptime(trusty_stamp, trusty_fmt) self.assertEqual( - float(dt.strftime('%s.%f')), parse_timestamp(trusty_stamp)) + float(dt.strftime("%s.%f")), parse_timestamp(trusty_stamp) + ) def test_parse_timestamp_handles_syslog_adding_year(self): """Syslog timestamps lack a year. Add year and properly parse.""" - syslog_fmt = '%b %d %H:%M:%S %Y' - syslog_stamp = 'Aug 08 15:12:51' + syslog_fmt = "%b %d %H:%M:%S %Y" + syslog_stamp = "Aug 08 15:12:51" # convert stamp ourselves by adding the missing year value year = datetime.now().year dt = datetime.strptime(syslog_stamp + " " + str(year), syslog_fmt) self.assertEqual( - float(dt.strftime('%s.%f')), - parse_timestamp(syslog_stamp)) + float(dt.strftime("%s.%f")), parse_timestamp(syslog_stamp) + ) def test_parse_timestamp_handles_journalctl_format_adding_year(self): """Journalctl precise timestamps lack a year. 
Add year and parse.""" - journal_fmt = '%b %d %H:%M:%S.%f %Y' - journal_stamp = 'Aug 08 17:15:50.606811' + journal_fmt = "%b %d %H:%M:%S.%f %Y" + journal_stamp = "Aug 08 17:15:50.606811" # convert stamp ourselves by adding the missing year value year = datetime.now().year dt = datetime.strptime(journal_stamp + " " + str(year), journal_fmt) self.assertEqual( - float(dt.strftime('%s.%f')), parse_timestamp(journal_stamp)) + float(dt.strftime("%s.%f")), parse_timestamp(journal_stamp) + ) @skipIf(not which("date"), "'date' command not available.") def test_parse_unexpected_timestamp_format_with_date_command(self): """Dump sends unexpected timestamp formats to date for processing.""" - new_fmt = '%H:%M %m/%d %Y' - new_stamp = '17:15 08/08' + new_fmt = "%H:%M %m/%d %Y" + new_stamp = "17:15 08/08" # convert stamp ourselves by adding the missing year value year = datetime.now().year dt = datetime.strptime(new_stamp + " " + str(year), new_fmt) @@ -55,15 +59,20 @@ class TestParseTimestamp(CiTestCase): # use date(1) with self.allow_subp(["date"]): self.assertEqual( - float(dt.strftime('%s.%f')), parse_timestamp(new_stamp)) + float(dt.strftime("%s.%f")), parse_timestamp(new_stamp) + ) class TestParseCILogLine(CiTestCase): - def test_parse_logline_returns_none_without_separators(self): """When no separators are found, parse_ci_logline returns None.""" expected_parse_ignores = [ - '', '-', 'adsf-asdf', '2017-05-22 18:02:01,088', 'CLOUDINIT'] + "", + "-", + "adsf-asdf", + "2017-05-22 18:02:01,088", + "CLOUDINIT", + ] for parse_ignores in expected_parse_ignores: self.assertIsNone(parse_ci_logline(parse_ignores)) @@ -72,79 +81,95 @@ class TestParseCILogLine(CiTestCase): line = ( "2017-08-08 20:05:07,147 - util.py[DEBUG]: Cloud-init v. 0.7.9" " running 'init-local' at Tue, 08 Aug 2017 20:05:07 +0000. Up" - " 6.26 seconds.") + " 6.26 seconds." + ) dt = datetime.strptime( - '2017-08-08 20:05:07,147', '%Y-%m-%d %H:%M:%S,%f') - timestamp = float(dt.strftime('%s.%f')) + "2017-08-08 20:05:07,147", "%Y-%m-%d %H:%M:%S,%f" + ) + timestamp = float(dt.strftime("%s.%f")) expected = { - 'description': 'starting search for local datasources', - 'event_type': 'start', - 'name': 'init-local', - 'origin': 'cloudinit', - 'timestamp': timestamp} + "description": "starting search for local datasources", + "event_type": "start", + "name": "init-local", + "origin": "cloudinit", + "timestamp": timestamp, + } self.assertEqual(expected, parse_ci_logline(line)) def test_parse_logline_returns_event_for_journalctl_logs(self): """parse_ci_logline returns an event parse from journalctl format.""" - line = ("Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT]" - " util.py[DEBUG]: Cloud-init v. 0.7.8 running 'init-local' at" - " Thu, 03 Nov 2016 06:51:06 +0000. Up 1.0 seconds.") + line = ( + "Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT]" + " util.py[DEBUG]: Cloud-init v. 0.7.8 running 'init-local' at" + " Thu, 03 Nov 2016 06:51:06 +0000. Up 1.0 seconds." 
+ ) year = datetime.now().year dt = datetime.strptime( - 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y') - timestamp = float(dt.strftime('%s.%f')) + "Nov 03 06:51:06.074410 %d" % year, "%b %d %H:%M:%S.%f %Y" + ) + timestamp = float(dt.strftime("%s.%f")) expected = { - 'description': 'starting search for local datasources', - 'event_type': 'start', - 'name': 'init-local', - 'origin': 'cloudinit', - 'timestamp': timestamp} + "description": "starting search for local datasources", + "event_type": "start", + "name": "init-local", + "origin": "cloudinit", + "timestamp": timestamp, + } self.assertEqual(expected, parse_ci_logline(line)) @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date") - def test_parse_logline_returns_event_for_finish_events(self, - m_parse_from_date): + def test_parse_logline_returns_event_for_finish_events( + self, m_parse_from_date + ): """parse_ci_logline returns a finish event for a parsed log line.""" - line = ('2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT]' - ' handlers.py[DEBUG]: finish: modules-final: SUCCESS: running' - ' modules for final') + line = ( + "2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT]" + " handlers.py[DEBUG]: finish: modules-final: SUCCESS: running" + " modules for final" + ) expected = { - 'description': 'running modules for final', - 'event_type': 'finish', - 'name': 'modules-final', - 'origin': 'cloudinit', - 'result': 'SUCCESS', - 'timestamp': 1472594005.972} + "description": "running modules for final", + "event_type": "finish", + "name": "modules-final", + "origin": "cloudinit", + "result": "SUCCESS", + "timestamp": 1472594005.972, + } m_parse_from_date.return_value = "1472594005.972" self.assertEqual(expected, parse_ci_logline(line)) m_parse_from_date.assert_has_calls( - [mock.call("2016-08-30 21:53:25.972325+00:00")]) + [mock.call("2016-08-30 21:53:25.972325+00:00")] + ) def test_parse_logline_returns_event_for_amazon_linux_2_line(self): line = ( "Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start:" - " init-local/check-cache: attempting to read from cache [check]") + " init-local/check-cache: attempting to read from cache [check]" + ) # Generate the expected value using `datetime`, so that TZ # determination is consistent with the code under test. timestamp_dt = datetime.strptime( "Apr 30 19:39:11", "%b %d %H:%M:%S" ).replace(year=datetime.now().year) expected = { - 'description': 'attempting to read from cache [check]', - 'event_type': 'start', - 'name': 'init-local/check-cache', - 'origin': 'cloudinit', - 'timestamp': timestamp_dt.timestamp()} + "description": "attempting to read from cache [check]", + "event_type": "start", + "name": "init-local/check-cache", + "origin": "cloudinit", + "timestamp": timestamp_dt.timestamp(), + } self.assertEqual(expected, parse_ci_logline(line)) -SAMPLE_LOGS = dedent("""\ +SAMPLE_LOGS = dedent( + """\ Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]:\ Cloud-init v. 0.7.8 running 'init-local' at Thu, 03 Nov 2016\ 06:51:06 +0000. Up 1.0 seconds. 
2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT] handlers.py[DEBUG]: finish:\ modules-final: SUCCESS: running modules for final -""") +""" +) class TestDumpEvents(CiTestCase): @@ -158,51 +183,65 @@ class TestDumpEvents(CiTestCase): expected_data = SAMPLE_LOGS.splitlines() self.assertEqual( [mock.call("2016-08-30 21:53:25.972325+00:00")], - m_parse_from_date.call_args_list) + m_parse_from_date.call_args_list, + ) self.assertEqual(expected_data, data) year = datetime.now().year dt1 = datetime.strptime( - 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y') - timestamp1 = float(dt1.strftime('%s.%f')) - expected_events = [{ - 'description': 'starting search for local datasources', - 'event_type': 'start', - 'name': 'init-local', - 'origin': 'cloudinit', - 'timestamp': timestamp1}, { - 'description': 'running modules for final', - 'event_type': 'finish', - 'name': 'modules-final', - 'origin': 'cloudinit', - 'result': 'SUCCESS', - 'timestamp': 1472594005.972}] + "Nov 03 06:51:06.074410 %d" % year, "%b %d %H:%M:%S.%f %Y" + ) + timestamp1 = float(dt1.strftime("%s.%f")) + expected_events = [ + { + "description": "starting search for local datasources", + "event_type": "start", + "name": "init-local", + "origin": "cloudinit", + "timestamp": timestamp1, + }, + { + "description": "running modules for final", + "event_type": "finish", + "name": "modules-final", + "origin": "cloudinit", + "result": "SUCCESS", + "timestamp": 1472594005.972, + }, + ] self.assertEqual(expected_events, events) @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date") def test_dump_events_with_cisource(self, m_parse_from_date): """Cisource file is read and parsed into a tuple of events and data.""" - tmpfile = self.tmp_path('logfile') + tmpfile = self.tmp_path("logfile") write_file(tmpfile, SAMPLE_LOGS) m_parse_from_date.return_value = 1472594005.972 events, data = dump_events(cisource=open(tmpfile)) year = datetime.now().year dt1 = datetime.strptime( - 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y') - timestamp1 = float(dt1.strftime('%s.%f')) - expected_events = [{ - 'description': 'starting search for local datasources', - 'event_type': 'start', - 'name': 'init-local', - 'origin': 'cloudinit', - 'timestamp': timestamp1}, { - 'description': 'running modules for final', - 'event_type': 'finish', - 'name': 'modules-final', - 'origin': 'cloudinit', - 'result': 'SUCCESS', - 'timestamp': 1472594005.972}] + "Nov 03 06:51:06.074410 %d" % year, "%b %d %H:%M:%S.%f %Y" + ) + timestamp1 = float(dt1.strftime("%s.%f")) + expected_events = [ + { + "description": "starting search for local datasources", + "event_type": "start", + "name": "init-local", + "origin": "cloudinit", + "timestamp": timestamp1, + }, + { + "description": "running modules for final", + "event_type": "finish", + "name": "modules-final", + "origin": "cloudinit", + "result": "SUCCESS", + "timestamp": 1472594005.972, + }, + ] self.assertEqual(expected_events, events) self.assertEqual(SAMPLE_LOGS.splitlines(), [d.strip() for d in data]) m_parse_from_date.assert_has_calls( - [mock.call("2016-08-30 21:53:25.972325+00:00")]) + [mock.call("2016-08-30 21:53:25.972325+00:00")] + ) diff --git a/tests/unittests/cmd/devel/test_hotplug_hook.py b/tests/unittests/cmd/devel/test_hotplug_hook.py index e1c64e2f..842e8dfd 100644 --- a/tests/unittests/cmd/devel/test_hotplug_hook.py +++ b/tests/unittests/cmd/devel/test_hotplug_hook.py @@ -1,8 +1,9 @@ -import pytest from collections import namedtuple from unittest import mock from unittest.mock import call +import 
pytest + from cloudinit.cmd.devel.hotplug_hook import handle_hotplug from cloudinit.distros import Distro from cloudinit.event import EventType @@ -11,9 +12,8 @@ from cloudinit.net.network_state import NetworkState from cloudinit.sources import DataSource from cloudinit.stages import Init - -hotplug_args = namedtuple('hotplug_args', 'udevaction, subsystem, devpath') -FAKE_MAC = '11:22:33:44:55:66' +hotplug_args = namedtuple("hotplug_args", "udevaction, subsystem, devpath") +FAKE_MAC = "11:22:33:44:55:66" @pytest.yield_fixture @@ -26,28 +26,28 @@ def mocks(): m_init.fetch.return_value = m_datasource read_sys_net = mock.patch( - 'cloudinit.cmd.devel.hotplug_hook.read_sys_net_safe', - return_value=FAKE_MAC + "cloudinit.cmd.devel.hotplug_hook.read_sys_net_safe", + return_value=FAKE_MAC, ) update_event_enabled = mock.patch( - 'cloudinit.stages.update_event_enabled', + "cloudinit.stages.update_event_enabled", return_value=True, ) m_network_state = mock.MagicMock(spec=NetworkState) parse_net = mock.patch( - 'cloudinit.cmd.devel.hotplug_hook.parse_net_config_data', - return_value=m_network_state + "cloudinit.cmd.devel.hotplug_hook.parse_net_config_data", + return_value=m_network_state, ) m_activator = mock.MagicMock(spec=NetworkActivator) select_activator = mock.patch( - 'cloudinit.cmd.devel.hotplug_hook.activators.select_activator', - return_value=m_activator + "cloudinit.cmd.devel.hotplug_hook.activators.select_activator", + return_value=m_activator, ) - sleep = mock.patch('time.sleep') + sleep = mock.patch("time.sleep") read_sys_net.start() update_event_enabled.start() @@ -55,7 +55,7 @@ def mocks(): select_activator.start() m_sleep = sleep.start() - yield namedtuple('mocks', 'm_init m_network_state m_activator m_sleep')( + yield namedtuple("mocks", "m_init m_network_state m_activator m_sleep")( m_init=m_init, m_network_state=m_network_state, m_activator=m_activator, @@ -72,42 +72,43 @@ def mocks(): class TestUnsupportedActions: def test_unsupported_subsystem(self, mocks): with pytest.raises( - Exception, - match='cannot handle events for subsystem: not_real' + Exception, match="cannot handle events for subsystem: not_real" ): handle_hotplug( hotplug_init=mocks.m_init, - devpath='/dev/fake', - subsystem='not_real', - udevaction='add' + devpath="/dev/fake", + subsystem="not_real", + udevaction="add", ) def test_unsupported_udevaction(self, mocks): - with pytest.raises(ValueError, match='Unknown action: not_real'): + with pytest.raises(ValueError, match="Unknown action: not_real"): handle_hotplug( hotplug_init=mocks.m_init, - devpath='/dev/fake', - udevaction='not_real', - subsystem='net' + devpath="/dev/fake", + udevaction="not_real", + subsystem="net", ) class TestHotplug: def test_succcessful_add(self, mocks): init = mocks.m_init - mocks.m_network_state.iter_interfaces.return_value = [{ - 'mac_address': FAKE_MAC, - }] + mocks.m_network_state.iter_interfaces.return_value = [ + { + "mac_address": FAKE_MAC, + } + ] handle_hotplug( hotplug_init=init, - devpath='/dev/fake', - udevaction='add', - subsystem='net' + devpath="/dev/fake", + udevaction="add", + subsystem="net", + ) + init.datasource.update_metadata_if_supported.assert_called_once_with( + [EventType.HOTPLUG] ) - init.datasource.update_metadata_if_supported.assert_called_once_with([ - EventType.HOTPLUG - ]) - mocks.m_activator.bring_up_interface.assert_called_once_with('fake') + mocks.m_activator.bring_up_interface.assert_called_once_with("fake") mocks.m_activator.bring_down_interface.assert_not_called() 
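[Aside, not part of the recorded patch.] A second pattern dominates the remaining hunks: Black normalizes string literals to double quotes, except where switching would introduce new backslash escapes, which is why strings such as 'stat -c "%U %a" {}' earlier in this diff keep their single quotes while nearly every other -/+ pair differs only in quote style. A short sketch of the rule (values are illustrative):

# Black rewrites plain single-quoted literals to double quotes
devpath = "/dev/fake"  # was: '/dev/fake'
udevaction = "add"     # was: 'add'

# ...but leaves a literal alone when switching quotes would add escapes
stat_fmt = 'stat -c "%U %a" {}'  # unchanged: it embeds double quotes

print(devpath, udevaction, stat_fmt.format("/root"))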
init._write_to_cache.assert_called_once_with() @@ -116,113 +117,120 @@ class TestHotplug: mocks.m_network_state.iter_interfaces.return_value = [{}] handle_hotplug( hotplug_init=init, - devpath='/dev/fake', - udevaction='remove', - subsystem='net' + devpath="/dev/fake", + udevaction="remove", + subsystem="net", ) - init.datasource.update_metadata_if_supported.assert_called_once_with([ - EventType.HOTPLUG - ]) - mocks.m_activator.bring_down_interface.assert_called_once_with('fake') + init.datasource.update_metadata_if_supported.assert_called_once_with( + [EventType.HOTPLUG] + ) + mocks.m_activator.bring_down_interface.assert_called_once_with("fake") mocks.m_activator.bring_up_interface.assert_not_called() init._write_to_cache.assert_called_once_with() def test_update_event_disabled(self, mocks, caplog): init = mocks.m_init with mock.patch( - 'cloudinit.stages.update_event_enabled', - return_value=False + "cloudinit.stages.update_event_enabled", return_value=False ): handle_hotplug( hotplug_init=init, - devpath='/dev/fake', - udevaction='remove', - subsystem='net' + devpath="/dev/fake", + udevaction="remove", + subsystem="net", ) - assert 'hotplug not enabled for event of type' in caplog.text + assert "hotplug not enabled for event of type" in caplog.text init.datasource.update_metadata_if_supported.assert_not_called() mocks.m_activator.bring_up_interface.assert_not_called() mocks.m_activator.bring_down_interface.assert_not_called() init._write_to_cache.assert_not_called() def test_update_metadata_failed(self, mocks): - mocks.m_init.datasource.update_metadata_if_supported.return_value = \ + mocks.m_init.datasource.update_metadata_if_supported.return_value = ( False + ) with pytest.raises( - RuntimeError, match='Datasource .* not updated for event hotplug' + RuntimeError, match="Datasource .* not updated for event hotplug" ): handle_hotplug( hotplug_init=mocks.m_init, - devpath='/dev/fake', - udevaction='remove', - subsystem='net' + devpath="/dev/fake", + udevaction="remove", + subsystem="net", ) def test_detect_hotplugged_device_not_detected_on_add(self, mocks): mocks.m_network_state.iter_interfaces.return_value = [{}] with pytest.raises( RuntimeError, - match='Failed to detect {} in updated metadata'.format(FAKE_MAC) + match="Failed to detect {} in updated metadata".format(FAKE_MAC), ): handle_hotplug( hotplug_init=mocks.m_init, - devpath='/dev/fake', - udevaction='add', - subsystem='net' + devpath="/dev/fake", + udevaction="add", + subsystem="net", ) def test_detect_hotplugged_device_detected_on_remove(self, mocks): - mocks.m_network_state.iter_interfaces.return_value = [{ - 'mac_address': FAKE_MAC, - }] + mocks.m_network_state.iter_interfaces.return_value = [ + { + "mac_address": FAKE_MAC, + } + ] with pytest.raises( - RuntimeError, - match='Failed to detect .* in updated metadata' + RuntimeError, match="Failed to detect .* in updated metadata" ): handle_hotplug( hotplug_init=mocks.m_init, - devpath='/dev/fake', - udevaction='remove', - subsystem='net' + devpath="/dev/fake", + udevaction="remove", + subsystem="net", ) def test_apply_failed_on_add(self, mocks): - mocks.m_network_state.iter_interfaces.return_value = [{ - 'mac_address': FAKE_MAC, - }] + mocks.m_network_state.iter_interfaces.return_value = [ + { + "mac_address": FAKE_MAC, + } + ] mocks.m_activator.bring_up_interface.return_value = False with pytest.raises( - RuntimeError, match='Failed to bring up device: /dev/fake' + RuntimeError, match="Failed to bring up device: /dev/fake" ): handle_hotplug( hotplug_init=mocks.m_init, - 
devpath='/dev/fake', - udevaction='add', - subsystem='net' + devpath="/dev/fake", + udevaction="add", + subsystem="net", ) def test_apply_failed_on_remove(self, mocks): mocks.m_network_state.iter_interfaces.return_value = [{}] mocks.m_activator.bring_down_interface.return_value = False with pytest.raises( - RuntimeError, match='Failed to bring down device: /dev/fake' + RuntimeError, match="Failed to bring down device: /dev/fake" ): handle_hotplug( hotplug_init=mocks.m_init, - devpath='/dev/fake', - udevaction='remove', - subsystem='net' + devpath="/dev/fake", + udevaction="remove", + subsystem="net", ) def test_retry(self, mocks): with pytest.raises(RuntimeError): handle_hotplug( hotplug_init=mocks.m_init, - devpath='/dev/fake', - udevaction='add', - subsystem='net' + devpath="/dev/fake", + udevaction="add", + subsystem="net", ) assert mocks.m_sleep.call_count == 5 assert mocks.m_sleep.call_args_list == [ - call(1), call(3), call(5), call(10), call(30) + call(1), + call(3), + call(5), + call(10), + call(30), ] diff --git a/tests/unittests/cmd/devel/test_logs.py b/tests/unittests/cmd/devel/test_logs.py index 18bdcdda..73ed3c65 100644 --- a/tests/unittests/cmd/devel/test_logs.py +++ b/tests/unittests/cmd/devel/test_logs.py @@ -1,167 +1,213 @@ # This file is part of cloud-init. See LICENSE file for license information. -from datetime import datetime import os +from datetime import datetime from io import StringIO from cloudinit.cmd.devel import logs from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE -from tests.unittests.helpers import ( - FilesystemMockingTestCase, mock, wrap_and_call) from cloudinit.subp import subp from cloudinit.util import ensure_dir, load_file, write_file +from tests.unittests.helpers import ( + FilesystemMockingTestCase, + mock, + wrap_and_call, +) -@mock.patch('cloudinit.cmd.devel.logs.os.getuid') +@mock.patch("cloudinit.cmd.devel.logs.os.getuid") class TestCollectLogs(FilesystemMockingTestCase): - def setUp(self): super(TestCollectLogs, self).setUp() self.new_root = self.tmp_dir() - self.run_dir = self.tmp_path('run', self.new_root) + self.run_dir = self.tmp_path("run", self.new_root) def test_collect_logs_with_userdata_requires_root_user(self, m_getuid): """collect-logs errors when non-root user collects userdata .""" m_getuid.return_value = 100 # non-root - output_tarfile = self.tmp_path('logs.tgz') - with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: + output_tarfile = self.tmp_path("logs.tgz") + with mock.patch("sys.stderr", new_callable=StringIO) as m_stderr: self.assertEqual( - 1, logs.collect_logs(output_tarfile, include_userdata=True)) + 1, logs.collect_logs(output_tarfile, include_userdata=True) + ) self.assertEqual( - 'To include userdata, root user is required.' - ' Try sudo cloud-init collect-logs\n', - m_stderr.getvalue()) + "To include userdata, root user is required." 
+ " Try sudo cloud-init collect-logs\n", + m_stderr.getvalue(), + ) def test_collect_logs_creates_tarfile(self, m_getuid): """collect-logs creates a tarfile with all related cloud-init info.""" m_getuid.return_value = 100 - log1 = self.tmp_path('cloud-init.log', self.new_root) - write_file(log1, 'cloud-init-log') - log2 = self.tmp_path('cloud-init-output.log', self.new_root) - write_file(log2, 'cloud-init-output-log') + log1 = self.tmp_path("cloud-init.log", self.new_root) + write_file(log1, "cloud-init-log") + log2 = self.tmp_path("cloud-init-output.log", self.new_root) + write_file(log2, "cloud-init-output-log") ensure_dir(self.run_dir) - write_file(self.tmp_path('results.json', self.run_dir), 'results') - write_file(self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir), - 'sensitive') - output_tarfile = self.tmp_path('logs.tgz') + write_file(self.tmp_path("results.json", self.run_dir), "results") + write_file( + self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir), + "sensitive", + ) + output_tarfile = self.tmp_path("logs.tgz") - date = datetime.utcnow().date().strftime('%Y-%m-%d') - date_logdir = 'cloud-init-logs-{0}'.format(date) + date = datetime.utcnow().date().strftime("%Y-%m-%d") + date_logdir = "cloud-init-logs-{0}".format(date) - version_out = '/usr/bin/cloud-init 18.2fake\n' + version_out = "/usr/bin/cloud-init 18.2fake\n" expected_subp = { - ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'): - '0.7fake\n', - ('cloud-init', '--version'): version_out, - ('dmesg',): 'dmesg-out\n', - ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n', - ('tar', 'czvf', output_tarfile, date_logdir): '' + ( + "dpkg-query", + "--show", + "-f=${Version}\n", + "cloud-init", + ): "0.7fake\n", + ("cloud-init", "--version"): version_out, + ("dmesg",): "dmesg-out\n", + ("journalctl", "--boot=0", "-o", "short-precise"): "journal-out\n", + ("tar", "czvf", output_tarfile, date_logdir): "", } def fake_subp(cmd): cmd_tuple = tuple(cmd) if cmd_tuple not in expected_subp: raise AssertionError( - 'Unexpected command provided to subp: {0}'.format(cmd)) - if cmd == ['tar', 'czvf', output_tarfile, date_logdir]: + "Unexpected command provided to subp: {0}".format(cmd) + ) + if cmd == ["tar", "czvf", output_tarfile, date_logdir]: subp(cmd) # Pass through tar cmd so we can check output - return expected_subp[cmd_tuple], '' + return expected_subp[cmd_tuple], "" fake_stderr = mock.MagicMock() wrap_and_call( - 'cloudinit.cmd.devel.logs', - {'subp': {'side_effect': fake_subp}, - 'sys.stderr': {'new': fake_stderr}, - 'CLOUDINIT_LOGS': {'new': [log1, log2]}, - 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}}, - logs.collect_logs, output_tarfile, include_userdata=False) + "cloudinit.cmd.devel.logs", + { + "subp": {"side_effect": fake_subp}, + "sys.stderr": {"new": fake_stderr}, + "CLOUDINIT_LOGS": {"new": [log1, log2]}, + "CLOUDINIT_RUN_DIR": {"new": self.run_dir}, + }, + logs.collect_logs, + output_tarfile, + include_userdata=False, + ) # unpack the tarfile and check file contents - subp(['tar', 'zxvf', output_tarfile, '-C', self.new_root]) + subp(["tar", "zxvf", output_tarfile, "-C", self.new_root]) out_logdir = self.tmp_path(date_logdir, self.new_root) self.assertFalse( os.path.exists( - os.path.join(out_logdir, 'run', 'cloud-init', - INSTANCE_JSON_SENSITIVE_FILE)), - 'Unexpected file found: %s' % INSTANCE_JSON_SENSITIVE_FILE) + os.path.join( + out_logdir, + "run", + "cloud-init", + INSTANCE_JSON_SENSITIVE_FILE, + ) + ), + "Unexpected file found: %s" % INSTANCE_JSON_SENSITIVE_FILE, + ) + 
self.assertEqual( + "0.7fake\n", load_file(os.path.join(out_logdir, "dpkg-version")) + ) self.assertEqual( - '0.7fake\n', - load_file(os.path.join(out_logdir, 'dpkg-version'))) - self.assertEqual(version_out, - load_file(os.path.join(out_logdir, 'version'))) + version_out, load_file(os.path.join(out_logdir, "version")) + ) self.assertEqual( - 'cloud-init-log', - load_file(os.path.join(out_logdir, 'cloud-init.log'))) + "cloud-init-log", + load_file(os.path.join(out_logdir, "cloud-init.log")), + ) self.assertEqual( - 'cloud-init-output-log', - load_file(os.path.join(out_logdir, 'cloud-init-output.log'))) + "cloud-init-output-log", + load_file(os.path.join(out_logdir, "cloud-init-output.log")), + ) self.assertEqual( - 'dmesg-out\n', - load_file(os.path.join(out_logdir, 'dmesg.txt'))) + "dmesg-out\n", load_file(os.path.join(out_logdir, "dmesg.txt")) + ) self.assertEqual( - 'journal-out\n', - load_file(os.path.join(out_logdir, 'journal.txt'))) + "journal-out\n", load_file(os.path.join(out_logdir, "journal.txt")) + ) self.assertEqual( - 'results', + "results", load_file( - os.path.join(out_logdir, 'run', 'cloud-init', 'results.json'))) - fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile) + os.path.join(out_logdir, "run", "cloud-init", "results.json") + ), + ) + fake_stderr.write.assert_any_call("Wrote %s\n" % output_tarfile) def test_collect_logs_includes_optional_userdata(self, m_getuid): """collect-logs include userdata when --include-userdata is set.""" m_getuid.return_value = 0 - log1 = self.tmp_path('cloud-init.log', self.new_root) - write_file(log1, 'cloud-init-log') - log2 = self.tmp_path('cloud-init-output.log', self.new_root) - write_file(log2, 'cloud-init-output-log') - userdata = self.tmp_path('user-data.txt', self.new_root) - write_file(userdata, 'user-data') + log1 = self.tmp_path("cloud-init.log", self.new_root) + write_file(log1, "cloud-init-log") + log2 = self.tmp_path("cloud-init-output.log", self.new_root) + write_file(log2, "cloud-init-output-log") + userdata = self.tmp_path("user-data.txt", self.new_root) + write_file(userdata, "user-data") ensure_dir(self.run_dir) - write_file(self.tmp_path('results.json', self.run_dir), 'results') - write_file(self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir), - 'sensitive') - output_tarfile = self.tmp_path('logs.tgz') + write_file(self.tmp_path("results.json", self.run_dir), "results") + write_file( + self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir), + "sensitive", + ) + output_tarfile = self.tmp_path("logs.tgz") - date = datetime.utcnow().date().strftime('%Y-%m-%d') - date_logdir = 'cloud-init-logs-{0}'.format(date) + date = datetime.utcnow().date().strftime("%Y-%m-%d") + date_logdir = "cloud-init-logs-{0}".format(date) - version_out = '/usr/bin/cloud-init 18.2fake\n' + version_out = "/usr/bin/cloud-init 18.2fake\n" expected_subp = { - ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'): - '0.7fake', - ('cloud-init', '--version'): version_out, - ('dmesg',): 'dmesg-out\n', - ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n', - ('tar', 'czvf', output_tarfile, date_logdir): '' + ( + "dpkg-query", + "--show", + "-f=${Version}\n", + "cloud-init", + ): "0.7fake", + ("cloud-init", "--version"): version_out, + ("dmesg",): "dmesg-out\n", + ("journalctl", "--boot=0", "-o", "short-precise"): "journal-out\n", + ("tar", "czvf", output_tarfile, date_logdir): "", } def fake_subp(cmd): cmd_tuple = tuple(cmd) if cmd_tuple not in expected_subp: raise AssertionError( - 'Unexpected command provided to 
subp: {0}'.format(cmd)) - if cmd == ['tar', 'czvf', output_tarfile, date_logdir]: + "Unexpected command provided to subp: {0}".format(cmd) + ) + if cmd == ["tar", "czvf", output_tarfile, date_logdir]: subp(cmd) # Pass through tar cmd so we can check output - return expected_subp[cmd_tuple], '' + return expected_subp[cmd_tuple], "" fake_stderr = mock.MagicMock() wrap_and_call( - 'cloudinit.cmd.devel.logs', - {'subp': {'side_effect': fake_subp}, - 'sys.stderr': {'new': fake_stderr}, - 'CLOUDINIT_LOGS': {'new': [log1, log2]}, - 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}, - 'USER_DATA_FILE': {'new': userdata}}, - logs.collect_logs, output_tarfile, include_userdata=True) + "cloudinit.cmd.devel.logs", + { + "subp": {"side_effect": fake_subp}, + "sys.stderr": {"new": fake_stderr}, + "CLOUDINIT_LOGS": {"new": [log1, log2]}, + "CLOUDINIT_RUN_DIR": {"new": self.run_dir}, + "USER_DATA_FILE": {"new": userdata}, + }, + logs.collect_logs, + output_tarfile, + include_userdata=True, + ) # unpack the tarfile and check file contents - subp(['tar', 'zxvf', output_tarfile, '-C', self.new_root]) + subp(["tar", "zxvf", output_tarfile, "-C", self.new_root]) out_logdir = self.tmp_path(date_logdir, self.new_root) self.assertEqual( - 'user-data', - load_file(os.path.join(out_logdir, 'user-data.txt'))) + "user-data", load_file(os.path.join(out_logdir, "user-data.txt")) + ) self.assertEqual( - 'sensitive', - load_file(os.path.join(out_logdir, 'run', 'cloud-init', - INSTANCE_JSON_SENSITIVE_FILE))) - fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile) + "sensitive", + load_file( + os.path.join( + out_logdir, + "run", + "cloud-init", + INSTANCE_JSON_SENSITIVE_FILE, + ) + ), + ) + fake_stderr.write.assert_any_call("Wrote %s\n" % output_tarfile) diff --git a/tests/unittests/cmd/devel/test_render.py b/tests/unittests/cmd/devel/test_render.py index c7ddca3d..4afc64f0 100644 --- a/tests/unittests/cmd/devel/test_render.py +++ b/tests/unittests/cmd/devel/test_render.py @@ -1,21 +1,21 @@ # This file is part of cloud-init. See LICENSE file for license information. 
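# [editor's note] The import hunk directly below is pure isort: straight
# "import x" lines precede "from ... import ..." lines, and sections
# (stdlib first, then cloudinit/tests) are alphabetized and blank-line
# separated. A minimal illustration, assuming isort >= 5; the "black"
# profile and the repo's first-party configuration are assumptions here:
import isort

messy = (
    "from io import StringIO\n"
    "import os\n"
    "from tests.unittests.helpers import CiTestCase, mock, skipUnlessJinja\n"
    "from cloudinit.cmd.devel import render\n"
)
print(isort.code(messy, profile="black"))
# -> "import os" then "from io import StringIO", a blank line, then the
#    cloudinit and tests imports, mirroring the reordering in this hunk.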
import os +from collections import namedtuple from io import StringIO -from collections import namedtuple from cloudinit.cmd.devel import render from cloudinit.helpers import Paths from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE -from tests.unittests.helpers import CiTestCase, mock, skipUnlessJinja from cloudinit.util import ensure_dir, write_file +from tests.unittests.helpers import CiTestCase, mock, skipUnlessJinja class TestRender(CiTestCase): with_logs = True - args = namedtuple('renderargs', 'user_data instance_data debug') + args = namedtuple("renderargs", "user_data instance_data debug") def setUp(self): super(TestRender, self).setUp() @@ -23,122 +23,132 @@ class TestRender(CiTestCase): def test_handle_args_error_on_missing_user_data(self): """When user_data file path does not exist, log an error.""" - absent_file = self.tmp_path('user-data', dir=self.tmp) - instance_data = self.tmp_path('instance-data', dir=self.tmp) - write_file(instance_data, '{}') + absent_file = self.tmp_path("user-data", dir=self.tmp) + instance_data = self.tmp_path("instance-data", dir=self.tmp) + write_file(instance_data, "{}") args = self.args( - user_data=absent_file, instance_data=instance_data, debug=False) - with mock.patch('sys.stderr', new_callable=StringIO): - self.assertEqual(1, render.handle_args('anyname', args)) + user_data=absent_file, instance_data=instance_data, debug=False + ) + with mock.patch("sys.stderr", new_callable=StringIO): + self.assertEqual(1, render.handle_args("anyname", args)) self.assertIn( - 'Missing user-data file: %s' % absent_file, - self.logs.getvalue()) + "Missing user-data file: %s" % absent_file, self.logs.getvalue() + ) def test_handle_args_error_on_missing_instance_data(self): """When instance_data file path does not exist, log an error.""" - user_data = self.tmp_path('user-data', dir=self.tmp) - absent_file = self.tmp_path('instance-data', dir=self.tmp) + user_data = self.tmp_path("user-data", dir=self.tmp) + absent_file = self.tmp_path("instance-data", dir=self.tmp) args = self.args( - user_data=user_data, instance_data=absent_file, debug=False) - with mock.patch('sys.stderr', new_callable=StringIO): - self.assertEqual(1, render.handle_args('anyname', args)) + user_data=user_data, instance_data=absent_file, debug=False + ) + with mock.patch("sys.stderr", new_callable=StringIO): + self.assertEqual(1, render.handle_args("anyname", args)) self.assertIn( - 'Missing instance-data.json file: %s' % absent_file, - self.logs.getvalue()) + "Missing instance-data.json file: %s" % absent_file, + self.logs.getvalue(), + ) def test_handle_args_defaults_instance_data(self): """When no instance_data argument, default to configured run_dir.""" - user_data = self.tmp_path('user-data', dir=self.tmp) - run_dir = self.tmp_path('run_dir', dir=self.tmp) + user_data = self.tmp_path("user-data", dir=self.tmp) + run_dir = self.tmp_path("run_dir", dir=self.tmp) ensure_dir(run_dir) - paths = Paths({'run_dir': run_dir}) - self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths') + paths = Paths({"run_dir": run_dir}) + self.add_patch("cloudinit.cmd.devel.render.read_cfg_paths", "m_paths") self.m_paths.return_value = paths - args = self.args( - user_data=user_data, instance_data=None, debug=False) - with mock.patch('sys.stderr', new_callable=StringIO): - self.assertEqual(1, render.handle_args('anyname', args)) + args = self.args(user_data=user_data, instance_data=None, debug=False) + with mock.patch("sys.stderr", new_callable=StringIO): + 
self.assertEqual(1, render.handle_args("anyname", args)) json_file = os.path.join(run_dir, INSTANCE_JSON_FILE) self.assertIn( - 'Missing instance-data.json file: %s' % json_file, - self.logs.getvalue()) + "Missing instance-data.json file: %s" % json_file, + self.logs.getvalue(), + ) def test_handle_args_root_fallback_from_sensitive_instance_data(self): """When root user defaults to sensitive.json.""" - user_data = self.tmp_path('user-data', dir=self.tmp) - run_dir = self.tmp_path('run_dir', dir=self.tmp) + user_data = self.tmp_path("user-data", dir=self.tmp) + run_dir = self.tmp_path("run_dir", dir=self.tmp) ensure_dir(run_dir) - paths = Paths({'run_dir': run_dir}) - self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths') + paths = Paths({"run_dir": run_dir}) + self.add_patch("cloudinit.cmd.devel.render.read_cfg_paths", "m_paths") self.m_paths.return_value = paths - args = self.args( - user_data=user_data, instance_data=None, debug=False) - with mock.patch('sys.stderr', new_callable=StringIO): - with mock.patch('os.getuid') as m_getuid: + args = self.args(user_data=user_data, instance_data=None, debug=False) + with mock.patch("sys.stderr", new_callable=StringIO): + with mock.patch("os.getuid") as m_getuid: m_getuid.return_value = 0 - self.assertEqual(1, render.handle_args('anyname', args)) + self.assertEqual(1, render.handle_args("anyname", args)) json_file = os.path.join(run_dir, INSTANCE_JSON_FILE) json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE) self.assertIn( - 'WARNING: Missing root-readable %s. Using redacted %s' % ( - json_sensitive, json_file), self.logs.getvalue()) + "WARNING: Missing root-readable %s. Using redacted %s" + % (json_sensitive, json_file), + self.logs.getvalue(), + ) self.assertIn( - 'ERROR: Missing instance-data.json file: %s' % json_file, - self.logs.getvalue()) + "ERROR: Missing instance-data.json file: %s" % json_file, + self.logs.getvalue(), + ) def test_handle_args_root_uses_sensitive_instance_data(self): """When root user, and no instance-data arg, use sensitive.json.""" - user_data = self.tmp_path('user-data', dir=self.tmp) - write_file(user_data, '##template: jinja\nrendering: {{ my_var }}') - run_dir = self.tmp_path('run_dir', dir=self.tmp) + user_data = self.tmp_path("user-data", dir=self.tmp) + write_file(user_data, "##template: jinja\nrendering: {{ my_var }}") + run_dir = self.tmp_path("run_dir", dir=self.tmp) ensure_dir(run_dir) json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE) write_file(json_sensitive, '{"my-var": "jinja worked"}') - paths = Paths({'run_dir': run_dir}) - self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths') + paths = Paths({"run_dir": run_dir}) + self.add_patch("cloudinit.cmd.devel.render.read_cfg_paths", "m_paths") self.m_paths.return_value = paths - args = self.args( - user_data=user_data, instance_data=None, debug=False) - with mock.patch('sys.stderr', new_callable=StringIO): - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - with mock.patch('os.getuid') as m_getuid: + args = self.args(user_data=user_data, instance_data=None, debug=False) + with mock.patch("sys.stderr", new_callable=StringIO): + with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout: + with mock.patch("os.getuid") as m_getuid: m_getuid.return_value = 0 - self.assertEqual(0, render.handle_args('anyname', args)) - self.assertIn('rendering: jinja worked', m_stdout.getvalue()) + self.assertEqual(0, render.handle_args("anyname", args)) + self.assertIn("rendering: jinja 
worked", m_stdout.getvalue()) @skipUnlessJinja() def test_handle_args_renders_instance_data_vars_in_template(self): """If user_data file is a jinja template render instance-data vars.""" - user_data = self.tmp_path('user-data', dir=self.tmp) - write_file(user_data, '##template: jinja\nrendering: {{ my_var }}') - instance_data = self.tmp_path('instance-data', dir=self.tmp) + user_data = self.tmp_path("user-data", dir=self.tmp) + write_file(user_data, "##template: jinja\nrendering: {{ my_var }}") + instance_data = self.tmp_path("instance-data", dir=self.tmp) write_file(instance_data, '{"my-var": "jinja worked"}') args = self.args( - user_data=user_data, instance_data=instance_data, debug=True) - with mock.patch('sys.stderr', new_callable=StringIO) as m_console_err: - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - self.assertEqual(0, render.handle_args('anyname', args)) + user_data=user_data, instance_data=instance_data, debug=True + ) + with mock.patch("sys.stderr", new_callable=StringIO) as m_console_err: + with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout: + self.assertEqual(0, render.handle_args("anyname", args)) self.assertIn( - 'DEBUG: Converted jinja variables\n{', self.logs.getvalue()) + "DEBUG: Converted jinja variables\n{", self.logs.getvalue() + ) self.assertIn( - 'DEBUG: Converted jinja variables\n{', m_console_err.getvalue()) - self.assertEqual('rendering: jinja worked', m_stdout.getvalue()) + "DEBUG: Converted jinja variables\n{", m_console_err.getvalue() + ) + self.assertEqual("rendering: jinja worked", m_stdout.getvalue()) @skipUnlessJinja() def test_handle_args_warns_and_gives_up_on_invalid_jinja_operation(self): """If user_data file has invalid jinja operations log warnings.""" - user_data = self.tmp_path('user-data', dir=self.tmp) - write_file(user_data, '##template: jinja\nrendering: {{ my-var }}') - instance_data = self.tmp_path('instance-data', dir=self.tmp) + user_data = self.tmp_path("user-data", dir=self.tmp) + write_file(user_data, "##template: jinja\nrendering: {{ my-var }}") + instance_data = self.tmp_path("instance-data", dir=self.tmp) write_file(instance_data, '{"my-var": "jinja worked"}') args = self.args( - user_data=user_data, instance_data=instance_data, debug=True) - with mock.patch('sys.stderr', new_callable=StringIO): - self.assertEqual(1, render.handle_args('anyname', args)) + user_data=user_data, instance_data=instance_data, debug=True + ) + with mock.patch("sys.stderr", new_callable=StringIO): + self.assertEqual(1, render.handle_args("anyname", args)) self.assertIn( - 'WARNING: Ignoring jinja template for %s: Undefined jinja' + "WARNING: Ignoring jinja template for %s: Undefined jinja" ' variable: "my-var". Jinja tried subtraction. Perhaps you meant' ' "my_var"?' % user_data, - self.logs.getvalue()) + self.logs.getvalue(), + ) + # vi: ts=4 expandtab diff --git a/tests/unittests/cmd/test_clean.py b/tests/unittests/cmd/test_clean.py index 3bb0ee9b..7d12017e 100644 --- a/tests/unittests/cmd/test_clean.py +++ b/tests/unittests/cmd/test_clean.py @@ -1,29 +1,31 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-from cloudinit.cmd import clean -from cloudinit.util import ensure_dir, sym_link, write_file -from tests.unittests.helpers import CiTestCase, wrap_and_call, mock -from collections import namedtuple import os +from collections import namedtuple from io import StringIO -mypaths = namedtuple('MyPaths', 'cloud_dir') +from cloudinit.cmd import clean +from cloudinit.util import ensure_dir, sym_link, write_file +from tests.unittests.helpers import CiTestCase, mock, wrap_and_call +mypaths = namedtuple("MyPaths", "cloud_dir") -class TestClean(CiTestCase): +class TestClean(CiTestCase): def setUp(self): super(TestClean, self).setUp() self.new_root = self.tmp_dir() - self.artifact_dir = self.tmp_path('artifacts', self.new_root) - self.log1 = self.tmp_path('cloud-init.log', self.new_root) - self.log2 = self.tmp_path('cloud-init-output.log', self.new_root) + self.artifact_dir = self.tmp_path("artifacts", self.new_root) + self.log1 = self.tmp_path("cloud-init.log", self.new_root) + self.log2 = self.tmp_path("cloud-init-output.log", self.new_root) class FakeInit(object): - cfg = {'def_log_file': self.log1, - 'output': {'all': '|tee -a {0}'.format(self.log2)}} + cfg = { + "def_log_file": self.log1, + "output": {"all": "|tee -a {0}".format(self.log2)}, + } # Ensure cloud_dir has a trailing slash, to match real behaviour - paths = mypaths(cloud_dir='{}/'.format(self.artifact_dir)) + paths = mypaths(cloud_dir="{}/".format(self.artifact_dir)) def __init__(self, ds_deps): pass @@ -35,110 +37,133 @@ class TestClean(CiTestCase): def test_remove_artifacts_removes_logs(self): """remove_artifacts removes logs when remove_logs is True.""" - write_file(self.log1, 'cloud-init-log') - write_file(self.log2, 'cloud-init-output-log') + write_file(self.log1, "cloud-init-log") + write_file(self.log2, "cloud-init-output-log") self.assertFalse( - os.path.exists(self.artifact_dir), 'Unexpected artifacts dir') + os.path.exists(self.artifact_dir), "Unexpected artifacts dir" + ) retcode = wrap_and_call( - 'cloudinit.cmd.clean', - {'Init': {'side_effect': self.init_class}}, - clean.remove_artifacts, remove_logs=True) - self.assertFalse(os.path.exists(self.log1), 'Unexpected file') - self.assertFalse(os.path.exists(self.log2), 'Unexpected file') + "cloudinit.cmd.clean", + {"Init": {"side_effect": self.init_class}}, + clean.remove_artifacts, + remove_logs=True, + ) + self.assertFalse(os.path.exists(self.log1), "Unexpected file") + self.assertFalse(os.path.exists(self.log2), "Unexpected file") self.assertEqual(0, retcode) def test_remove_artifacts_preserves_logs(self): """remove_artifacts leaves logs when remove_logs is False.""" - write_file(self.log1, 'cloud-init-log') - write_file(self.log2, 'cloud-init-output-log') + write_file(self.log1, "cloud-init-log") + write_file(self.log2, "cloud-init-output-log") retcode = wrap_and_call( - 'cloudinit.cmd.clean', - {'Init': {'side_effect': self.init_class}}, - clean.remove_artifacts, remove_logs=False) - self.assertTrue(os.path.exists(self.log1), 'Missing expected file') - self.assertTrue(os.path.exists(self.log2), 'Missing expected file') + "cloudinit.cmd.clean", + {"Init": {"side_effect": self.init_class}}, + clean.remove_artifacts, + remove_logs=False, + ) + self.assertTrue(os.path.exists(self.log1), "Missing expected file") + self.assertTrue(os.path.exists(self.log2), "Missing expected file") self.assertEqual(0, retcode) def test_remove_artifacts_removes_unlinks_symlinks(self): """remove_artifacts cleans artifacts dir unlinking any symlinks.""" - dir1 = os.path.join(self.artifact_dir, 
'dir1') + dir1 = os.path.join(self.artifact_dir, "dir1") ensure_dir(dir1) - symlink = os.path.join(self.artifact_dir, 'mylink') + symlink = os.path.join(self.artifact_dir, "mylink") sym_link(dir1, symlink) retcode = wrap_and_call( - 'cloudinit.cmd.clean', - {'Init': {'side_effect': self.init_class}}, - clean.remove_artifacts, remove_logs=False) + "cloudinit.cmd.clean", + {"Init": {"side_effect": self.init_class}}, + clean.remove_artifacts, + remove_logs=False, + ) self.assertEqual(0, retcode) for path in (dir1, symlink): self.assertFalse( - os.path.exists(path), - 'Unexpected {0} dir'.format(path)) + os.path.exists(path), "Unexpected {0} dir".format(path) + ) def test_remove_artifacts_removes_artifacts_skipping_seed(self): """remove_artifacts cleans artifacts dir with exception of seed dir.""" dirs = [ self.artifact_dir, - os.path.join(self.artifact_dir, 'seed'), - os.path.join(self.artifact_dir, 'dir1'), - os.path.join(self.artifact_dir, 'dir2')] + os.path.join(self.artifact_dir, "seed"), + os.path.join(self.artifact_dir, "dir1"), + os.path.join(self.artifact_dir, "dir2"), + ] for _dir in dirs: ensure_dir(_dir) retcode = wrap_and_call( - 'cloudinit.cmd.clean', - {'Init': {'side_effect': self.init_class}}, - clean.remove_artifacts, remove_logs=False) + "cloudinit.cmd.clean", + {"Init": {"side_effect": self.init_class}}, + clean.remove_artifacts, + remove_logs=False, + ) self.assertEqual(0, retcode) for expected_dir in dirs[:2]: self.assertTrue( os.path.exists(expected_dir), - 'Missing {0} dir'.format(expected_dir)) + "Missing {0} dir".format(expected_dir), + ) for deleted_dir in dirs[2:]: self.assertFalse( os.path.exists(deleted_dir), - 'Unexpected {0} dir'.format(deleted_dir)) + "Unexpected {0} dir".format(deleted_dir), + ) def test_remove_artifacts_removes_artifacts_removes_seed(self): """remove_artifacts removes seed dir when remove_seed is True.""" dirs = [ self.artifact_dir, - os.path.join(self.artifact_dir, 'seed'), - os.path.join(self.artifact_dir, 'dir1'), - os.path.join(self.artifact_dir, 'dir2')] + os.path.join(self.artifact_dir, "seed"), + os.path.join(self.artifact_dir, "dir1"), + os.path.join(self.artifact_dir, "dir2"), + ] for _dir in dirs: ensure_dir(_dir) retcode = wrap_and_call( - 'cloudinit.cmd.clean', - {'Init': {'side_effect': self.init_class}}, - clean.remove_artifacts, remove_logs=False, remove_seed=True) + "cloudinit.cmd.clean", + {"Init": {"side_effect": self.init_class}}, + clean.remove_artifacts, + remove_logs=False, + remove_seed=True, + ) self.assertEqual(0, retcode) self.assertTrue( - os.path.exists(self.artifact_dir), 'Missing artifact dir') + os.path.exists(self.artifact_dir), "Missing artifact dir" + ) for deleted_dir in dirs[1:]: self.assertFalse( os.path.exists(deleted_dir), - 'Unexpected {0} dir'.format(deleted_dir)) + "Unexpected {0} dir".format(deleted_dir), + ) def test_remove_artifacts_returns_one_on_errors(self): """remove_artifacts returns non-zero on failure and prints an error.""" ensure_dir(self.artifact_dir) - ensure_dir(os.path.join(self.artifact_dir, 'dir1')) + ensure_dir(os.path.join(self.artifact_dir, "dir1")) - with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: + with mock.patch("sys.stderr", new_callable=StringIO) as m_stderr: retcode = wrap_and_call( - 'cloudinit.cmd.clean', - {'del_dir': {'side_effect': OSError('oops')}, - 'Init': {'side_effect': self.init_class}}, - clean.remove_artifacts, remove_logs=False) + "cloudinit.cmd.clean", + { + "del_dir": {"side_effect": OSError("oops")}, + "Init": {"side_effect": 
self.init_class}, + }, + clean.remove_artifacts, + remove_logs=False, + ) self.assertEqual(1, retcode) self.assertEqual( - 'Error:\nCould not remove %s/dir1: oops\n' % self.artifact_dir, - m_stderr.getvalue()) + "Error:\nCould not remove %s/dir1: oops\n" % self.artifact_dir, + m_stderr.getvalue(), + ) def test_handle_clean_args_reboots(self): """handle_clean_args_reboots when reboot arg is provided.""" @@ -147,32 +172,40 @@ class TestClean(CiTestCase): def fake_subp(cmd, capture): called_cmds.append((cmd, capture)) - return '', '' + return "", "" - myargs = namedtuple('MyArgs', 'remove_logs remove_seed reboot') + myargs = namedtuple("MyArgs", "remove_logs remove_seed reboot") cmdargs = myargs(remove_logs=False, remove_seed=False, reboot=True) retcode = wrap_and_call( - 'cloudinit.cmd.clean', - {'subp': {'side_effect': fake_subp}, - 'Init': {'side_effect': self.init_class}}, - clean.handle_clean_args, name='does not matter', args=cmdargs) + "cloudinit.cmd.clean", + { + "subp": {"side_effect": fake_subp}, + "Init": {"side_effect": self.init_class}, + }, + clean.handle_clean_args, + name="does not matter", + args=cmdargs, + ) self.assertEqual(0, retcode) - self.assertEqual( - [(['shutdown', '-r', 'now'], False)], called_cmds) + self.assertEqual([(["shutdown", "-r", "now"], False)], called_cmds) def test_status_main(self): - '''clean.main can be run as a standalone script.''' - write_file(self.log1, 'cloud-init-log') + """clean.main can be run as a standalone script.""" + write_file(self.log1, "cloud-init-log") with self.assertRaises(SystemExit) as context_manager: wrap_and_call( - 'cloudinit.cmd.clean', - {'Init': {'side_effect': self.init_class}, - 'sys.argv': {'new': ['clean', '--logs']}}, - clean.main) + "cloudinit.cmd.clean", + { + "Init": {"side_effect": self.init_class}, + "sys.argv": {"new": ["clean", "--logs"]}, + }, + clean.main, + ) self.assertEqual(0, context_manager.exception.code) self.assertFalse( - os.path.exists(self.log1), 'Unexpected log {0}'.format(self.log1)) + os.path.exists(self.log1), "Unexpected log {0}".format(self.log1) + ) # vi: ts=4 expandtab syntax=python diff --git a/tests/unittests/cmd/test_cloud_id.py b/tests/unittests/cmd/test_cloud_id.py index 9a010402..42941d4f 100644 --- a/tests/unittests/cmd/test_cloud_id.py +++ b/tests/unittests/cmd/test_cloud_id.py @@ -2,41 +2,45 @@ """Tests for cloud-id command line utility.""" -from cloudinit import util from collections import namedtuple from io import StringIO +from cloudinit import util from cloudinit.cmd import cloud_id - from tests.unittests.helpers import CiTestCase, mock class TestCloudId(CiTestCase): - args = namedtuple('cloudidargs', ('instance_data json long')) + args = namedtuple("cloudidargs", "instance_data json long") def setUp(self): super(TestCloudId, self).setUp() self.tmp = self.tmp_dir() - self.instance_data = self.tmp_path('instance-data.json', dir=self.tmp) + self.instance_data = self.tmp_path("instance-data.json", dir=self.tmp) def test_cloud_id_arg_parser_defaults(self): """Validate the argument defaults when not provided by the end-user.""" - cmd = ['cloud-id'] - with mock.patch('sys.argv', cmd): + cmd = ["cloud-id"] + with mock.patch("sys.argv", cmd): args = cloud_id.get_parser().parse_args() self.assertEqual( - '/run/cloud-init/instance-data.json', - args.instance_data) + "/run/cloud-init/instance-data.json", args.instance_data + ) self.assertEqual(False, args.long) self.assertEqual(False, args.json) def test_cloud_id_arg_parse_overrides(self): """Override argument defaults by specifying 
values for each param.""" - util.write_file(self.instance_data, '{}') - cmd = ['cloud-id', '--instance-data', self.instance_data, '--long', - '--json'] - with mock.patch('sys.argv', cmd): + util.write_file(self.instance_data, "{}") + cmd = [ + "cloud-id", + "--instance-data", + self.instance_data, + "--long", + "--json", + ] + with mock.patch("sys.argv", cmd): args = cloud_id.get_parser().parse_args() self.assertEqual(self.instance_data, args.instance_data) self.assertEqual(True, args.long) @@ -44,37 +48,40 @@ class TestCloudId(CiTestCase): def test_cloud_id_missing_instance_data_json(self): """Exit error when the provided instance-data.json does not exist.""" - cmd = ['cloud-id', '--instance-data', self.instance_data] - with mock.patch('sys.argv', cmd): - with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: + cmd = ["cloud-id", "--instance-data", self.instance_data] + with mock.patch("sys.argv", cmd): + with mock.patch("sys.stderr", new_callable=StringIO) as m_stderr: with self.assertRaises(SystemExit) as context_manager: cloud_id.main() self.assertEqual(1, context_manager.exception.code) self.assertIn( "Error:\nFile not found '%s'" % self.instance_data, - m_stderr.getvalue()) + m_stderr.getvalue(), + ) def test_cloud_id_non_json_instance_data(self): """Exit error when the provided instance-data.json is not json.""" - cmd = ['cloud-id', '--instance-data', self.instance_data] - util.write_file(self.instance_data, '{') - with mock.patch('sys.argv', cmd): - with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: + cmd = ["cloud-id", "--instance-data", self.instance_data] + util.write_file(self.instance_data, "{") + with mock.patch("sys.argv", cmd): + with mock.patch("sys.stderr", new_callable=StringIO) as m_stderr: with self.assertRaises(SystemExit) as context_manager: cloud_id.main() self.assertEqual(1, context_manager.exception.code) self.assertIn( "Error:\nFile '%s' is not valid json." 
% self.instance_data, - m_stderr.getvalue()) + m_stderr.getvalue(), + ) def test_cloud_id_from_cloud_name_in_instance_data(self): """Report canonical cloud-id from cloud_name in instance-data.""" util.write_file( self.instance_data, - '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}') - cmd = ['cloud-id', '--instance-data', self.instance_data] - with mock.patch('sys.argv', cmd): - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}', + ) + cmd = ["cloud-id", "--instance-data", self.instance_data] + with mock.patch("sys.argv", cmd): + with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout: with self.assertRaises(SystemExit) as context_manager: cloud_id.main() self.assertEqual(0, context_manager.exception.code) @@ -84,10 +91,11 @@ class TestCloudId(CiTestCase): """Report long cloud-id format from cloud_name and region.""" util.write_file( self.instance_data, - '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}') - cmd = ['cloud-id', '--instance-data', self.instance_data, '--long'] - with mock.patch('sys.argv', cmd): - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}', + ) + cmd = ["cloud-id", "--instance-data", self.instance_data, "--long"] + with mock.patch("sys.argv", cmd): + with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout: with self.assertRaises(SystemExit) as context_manager: cloud_id.main() self.assertEqual(0, context_manager.exception.code) @@ -98,10 +106,11 @@ class TestCloudId(CiTestCase): util.write_file( self.instance_data, '{"v1": {"cloud_name": "aws", "region": "cn-north-1",' - ' "platform": "ec2"}}') - cmd = ['cloud-id', '--instance-data', self.instance_data, '--long'] - with mock.patch('sys.argv', cmd): - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + ' "platform": "ec2"}}', + ) + cmd = ["cloud-id", "--instance-data", self.instance_data, "--long"] + with mock.patch("sys.argv", cmd): + with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout: with self.assertRaises(SystemExit) as context_manager: cloud_id.main() self.assertEqual(0, context_manager.exception.code) @@ -112,16 +121,24 @@ class TestCloudId(CiTestCase): util.write_file( self.instance_data, '{"v1": {"cloud_name": "unknown", "region": "dfw",' - ' "platform": "openstack", "public_ssh_keys": []}}') - expected = util.json_dumps({ - 'cloud_id': 'openstack', 'cloud_name': 'unknown', - 'platform': 'openstack', 'public_ssh_keys': [], 'region': 'dfw'}) - cmd = ['cloud-id', '--instance-data', self.instance_data, '--json'] - with mock.patch('sys.argv', cmd): - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + ' "platform": "openstack", "public_ssh_keys": []}}', + ) + expected = util.json_dumps( + { + "cloud_id": "openstack", + "cloud_name": "unknown", + "platform": "openstack", + "public_ssh_keys": [], + "region": "dfw", + } + ) + cmd = ["cloud-id", "--instance-data", self.instance_data, "--json"] + with mock.patch("sys.argv", cmd): + with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout: with self.assertRaises(SystemExit) as context_manager: cloud_id.main() self.assertEqual(0, context_manager.exception.code) - self.assertEqual(expected + '\n', m_stdout.getvalue()) + self.assertEqual(expected + "\n", m_stdout.getvalue()) + # vi: ts=4 expandtab diff --git a/tests/unittests/cmd/test_main.py b/tests/unittests/cmd/test_main.py index e1ce682b..3e778b0b 100644 --- 
a/tests/unittests/cmd/test_main.py +++ b/tests/unittests/cmd/test_main.py @@ -1,22 +1,20 @@ # This file is part of cloud-init. See LICENSE file for license information. -from collections import namedtuple import copy import os +from collections import namedtuple from io import StringIO from unittest import mock import pytest -from cloudinit.cmd import main from cloudinit import safeyaml -from cloudinit.util import ( - ensure_dir, load_file, write_file) -from tests.unittests.helpers import ( - FilesystemMockingTestCase, wrap_and_call) +from cloudinit.cmd import main +from cloudinit.util import ensure_dir, load_file, write_file +from tests.unittests.helpers import FilesystemMockingTestCase, wrap_and_call -mypaths = namedtuple('MyPaths', 'run_dir') -myargs = namedtuple('MyArgs', 'debug files force local reporter subcommand') +mypaths = namedtuple("MyPaths", "run_dir") +myargs = namedtuple("MyArgs", "debug files force local reporter subcommand") class TestMain(FilesystemMockingTestCase): @@ -26,27 +24,32 @@ class TestMain(FilesystemMockingTestCase): def setUp(self): super(TestMain, self).setUp() self.new_root = self.tmp_dir() - self.cloud_dir = self.tmp_path('var/lib/cloud/', dir=self.new_root) + self.cloud_dir = self.tmp_path("var/lib/cloud/", dir=self.new_root) os.makedirs(self.cloud_dir) - self.replicateTestRoot('simple_ubuntu', self.new_root) + self.replicateTestRoot("simple_ubuntu", self.new_root) self.cfg = { - 'datasource_list': ['None'], - 'runcmd': ['ls /etc'], # test ALL_DISTROS - 'system_info': {'paths': {'cloud_dir': self.cloud_dir, - 'run_dir': self.new_root}}, - 'write_files': [ + "datasource_list": ["None"], + "runcmd": ["ls /etc"], # test ALL_DISTROS + "system_info": { + "paths": { + "cloud_dir": self.cloud_dir, + "run_dir": self.new_root, + } + }, + "write_files": [ { - 'path': '/etc/blah.ini', - 'content': 'blah', - 'permissions': 0o755, + "path": "/etc/blah.ini", + "content": "blah", + "permissions": 0o755, }, ], - 'cloud_init_modules': ['write-files', 'runcmd'], + "cloud_init_modules": ["write-files", "runcmd"], } cloud_cfg = safeyaml.dumps(self.cfg) - ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) + ensure_dir(os.path.join(self.new_root, "etc", "cloud")) self.cloud_cfg_file = os.path.join( - self.new_root, 'etc', 'cloud', 'cloud.cfg') + self.new_root, "etc", "cloud", "cloud.cfg" + ) write_file(self.cloud_cfg_file, cloud_cfg) self.patchOS(self.new_root) self.patchUtils(self.new_root) @@ -55,31 +58,44 @@ class TestMain(FilesystemMockingTestCase): def test_main_init_run_net_stops_on_file_no_net(self): """When no-net file is present, main_init does not process modules.""" - stop_file = os.path.join(self.cloud_dir, 'data', 'no-net') # stop file - write_file(stop_file, '') + stop_file = os.path.join(self.cloud_dir, "data", "no-net") # stop file + write_file(stop_file, "") cmdargs = myargs( - debug=False, files=None, force=False, local=False, reporter=None, - subcommand='init') + debug=False, + files=None, + force=False, + local=False, + reporter=None, + subcommand="init", + ) (_item1, item2) = wrap_and_call( - 'cloudinit.cmd.main', - {'util.close_stdin': True, - 'netinfo.debug_info': 'my net debug info', - 'util.fixup_output': ('outfmt', 'errfmt')}, - main.main_init, 'init', cmdargs) + "cloudinit.cmd.main", + { + "util.close_stdin": True, + "netinfo.debug_info": "my net debug info", + "util.fixup_output": ("outfmt", "errfmt"), + }, + main.main_init, + "init", + cmdargs, + ) # We should not run write_files module self.assertFalse( - 
os.path.exists(os.path.join(self.new_root, 'etc/blah.ini')), - 'Unexpected run of write_files module produced blah.ini') + os.path.exists(os.path.join(self.new_root, "etc/blah.ini")), + "Unexpected run of write_files module produced blah.ini", + ) self.assertEqual([], item2) # Instancify is called - instance_id_path = 'var/lib/cloud/data/instance-id' + instance_id_path = "var/lib/cloud/data/instance-id" self.assertFalse( os.path.exists(os.path.join(self.new_root, instance_id_path)), - 'Unexpected call to datasource.instancify produced instance-id') + "Unexpected call to datasource.instancify produced instance-id", + ) expected_logs = [ "Exiting. stop file ['{stop_file}'] existed\n".format( - stop_file=stop_file), - 'my net debug info' # netinfo.debug_info + stop_file=stop_file + ), + "my net debug info", # netinfo.debug_info ] for log in expected_logs: self.assertIn(log, self.stderr.getvalue()) @@ -87,97 +103,133 @@ class TestMain(FilesystemMockingTestCase): def test_main_init_run_net_runs_modules(self): """Modules like write_files are run in 'net' mode.""" cmdargs = myargs( - debug=False, files=None, force=False, local=False, reporter=None, - subcommand='init') + debug=False, + files=None, + force=False, + local=False, + reporter=None, + subcommand="init", + ) (_item1, item2) = wrap_and_call( - 'cloudinit.cmd.main', - {'util.close_stdin': True, - 'netinfo.debug_info': 'my net debug info', - 'util.fixup_output': ('outfmt', 'errfmt')}, - main.main_init, 'init', cmdargs) + "cloudinit.cmd.main", + { + "util.close_stdin": True, + "netinfo.debug_info": "my net debug info", + "util.fixup_output": ("outfmt", "errfmt"), + }, + main.main_init, + "init", + cmdargs, + ) self.assertEqual([], item2) # Instancify is called - instance_id_path = 'var/lib/cloud/data/instance-id' + instance_id_path = "var/lib/cloud/data/instance-id" self.assertEqual( - 'iid-datasource-none\n', - os.path.join(load_file( - os.path.join(self.new_root, instance_id_path)))) + "iid-datasource-none\n", + os.path.join( + load_file(os.path.join(self.new_root, instance_id_path)) + ), + ) # modules are run (including write_files) self.assertEqual( - 'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini'))) + "blah", load_file(os.path.join(self.new_root, "etc/blah.ini")) + ) expected_logs = [ - 'network config is disabled by fallback', # apply_network_config - 'my net debug info', # netinfo.debug_info - 'no previous run detected' + "network config is disabled by fallback", # apply_network_config + "my net debug info", # netinfo.debug_info + "no previous run detected", ] for log in expected_logs: self.assertIn(log, self.stderr.getvalue()) def test_main_init_run_net_calls_set_hostname_when_metadata_present(self): """When local-hostname metadata is present, call cc_set_hostname.""" - self.cfg['datasource'] = { - 'None': {'metadata': {'local-hostname': 'md-hostname'}}} + self.cfg["datasource"] = { + "None": {"metadata": {"local-hostname": "md-hostname"}} + } cloud_cfg = safeyaml.dumps(self.cfg) write_file(self.cloud_cfg_file, cloud_cfg) cmdargs = myargs( - debug=False, files=None, force=False, local=False, reporter=None, - subcommand='init') + debug=False, + files=None, + force=False, + local=False, + reporter=None, + subcommand="init", + ) def set_hostname(name, cfg, cloud, log, args): - self.assertEqual('set-hostname', name) + self.assertEqual("set-hostname", name) updated_cfg = copy.deepcopy(self.cfg) updated_cfg.update( - {'def_log_file': '/var/log/cloud-init.log', - 'log_cfgs': [], - 'syslog_fix_perms': [ - 'syslog:adm', 
'root:adm', 'root:wheel', 'root:root' - ], - 'vendor_data': {'enabled': True, 'prefix': []}, - 'vendor_data2': {'enabled': True, 'prefix': []}}) - updated_cfg.pop('system_info') + { + "def_log_file": "/var/log/cloud-init.log", + "log_cfgs": [], + "syslog_fix_perms": [ + "syslog:adm", + "root:adm", + "root:wheel", + "root:root", + ], + "vendor_data": {"enabled": True, "prefix": []}, + "vendor_data2": {"enabled": True, "prefix": []}, + } + ) + updated_cfg.pop("system_info") self.assertEqual(updated_cfg, cfg) self.assertEqual(main.LOG, log) self.assertIsNone(args) (_item1, item2) = wrap_and_call( - 'cloudinit.cmd.main', - {'util.close_stdin': True, - 'netinfo.debug_info': 'my net debug info', - 'cc_set_hostname.handle': {'side_effect': set_hostname}, - 'util.fixup_output': ('outfmt', 'errfmt')}, - main.main_init, 'init', cmdargs) + "cloudinit.cmd.main", + { + "util.close_stdin": True, + "netinfo.debug_info": "my net debug info", + "cc_set_hostname.handle": {"side_effect": set_hostname}, + "util.fixup_output": ("outfmt", "errfmt"), + }, + main.main_init, + "init", + cmdargs, + ) self.assertEqual([], item2) # Instancify is called - instance_id_path = 'var/lib/cloud/data/instance-id' + instance_id_path = "var/lib/cloud/data/instance-id" self.assertEqual( - 'iid-datasource-none\n', - os.path.join(load_file( - os.path.join(self.new_root, instance_id_path)))) + "iid-datasource-none\n", + os.path.join( + load_file(os.path.join(self.new_root, instance_id_path)) + ), + ) # modules are run (including write_files) self.assertEqual( - 'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini'))) + "blah", load_file(os.path.join(self.new_root, "etc/blah.ini")) + ) expected_logs = [ - 'network config is disabled by fallback', # apply_network_config - 'my net debug info', # netinfo.debug_info - 'no previous run detected' + "network config is disabled by fallback", # apply_network_config + "my net debug info", # netinfo.debug_info + "no previous run detected", ] for log in expected_logs: self.assertIn(log, self.stderr.getvalue()) class TestShouldBringUpInterfaces: - @pytest.mark.parametrize('cfg_disable,args_local,expected', [ - (True, True, False), - (True, False, False), - (False, True, False), - (False, False, True), - ]) + @pytest.mark.parametrize( + "cfg_disable,args_local,expected", + [ + (True, True, False), + (True, False, False), + (False, True, False), + (False, False, True), + ], + ) def test_should_bring_up_interfaces( self, cfg_disable, args_local, expected ): init = mock.Mock() - init.cfg = {'disable_network_activation': cfg_disable} + init.cfg = {"disable_network_activation": cfg_disable} args = mock.Mock() args.local = args_local @@ -185,4 +237,5 @@ class TestShouldBringUpInterfaces: result = main._should_bring_up_interfaces(init, args) assert result == expected + # vi: ts=4 expandtab diff --git a/tests/unittests/cmd/test_query.py b/tests/unittests/cmd/test_query.py index b7d02d13..03a73bb5 100644 --- a/tests/unittests/cmd/test_query.py +++ b/tests/unittests/cmd/test_query.py @@ -4,19 +4,21 @@ import errno import gzip import json import os +from collections import namedtuple from io import BytesIO from textwrap import dedent import pytest -from collections import namedtuple from cloudinit.cmd import query from cloudinit.helpers import Paths from cloudinit.sources import ( - REDACT_SENSITIVE_VALUE, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE) -from tests.unittests.helpers import mock - + INSTANCE_JSON_FILE, + INSTANCE_JSON_SENSITIVE_FILE, + REDACT_SENSITIVE_VALUE, +) from cloudinit.util 
import b64e, write_file +from tests.unittests.helpers import mock def _gzip_data(data): @@ -30,9 +32,10 @@ def _gzip_data(data): class TestQuery: args = namedtuple( - 'queryargs', - ('debug dump_all format instance_data list_keys user_data vendor_data' - ' varname')) + "queryargs", + "debug dump_all format instance_data list_keys user_data vendor_data" + " varname", + ) def _setup_paths(self, tmpdir, ud_val=None, vd_val=None): """Write userdata and vendordata into a tmpdir. @@ -41,153 +44,191 @@ class TestQuery: 4-tuple : (paths, run_dir_path, userdata_path, vendordata_path) """ if ud_val: - user_data = tmpdir.join('user-data') + user_data = tmpdir.join("user-data") write_file(user_data.strpath, ud_val) else: user_data = None if vd_val: - vendor_data = tmpdir.join('vendor-data') + vendor_data = tmpdir.join("vendor-data") write_file(vendor_data.strpath, vd_val) else: vendor_data = None - run_dir = tmpdir.join('run_dir') + run_dir = tmpdir.join("run_dir") run_dir.ensure_dir() - cloud_dir = tmpdir.join('cloud_dir') + cloud_dir = tmpdir.join("cloud_dir") cloud_dir.ensure_dir() return ( Paths( - {'cloud_dir': cloud_dir.strpath, 'run_dir': run_dir.strpath} + {"cloud_dir": cloud_dir.strpath, "run_dir": run_dir.strpath} ), run_dir, user_data, - vendor_data + vendor_data, ) def test_handle_args_error_on_missing_param(self, caplog, capsys): """Error when missing required parameters and print usage.""" args = self.args( - debug=False, dump_all=False, format=None, instance_data=None, - list_keys=False, user_data=None, vendor_data=None, varname=None) + debug=False, + dump_all=False, + format=None, + instance_data=None, + list_keys=False, + user_data=None, + vendor_data=None, + varname=None, + ) with mock.patch( "cloudinit.cmd.query.addLogHandlerCLI", return_value="" ) as m_cli_log: - assert 1 == query.handle_args('anyname', args) + assert 1 == query.handle_args("anyname", args) expected_error = ( - 'Expected one of the options: --all, --format, --list-keys' - ' or varname\n') + "Expected one of the options: --all, --format, --list-keys" + " or varname\n" + ) assert expected_error in caplog.text out, _err = capsys.readouterr() - assert 'usage: query' in out + assert "usage: query" in out assert 1 == m_cli_log.call_count @pytest.mark.parametrize( - "inst_data,varname,expected_error", ( + "inst_data,varname,expected_error", + ( ( '{"v1": {"key-2": "value-2"}}', - 'v1.absent_leaf', - "instance-data 'v1' has no 'absent_leaf'\n" + "v1.absent_leaf", + "instance-data 'v1' has no 'absent_leaf'\n", ), ( '{"v1": {"key-2": "value-2"}}', - 'absent_key', - "Undefined instance-data key 'absent_key'\n" + "absent_key", + "Undefined instance-data key 'absent_key'\n", ), - ) + ), ) def test_handle_args_error_on_invalid_vaname_paths( self, inst_data, varname, expected_error, caplog, tmpdir ): """Error when varname is not a valid instance-data variable path.""" - instance_data = tmpdir.join('instance-data') + instance_data = tmpdir.join("instance-data") instance_data.write(inst_data) args = self.args( - debug=False, dump_all=False, format=None, + debug=False, + dump_all=False, + format=None, instance_data=instance_data.strpath, - list_keys=False, user_data=None, vendor_data=None, varname=varname + list_keys=False, + user_data=None, + vendor_data=None, + varname=varname, ) paths, _, _, _ = self._setup_paths(tmpdir) - with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths: + with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths: m_paths.return_value = paths with mock.patch( 
"cloudinit.cmd.query.addLogHandlerCLI", return_value="" ): - with mock.patch('cloudinit.cmd.query.load_userdata') as m_lud: + with mock.patch("cloudinit.cmd.query.load_userdata") as m_lud: m_lud.return_value = "ud" - assert 1 == query.handle_args('anyname', args) + assert 1 == query.handle_args("anyname", args) assert expected_error in caplog.text def test_handle_args_error_on_missing_instance_data(self, caplog, tmpdir): """When instance_data file path does not exist, log an error.""" - absent_fn = tmpdir.join('absent') + absent_fn = tmpdir.join("absent") args = self.args( - debug=False, dump_all=True, format=None, + debug=False, + dump_all=True, + format=None, instance_data=absent_fn.strpath, - list_keys=False, user_data='ud', vendor_data='vd', varname=None) - assert 1 == query.handle_args('anyname', args) + list_keys=False, + user_data="ud", + vendor_data="vd", + varname=None, + ) + assert 1 == query.handle_args("anyname", args) - msg = 'Missing instance-data file: %s' % absent_fn + msg = "Missing instance-data file: %s" % absent_fn assert msg in caplog.text def test_handle_args_error_when_no_read_permission_instance_data( self, caplog, tmpdir ): """When instance_data file is unreadable, log an error.""" - noread_fn = tmpdir.join('unreadable') - noread_fn.write('thou shall not pass') + noread_fn = tmpdir.join("unreadable") + noread_fn.write("thou shall not pass") args = self.args( - debug=False, dump_all=True, format=None, + debug=False, + dump_all=True, + format=None, instance_data=noread_fn.strpath, - list_keys=False, user_data='ud', vendor_data='vd', varname=None) - with mock.patch('cloudinit.cmd.query.util.load_file') as m_load: - m_load.side_effect = OSError(errno.EACCES, 'Not allowed') - assert 1 == query.handle_args('anyname', args) + list_keys=False, + user_data="ud", + vendor_data="vd", + varname=None, + ) + with mock.patch("cloudinit.cmd.query.util.load_file") as m_load: + m_load.side_effect = OSError(errno.EACCES, "Not allowed") + assert 1 == query.handle_args("anyname", args) msg = "No read permission on '%s'. 
Try sudo" % noread_fn assert msg in caplog.text def test_handle_args_defaults_instance_data(self, caplog, tmpdir): """When no instance_data argument, default to configured run_dir.""" args = self.args( - debug=False, dump_all=True, format=None, instance_data=None, - list_keys=False, user_data=None, vendor_data=None, varname=None) + debug=False, + dump_all=True, + format=None, + instance_data=None, + list_keys=False, + user_data=None, + vendor_data=None, + varname=None, + ) paths, run_dir, _, _ = self._setup_paths(tmpdir) - with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths: + with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths: m_paths.return_value = paths - assert 1 == query.handle_args('anyname', args) + assert 1 == query.handle_args("anyname", args) json_file = run_dir.join(INSTANCE_JSON_FILE) - msg = 'Missing instance-data file: %s' % json_file.strpath + msg = "Missing instance-data file: %s" % json_file.strpath assert msg in caplog.text def test_handle_args_root_fallsback_to_instance_data(self, caplog, tmpdir): """When no instance_data argument, root falls back to redacted json.""" args = self.args( - debug=False, dump_all=True, format=None, instance_data=None, - list_keys=False, user_data=None, vendor_data=None, varname=None) + debug=False, + dump_all=True, + format=None, + instance_data=None, + list_keys=False, + user_data=None, + vendor_data=None, + varname=None, + ) paths, run_dir, _, _ = self._setup_paths(tmpdir) - with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths: + with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths: m_paths.return_value = paths - with mock.patch('os.getuid') as m_getuid: + with mock.patch("os.getuid") as m_getuid: m_getuid.return_value = 0 - assert 1 == query.handle_args('anyname', args) + assert 1 == query.handle_args("anyname", args) json_file = run_dir.join(INSTANCE_JSON_FILE) sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE) - msg = ( - 'Missing root-readable %s. Using redacted %s instead.' % - ( - sensitive_file.strpath, json_file.strpath - ) + msg = "Missing root-readable %s. Using redacted %s instead." 
% ( + sensitive_file.strpath, + json_file.strpath, ) assert msg in caplog.text @pytest.mark.parametrize( - 'ud_src,ud_expected,vd_src,vd_expected', + "ud_src,ud_expected,vd_src,vd_expected", ( - ('hi mom', 'hi mom', 'hi pops', 'hi pops'), - ('ud'.encode('utf-8'), 'ud', 'vd'.encode('utf-8'), 'vd'), - (_gzip_data(b'ud'), 'ud', _gzip_data(b'vd'), 'vd'), - (_gzip_data('ud'.encode('utf-8')), 'ud', _gzip_data(b'vd'), 'vd'), - ) + ("hi mom", "hi mom", "hi pops", "hi pops"), + ("ud".encode("utf-8"), "ud", "vd".encode("utf-8"), "vd"), + (_gzip_data(b"ud"), "ud", _gzip_data(b"vd"), "vd"), + (_gzip_data("ud".encode("utf-8")), "ud", _gzip_data(b"vd"), "vd"), + ), ) def test_handle_args_root_processes_user_data( self, ud_src, ud_expected, vd_src, vd_expected, capsys, tmpdir @@ -199,23 +240,29 @@ class TestQuery: sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE) sensitive_file.write('{"my-var": "it worked"}') args = self.args( - debug=False, dump_all=True, format=None, instance_data=None, - list_keys=False, user_data=user_data.strpath, - vendor_data=vendor_data.strpath, varname=None) - with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths: + debug=False, + dump_all=True, + format=None, + instance_data=None, + list_keys=False, + user_data=user_data.strpath, + vendor_data=vendor_data.strpath, + varname=None, + ) + with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths: m_paths.return_value = paths - with mock.patch('os.getuid') as m_getuid: + with mock.patch("os.getuid") as m_getuid: m_getuid.return_value = 0 - assert 0 == query.handle_args('anyname', args) + assert 0 == query.handle_args("anyname", args) out, _err = capsys.readouterr() cmd_output = json.loads(out) - assert "it worked" == cmd_output['my-var'] + assert "it worked" == cmd_output["my-var"] if ud_expected == "ci-b64:": ud_expected = "ci-b64:{}".format(b64e(ud_src)) if vd_expected == "ci-b64:": vd_expected = "ci-b64:{}".format(b64e(vd_src)) - assert ud_expected == cmd_output['userdata'] - assert vd_expected == cmd_output['vendordata'] + assert ud_expected == cmd_output["userdata"] + assert vd_expected == cmd_output["vendordata"] def test_handle_args_user_vendor_data_defaults_to_instance_link( self, capsys, tmpdir @@ -231,13 +278,19 @@ class TestQuery: write_file(vd_path, "instance_link_vd") args = self.args( - debug=False, dump_all=True, format=None, instance_data=None, - list_keys=False, user_data=None, - vendor_data=None, varname=None) - with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths: + debug=False, + dump_all=True, + format=None, + instance_data=None, + list_keys=False, + user_data=None, + vendor_data=None, + varname=None, + ) + with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths: m_paths.return_value = paths - with mock.patch('os.getuid', return_value=0): - assert 0 == query.handle_args('anyname', args) + with mock.patch("os.getuid", return_value=0): + assert 0 == query.handle_args("anyname", args) expected = ( '{\n "my-var": "it worked",\n ' '"userdata": "instance_link_ud",\n ' @@ -251,19 +304,25 @@ class TestQuery: ): """When no instance_data argument, root uses sensitive json.""" paths, run_dir, user_data, vendor_data = self._setup_paths( - tmpdir, ud_val='ud', vd_val='vd' + tmpdir, ud_val="ud", vd_val="vd" ) sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE) sensitive_file.write('{"my-var": "it worked"}') args = self.args( - debug=False, dump_all=True, format=None, instance_data=None, - list_keys=False, user_data=user_data.strpath, - vendor_data=vendor_data.strpath, 
varname=None) - with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths: + debug=False, + dump_all=True, + format=None, + instance_data=None, + list_keys=False, + user_data=user_data.strpath, + vendor_data=vendor_data.strpath, + varname=None, + ) + with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths: m_paths.return_value = paths - with mock.patch('os.getuid') as m_getuid: + with mock.patch("os.getuid") as m_getuid: m_getuid.return_value = 0 - assert 0 == query.handle_args('anyname', args) + assert 0 == query.handle_args("anyname", args) expected = ( '{\n "my-var": "it worked",\n ' '"userdata": "ud",\n "vendordata": "vd"\n}\n' @@ -273,68 +332,85 @@ class TestQuery: def test_handle_args_dumps_all_instance_data(self, capsys, tmpdir): """When --all is specified query will dump all instance data vars.""" - instance_data = tmpdir.join('instance-data') + instance_data = tmpdir.join("instance-data") instance_data.write('{"my-var": "it worked"}') args = self.args( - debug=False, dump_all=True, format=None, - instance_data=instance_data.strpath, list_keys=False, - user_data='ud', vendor_data='vd', varname=None) - with mock.patch('os.getuid') as m_getuid: + debug=False, + dump_all=True, + format=None, + instance_data=instance_data.strpath, + list_keys=False, + user_data="ud", + vendor_data="vd", + varname=None, + ) + with mock.patch("os.getuid") as m_getuid: m_getuid.return_value = 100 - assert 0 == query.handle_args('anyname', args) + assert 0 == query.handle_args("anyname", args) expected = ( '{\n "my-var": "it worked",\n "userdata": "<%s> file:ud",\n' - ' "vendordata": "<%s> file:vd"\n}\n' % ( - REDACT_SENSITIVE_VALUE, REDACT_SENSITIVE_VALUE - ) + ' "vendordata": "<%s> file:vd"\n}\n' + % (REDACT_SENSITIVE_VALUE, REDACT_SENSITIVE_VALUE) ) out, _err = capsys.readouterr() assert expected == out def test_handle_args_returns_top_level_varname(self, capsys, tmpdir): """When the argument varname is passed, report its value.""" - instance_data = tmpdir.join('instance-data') + instance_data = tmpdir.join("instance-data") instance_data.write('{"my-var": "it worked"}') args = self.args( - debug=False, dump_all=True, format=None, - instance_data=instance_data.strpath, list_keys=False, - user_data='ud', vendor_data='vd', varname='my_var') - with mock.patch('os.getuid') as m_getuid: + debug=False, + dump_all=True, + format=None, + instance_data=instance_data.strpath, + list_keys=False, + user_data="ud", + vendor_data="vd", + varname="my_var", + ) + with mock.patch("os.getuid") as m_getuid: m_getuid.return_value = 100 - assert 0 == query.handle_args('anyname', args) + assert 0 == query.handle_args("anyname", args) out, _err = capsys.readouterr() - assert 'it worked\n' == out + assert "it worked\n" == out @pytest.mark.parametrize( - 'inst_data,varname,expected', + "inst_data,varname,expected", ( ( '{"v1": {"key-2": "value-2"}, "my-var": "it worked"}', - 'v1.key_2', - 'value-2\n' + "v1.key_2", + "value-2\n", ), # Assert no jinja underscore-delimited aliases are reported on CLI ( '{"v1": {"something-hyphenated": {"no.underscores":"x",' ' "no-alias": "y"}}, "my-var": "it worked"}', - 'v1.something_hyphenated', - '{\n "no-alias": "y",\n "no.underscores": "x"\n}\n' + "v1.something_hyphenated", + '{\n "no-alias": "y",\n "no.underscores": "x"\n}\n', ), - ) + ), ) def test_handle_args_returns_nested_varname( self, inst_data, varname, expected, capsys, tmpdir ): """If user_data file is a jinja template render instance-data vars.""" - instance_data = tmpdir.join('instance-data') + instance_data = 
tmpdir.join("instance-data") instance_data.write(inst_data) args = self.args( - debug=False, dump_all=False, format=None, - instance_data=instance_data.strpath, user_data='ud', - vendor_data='vd', list_keys=False, varname=varname) - with mock.patch('os.getuid') as m_getuid: + debug=False, + dump_all=False, + format=None, + instance_data=instance_data.strpath, + user_data="ud", + vendor_data="vd", + list_keys=False, + varname=varname, + ) + with mock.patch("os.getuid") as m_getuid: m_getuid.return_value = 100 - assert 0 == query.handle_args('anyname', args) + assert 0 == query.handle_args("anyname", args) out, _err = capsys.readouterr() assert expected == out @@ -342,11 +418,13 @@ class TestQuery: self, capsys, tmpdir ): """Any standardized vars under v# are promoted as top-level aliases.""" - instance_data = tmpdir.join('instance-data') + instance_data = tmpdir.join("instance-data") instance_data.write( '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},' - ' "top": "gun"}') - expected = dedent("""\ + ' "top": "gun"}' + ) + expected = dedent( + """\ { "top": "gun", "userdata": "<redacted for non-root user> file:ud", @@ -360,14 +438,21 @@ class TestQuery: "v2_2": "val2.2", "vendordata": "<redacted for non-root user> file:vd" } - """) + """ + ) args = self.args( - debug=False, dump_all=True, format=None, - instance_data=instance_data.strpath, user_data='ud', - vendor_data='vd', list_keys=False, varname=None) - with mock.patch('os.getuid') as m_getuid: + debug=False, + dump_all=True, + format=None, + instance_data=instance_data.strpath, + user_data="ud", + vendor_data="vd", + list_keys=False, + varname=None, + ) + with mock.patch("os.getuid") as m_getuid: m_getuid.return_value = 100 - assert 0 == query.handle_args('anyname', args) + assert 0 == query.handle_args("anyname", args) out, _err = capsys.readouterr() assert expected == out @@ -375,18 +460,25 @@ class TestQuery: self, capsys, tmpdir ): """Sort all top-level keys when only --list-keys provided.""" - instance_data = tmpdir.join('instance-data') + instance_data = tmpdir.join("instance-data") instance_data.write( '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},' - ' "top": "gun"}') - expected = 'top\nuserdata\nv1\nv1_1\nv2\nv2_2\nvendordata\n' + ' "top": "gun"}' + ) + expected = "top\nuserdata\nv1\nv1_1\nv2\nv2_2\nvendordata\n" args = self.args( - debug=False, dump_all=False, format=None, - instance_data=instance_data.strpath, list_keys=True, - user_data='ud', vendor_data='vd', varname=None) - with mock.patch('os.getuid') as m_getuid: + debug=False, + dump_all=False, + format=None, + instance_data=instance_data.strpath, + list_keys=True, + user_data="ud", + vendor_data="vd", + varname=None, + ) + with mock.patch("os.getuid") as m_getuid: m_getuid.return_value = 100 - assert 0 == query.handle_args('anyname', args) + assert 0 == query.handle_args("anyname", args) out, _err = capsys.readouterr() assert expected == out @@ -394,18 +486,25 @@ class TestQuery: self, capsys, tmpdir ): """Sort all nested keys of varname object when --list-keys provided.""" - instance_data = tmpdir.join('instance-data') + instance_data = tmpdir.join("instance-data") instance_data.write( - '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2":' + - ' {"v2_2": "val2.2"}, "top": "gun"}') - expected = 'v1_1\nv1_2\n' + '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2":' + + ' {"v2_2": "val2.2"}, "top": "gun"}' + ) + expected = "v1_1\nv1_2\n" args = self.args( - debug=False, dump_all=False, format=None, - instance_data=instance_data.strpath, list_keys=True, - 
user_data='ud', vendor_data='vd', varname='v1') - with mock.patch('os.getuid') as m_getuid: + debug=False, + dump_all=False, + format=None, + instance_data=instance_data.strpath, + list_keys=True, + user_data="ud", + vendor_data="vd", + varname="v1", + ) + with mock.patch("os.getuid") as m_getuid: m_getuid.return_value = 100 - assert 0 == query.handle_args('anyname', args) + assert 0 == query.handle_args("anyname", args) out, _err = capsys.readouterr() assert expected == out @@ -413,18 +512,26 @@ class TestQuery: self, caplog, tmpdir ): """Raise an error when --list-keys and varname specify a non-list.""" - instance_data = tmpdir.join('instance-data') + instance_data = tmpdir.join("instance-data") instance_data.write( - '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2": ' + - '{"v2_2": "val2.2"}, "top": "gun"}') + '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2": ' + + '{"v2_2": "val2.2"}, "top": "gun"}' + ) expected_error = "--list-keys provided but 'top' is not a dict" args = self.args( - debug=False, dump_all=False, format=None, - instance_data=instance_data.strpath, list_keys=True, - user_data='ud', vendor_data='vd', varname='top') - with mock.patch('os.getuid') as m_getuid: + debug=False, + dump_all=False, + format=None, + instance_data=instance_data.strpath, + list_keys=True, + user_data="ud", + vendor_data="vd", + varname="top", + ) + with mock.patch("os.getuid") as m_getuid: m_getuid.return_value = 100 - assert 1 == query.handle_args('anyname', args) + assert 1 == query.handle_args("anyname", args) assert expected_error in caplog.text + # vi: ts=4 expandtab diff --git a/tests/unittests/cmd/test_status.py b/tests/unittests/cmd/test_status.py index 49eae043..acd1fea5 100644 --- a/tests/unittests/cmd/test_status.py +++ b/tests/unittests/cmd/test_status.py @@ -1,26 +1,25 @@ # This file is part of cloud-init. See LICENSE file for license information. 
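# [Editor's aside] The test_query.py hunks above all exercise one pattern:
# build an args object, mock os.getuid() (0 means root and unlocks the
# sensitive instance-data; any other uid gets redacted user/vendor data),
# call query.handle_args() and assert on its return code plus captured
# stdout. A minimal sketch of that pattern, reusing the surrounding
# class's helpers (self.args, capsys, tmpdir); the "my-var" payload is
# illustrative only:
#
#     def test_handle_args_redacts_for_non_root(self, capsys, tmpdir):
#         instance_data = tmpdir.join("instance-data")
#         instance_data.write('{"my-var": "it worked"}')
#         args = self.args(
#             debug=False, dump_all=True, format=None,
#             instance_data=instance_data.strpath, list_keys=False,
#             user_data="ud", vendor_data="vd", varname=None,
#         )
#         with mock.patch("os.getuid", return_value=100):
#             assert 0 == query.handle_args("anyname", args)
#         out, _err = capsys.readouterr()
#         # Non-root callers see the REDACT_SENSITIVE_VALUE marker in
#         # place of the user-data and vendor-data file contents.
#         assert REDACT_SENSITIVE_VALUE in out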
-from collections import namedtuple import os +from collections import namedtuple from io import StringIO from textwrap import dedent from cloudinit.atomic_helper import write_json from cloudinit.cmd import status from cloudinit.util import ensure_file -from tests.unittests.helpers import CiTestCase, wrap_and_call, mock +from tests.unittests.helpers import CiTestCase, mock, wrap_and_call -mypaths = namedtuple('MyPaths', 'run_dir') -myargs = namedtuple('MyArgs', 'long wait') +mypaths = namedtuple("MyPaths", "run_dir") +myargs = namedtuple("MyArgs", "long wait") class TestStatus(CiTestCase): - def setUp(self): super(TestStatus, self).setUp() self.new_root = self.tmp_dir() - self.status_file = self.tmp_path('status.json', self.new_root) - self.disable_file = self.tmp_path('cloudinit-disable', self.new_root) + self.status_file = self.tmp_path("status.json", self.new_root) + self.disable_file = self.tmp_path("cloudinit-disable", self.new_root) self.paths = mypaths(run_dir=self.new_root) class FakeInit(object): @@ -35,285 +34,419 @@ class TestStatus(CiTestCase): self.init_class = FakeInit def test__is_cloudinit_disabled_false_on_sysvinit(self): - '''When not in an environment using systemd, return False.''' + """When not in an environment using systemd, return False.""" ensure_file(self.disable_file) # Create the ignored disable file (is_disabled, reason) = wrap_and_call( - 'cloudinit.cmd.status', - {'uses_systemd': False, - 'get_cmdline': "root=/dev/my-root not-important"}, - status._is_cloudinit_disabled, self.disable_file, self.paths) + "cloudinit.cmd.status", + { + "uses_systemd": False, + "get_cmdline": "root=/dev/my-root not-important", + }, + status._is_cloudinit_disabled, + self.disable_file, + self.paths, + ) self.assertFalse( - is_disabled, 'expected enabled cloud-init on sysvinit') - self.assertEqual('Cloud-init enabled on sysvinit', reason) + is_disabled, "expected enabled cloud-init on sysvinit" + ) + self.assertEqual("Cloud-init enabled on sysvinit", reason) def test__is_cloudinit_disabled_true_on_disable_file(self): - '''When using systemd and disable_file is present return disabled.''' + """When using systemd and disable_file is present return disabled.""" ensure_file(self.disable_file) # Create observed disable file (is_disabled, reason) = wrap_and_call( - 'cloudinit.cmd.status', - {'uses_systemd': True, - 'get_cmdline': "root=/dev/my-root not-important"}, - status._is_cloudinit_disabled, self.disable_file, self.paths) - self.assertTrue(is_disabled, 'expected disabled cloud-init') + "cloudinit.cmd.status", + { + "uses_systemd": True, + "get_cmdline": "root=/dev/my-root not-important", + }, + status._is_cloudinit_disabled, + self.disable_file, + self.paths, + ) + self.assertTrue(is_disabled, "expected disabled cloud-init") self.assertEqual( - 'Cloud-init disabled by {0}'.format(self.disable_file), reason) + "Cloud-init disabled by {0}".format(self.disable_file), reason + ) def test__is_cloudinit_disabled_false_on_kernel_cmdline_enable(self): - '''Not disabled when using systemd and enabled via commandline.''' + """Not disabled when using systemd and enabled via commandline.""" ensure_file(self.disable_file) # Create ignored disable file (is_disabled, reason) = wrap_and_call( - 'cloudinit.cmd.status', - {'uses_systemd': True, - 'get_cmdline': 'something cloud-init=enabled else'}, - status._is_cloudinit_disabled, self.disable_file, self.paths) - self.assertFalse(is_disabled, 'expected enabled cloud-init') + "cloudinit.cmd.status", + { + "uses_systemd": True, + "get_cmdline": 
"something cloud-init=enabled else", + }, + status._is_cloudinit_disabled, + self.disable_file, + self.paths, + ) + self.assertFalse(is_disabled, "expected enabled cloud-init") self.assertEqual( - 'Cloud-init enabled by kernel command line cloud-init=enabled', - reason) + "Cloud-init enabled by kernel command line cloud-init=enabled", + reason, + ) def test__is_cloudinit_disabled_true_on_kernel_cmdline(self): - '''When using systemd and disable_file is present return disabled.''' + """When using systemd and disable_file is present return disabled.""" (is_disabled, reason) = wrap_and_call( - 'cloudinit.cmd.status', - {'uses_systemd': True, - 'get_cmdline': 'something cloud-init=disabled else'}, - status._is_cloudinit_disabled, self.disable_file, self.paths) - self.assertTrue(is_disabled, 'expected disabled cloud-init') + "cloudinit.cmd.status", + { + "uses_systemd": True, + "get_cmdline": "something cloud-init=disabled else", + }, + status._is_cloudinit_disabled, + self.disable_file, + self.paths, + ) + self.assertTrue(is_disabled, "expected disabled cloud-init") self.assertEqual( - 'Cloud-init disabled by kernel parameter cloud-init=disabled', - reason) + "Cloud-init disabled by kernel parameter cloud-init=disabled", + reason, + ) def test__is_cloudinit_disabled_true_when_generator_disables(self): - '''When cloud-init-generator doesn't write enabled file return True.''' - enabled_file = os.path.join(self.paths.run_dir, 'enabled') + """When cloud-init-generator doesn't write enabled file return True.""" + enabled_file = os.path.join(self.paths.run_dir, "enabled") self.assertFalse(os.path.exists(enabled_file)) (is_disabled, reason) = wrap_and_call( - 'cloudinit.cmd.status', - {'uses_systemd': True, - 'get_cmdline': 'something'}, - status._is_cloudinit_disabled, self.disable_file, self.paths) - self.assertTrue(is_disabled, 'expected disabled cloud-init') - self.assertEqual('Cloud-init disabled by cloud-init-generator', reason) + "cloudinit.cmd.status", + {"uses_systemd": True, "get_cmdline": "something"}, + status._is_cloudinit_disabled, + self.disable_file, + self.paths, + ) + self.assertTrue(is_disabled, "expected disabled cloud-init") + self.assertEqual("Cloud-init disabled by cloud-init-generator", reason) def test__is_cloudinit_disabled_false_when_enabled_in_systemd(self): - '''Report enabled when systemd generator creates the enabled file.''' - enabled_file = os.path.join(self.paths.run_dir, 'enabled') + """Report enabled when systemd generator creates the enabled file.""" + enabled_file = os.path.join(self.paths.run_dir, "enabled") ensure_file(enabled_file) (is_disabled, reason) = wrap_and_call( - 'cloudinit.cmd.status', - {'uses_systemd': True, - 'get_cmdline': 'something ignored'}, - status._is_cloudinit_disabled, self.disable_file, self.paths) - self.assertFalse(is_disabled, 'expected enabled cloud-init') + "cloudinit.cmd.status", + {"uses_systemd": True, "get_cmdline": "something ignored"}, + status._is_cloudinit_disabled, + self.disable_file, + self.paths, + ) + self.assertFalse(is_disabled, "expected enabled cloud-init") self.assertEqual( - 'Cloud-init enabled by systemd cloud-init-generator', reason) + "Cloud-init enabled by systemd cloud-init-generator", reason + ) def test_status_returns_not_run(self): - '''When status.json does not exist yet, return 'not run'.''' + """When status.json does not exist yet, return 'not run'.""" self.assertFalse( - os.path.exists(self.status_file), 'Unexpected status.json found') + os.path.exists(self.status_file), "Unexpected status.json found" 
+ ) cmdargs = myargs(long=False, wait=False) - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout: retcode = wrap_and_call( - 'cloudinit.cmd.status', - {'_is_cloudinit_disabled': (False, ''), - 'Init': {'side_effect': self.init_class}}, - status.handle_status_args, 'ignored', cmdargs) + "cloudinit.cmd.status", + { + "_is_cloudinit_disabled": (False, ""), + "Init": {"side_effect": self.init_class}, + }, + status.handle_status_args, + "ignored", + cmdargs, + ) self.assertEqual(0, retcode) - self.assertEqual('status: not run\n', m_stdout.getvalue()) + self.assertEqual("status: not run\n", m_stdout.getvalue()) def test_status_returns_disabled_long_on_presence_of_disable_file(self): - '''When cloudinit is disabled, return disabled reason.''' + """When cloudinit is disabled, return disabled reason.""" checked_files = [] def fakeexists(filepath): checked_files.append(filepath) - status_file = os.path.join(self.paths.run_dir, 'status.json') + status_file = os.path.join(self.paths.run_dir, "status.json") return bool(not filepath == status_file) cmdargs = myargs(long=True, wait=False) - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout: retcode = wrap_and_call( - 'cloudinit.cmd.status', - {'os.path.exists': {'side_effect': fakeexists}, - '_is_cloudinit_disabled': (True, 'disabled for some reason'), - 'Init': {'side_effect': self.init_class}}, - status.handle_status_args, 'ignored', cmdargs) + "cloudinit.cmd.status", + { + "os.path.exists": {"side_effect": fakeexists}, + "_is_cloudinit_disabled": ( + True, + "disabled for some reason", + ), + "Init": {"side_effect": self.init_class}, + }, + status.handle_status_args, + "ignored", + cmdargs, + ) self.assertEqual(0, retcode) self.assertEqual( - [os.path.join(self.paths.run_dir, 'status.json')], - checked_files) - expected = dedent('''\ + [os.path.join(self.paths.run_dir, "status.json")], checked_files + ) + expected = dedent( + """\ status: disabled detail: disabled for some reason - ''') + """ + ) self.assertEqual(expected, m_stdout.getvalue()) def test_status_returns_running_on_no_results_json(self): - '''Report running when status.json exists but result.json does not.''' - result_file = self.tmp_path('result.json', self.new_root) + """Report running when status.json exists but result.json does not.""" + result_file = self.tmp_path("result.json", self.new_root) write_json(self.status_file, {}) self.assertFalse( - os.path.exists(result_file), 'Unexpected result.json found') + os.path.exists(result_file), "Unexpected result.json found" + ) cmdargs = myargs(long=False, wait=False) - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout: retcode = wrap_and_call( - 'cloudinit.cmd.status', - {'_is_cloudinit_disabled': (False, ''), - 'Init': {'side_effect': self.init_class}}, - status.handle_status_args, 'ignored', cmdargs) + "cloudinit.cmd.status", + { + "_is_cloudinit_disabled": (False, ""), + "Init": {"side_effect": self.init_class}, + }, + status.handle_status_args, + "ignored", + cmdargs, + ) self.assertEqual(0, retcode) - self.assertEqual('status: running\n', m_stdout.getvalue()) + self.assertEqual("status: running\n", m_stdout.getvalue()) def test_status_returns_running(self): - '''Report running when status exists with an unfinished stage.''' - ensure_file(self.tmp_path('result.json', self.new_root)) - 
write_json(self.status_file, - {'v1': {'init': {'start': 1, 'finished': None}}}) + """Report running when status exists with an unfinished stage.""" + ensure_file(self.tmp_path("result.json", self.new_root)) + write_json( + self.status_file, {"v1": {"init": {"start": 1, "finished": None}}} + ) cmdargs = myargs(long=False, wait=False) - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout: retcode = wrap_and_call( - 'cloudinit.cmd.status', - {'_is_cloudinit_disabled': (False, ''), - 'Init': {'side_effect': self.init_class}}, - status.handle_status_args, 'ignored', cmdargs) + "cloudinit.cmd.status", + { + "_is_cloudinit_disabled": (False, ""), + "Init": {"side_effect": self.init_class}, + }, + status.handle_status_args, + "ignored", + cmdargs, + ) self.assertEqual(0, retcode) - self.assertEqual('status: running\n', m_stdout.getvalue()) + self.assertEqual("status: running\n", m_stdout.getvalue()) def test_status_returns_done(self): - '''Report done results.json exists no stages are unfinished.''' - ensure_file(self.tmp_path('result.json', self.new_root)) + """Report done results.json exists no stages are unfinished.""" + ensure_file(self.tmp_path("result.json", self.new_root)) write_json( self.status_file, - {'v1': {'stage': None, # No current stage running - 'datasource': ( - 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]' - '[dsmode=net]'), - 'blah': {'finished': 123.456}, - 'init': {'errors': [], 'start': 124.567, - 'finished': 125.678}, - 'init-local': {'start': 123.45, 'finished': 123.46}}}) + { + "v1": { + "stage": None, # No current stage running + "datasource": ( + "DataSourceNoCloud [seed=/var/.../seed/nocloud-net]" + "[dsmode=net]" + ), + "blah": {"finished": 123.456}, + "init": { + "errors": [], + "start": 124.567, + "finished": 125.678, + }, + "init-local": {"start": 123.45, "finished": 123.46}, + } + }, + ) cmdargs = myargs(long=False, wait=False) - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout: retcode = wrap_and_call( - 'cloudinit.cmd.status', - {'_is_cloudinit_disabled': (False, ''), - 'Init': {'side_effect': self.init_class}}, - status.handle_status_args, 'ignored', cmdargs) + "cloudinit.cmd.status", + { + "_is_cloudinit_disabled": (False, ""), + "Init": {"side_effect": self.init_class}, + }, + status.handle_status_args, + "ignored", + cmdargs, + ) self.assertEqual(0, retcode) - self.assertEqual('status: done\n', m_stdout.getvalue()) + self.assertEqual("status: done\n", m_stdout.getvalue()) def test_status_returns_done_long(self): - '''Long format of done status includes datasource info.''' - ensure_file(self.tmp_path('result.json', self.new_root)) + """Long format of done status includes datasource info.""" + ensure_file(self.tmp_path("result.json", self.new_root)) write_json( self.status_file, - {'v1': {'stage': None, - 'datasource': ( - 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]' - '[dsmode=net]'), - 'init': {'start': 124.567, 'finished': 125.678}, - 'init-local': {'start': 123.45, 'finished': 123.46}}}) + { + "v1": { + "stage": None, + "datasource": ( + "DataSourceNoCloud [seed=/var/.../seed/nocloud-net]" + "[dsmode=net]" + ), + "init": {"start": 124.567, "finished": 125.678}, + "init-local": {"start": 123.45, "finished": 123.46}, + } + }, + ) cmdargs = myargs(long=True, wait=False) - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with mock.patch("sys.stdout", 
new_callable=StringIO) as m_stdout: retcode = wrap_and_call( - 'cloudinit.cmd.status', - {'_is_cloudinit_disabled': (False, ''), - 'Init': {'side_effect': self.init_class}}, - status.handle_status_args, 'ignored', cmdargs) + "cloudinit.cmd.status", + { + "_is_cloudinit_disabled": (False, ""), + "Init": {"side_effect": self.init_class}, + }, + status.handle_status_args, + "ignored", + cmdargs, + ) self.assertEqual(0, retcode) - expected = dedent('''\ + expected = dedent( + """\ status: done time: Thu, 01 Jan 1970 00:02:05 +0000 detail: DataSourceNoCloud [seed=/var/.../seed/nocloud-net][dsmode=net] - ''') + """ + ) self.assertEqual(expected, m_stdout.getvalue()) def test_status_on_errors(self): - '''Reports error when any stage has errors.''' + """Reports error when any stage has errors.""" write_json( self.status_file, - {'v1': {'stage': None, - 'blah': {'errors': [], 'finished': 123.456}, - 'init': {'errors': ['error1'], 'start': 124.567, - 'finished': 125.678}, - 'init-local': {'start': 123.45, 'finished': 123.46}}}) + { + "v1": { + "stage": None, + "blah": {"errors": [], "finished": 123.456}, + "init": { + "errors": ["error1"], + "start": 124.567, + "finished": 125.678, + }, + "init-local": {"start": 123.45, "finished": 123.46}, + } + }, + ) cmdargs = myargs(long=False, wait=False) - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout: retcode = wrap_and_call( - 'cloudinit.cmd.status', - {'_is_cloudinit_disabled': (False, ''), - 'Init': {'side_effect': self.init_class}}, - status.handle_status_args, 'ignored', cmdargs) + "cloudinit.cmd.status", + { + "_is_cloudinit_disabled": (False, ""), + "Init": {"side_effect": self.init_class}, + }, + status.handle_status_args, + "ignored", + cmdargs, + ) self.assertEqual(1, retcode) - self.assertEqual('status: error\n', m_stdout.getvalue()) + self.assertEqual("status: error\n", m_stdout.getvalue()) def test_status_on_errors_long(self): - '''Long format of error status includes all error messages.''' + """Long format of error status includes all error messages.""" write_json( self.status_file, - {'v1': {'stage': None, - 'datasource': ( - 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]' - '[dsmode=net]'), - 'init': {'errors': ['error1'], 'start': 124.567, - 'finished': 125.678}, - 'init-local': {'errors': ['error2', 'error3'], - 'start': 123.45, 'finished': 123.46}}}) + { + "v1": { + "stage": None, + "datasource": ( + "DataSourceNoCloud [seed=/var/.../seed/nocloud-net]" + "[dsmode=net]" + ), + "init": { + "errors": ["error1"], + "start": 124.567, + "finished": 125.678, + }, + "init-local": { + "errors": ["error2", "error3"], + "start": 123.45, + "finished": 123.46, + }, + } + }, + ) cmdargs = myargs(long=True, wait=False) - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout: retcode = wrap_and_call( - 'cloudinit.cmd.status', - {'_is_cloudinit_disabled': (False, ''), - 'Init': {'side_effect': self.init_class}}, - status.handle_status_args, 'ignored', cmdargs) + "cloudinit.cmd.status", + { + "_is_cloudinit_disabled": (False, ""), + "Init": {"side_effect": self.init_class}, + }, + status.handle_status_args, + "ignored", + cmdargs, + ) self.assertEqual(1, retcode) - expected = dedent('''\ + expected = dedent( + """\ status: error time: Thu, 01 Jan 1970 00:02:05 +0000 detail: error1 error2 error3 - ''') + """ + ) self.assertEqual(expected, m_stdout.getvalue()) def 
test_status_returns_running_long_format(self): - '''Long format reports the stage in which we are running.''' + """Long format reports the stage in which we are running.""" write_json( self.status_file, - {'v1': {'stage': 'init', - 'init': {'start': 124.456, 'finished': None}, - 'init-local': {'start': 123.45, 'finished': 123.46}}}) + { + "v1": { + "stage": "init", + "init": {"start": 124.456, "finished": None}, + "init-local": {"start": 123.45, "finished": 123.46}, + } + }, + ) cmdargs = myargs(long=True, wait=False) - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout: retcode = wrap_and_call( - 'cloudinit.cmd.status', - {'_is_cloudinit_disabled': (False, ''), - 'Init': {'side_effect': self.init_class}}, - status.handle_status_args, 'ignored', cmdargs) + "cloudinit.cmd.status", + { + "_is_cloudinit_disabled": (False, ""), + "Init": {"side_effect": self.init_class}, + }, + status.handle_status_args, + "ignored", + cmdargs, + ) self.assertEqual(0, retcode) - expected = dedent('''\ + expected = dedent( + """\ status: running time: Thu, 01 Jan 1970 00:02:04 +0000 detail: Running in stage: init - ''') + """ + ) self.assertEqual(expected, m_stdout.getvalue()) def test_status_wait_blocks_until_done(self): - '''Specifying wait will poll every 1/4 second until done state.''' + """Specifying wait will poll every 1/4 second until done state.""" running_json = { - 'v1': {'stage': 'init', - 'init': {'start': 124.456, 'finished': None}, - 'init-local': {'start': 123.45, 'finished': 123.46}}} + "v1": { + "stage": "init", + "init": {"start": 124.456, "finished": None}, + "init-local": {"start": 123.45, "finished": 123.46}, + } + } done_json = { - 'v1': {'stage': None, - 'init': {'start': 124.456, 'finished': 125.678}, - 'init-local': {'start': 123.45, 'finished': 123.46}}} + "v1": { + "stage": None, + "init": {"start": 124.456, "finished": 125.678}, + "init-local": {"start": 123.45, "finished": 123.46}, + } + } self.sleep_calls = 0 @@ -324,32 +457,46 @@ class TestStatus(CiTestCase): write_json(self.status_file, running_json) elif self.sleep_calls == 3: write_json(self.status_file, done_json) - result_file = self.tmp_path('result.json', self.new_root) + result_file = self.tmp_path("result.json", self.new_root) ensure_file(result_file) cmdargs = myargs(long=False, wait=True) - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout: retcode = wrap_and_call( - 'cloudinit.cmd.status', - {'sleep': {'side_effect': fake_sleep}, - '_is_cloudinit_disabled': (False, ''), - 'Init': {'side_effect': self.init_class}}, - status.handle_status_args, 'ignored', cmdargs) + "cloudinit.cmd.status", + { + "sleep": {"side_effect": fake_sleep}, + "_is_cloudinit_disabled": (False, ""), + "Init": {"side_effect": self.init_class}, + }, + status.handle_status_args, + "ignored", + cmdargs, + ) self.assertEqual(0, retcode) self.assertEqual(4, self.sleep_calls) - self.assertEqual('....\nstatus: done\n', m_stdout.getvalue()) + self.assertEqual("....\nstatus: done\n", m_stdout.getvalue()) def test_status_wait_blocks_until_error(self): - '''Specifying wait will poll every 1/4 second until error state.''' + """Specifying wait will poll every 1/4 second until error state.""" running_json = { - 'v1': {'stage': 'init', - 'init': {'start': 124.456, 'finished': None}, - 'init-local': {'start': 123.45, 'finished': 123.46}}} + "v1": { + "stage": "init", + "init": {"start": 124.456, 
"finished": None}, + "init-local": {"start": 123.45, "finished": 123.46}, + } + } error_json = { - 'v1': {'stage': None, - 'init': {'errors': ['error1'], 'start': 124.456, - 'finished': 125.678}, - 'init-local': {'start': 123.45, 'finished': 123.46}}} + "v1": { + "stage": None, + "init": { + "errors": ["error1"], + "start": 124.456, + "finished": 125.678, + }, + "init-local": {"start": 123.45, "finished": 123.46}, + } + } self.sleep_calls = 0 @@ -362,30 +509,40 @@ class TestStatus(CiTestCase): write_json(self.status_file, error_json) cmdargs = myargs(long=False, wait=True) - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout: retcode = wrap_and_call( - 'cloudinit.cmd.status', - {'sleep': {'side_effect': fake_sleep}, - '_is_cloudinit_disabled': (False, ''), - 'Init': {'side_effect': self.init_class}}, - status.handle_status_args, 'ignored', cmdargs) + "cloudinit.cmd.status", + { + "sleep": {"side_effect": fake_sleep}, + "_is_cloudinit_disabled": (False, ""), + "Init": {"side_effect": self.init_class}, + }, + status.handle_status_args, + "ignored", + cmdargs, + ) self.assertEqual(1, retcode) self.assertEqual(4, self.sleep_calls) - self.assertEqual('....\nstatus: error\n', m_stdout.getvalue()) + self.assertEqual("....\nstatus: error\n", m_stdout.getvalue()) def test_status_main(self): - '''status.main can be run as a standalone script.''' - write_json(self.status_file, - {'v1': {'init': {'start': 1, 'finished': None}}}) + """status.main can be run as a standalone script.""" + write_json( + self.status_file, {"v1": {"init": {"start": 1, "finished": None}}} + ) with self.assertRaises(SystemExit) as context_manager: - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with mock.patch("sys.stdout", new_callable=StringIO) as m_stdout: wrap_and_call( - 'cloudinit.cmd.status', - {'sys.argv': {'new': ['status']}, - '_is_cloudinit_disabled': (False, ''), - 'Init': {'side_effect': self.init_class}}, - status.main) + "cloudinit.cmd.status", + { + "sys.argv": {"new": ["status"]}, + "_is_cloudinit_disabled": (False, ""), + "Init": {"side_effect": self.init_class}, + }, + status.main, + ) self.assertEqual(0, context_manager.exception.code) - self.assertEqual('status: running\n', m_stdout.getvalue()) + self.assertEqual("status: running\n", m_stdout.getvalue()) + # vi: ts=4 expandtab syntax=python diff --git a/tests/unittests/config/test_apt_conf_v1.py b/tests/unittests/config/test_apt_conf_v1.py index 98d99945..5a75cf0a 100644 --- a/tests/unittests/config/test_apt_conf_v1.py +++ b/tests/unittests/config/test_apt_conf_v1.py @@ -1,16 +1,15 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-from cloudinit.config import cc_apt_configure -from cloudinit import util - -from tests.unittests.helpers import TestCase - import copy import os import re import shutil import tempfile +from cloudinit import util +from cloudinit.config import cc_apt_configure +from tests.unittests.helpers import TestCase + class TestAptProxyConfig(TestCase): def setUp(self): @@ -23,10 +22,12 @@ class TestAptProxyConfig(TestCase): def _search_apt_config(self, contents, ptype, value): return re.search( r"acquire::%s::proxy\s+[\"']%s[\"'];\n" % (ptype, value), - contents, flags=re.IGNORECASE) + contents, + flags=re.IGNORECASE, + ) def test_apt_proxy_written(self): - cfg = {'proxy': 'myproxy'} + cfg = {"proxy": "myproxy"} cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile) self.assertTrue(os.path.isfile(self.pfile)) @@ -36,7 +37,7 @@ class TestAptProxyConfig(TestCase): self.assertTrue(self._search_apt_config(contents, "http", "myproxy")) def test_apt_http_proxy_written(self): - cfg = {'http_proxy': 'myproxy'} + cfg = {"http_proxy": "myproxy"} cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile) self.assertTrue(os.path.isfile(self.pfile)) @@ -46,14 +47,17 @@ class TestAptProxyConfig(TestCase): self.assertTrue(self._search_apt_config(contents, "http", "myproxy")) def test_apt_all_proxy_written(self): - cfg = {'http_proxy': 'myproxy_http_proxy', - 'https_proxy': 'myproxy_https_proxy', - 'ftp_proxy': 'myproxy_ftp_proxy'} - - values = {'http': cfg['http_proxy'], - 'https': cfg['https_proxy'], - 'ftp': cfg['ftp_proxy'], - } + cfg = { + "http_proxy": "myproxy_http_proxy", + "https_proxy": "myproxy_https_proxy", + "ftp_proxy": "myproxy_ftp_proxy", + } + + values = { + "http": cfg["http_proxy"], + "https": cfg["https_proxy"], + "ftp": cfg["ftp_proxy"], + } cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile) @@ -73,15 +77,16 @@ class TestAptProxyConfig(TestCase): def test_proxy_replaced(self): util.write_file(self.cfile, "content doesnt matter") - cc_apt_configure.apply_apt_config({'proxy': "foo"}, - self.pfile, self.cfile) + cc_apt_configure.apply_apt_config( + {"proxy": "foo"}, self.pfile, self.cfile + ) self.assertTrue(os.path.isfile(self.pfile)) contents = util.load_file(self.pfile) self.assertTrue(self._search_apt_config(contents, "http", "foo")) def test_config_written(self): - payload = 'this is my apt config' - cfg = {'conf': payload} + payload = "this is my apt config" + cfg = {"conf": payload} cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile) @@ -92,8 +97,9 @@ class TestAptProxyConfig(TestCase): def test_config_replaced(self): util.write_file(self.pfile, "content doesnt matter") - cc_apt_configure.apply_apt_config({'conf': "foo"}, - self.pfile, self.cfile) + cc_apt_configure.apply_apt_config( + {"conf": "foo"}, self.pfile, self.cfile + ) self.assertTrue(os.path.isfile(self.cfile)) self.assertEqual(util.load_file(self.cfile), "foo") @@ -109,21 +115,23 @@ class TestConversion(TestCase): def test_convert_with_apt_mirror_as_empty_string(self): # an empty apt_mirror is the same as no apt_mirror empty_m_found = cc_apt_configure.convert_to_v3_apt_format( - {'apt_mirror': ''}) + {"apt_mirror": ""} + ) default_found = cc_apt_configure.convert_to_v3_apt_format({}) self.assertEqual(default_found, empty_m_found) def test_convert_with_apt_mirror(self): - mirror = 'http://my.mirror/ubuntu' - f = cc_apt_configure.convert_to_v3_apt_format({'apt_mirror': mirror}) - self.assertIn(mirror, set(m['uri'] for m in f['apt']['primary'])) + mirror = "http://my.mirror/ubuntu" + f = 
cc_apt_configure.convert_to_v3_apt_format({"apt_mirror": mirror}) + self.assertIn(mirror, set(m["uri"] for m in f["apt"]["primary"])) def test_no_old_content(self): - mirror = 'http://my.mirror/ubuntu' - mydata = {'apt': {'primary': {'arches': ['default'], 'uri': mirror}}} + mirror = "http://my.mirror/ubuntu" + mydata = {"apt": {"primary": {"arches": ["default"], "uri": mirror}}} expected = copy.deepcopy(mydata) - self.assertEqual(expected, - cc_apt_configure.convert_to_v3_apt_format(mydata)) + self.assertEqual( + expected, cc_apt_configure.convert_to_v3_apt_format(mydata) + ) # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_apt_configure_sources_list_v1.py b/tests/unittests/config/test_apt_configure_sources_list_v1.py index 4aeaea24..d4ade106 100644 --- a/tests/unittests/config/test_apt_configure_sources_list_v1.py +++ b/tests/unittests/config/test_apt_configure_sources_list_v1.py @@ -9,14 +9,9 @@ import shutil import tempfile from unittest import mock -from cloudinit import templater -from cloudinit import subp -from cloudinit import util - +from cloudinit import subp, templater, util from cloudinit.config import cc_apt_configure - from cloudinit.distros.debian import Distro - from tests.unittests import helpers as t_help from tests.unittests.util import get_cloud @@ -41,8 +36,7 @@ apt_custom_sources_list: | # FIND_SOMETHING_SPECIAL """ -EXPECTED_CONVERTED_CONTENT = ( - """## Note, this file is written by cloud-init on first boot of an instance +EXPECTED_CONVERTED_CONTENT = """## Note, this file is written by cloud-init on first boot of an instance ## modifications made here will not survive a re-bundle. ## if you wish to make changes you can: ## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg @@ -55,13 +49,14 @@ EXPECTED_CONVERTED_CONTENT = ( deb http://archive.ubuntu.com/ubuntu/ fakerelease main restricted deb-src http://archive.ubuntu.com/ubuntu/ fakerelease main restricted # FIND_SOMETHING_SPECIAL -""") +""" class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): """TestAptSourceConfigSourceList Main Class to test sources list rendering """ + def setUp(self): super(TestAptSourceConfigSourceList, self).setUp() self.subp = subp.subp @@ -70,11 +65,11 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): rpatcher = mock.patch("cloudinit.util.lsb_release") get_rel = rpatcher.start() - get_rel.return_value = {'codename': "fakerelease"} + get_rel.return_value = {"codename": "fakerelease"} self.addCleanup(rpatcher.stop) apatcher = mock.patch("cloudinit.util.get_dpkg_architecture") get_arch = apatcher.start() - get_arch.return_value = 'amd64' + get_arch.return_value = "amd64" self.addCleanup(apatcher.stop) def apt_source_list(self, distro, mirror, mirrorcheck=None): @@ -85,47 +80,57 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): mirrorcheck = mirror if isinstance(mirror, list): - cfg = {'apt_mirror_search': mirror} + cfg = {"apt_mirror_search": mirror} else: - cfg = {'apt_mirror': mirror} + cfg = {"apt_mirror": mirror} mycloud = get_cloud(distro) - with mock.patch.object(util, 'write_file') as mockwf: - with mock.patch.object(util, 'load_file', - return_value="faketmpl") as mocklf: - with mock.patch.object(os.path, 'isfile', - return_value=True) as mockisfile: + with mock.patch.object(util, "write_file") as mockwf: + with mock.patch.object( + util, "load_file", return_value="faketmpl" + ) as mocklf: + with mock.patch.object( + os.path, "isfile", return_value=True + ) as mockisfile: with 
mock.patch.object( - templater, 'render_string', - return_value='fake') as mockrnd: - with mock.patch.object(util, 'rename'): - cc_apt_configure.handle("test", cfg, mycloud, - LOG, None) + templater, "render_string", return_value="fake" + ) as mockrnd: + with mock.patch.object(util, "rename"): + cc_apt_configure.handle( + "test", cfg, mycloud, LOG, None + ) mockisfile.assert_any_call( - ('/etc/cloud/templates/sources.list.%s.tmpl' % distro)) + "/etc/cloud/templates/sources.list.%s.tmpl" % distro + ) mocklf.assert_any_call( - ('/etc/cloud/templates/sources.list.%s.tmpl' % distro)) - mockrnd.assert_called_once_with('faketmpl', - {'RELEASE': 'fakerelease', - 'PRIMARY': mirrorcheck, - 'MIRROR': mirrorcheck, - 'SECURITY': mirrorcheck, - 'codename': 'fakerelease', - 'primary': mirrorcheck, - 'mirror': mirrorcheck, - 'security': mirrorcheck}) - mockwf.assert_called_once_with('/etc/apt/sources.list', 'fake', - mode=0o644) + "/etc/cloud/templates/sources.list.%s.tmpl" % distro + ) + mockrnd.assert_called_once_with( + "faketmpl", + { + "RELEASE": "fakerelease", + "PRIMARY": mirrorcheck, + "MIRROR": mirrorcheck, + "SECURITY": mirrorcheck, + "codename": "fakerelease", + "primary": mirrorcheck, + "mirror": mirrorcheck, + "security": mirrorcheck, + }, + ) + mockwf.assert_called_once_with( + "/etc/apt/sources.list", "fake", mode=0o644 + ) def test_apt_v1_source_list_debian(self): """Test rendering of a source.list from template for debian""" - self.apt_source_list('debian', 'http://httpredir.debian.org/debian') + self.apt_source_list("debian", "http://httpredir.debian.org/debian") def test_apt_v1_source_list_ubuntu(self): """Test rendering of a source.list from template for ubuntu""" - self.apt_source_list('ubuntu', 'http://archive.ubuntu.com/ubuntu/') + self.apt_source_list("ubuntu", "http://archive.ubuntu.com/ubuntu/") @staticmethod def myresolve(name): @@ -139,23 +144,30 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): def test_apt_v1_srcl_debian_mirrorfail(self): """Test rendering of a source.list from template for debian""" - with mock.patch.object(util, 'is_resolvable', - side_effect=self.myresolve) as mockresolve: - self.apt_source_list('debian', - ['http://does.not.exist', - 'http://httpredir.debian.org/debian'], - 'http://httpredir.debian.org/debian') + with mock.patch.object( + util, "is_resolvable", side_effect=self.myresolve + ) as mockresolve: + self.apt_source_list( + "debian", + [ + "http://does.not.exist", + "http://httpredir.debian.org/debian", + ], + "http://httpredir.debian.org/debian", + ) mockresolve.assert_any_call("does.not.exist") mockresolve.assert_any_call("httpredir.debian.org") def test_apt_v1_srcl_ubuntu_mirrorfail(self): """Test rendering of a source.list from template for ubuntu""" - with mock.patch.object(util, 'is_resolvable', - side_effect=self.myresolve) as mockresolve: - self.apt_source_list('ubuntu', - ['http://does.not.exist', - 'http://archive.ubuntu.com/ubuntu/'], - 'http://archive.ubuntu.com/ubuntu/') + with mock.patch.object( + util, "is_resolvable", side_effect=self.myresolve + ) as mockresolve: + self.apt_source_list( + "ubuntu", + ["http://does.not.exist", "http://archive.ubuntu.com/ubuntu/"], + "http://archive.ubuntu.com/ubuntu/", + ) mockresolve.assert_any_call("does.not.exist") mockresolve.assert_any_call("archive.ubuntu.com") @@ -165,17 +177,18 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): mycloud = get_cloud() # the second mock restores the original subp - with mock.patch.object(util, 'write_file') as 
mockwrite: - with mock.patch.object(subp, 'subp', self.subp): - with mock.patch.object(Distro, 'get_primary_arch', - return_value='amd64'): - cc_apt_configure.handle("notimportant", cfg, mycloud, - LOG, None) + with mock.patch.object(util, "write_file") as mockwrite: + with mock.patch.object(subp, "subp", self.subp): + with mock.patch.object( + Distro, "get_primary_arch", return_value="amd64" + ): + cc_apt_configure.handle( + "notimportant", cfg, mycloud, LOG, None + ) mockwrite.assert_called_once_with( - '/etc/apt/sources.list', - EXPECTED_CONVERTED_CONTENT, - mode=420) + "/etc/apt/sources.list", EXPECTED_CONVERTED_CONTENT, mode=420 + ) # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_apt_configure_sources_list_v3.py b/tests/unittests/config/test_apt_configure_sources_list_v3.py index a8087bd1..d9ec6f74 100644 --- a/tests/unittests/config/test_apt_configure_sources_list_v3.py +++ b/tests/unittests/config/test_apt_configure_sources_list_v3.py @@ -3,20 +3,18 @@ """ test_apt_custom_sources_list Test templating of custom sources list """ -from contextlib import ExitStack import logging import os import shutil import tempfile +from contextlib import ExitStack from unittest import mock from unittest.mock import call -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util from cloudinit.config import cc_apt_configure from cloudinit.distros.debian import Distro from tests.unittests import helpers as t_help - from tests.unittests.util import get_cloud LOG = logging.getLogger(__name__) @@ -65,30 +63,31 @@ deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted """ -EXPECTED_BASE_CONTENT = (""" +EXPECTED_BASE_CONTENT = """ deb http://test.ubuntu.com/ubuntu/ notouched main restricted deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted -""") +""" -EXPECTED_MIRROR_CONTENT = (""" +EXPECTED_MIRROR_CONTENT = """ deb http://test.ubuntu.com/ubuntu/ notouched main restricted deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted deb http://test.ubuntu.com/ubuntu/ notouched-security main restricted -""") +""" -EXPECTED_PRIMSEC_CONTENT = (""" +EXPECTED_PRIMSEC_CONTENT = """ deb http://test.ubuntu.com/ubuntu/ notouched main restricted deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted -""") +""" class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): """TestAptSourceConfigSourceList - Class to test sources list rendering""" + def setUp(self): super(TestAptSourceConfigSourceList, self).setUp() self.subp = subp.subp @@ -97,33 +96,39 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): rpatcher = mock.patch("cloudinit.util.lsb_release") get_rel = rpatcher.start() - get_rel.return_value = {'codename': "fakerel"} + get_rel.return_value = {"codename": "fakerel"} self.addCleanup(rpatcher.stop) apatcher = mock.patch("cloudinit.util.get_dpkg_architecture") get_arch = apatcher.start() - get_arch.return_value = 'amd64' + get_arch.return_value = "amd64" self.addCleanup(apatcher.stop) def _apt_source_list(self, distro, cfg, cfg_on_empty=False): """_apt_source_list - Test 
rendering from template (generic)""" # entry at top level now, wrap in 'apt' key - cfg = {'apt': cfg} + cfg = {"apt": cfg} mycloud = get_cloud(distro) with ExitStack() as stack: - mock_writefile = stack.enter_context(mock.patch.object( - util, 'write_file')) - mock_loadfile = stack.enter_context(mock.patch.object( - util, 'load_file', return_value=MOCKED_APT_SRC_LIST)) - mock_isfile = stack.enter_context(mock.patch.object( - os.path, 'isfile', return_value=True)) - stack.enter_context(mock.patch.object( - util, 'del_file')) - cfg_func = ('cloudinit.config.cc_apt_configure.' - '_should_configure_on_empty_apt') - mock_shouldcfg = stack.enter_context(mock.patch( - cfg_func, return_value=(cfg_on_empty, 'test') - )) + mock_writefile = stack.enter_context( + mock.patch.object(util, "write_file") + ) + mock_loadfile = stack.enter_context( + mock.patch.object( + util, "load_file", return_value=MOCKED_APT_SRC_LIST + ) + ) + mock_isfile = stack.enter_context( + mock.patch.object(os.path, "isfile", return_value=True) + ) + stack.enter_context(mock.patch.object(util, "del_file")) + cfg_func = ( + "cloudinit.config.cc_apt_configure." + "_should_configure_on_empty_apt" + ) + mock_shouldcfg = stack.enter_context( + mock.patch(cfg_func, return_value=(cfg_on_empty, "test")) + ) cc_apt_configure.handle("test", cfg, mycloud, LOG, None) return mock_writefile, mock_loadfile, mock_isfile, mock_shouldcfg @@ -131,15 +136,20 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): def test_apt_v3_source_list_debian(self): """test_apt_v3_source_list_debian - without custom sources or parms""" cfg = {} - distro = 'debian' + distro = "debian" expected = EXPECTED_BASE_CONTENT - mock_writefile, mock_load_file, mock_isfile, mock_shouldcfg = ( - self._apt_source_list(distro, cfg, cfg_on_empty=True)) - - template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro - mock_writefile.assert_called_once_with('/etc/apt/sources.list', - expected, mode=0o644) + ( + mock_writefile, + mock_load_file, + mock_isfile, + mock_shouldcfg, + ) = self._apt_source_list(distro, cfg, cfg_on_empty=True) + + template = "/etc/cloud/templates/sources.list.%s.tmpl" % distro + mock_writefile.assert_called_once_with( + "/etc/apt/sources.list", expected, mode=0o644 + ) mock_load_file.assert_called_with(template) mock_isfile.assert_any_call(template) self.assertEqual(1, mock_shouldcfg.call_count) @@ -147,15 +157,20 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): def test_apt_v3_source_list_ubuntu(self): """test_apt_v3_source_list_ubuntu - without custom sources or parms""" cfg = {} - distro = 'ubuntu' + distro = "ubuntu" expected = EXPECTED_BASE_CONTENT - mock_writefile, mock_load_file, mock_isfile, mock_shouldcfg = ( - self._apt_source_list(distro, cfg, cfg_on_empty=True)) - - template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro - mock_writefile.assert_called_once_with('/etc/apt/sources.list', - expected, mode=0o644) + ( + mock_writefile, + mock_load_file, + mock_isfile, + mock_shouldcfg, + ) = self._apt_source_list(distro, cfg, cfg_on_empty=True) + + template = "/etc/cloud/templates/sources.list.%s.tmpl" % distro + mock_writefile.assert_called_once_with( + "/etc/apt/sources.list", expected, mode=0o644 + ) mock_load_file.assert_called_with(template) mock_isfile.assert_any_call(template) self.assertEqual(1, mock_shouldcfg.call_count) @@ -163,12 +178,13 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): def test_apt_v3_source_list_ubuntu_snappy(self): 
"""test_apt_v3_source_list_ubuntu_snappy - without custom sources or parms""" - cfg = {'apt': {}} + cfg = {"apt": {}} mycloud = get_cloud() - with mock.patch.object(util, 'write_file') as mock_writefile: - with mock.patch.object(util, 'system_is_snappy', - return_value=True) as mock_issnappy: + with mock.patch.object(util, "write_file") as mock_writefile: + with mock.patch.object( + util, "system_is_snappy", return_value=True + ) as mock_issnappy: cc_apt_configure.handle("test", cfg, mycloud, LOG, None) self.assertEqual(0, mock_writefile.call_count) @@ -177,7 +193,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): def test_apt_v3_source_list_centos(self): """test_apt_v3_source_list_centos - without custom sources or parms""" cfg = {} - distro = 'rhel' + distro = "rhel" mock_writefile, _, _, _ = self._apt_source_list(distro, cfg) @@ -185,22 +201,24 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): def test_apt_v3_source_list_psm(self): """test_apt_v3_source_list_psm - Test specifying prim+sec mirrors""" - pm = 'http://test.ubuntu.com/ubuntu/' - sm = 'http://testsec.ubuntu.com/ubuntu/' - cfg = {'preserve_sources_list': False, - 'primary': [{'arches': ["default"], - 'uri': pm}], - 'security': [{'arches': ["default"], - 'uri': sm}]} - distro = 'ubuntu' + pm = "http://test.ubuntu.com/ubuntu/" + sm = "http://testsec.ubuntu.com/ubuntu/" + cfg = { + "preserve_sources_list": False, + "primary": [{"arches": ["default"], "uri": pm}], + "security": [{"arches": ["default"], "uri": sm}], + } + distro = "ubuntu" expected = EXPECTED_PRIMSEC_CONTENT - mock_writefile, mock_load_file, mock_isfile, _ = ( - self._apt_source_list(distro, cfg, cfg_on_empty=True)) + mock_writefile, mock_load_file, mock_isfile, _ = self._apt_source_list( + distro, cfg, cfg_on_empty=True + ) - template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro - mock_writefile.assert_called_once_with('/etc/apt/sources.list', - expected, mode=0o644) + template = "/etc/cloud/templates/sources.list.%s.tmpl" % distro + mock_writefile.assert_called_once_with( + "/etc/apt/sources.list", expected, mode=0o644 + ) mock_load_file.assert_called_with(template) mock_isfile.assert_any_call(template) @@ -210,16 +228,20 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): mycloud = get_cloud() # the second mock restores the original subp - with mock.patch.object(util, 'write_file') as mockwrite: - with mock.patch.object(subp, 'subp', self.subp): - with mock.patch.object(Distro, 'get_primary_arch', - return_value='amd64'): - cc_apt_configure.handle("notimportant", cfg, mycloud, - LOG, None) - - calls = [call('/etc/apt/sources.list', - EXPECTED_CONVERTED_CONTENT, - mode=0o644)] + with mock.patch.object(util, "write_file") as mockwrite: + with mock.patch.object(subp, "subp", self.subp): + with mock.patch.object( + Distro, "get_primary_arch", return_value="amd64" + ): + cc_apt_configure.handle( + "notimportant", cfg, mycloud, LOG, None + ) + + calls = [ + call( + "/etc/apt/sources.list", EXPECTED_CONVERTED_CONTENT, mode=0o644 + ) + ] mockwrite.assert_has_calls(calls) diff --git a/tests/unittests/config/test_apt_key.py b/tests/unittests/config/test_apt_key.py index 00e5a38d..9fcf3039 100644 --- a/tests/unittests/config/test_apt_key.py +++ b/tests/unittests/config/test_apt_key.py @@ -1,11 +1,10 @@ import os from unittest import mock +from cloudinit import subp, util from cloudinit.config import cc_apt_configure -from cloudinit import subp -from cloudinit import util -TEST_KEY_HUMAN = ''' 
+TEST_KEY_HUMAN = """ /etc/apt/cloud-init.gpg.d/my_key.gpg -------------------------------------------- pub rsa4096 2021-10-22 [SC] @@ -13,9 +12,9 @@ pub rsa4096 2021-10-22 [SC] uid [ unknown] Brett Holman <brett.holman@canonical.com> sub rsa4096 2021-10-22 [A] sub rsa4096 2021-10-22 [E] -''' +""" -TEST_KEY_MACHINE = ''' +TEST_KEY_MACHINE = """ tru::1:1635129362:0:3:1:5 pub:-:4096:1:F83F77129A5EBD85:1634912922:::-:::scESCA::::::23::0: fpr:::::::::3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85: @@ -25,113 +24,101 @@ sub:-:4096:1:544B39C9A9141F04:1634912922::::::a::::::23: fpr:::::::::8BD901490D6EC986D03D6F0D544B39C9A9141F04: sub:-:4096:1:F45D9443F0A87092:1634912922::::::e::::::23: fpr:::::::::8CCCB332317324F030A45B19F45D9443F0A87092: -''' +""" -TEST_KEY_FINGERPRINT_HUMAN = \ - '3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85' +TEST_KEY_FINGERPRINT_HUMAN = ( + "3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85" +) -TEST_KEY_FINGERPRINT_MACHINE = \ - '3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85' +TEST_KEY_FINGERPRINT_MACHINE = "3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85" class TestAptKey: """TestAptKey Class to test apt-key commands """ - @mock.patch.object(subp, 'subp', return_value=('fakekey', '')) - @mock.patch.object(util, 'write_file') + + @mock.patch.object(subp, "subp", return_value=("fakekey", "")) + @mock.patch.object(util, "write_file") def _apt_key_add_success_helper(self, directory, *args, hardened=False): file = cc_apt_configure.apt_key( - 'add', - output_file='my-key', - data='fakekey', - hardened=hardened) - assert file == directory + '/my-key.gpg' + "add", output_file="my-key", data="fakekey", hardened=hardened + ) + assert file == directory + "/my-key.gpg" def test_apt_key_add_success(self): - """Verify the correct directory path gets returned for unhardened case - """ - self._apt_key_add_success_helper('/etc/apt/trusted.gpg.d') + """Verify the right directory path gets returned for unhardened case""" + self._apt_key_add_success_helper("/etc/apt/trusted.gpg.d") def test_apt_key_add_success_hardened(self): - """Verify the correct directory path gets returned for hardened case - """ + """Verify the right directory path gets returned for hardened case""" self._apt_key_add_success_helper( - '/etc/apt/cloud-init.gpg.d', - hardened=True) + "/etc/apt/cloud-init.gpg.d", hardened=True + ) def test_apt_key_add_fail_no_file_name(self): - """Verify that null filename gets handled correctly - """ - file = cc_apt_configure.apt_key( - 'add', - output_file=None, - data='') - assert '/dev/null' == file + """Verify that null filename gets handled correctly""" + file = cc_apt_configure.apt_key("add", output_file=None, data="") + assert "/dev/null" == file def _apt_key_fail_helper(self): file = cc_apt_configure.apt_key( - 'add', - output_file='my-key', - data='fakekey') - assert file == '/dev/null' + "add", output_file="my-key", data="fakekey" + ) + assert file == "/dev/null" - @mock.patch.object(subp, 'subp', side_effect=subp.ProcessExecutionError) + @mock.patch.object(subp, "subp", side_effect=subp.ProcessExecutionError) def test_apt_key_add_fail_no_file_name_subproc(self, *args): - """Verify that bad key value gets handled correctly - """ + """Verify that bad key value gets handled correctly""" self._apt_key_fail_helper() @mock.patch.object( - subp, 'subp', side_effect=UnicodeDecodeError('test', b'', 1, 1, '')) + subp, "subp", side_effect=UnicodeDecodeError("test", b"", 1, 1, "") + ) def test_apt_key_add_fail_no_file_name_unicode(self, *args): - """Verify that bad key encoding gets handled correctly - 
""" + """Verify that bad key encoding gets handled correctly""" self._apt_key_fail_helper() def _apt_key_list_success_helper(self, finger, key, human_output=True): - @mock.patch.object(os, 'listdir', return_value=('/fake/dir/key.gpg',)) - @mock.patch.object(subp, 'subp', return_value=(key, '')) + @mock.patch.object(os, "listdir", return_value=("/fake/dir/key.gpg",)) + @mock.patch.object(subp, "subp", return_value=(key, "")) def mocked_list(*a): - keys = cc_apt_configure.apt_key('list', human_output) + keys = cc_apt_configure.apt_key("list", human_output) assert finger in keys + mocked_list() def test_apt_key_list_success_human(self): - """Verify expected key output, human - """ + """Verify expected key output, human""" self._apt_key_list_success_helper( - TEST_KEY_FINGERPRINT_HUMAN, - TEST_KEY_HUMAN) + TEST_KEY_FINGERPRINT_HUMAN, TEST_KEY_HUMAN + ) def test_apt_key_list_success_machine(self): - """Verify expected key output, machine - """ + """Verify expected key output, machine""" self._apt_key_list_success_helper( - TEST_KEY_FINGERPRINT_MACHINE, - TEST_KEY_MACHINE, human_output=False) + TEST_KEY_FINGERPRINT_MACHINE, TEST_KEY_MACHINE, human_output=False + ) - @mock.patch.object(os, 'listdir', return_value=()) - @mock.patch.object(subp, 'subp', return_value=('', '')) + @mock.patch.object(os, "listdir", return_value=()) + @mock.patch.object(subp, "subp", return_value=("", "")) def test_apt_key_list_fail_no_keys(self, *args): - """Ensure falsy output for no keys - """ - keys = cc_apt_configure.apt_key('list') + """Ensure falsy output for no keys""" + keys = cc_apt_configure.apt_key("list") assert not keys - @mock.patch.object(os, 'listdir', return_value=('file_not_gpg_key.txt')) - @mock.patch.object(subp, 'subp', return_value=('', '')) + @mock.patch.object(os, "listdir", return_value="file_not_gpg_key.txt") + @mock.patch.object(subp, "subp", return_value=("", "")) def test_apt_key_list_fail_no_keys_file(self, *args): """Ensure non-gpg file is not returned. apt-key used file extensions for this, so we do too """ - assert not cc_apt_configure.apt_key('list') + assert not cc_apt_configure.apt_key("list") - @mock.patch.object(subp, 'subp', side_effect=subp.ProcessExecutionError) - @mock.patch.object(os, 'listdir', return_value=('bad_gpg_key.gpg')) + @mock.patch.object(subp, "subp", side_effect=subp.ProcessExecutionError) + @mock.patch.object(os, "listdir", return_value="bad_gpg_key.gpg") def test_apt_key_list_fail_bad_key_file(self, *args): - """Ensure bad gpg key doesn't throw exeption. - """ - assert not cc_apt_configure.apt_key('list') + """Ensure bad gpg key doesn't throw exeption.""" + assert not cc_apt_configure.apt_key("list") diff --git a/tests/unittests/config/test_apt_source_v1.py b/tests/unittests/config/test_apt_source_v1.py index 684c2495..fbc2bf45 100644 --- a/tests/unittests/config/test_apt_source_v1.py +++ b/tests/unittests/config/test_apt_source_v1.py @@ -6,18 +6,15 @@ This calls all things with v1 format to stress the conversion code on top of the actually tested code. 
""" import os +import pathlib import re import shutil import tempfile -import pathlib from unittest import mock from unittest.mock import call +from cloudinit import gpg, subp, util from cloudinit.config import cc_apt_configure -from cloudinit import gpg -from cloudinit import subp -from cloudinit import util - from tests.unittests.helpers import TestCase EXPECTEDKEY = """-----BEGIN PGP PUBLIC KEY BLOCK----- @@ -39,6 +36,7 @@ ADD_APT_REPO_MATCH = r"^[\w-]+:\w" class FakeDistro(object): """Fake Distro helper object""" + def update_package_sources(self): """Fake update_package_sources helper method""" return @@ -46,12 +44,14 @@ class FakeDistro(object): class FakeDatasource: """Fake Datasource helper object""" + def __init__(self): - self.region = 'region' + self.region = "region" class FakeCloud(object): """Fake Cloud helper object""" + def __init__(self): self.distro = FakeDistro() self.datasource = FakeDatasource() @@ -61,6 +61,7 @@ class TestAptSourceConfig(TestCase): """TestAptSourceConfig Main Class to test apt_source configs """ + release = "fantastic" def setUp(self): @@ -73,18 +74,19 @@ class TestAptSourceConfig(TestCase): self.join = os.path.join self.matcher = re.compile(ADD_APT_REPO_MATCH).search # mock fallback filename into writable tmp dir - self.fallbackfn = os.path.join(self.tmp, "etc/apt/sources.list.d/", - "cloud_config_sources.list") + self.fallbackfn = os.path.join( + self.tmp, "etc/apt/sources.list.d/", "cloud_config_sources.list" + ) self.fakecloud = FakeCloud() rpatcher = mock.patch("cloudinit.util.lsb_release") get_rel = rpatcher.start() - get_rel.return_value = {'codename': self.release} + get_rel.return_value = {"codename": self.release} self.addCleanup(rpatcher.stop) apatcher = mock.patch("cloudinit.util.get_dpkg_architecture") get_arch = apatcher.start() - get_arch.return_value = 'amd64' + get_arch.return_value = "amd64" self.addCleanup(apatcher.stop) def _get_default_params(self): @@ -92,23 +94,27 @@ class TestAptSourceConfig(TestCase): Get the most basic default mrror and release info to be used in tests """ params = {} - params['RELEASE'] = self.release - params['MIRROR'] = "http://archive.ubuntu.com/ubuntu" + params["RELEASE"] = self.release + params["MIRROR"] = "http://archive.ubuntu.com/ubuntu" return params def wrapv1conf(self, cfg): params = self._get_default_params() # old v1 list format under old keys, but callabe to main handler # disable source.list rendering and set mirror to avoid other code - return {'apt_preserve_sources_list': True, - 'apt_mirror': params['MIRROR'], - 'apt_sources': cfg} + return { + "apt_preserve_sources_list": True, + "apt_mirror": params["MIRROR"], + "apt_sources": cfg, + } def myjoin(self, *args, **kwargs): """myjoin - redir into writable tmpdir""" - if (args[0] == "/etc/apt/sources.list.d/" and - args[1] == "cloud_config_sources.list" and - len(args) == 2): + if ( + args[0] == "/etc/apt/sources.list.d/" + and args[1] == "cloud_config_sources.list" + and len(args) == 2 + ): return self.join(self.tmp, args[0].lstrip("/"), args[1]) else: return self.join(*args, **kwargs) @@ -124,26 +130,43 @@ class TestAptSourceConfig(TestCase): self.assertTrue(os.path.isfile(filename)) contents = util.load_file(filename) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", "http://archive.ubuntu.com/ubuntu", - "karmic-backports", - "main universe multiverse restricted"), - contents, flags=re.IGNORECASE)) + self.assertTrue( + re.search( + r"%s %s %s %s\n" + % ( + "deb", + "http://archive.ubuntu.com/ubuntu", + "karmic-backports", + "main 
universe multiverse restricted", + ), + contents, + flags=re.IGNORECASE, + ) + ) def test_apt_src_basic(self): """Test deb source string, overwrite mirror and filename""" - cfg = {'source': ('deb http://archive.ubuntu.com/ubuntu' - ' karmic-backports' - ' main universe multiverse restricted'), - 'filename': self.aptlistfile} + cfg = { + "source": ( + "deb http://archive.ubuntu.com/ubuntu" + " karmic-backports" + " main universe multiverse restricted" + ), + "filename": self.aptlistfile, + } self.apt_src_basic(self.aptlistfile, [cfg]) def test_apt_src_basic_dict(self): """Test deb source string, overwrite mirror and filename (dict)""" - cfg = {self.aptlistfile: {'source': - ('deb http://archive.ubuntu.com/ubuntu' - ' karmic-backports' - ' main universe multiverse restricted')}} + cfg = { + self.aptlistfile: { + "source": ( + "deb http://archive.ubuntu.com/ubuntu" + " karmic-backports" + " main universe multiverse restricted" + ) + } + } self.apt_src_basic(self.aptlistfile, cfg) def apt_src_basic_tri(self, cfg): @@ -156,56 +179,99 @@ class TestAptSourceConfig(TestCase): # extra verify on two extra files of this test contents = util.load_file(self.aptlistfile2) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", "http://archive.ubuntu.com/ubuntu", - "precise-backports", - "main universe multiverse restricted"), - contents, flags=re.IGNORECASE)) + self.assertTrue( + re.search( + r"%s %s %s %s\n" + % ( + "deb", + "http://archive.ubuntu.com/ubuntu", + "precise-backports", + "main universe multiverse restricted", + ), + contents, + flags=re.IGNORECASE, + ) + ) contents = util.load_file(self.aptlistfile3) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", "http://archive.ubuntu.com/ubuntu", - "lucid-backports", - "main universe multiverse restricted"), - contents, flags=re.IGNORECASE)) + self.assertTrue( + re.search( + r"%s %s %s %s\n" + % ( + "deb", + "http://archive.ubuntu.com/ubuntu", + "lucid-backports", + "main universe multiverse restricted", + ), + contents, + flags=re.IGNORECASE, + ) + ) def test_apt_src_basic_tri(self): """Test Fix three deb source string with filenames""" - cfg1 = {'source': ('deb http://archive.ubuntu.com/ubuntu' - ' karmic-backports' - ' main universe multiverse restricted'), - 'filename': self.aptlistfile} - cfg2 = {'source': ('deb http://archive.ubuntu.com/ubuntu' - ' precise-backports' - ' main universe multiverse restricted'), - 'filename': self.aptlistfile2} - cfg3 = {'source': ('deb http://archive.ubuntu.com/ubuntu' - ' lucid-backports' - ' main universe multiverse restricted'), - 'filename': self.aptlistfile3} + cfg1 = { + "source": ( + "deb http://archive.ubuntu.com/ubuntu" + " karmic-backports" + " main universe multiverse restricted" + ), + "filename": self.aptlistfile, + } + cfg2 = { + "source": ( + "deb http://archive.ubuntu.com/ubuntu" + " precise-backports" + " main universe multiverse restricted" + ), + "filename": self.aptlistfile2, + } + cfg3 = { + "source": ( + "deb http://archive.ubuntu.com/ubuntu" + " lucid-backports" + " main universe multiverse restricted" + ), + "filename": self.aptlistfile3, + } self.apt_src_basic_tri([cfg1, cfg2, cfg3]) def test_apt_src_basic_dict_tri(self): """Test Fix three deb source string with filenames (dict)""" - cfg = {self.aptlistfile: {'source': - ('deb http://archive.ubuntu.com/ubuntu' - ' karmic-backports' - ' main universe multiverse restricted')}, - self.aptlistfile2: {'source': - ('deb http://archive.ubuntu.com/ubuntu' - ' precise-backports' - ' main universe multiverse restricted')}, - 
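
The dict and list literals throughout this file are exploded one element per line by Black's "magic trailing comma": a trailing comma already present in the source tells Black to keep the collection multi-line even when it would fit on one line. A small illustration (the path is hypothetical):

    # With a trailing comma, Black keeps one element per line:
    cfg = {
        "source": "deb http://archive.ubuntu.com/ubuntu karmic-backports main",
        "filename": "/tmp/example.list",  # hypothetical path
    }

    # Without one, short literals collapse onto a single line:
    params = {"RELEASE": "fantastic"}
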
self.aptlistfile3: {'source': - ('deb http://archive.ubuntu.com/ubuntu' - ' lucid-backports' - ' main universe multiverse restricted')}} + cfg = { + self.aptlistfile: { + "source": ( + "deb http://archive.ubuntu.com/ubuntu" + " karmic-backports" + " main universe multiverse restricted" + ) + }, + self.aptlistfile2: { + "source": ( + "deb http://archive.ubuntu.com/ubuntu" + " precise-backports" + " main universe multiverse restricted" + ) + }, + self.aptlistfile3: { + "source": ( + "deb http://archive.ubuntu.com/ubuntu" + " lucid-backports" + " main universe multiverse restricted" + ) + }, + } self.apt_src_basic_tri(cfg) def test_apt_src_basic_nofn(self): """Test Fix three deb source string without filenames (dict)""" - cfg = {'source': ('deb http://archive.ubuntu.com/ubuntu' - ' karmic-backports' - ' main universe multiverse restricted')} - with mock.patch.object(os.path, 'join', side_effect=self.myjoin): + cfg = { + "source": ( + "deb http://archive.ubuntu.com/ubuntu" + " karmic-backports" + " main universe multiverse restricted" + ) + } + with mock.patch.object(os.path, "join", side_effect=self.myjoin): self.apt_src_basic(self.fallbackfn, [cfg]) def apt_src_replacement(self, filename, cfg): @@ -219,15 +285,21 @@ class TestAptSourceConfig(TestCase): self.assertTrue(os.path.isfile(filename)) contents = util.load_file(filename) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", params['MIRROR'], params['RELEASE'], - "multiverse"), - contents, flags=re.IGNORECASE)) + self.assertTrue( + re.search( + r"%s %s %s %s\n" + % ("deb", params["MIRROR"], params["RELEASE"], "multiverse"), + contents, + flags=re.IGNORECASE, + ) + ) def test_apt_src_replace(self): """Test Autoreplacement of MIRROR and RELEASE in source specs""" - cfg = {'source': 'deb $MIRROR $RELEASE multiverse', - 'filename': self.aptlistfile} + cfg = { + "source": "deb $MIRROR $RELEASE multiverse", + "filename": self.aptlistfile, + } self.apt_src_replacement(self.aptlistfile, [cfg]) def apt_src_replace_tri(self, cfg): @@ -240,38 +312,56 @@ class TestAptSourceConfig(TestCase): # extra verify on two extra files of this test params = self._get_default_params() contents = util.load_file(self.aptlistfile2) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", params['MIRROR'], params['RELEASE'], - "main"), - contents, flags=re.IGNORECASE)) + self.assertTrue( + re.search( + r"%s %s %s %s\n" + % ("deb", params["MIRROR"], params["RELEASE"], "main"), + contents, + flags=re.IGNORECASE, + ) + ) contents = util.load_file(self.aptlistfile3) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", params['MIRROR'], params['RELEASE'], - "universe"), - contents, flags=re.IGNORECASE)) + self.assertTrue( + re.search( + r"%s %s %s %s\n" + % ("deb", params["MIRROR"], params["RELEASE"], "universe"), + contents, + flags=re.IGNORECASE, + ) + ) def test_apt_src_replace_tri(self): """Test triple Autoreplacement of MIRROR and RELEASE in source specs""" - cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse', - 'filename': self.aptlistfile} - cfg2 = {'source': 'deb $MIRROR $RELEASE main', - 'filename': self.aptlistfile2} - cfg3 = {'source': 'deb $MIRROR $RELEASE universe', - 'filename': self.aptlistfile3} + cfg1 = { + "source": "deb $MIRROR $RELEASE multiverse", + "filename": self.aptlistfile, + } + cfg2 = { + "source": "deb $MIRROR $RELEASE main", + "filename": self.aptlistfile2, + } + cfg3 = { + "source": "deb $MIRROR $RELEASE universe", + "filename": self.aptlistfile3, + } self.apt_src_replace_tri([cfg1, cfg2, cfg3]) def 
test_apt_src_replace_dict_tri(self): """Test triple Autoreplacement in source specs (dict)""" - cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'}, - 'notused': {'source': 'deb $MIRROR $RELEASE main', - 'filename': self.aptlistfile2}, - self.aptlistfile3: {'source': 'deb $MIRROR $RELEASE universe'}} + cfg = { + self.aptlistfile: {"source": "deb $MIRROR $RELEASE multiverse"}, + "notused": { + "source": "deb $MIRROR $RELEASE main", + "filename": self.aptlistfile2, + }, + self.aptlistfile3: {"source": "deb $MIRROR $RELEASE universe"}, + } self.apt_src_replace_tri(cfg) def test_apt_src_replace_nofn(self): """Test Autoreplacement of MIRROR and RELEASE in source specs nofile""" - cfg = {'source': 'deb $MIRROR $RELEASE multiverse'} - with mock.patch.object(os.path, 'join', side_effect=self.myjoin): + cfg = {"source": "deb $MIRROR $RELEASE multiverse"} + with mock.patch.object(os.path, "join", side_effect=self.myjoin): self.apt_src_replacement(self.fallbackfn, [cfg]) def apt_src_keyid(self, filename, cfg, keynum): @@ -280,12 +370,12 @@ class TestAptSourceConfig(TestCase): """ cfg = self.wrapv1conf(cfg) - with mock.patch.object(cc_apt_configure, 'add_apt_key') as mockobj: + with mock.patch.object(cc_apt_configure, "add_apt_key") as mockobj: cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) # check if it added the right number of keys calls = [] - sources = cfg['apt']['sources'] + sources = cfg["apt"]["sources"] for src in sources: print(sources[src]) calls.append(call(sources[src], None)) @@ -295,68 +385,109 @@ class TestAptSourceConfig(TestCase): self.assertTrue(os.path.isfile(filename)) contents = util.load_file(filename) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "main"), - contents, flags=re.IGNORECASE)) + self.assertTrue( + re.search( + r"%s %s %s %s\n" + % ( + "deb", + "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu", + "xenial", + "main", + ), + contents, + flags=re.IGNORECASE, + ) + ) def test_apt_src_keyid(self): """Test specification of a source + keyid with filename being set""" - cfg = {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'keyid': "03683F77", - 'filename': self.aptlistfile} + cfg = { + "source": ( + "deb " + "http://ppa.launchpad.net/" + "smoser/cloud-init-test/ubuntu" + " xenial main" + ), + "keyid": "03683F77", + "filename": self.aptlistfile, + } self.apt_src_keyid(self.aptlistfile, [cfg], 1) def test_apt_src_keyid_tri(self): """Test 3x specification of a source + keyid with filename being set""" - cfg1 = {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'keyid': "03683F77", - 'filename': self.aptlistfile} - cfg2 = {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial universe'), - 'keyid': "03683F77", - 'filename': self.aptlistfile2} - cfg3 = {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial multiverse'), - 'keyid': "03683F77", - 'filename': self.aptlistfile3} + cfg1 = { + "source": ( + "deb " + "http://ppa.launchpad.net/" + "smoser/cloud-init-test/ubuntu" + " xenial main" + ), + "keyid": "03683F77", + "filename": self.aptlistfile, + } + cfg2 = { + "source": ( + "deb " + "http://ppa.launchpad.net/" + "smoser/cloud-init-test/ubuntu" + " xenial universe" + ), + "keyid": "03683F77", + "filename": self.aptlistfile2, + } + cfg3 = { + "source": ( + "deb " 
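
The multi-line source strings above rely on implicit concatenation of adjacent string literals, which happens at compile time, so the configured value contains no line breaks:

    source = (
        "deb "
        "http://ppa.launchpad.net/"
        "smoser/cloud-init-test/ubuntu"
        " xenial main"
    )
    assert source == (
        "deb http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu"
        " xenial main"
    )
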
+ "http://ppa.launchpad.net/" + "smoser/cloud-init-test/ubuntu" + " xenial multiverse" + ), + "keyid": "03683F77", + "filename": self.aptlistfile3, + } self.apt_src_keyid(self.aptlistfile, [cfg1, cfg2, cfg3], 3) contents = util.load_file(self.aptlistfile2) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "universe"), - contents, flags=re.IGNORECASE)) + self.assertTrue( + re.search( + r"%s %s %s %s\n" + % ( + "deb", + "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu", + "xenial", + "universe", + ), + contents, + flags=re.IGNORECASE, + ) + ) contents = util.load_file(self.aptlistfile3) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "multiverse"), - contents, flags=re.IGNORECASE)) + self.assertTrue( + re.search( + r"%s %s %s %s\n" + % ( + "deb", + "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu", + "xenial", + "multiverse", + ), + contents, + flags=re.IGNORECASE, + ) + ) def test_apt_src_keyid_nofn(self): """Test specification of a source + keyid without filename being set""" - cfg = {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'keyid': "03683F77"} - with mock.patch.object(os.path, 'join', side_effect=self.myjoin): + cfg = { + "source": ( + "deb " + "http://ppa.launchpad.net/" + "smoser/cloud-init-test/ubuntu" + " xenial main" + ), + "keyid": "03683F77", + } + with mock.patch.object(os.path, "join", side_effect=self.myjoin): self.apt_src_keyid(self.fallbackfn, [cfg], 1) def apt_src_key(self, filename, cfg): @@ -365,11 +496,11 @@ class TestAptSourceConfig(TestCase): """ cfg = self.wrapv1conf([cfg]) - with mock.patch.object(cc_apt_configure, 'add_apt_key') as mockobj: + with mock.patch.object(cc_apt_configure, "add_apt_key") as mockobj: cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) # check if it added the right amount of keys - sources = cfg['apt']['sources'] + sources = cfg["apt"]["sources"] calls = [] for src in sources: print(sources[src]) @@ -380,46 +511,63 @@ class TestAptSourceConfig(TestCase): self.assertTrue(os.path.isfile(filename)) contents = util.load_file(filename) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "main"), - contents, flags=re.IGNORECASE)) + self.assertTrue( + re.search( + r"%s %s %s %s\n" + % ( + "deb", + "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu", + "xenial", + "main", + ), + contents, + flags=re.IGNORECASE, + ) + ) def test_apt_src_key(self): """Test specification of a source + key with filename being set""" - cfg = {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'key': "fakekey 4321", - 'filename': self.aptlistfile} + cfg = { + "source": ( + "deb " + "http://ppa.launchpad.net/" + "smoser/cloud-init-test/ubuntu" + " xenial main" + ), + "key": "fakekey 4321", + "filename": self.aptlistfile, + } self.apt_src_key(self.aptlistfile, cfg) def test_apt_src_key_nofn(self): """Test specification of a source + key without filename being set""" - cfg = {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'key': "fakekey 4321"} - with mock.patch.object(os.path, 'join', side_effect=self.myjoin): + cfg = { + "source": ( + "deb " + "http://ppa.launchpad.net/" + "smoser/cloud-init-test/ubuntu" + " xenial main" + 
), + "key": "fakekey 4321", + } + with mock.patch.object(os.path, "join", side_effect=self.myjoin): self.apt_src_key(self.fallbackfn, cfg) def test_apt_src_keyonly(self): """Test specifying key without source""" - cfg = {'key': "fakekey 4242", - 'filename': self.aptlistfile} + cfg = {"key": "fakekey 4242", "filename": self.aptlistfile} cfg = self.wrapv1conf([cfg]) - with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: + with mock.patch.object(cc_apt_configure, "apt_key") as mockobj: cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - calls = (call( - 'add', - output_file=pathlib.Path(self.aptlistfile).stem, - data='fakekey 4242', - hardened=False),) + calls = ( + call( + "add", + output_file=pathlib.Path(self.aptlistfile).stem, + data="fakekey 4242", + hardened=False, + ), + ) mockobj.assert_has_calls(calls, any_order=True) # filename should be ignored on key only @@ -427,25 +575,25 @@ class TestAptSourceConfig(TestCase): def test_apt_src_keyidonly(self): """Test specification of a keyid without source""" - cfg = {'keyid': "03683F77", - 'filename': self.aptlistfile} + cfg = {"keyid": "03683F77", "filename": self.aptlistfile} cfg = self.wrapv1conf([cfg]) - with mock.patch.object(subp, 'subp', - return_value=('fakekey 1212', '')): - with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: + with mock.patch.object( + subp, "subp", return_value=("fakekey 1212", "") + ): + with mock.patch.object(cc_apt_configure, "apt_key") as mockobj: cc_apt_configure.handle( - "test", - cfg, - self.fakecloud, - None, - None) - - calls = (call( - 'add', - output_file=pathlib.Path(self.aptlistfile).stem, - data='fakekey 1212', - hardened=False),) + "test", cfg, self.fakecloud, None, None + ) + + calls = ( + call( + "add", + output_file=pathlib.Path(self.aptlistfile).stem, + data="fakekey 1212", + hardened=False, + ), + ) mockobj.assert_has_calls(calls, any_order=True) # filename should be ignored on key only @@ -457,20 +605,21 @@ class TestAptSourceConfig(TestCase): up to addition of the key (add_apt_key_raw mocked to keep the environment as is) """ - key = cfg['keyid'] - keyserver = cfg.get('keyserver', 'keyserver.ubuntu.com') + key = cfg["keyid"] + keyserver = cfg.get("keyserver", "keyserver.ubuntu.com") cfg = self.wrapv1conf([cfg]) - with mock.patch.object(cc_apt_configure, 'add_apt_key_raw') as mockkey: - with mock.patch.object(gpg, 'getkeybyid', - return_value=expectedkey) as mockgetkey: - cc_apt_configure.handle("test", cfg, self.fakecloud, - None, None) + with mock.patch.object(cc_apt_configure, "add_apt_key_raw") as mockkey: + with mock.patch.object( + gpg, "getkeybyid", return_value=expectedkey + ) as mockgetkey: + cc_apt_configure.handle( + "test", cfg, self.fakecloud, None, None + ) if is_hardened is not None: mockkey.assert_called_with( - expectedkey, - self.aptlistfile, - hardened=is_hardened) + expectedkey, self.aptlistfile, hardened=is_hardened + ) else: mockkey.assert_called_with(expectedkey, self.aptlistfile) mockgetkey.assert_called_with(key, keyserver) @@ -481,62 +630,77 @@ class TestAptSourceConfig(TestCase): def test_apt_src_keyid_real(self): """test_apt_src_keyid_real - Test keyid including key add""" keyid = "03683F77" - cfg = {'keyid': keyid, - 'filename': self.aptlistfile} + cfg = {"keyid": keyid, "filename": self.aptlistfile} self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) def test_apt_src_longkeyid_real(self): """test_apt_src_longkeyid_real - Test long keyid including key add""" keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" - 
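
The key-only tests expect apt_key to be called with output_file=pathlib.Path(self.aptlistfile).stem, i.e. the list file's base name with the directory and final suffix stripped (the example path below is hypothetical):

    import pathlib

    p = pathlib.Path("/etc/apt/sources.list.d/cloud_config_sources.list")
    assert p.stem == "cloud_config_sources"            # no dir, no suffix
    assert p.name == "cloud_config_sources.list"       # .name keeps the suffix
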
cfg = {'keyid': keyid, - 'filename': self.aptlistfile} + cfg = {"keyid": keyid, "filename": self.aptlistfile} self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) def test_apt_src_longkeyid_ks_real(self): """test_apt_src_longkeyid_ks_real - Test long keyid from other ks""" keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" - cfg = {'keyid': keyid, - 'keyserver': 'keys.gnupg.net', - 'filename': self.aptlistfile} + cfg = { + "keyid": keyid, + "keyserver": "keys.gnupg.net", + "filename": self.aptlistfile, + } self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) def test_apt_src_ppa(self): """Test adding a ppa""" - cfg = {'source': 'ppa:smoser/cloud-init-test', - 'filename': self.aptlistfile} + cfg = { + "source": "ppa:smoser/cloud-init-test", + "filename": self.aptlistfile, + } cfg = self.wrapv1conf([cfg]) - with mock.patch.object(subp, 'subp') as mockobj: + with mock.patch.object(subp, "subp") as mockobj: cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - mockobj.assert_called_once_with(['add-apt-repository', - 'ppa:smoser/cloud-init-test'], - target=None) + mockobj.assert_called_once_with( + ["add-apt-repository", "ppa:smoser/cloud-init-test"], target=None + ) # adding ppa should ignore filename (uses add-apt-repository) self.assertFalse(os.path.isfile(self.aptlistfile)) def test_apt_src_ppa_tri(self): """Test adding three ppa's""" - cfg1 = {'source': 'ppa:smoser/cloud-init-test', - 'filename': self.aptlistfile} - cfg2 = {'source': 'ppa:smoser/cloud-init-test2', - 'filename': self.aptlistfile2} - cfg3 = {'source': 'ppa:smoser/cloud-init-test3', - 'filename': self.aptlistfile3} + cfg1 = { + "source": "ppa:smoser/cloud-init-test", + "filename": self.aptlistfile, + } + cfg2 = { + "source": "ppa:smoser/cloud-init-test2", + "filename": self.aptlistfile2, + } + cfg3 = { + "source": "ppa:smoser/cloud-init-test3", + "filename": self.aptlistfile3, + } cfg = self.wrapv1conf([cfg1, cfg2, cfg3]) - with mock.patch.object(subp, 'subp') as mockobj: - cc_apt_configure.handle("test", cfg, self.fakecloud, - None, None) - calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test'], - target=None), - call(['add-apt-repository', 'ppa:smoser/cloud-init-test2'], - target=None), - call(['add-apt-repository', 'ppa:smoser/cloud-init-test3'], - target=None)] + with mock.patch.object(subp, "subp") as mockobj: + cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) + calls = [ + call( + ["add-apt-repository", "ppa:smoser/cloud-init-test"], + target=None, + ), + call( + ["add-apt-repository", "ppa:smoser/cloud-init-test2"], + target=None, + ), + call( + ["add-apt-repository", "ppa:smoser/cloud-init-test3"], + target=None, + ), + ] mockobj.assert_has_calls(calls, any_order=True) # adding ppa should ignore all filenames (uses add-apt-repository) @@ -546,43 +710,59 @@ class TestAptSourceConfig(TestCase): def test_convert_to_new_format(self): """Test the conversion of old to new format""" - cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse', - 'filename': self.aptlistfile} - cfg2 = {'source': 'deb $MIRROR $RELEASE main', - 'filename': self.aptlistfile2} - cfg3 = {'source': 'deb $MIRROR $RELEASE universe', - 'filename': self.aptlistfile3} - cfg = {'apt_sources': [cfg1, cfg2, cfg3]} - checkcfg = {self.aptlistfile: {'filename': self.aptlistfile, - 'source': 'deb $MIRROR $RELEASE ' - 'multiverse'}, - self.aptlistfile2: {'filename': self.aptlistfile2, - 'source': 'deb $MIRROR $RELEASE main'}, - self.aptlistfile3: {'filename': self.aptlistfile3, - 'source': 'deb $MIRROR 
$RELEASE ' - 'universe'}} + cfg1 = { + "source": "deb $MIRROR $RELEASE multiverse", + "filename": self.aptlistfile, + } + cfg2 = { + "source": "deb $MIRROR $RELEASE main", + "filename": self.aptlistfile2, + } + cfg3 = { + "source": "deb $MIRROR $RELEASE universe", + "filename": self.aptlistfile3, + } + cfg = {"apt_sources": [cfg1, cfg2, cfg3]} + checkcfg = { + self.aptlistfile: { + "filename": self.aptlistfile, + "source": "deb $MIRROR $RELEASE multiverse", + }, + self.aptlistfile2: { + "filename": self.aptlistfile2, + "source": "deb $MIRROR $RELEASE main", + }, + self.aptlistfile3: { + "filename": self.aptlistfile3, + "source": "deb $MIRROR $RELEASE universe", + }, + } newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg) - self.assertEqual(newcfg['apt']['sources'], checkcfg) + self.assertEqual(newcfg["apt"]["sources"], checkcfg) # convert again, should stay the same newcfg2 = cc_apt_configure.convert_to_v3_apt_format(newcfg) - self.assertEqual(newcfg2['apt']['sources'], checkcfg) + self.assertEqual(newcfg2["apt"]["sources"], checkcfg) # should work without raising an exception cc_apt_configure.convert_to_v3_apt_format({}) with self.assertRaises(ValueError): - cc_apt_configure.convert_to_v3_apt_format({'apt_sources': 5}) + cc_apt_configure.convert_to_v3_apt_format({"apt_sources": 5}) def test_convert_to_new_format_collision(self): """Test the conversion of old to new format with collisions - That matches e.g. the MAAS case specifying old and new config""" - cfg_1_and_3 = {'apt': {'proxy': 'http://192.168.122.1:8000/'}, - 'apt_proxy': 'http://192.168.122.1:8000/'} - cfg_3_only = {'apt': {'proxy': 'http://192.168.122.1:8000/'}} - cfgconflict = {'apt': {'proxy': 'http://192.168.122.1:8000/'}, - 'apt_proxy': 'ftp://192.168.122.1:8000/'} + That matches e.g. 
the MAAS case specifying old and new config""" + cfg_1_and_3 = { + "apt": {"proxy": "http://192.168.122.1:8000/"}, + "apt_proxy": "http://192.168.122.1:8000/", + } + cfg_3_only = {"apt": {"proxy": "http://192.168.122.1:8000/"}} + cfgconflict = { + "apt": {"proxy": "http://192.168.122.1:8000/"}, + "apt_proxy": "ftp://192.168.122.1:8000/", + } # collision (equal) newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3) @@ -596,22 +776,34 @@ class TestAptSourceConfig(TestCase): cc_apt_configure.convert_to_v3_apt_format(cfgconflict) def test_convert_to_new_format_dict_collision(self): - cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse', - 'filename': self.aptlistfile} - cfg2 = {'source': 'deb $MIRROR $RELEASE main', - 'filename': self.aptlistfile2} - cfg3 = {'source': 'deb $MIRROR $RELEASE universe', - 'filename': self.aptlistfile3} - fullv3 = {self.aptlistfile: {'filename': self.aptlistfile, - 'source': 'deb $MIRROR $RELEASE ' - 'multiverse'}, - self.aptlistfile2: {'filename': self.aptlistfile2, - 'source': 'deb $MIRROR $RELEASE main'}, - self.aptlistfile3: {'filename': self.aptlistfile3, - 'source': 'deb $MIRROR $RELEASE ' - 'universe'}} - cfg_3_only = {'apt': {'sources': fullv3}} - cfg_1_and_3 = {'apt_sources': [cfg1, cfg2, cfg3]} + cfg1 = { + "source": "deb $MIRROR $RELEASE multiverse", + "filename": self.aptlistfile, + } + cfg2 = { + "source": "deb $MIRROR $RELEASE main", + "filename": self.aptlistfile2, + } + cfg3 = { + "source": "deb $MIRROR $RELEASE universe", + "filename": self.aptlistfile3, + } + fullv3 = { + self.aptlistfile: { + "filename": self.aptlistfile, + "source": "deb $MIRROR $RELEASE multiverse", + }, + self.aptlistfile2: { + "filename": self.aptlistfile2, + "source": "deb $MIRROR $RELEASE main", + }, + self.aptlistfile3: { + "filename": self.aptlistfile3, + "source": "deb $MIRROR $RELEASE universe", + }, + } + cfg_3_only = {"apt": {"sources": fullv3}} + cfg_1_and_3 = {"apt_sources": [cfg1, cfg2, cfg3]} cfg_1_and_3.update(cfg_3_only) # collision (equal, so ok to remove) @@ -621,27 +813,36 @@ class TestAptSourceConfig(TestCase): newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_3_only) self.assertEqual(newcfg, cfg_3_only) - diff = {self.aptlistfile: {'filename': self.aptlistfile, - 'source': 'deb $MIRROR $RELEASE ' - 'DIFFERENTVERSE'}, - self.aptlistfile2: {'filename': self.aptlistfile2, - 'source': 'deb $MIRROR $RELEASE main'}, - self.aptlistfile3: {'filename': self.aptlistfile3, - 'source': 'deb $MIRROR $RELEASE ' - 'universe'}} - cfg_3_only = {'apt': {'sources': diff}} - cfg_1_and_3_different = {'apt_sources': [cfg1, cfg2, cfg3]} + diff = { + self.aptlistfile: { + "filename": self.aptlistfile, + "source": "deb $MIRROR $RELEASE DIFFERENTVERSE", + }, + self.aptlistfile2: { + "filename": self.aptlistfile2, + "source": "deb $MIRROR $RELEASE main", + }, + self.aptlistfile3: { + "filename": self.aptlistfile3, + "source": "deb $MIRROR $RELEASE universe", + }, + } + cfg_3_only = {"apt": {"sources": diff}} + cfg_1_and_3_different = {"apt_sources": [cfg1, cfg2, cfg3]} cfg_1_and_3_different.update(cfg_3_only) # collision (unequal by dict having a different entry) with self.assertRaises(ValueError): cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3_different) - missing = {self.aptlistfile: {'filename': self.aptlistfile, - 'source': 'deb $MIRROR $RELEASE ' - 'multiverse'}} - cfg_3_only = {'apt': {'sources': missing}} - cfg_1_and_3_missing = {'apt_sources': [cfg1, cfg2, cfg3]} + missing = { + self.aptlistfile: { + "filename": self.aptlistfile, + "source": "deb $MIRROR 
$RELEASE multiverse", + } + } + cfg_3_only = {"apt": {"sources": missing}} + cfg_1_and_3_missing = {"apt_sources": [cfg1, cfg2, cfg3]} cfg_1_and_3_missing.update(cfg_3_only) # collision (unequal by dict missing an entry) with self.assertRaises(ValueError): diff --git a/tests/unittests/config/test_apt_source_v3.py b/tests/unittests/config/test_apt_source_v3.py index 0b78037e..75adc647 100644 --- a/tests/unittests/config/test_apt_source_v3.py +++ b/tests/unittests/config/test_apt_source_v3.py @@ -6,21 +6,17 @@ This tries to call all in the new v3 format and cares about new features """ import glob import os +import pathlib import re import shutil import socket import tempfile -import pathlib - from unittest import TestCase, mock from unittest.mock import call -from cloudinit import gpg -from cloudinit import subp -from cloudinit import util +from cloudinit import gpg, subp, util from cloudinit.config import cc_apt_configure from tests.unittests import helpers as t_help - from tests.unittests.util import get_cloud EXPECTEDKEY = """-----BEGIN PGP PUBLIC KEY BLOCK----- @@ -42,18 +38,23 @@ ADD_APT_REPO_MATCH = r"^[\w-]+:\w" TARGET = None MOCK_LSB_RELEASE_DATA = { - 'id': 'Ubuntu', 'description': 'Ubuntu 18.04.1 LTS', - 'release': '18.04', 'codename': 'bionic'} + "id": "Ubuntu", + "description": "Ubuntu 18.04.1 LTS", + "release": "18.04", + "codename": "bionic", +} class FakeDatasource: """Fake Datasource helper object""" + def __init__(self): - self.region = 'region' + self.region = "region" class FakeCloud: """Fake Cloud helper object""" + def __init__(self): self.datasource = FakeDatasource() @@ -62,6 +63,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): """TestAptSourceConfig Main Class to test apt configs """ + def setUp(self): super(TestAptSourceConfig, self).setUp() self.tmp = tempfile.mkdtemp() @@ -74,12 +76,14 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): self.join = os.path.join self.matcher = re.compile(ADD_APT_REPO_MATCH).search self.add_patch( - 'cloudinit.config.cc_apt_configure.util.lsb_release', - 'm_lsb_release', return_value=MOCK_LSB_RELEASE_DATA.copy()) + "cloudinit.config.cc_apt_configure.util.lsb_release", + "m_lsb_release", + return_value=MOCK_LSB_RELEASE_DATA.copy(), + ) @staticmethod def _add_apt_sources(*args, **kwargs): - with mock.patch.object(cc_apt_configure, 'update_packages'): + with mock.patch.object(cc_apt_configure, "update_packages"): cc_apt_configure.add_apt_sources(*args, **kwargs) @staticmethod @@ -88,17 +92,20 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): Get the most basic default mrror and release info to be used in tests """ params = {} - params['RELEASE'] = MOCK_LSB_RELEASE_DATA['release'] - arch = 'amd64' - params['MIRROR'] = cc_apt_configure.\ - get_default_mirrors(arch)["PRIMARY"] + params["RELEASE"] = MOCK_LSB_RELEASE_DATA["release"] + arch = "amd64" + params["MIRROR"] = cc_apt_configure.get_default_mirrors(arch)[ + "PRIMARY" + ] return params def _myjoin(self, *args, **kwargs): """_myjoin - redir into writable tmpdir""" - if (args[0] == "/etc/apt/sources.list.d/" and - args[1] == "cloud_config_sources.list" and - len(args) == 2): + if ( + args[0] == "/etc/apt/sources.list.d/" + and args[1] == "cloud_config_sources.list" + and len(args) == 2 + ): return self.join(self.tmp, args[0].lstrip("/"), args[1]) else: return self.join(*args, **kwargs) @@ -109,81 +116,131 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): """ params = self._get_default_params() - self._add_apt_sources(cfg, TARGET, 
template_params=params, - aa_repo_match=self.matcher) + self._add_apt_sources( + cfg, TARGET, template_params=params, aa_repo_match=self.matcher + ) self.assertTrue(os.path.isfile(filename)) contents = util.load_file(filename) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", "http://test.ubuntu.com/ubuntu", - "karmic-backports", - "main universe multiverse restricted"), - contents, flags=re.IGNORECASE)) + self.assertTrue( + re.search( + r"%s %s %s %s\n" + % ( + "deb", + "http://test.ubuntu.com/ubuntu", + "karmic-backports", + "main universe multiverse restricted", + ), + contents, + flags=re.IGNORECASE, + ) + ) def test_apt_v3_src_basic(self): """test_apt_v3_src_basic - Test fix deb source string""" - cfg = {self.aptlistfile: {'source': - ('deb http://test.ubuntu.com/ubuntu' - ' karmic-backports' - ' main universe multiverse restricted')}} + cfg = { + self.aptlistfile: { + "source": ( + "deb http://test.ubuntu.com/ubuntu" + " karmic-backports" + " main universe multiverse restricted" + ) + } + } self._apt_src_basic(self.aptlistfile, cfg) def test_apt_v3_src_basic_tri(self): """test_apt_v3_src_basic_tri - Test multiple fix deb source strings""" - cfg = {self.aptlistfile: {'source': - ('deb http://test.ubuntu.com/ubuntu' - ' karmic-backports' - ' main universe multiverse restricted')}, - self.aptlistfile2: {'source': - ('deb http://test.ubuntu.com/ubuntu' - ' precise-backports' - ' main universe multiverse restricted')}, - self.aptlistfile3: {'source': - ('deb http://test.ubuntu.com/ubuntu' - ' lucid-backports' - ' main universe multiverse restricted')}} + cfg = { + self.aptlistfile: { + "source": ( + "deb http://test.ubuntu.com/ubuntu" + " karmic-backports" + " main universe multiverse restricted" + ) + }, + self.aptlistfile2: { + "source": ( + "deb http://test.ubuntu.com/ubuntu" + " precise-backports" + " main universe multiverse restricted" + ) + }, + self.aptlistfile3: { + "source": ( + "deb http://test.ubuntu.com/ubuntu" + " lucid-backports" + " main universe multiverse restricted" + ) + }, + } self._apt_src_basic(self.aptlistfile, cfg) # extra verify on two extra files of this test contents = util.load_file(self.aptlistfile2) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", "http://test.ubuntu.com/ubuntu", - "precise-backports", - "main universe multiverse restricted"), - contents, flags=re.IGNORECASE)) + self.assertTrue( + re.search( + r"%s %s %s %s\n" + % ( + "deb", + "http://test.ubuntu.com/ubuntu", + "precise-backports", + "main universe multiverse restricted", + ), + contents, + flags=re.IGNORECASE, + ) + ) contents = util.load_file(self.aptlistfile3) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", "http://test.ubuntu.com/ubuntu", - "lucid-backports", - "main universe multiverse restricted"), - contents, flags=re.IGNORECASE)) + self.assertTrue( + re.search( + r"%s %s %s %s\n" + % ( + "deb", + "http://test.ubuntu.com/ubuntu", + "lucid-backports", + "main universe multiverse restricted", + ), + contents, + flags=re.IGNORECASE, + ) + ) def _apt_src_replacement(self, filename, cfg): """apt_src_replace Test Autoreplacement of MIRROR and RELEASE in source specs """ params = self._get_default_params() - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) + self._add_apt_sources( + cfg, TARGET, template_params=params, aa_repo_match=self.matcher + ) self.assertTrue(os.path.isfile(filename)) contents = util.load_file(filename) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", params['MIRROR'], params['RELEASE'], - 
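
The v3 tests route every call through _add_apt_sources, a thin wrapper that patches update_packages for the duration of the call so no real package-index refresh runs. The same pattern reduced to standard-library pieces; the pkg namespace below is a stand-in, not cloud-init's actual module:

    from types import SimpleNamespace
    from unittest import mock

    # Hypothetical stand-in for the module holding the side-effecting step.
    pkg = SimpleNamespace(update_packages=lambda: print("would hit the network"))

    def add_sources_quietly():
        with mock.patch.object(pkg, "update_packages") as m_update:
            pkg.update_packages()        # the code under test would call this
        assert m_update.call_count == 1  # recorded, but nothing really ran

    add_sources_quietly()
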
"multiverse"), - contents, flags=re.IGNORECASE)) + self.assertTrue( + re.search( + r"%s %s %s %s\n" + % ("deb", params["MIRROR"], params["RELEASE"], "multiverse"), + contents, + flags=re.IGNORECASE, + ) + ) def test_apt_v3_src_replace(self): """test_apt_v3_src_replace - Test replacement of MIRROR & RELEASE""" - cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'}} + cfg = {self.aptlistfile: {"source": "deb $MIRROR $RELEASE multiverse"}} self._apt_src_replacement(self.aptlistfile, cfg) def test_apt_v3_src_replace_fn(self): """test_apt_v3_src_replace_fn - Test filename overwritten in dict""" - cfg = {'ignored': {'source': 'deb $MIRROR $RELEASE multiverse', - 'filename': self.aptlistfile}} + cfg = { + "ignored": { + "source": "deb $MIRROR $RELEASE multiverse", + "filename": self.aptlistfile, + } + } # second file should overwrite the dict key self._apt_src_replacement(self.aptlistfile, cfg) @@ -197,22 +254,34 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): # extra verify on two extra files of this test params = self._get_default_params() contents = util.load_file(self.aptlistfile2) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", params['MIRROR'], params['RELEASE'], - "main"), - contents, flags=re.IGNORECASE)) + self.assertTrue( + re.search( + r"%s %s %s %s\n" + % ("deb", params["MIRROR"], params["RELEASE"], "main"), + contents, + flags=re.IGNORECASE, + ) + ) contents = util.load_file(self.aptlistfile3) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", params['MIRROR'], params['RELEASE'], - "universe"), - contents, flags=re.IGNORECASE)) + self.assertTrue( + re.search( + r"%s %s %s %s\n" + % ("deb", params["MIRROR"], params["RELEASE"], "universe"), + contents, + flags=re.IGNORECASE, + ) + ) def test_apt_v3_src_replace_tri(self): """test_apt_v3_src_replace_tri - Test multiple replace/overwrites""" - cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'}, - 'notused': {'source': 'deb $MIRROR $RELEASE main', - 'filename': self.aptlistfile2}, - self.aptlistfile3: {'source': 'deb $MIRROR $RELEASE universe'}} + cfg = { + self.aptlistfile: {"source": "deb $MIRROR $RELEASE multiverse"}, + "notused": { + "source": "deb $MIRROR $RELEASE main", + "filename": self.aptlistfile2, + }, + self.aptlistfile3: {"source": "deb $MIRROR $RELEASE universe"}, + } self._apt_src_replace_tri(cfg) def _apt_src_keyid(self, filename, cfg, keynum, is_hardened=None): @@ -221,9 +290,10 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): """ params = self._get_default_params() - with mock.patch.object(cc_apt_configure, 'add_apt_key') as mockobj: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) + with mock.patch.object(cc_apt_configure, "add_apt_key") as mockobj: + self._add_apt_sources( + cfg, TARGET, template_params=params, aa_repo_match=self.matcher + ) # check if it added the right number of keys calls = [] @@ -238,103 +308,165 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): self.assertTrue(os.path.isfile(filename)) contents = util.load_file(filename) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "main"), - contents, flags=re.IGNORECASE)) + self.assertTrue( + re.search( + r"%s %s %s %s\n" + % ( + "deb", + "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu", + "xenial", + "main", + ), + contents, + flags=re.IGNORECASE, + ) + ) def test_apt_v3_src_keyid(self): """test_apt_v3_src_keyid - Test source + 
keyid with filename""" - cfg = {self.aptlistfile: {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'filename': self.aptlistfile, - 'keyid': "03683F77"}} + cfg = { + self.aptlistfile: { + "source": ( + "deb " + "http://ppa.launchpad.net/" + "smoser/cloud-init-test/ubuntu" + " xenial main" + ), + "filename": self.aptlistfile, + "keyid": "03683F77", + } + } self._apt_src_keyid(self.aptlistfile, cfg, 1) def test_apt_v3_src_keyid_tri(self): """test_apt_v3_src_keyid_tri - Test multiple src+key+filen writes""" - cfg = {self.aptlistfile: {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'keyid': "03683F77"}, - 'ignored': {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial universe'), - 'keyid': "03683F77", - 'filename': self.aptlistfile2}, - self.aptlistfile3: {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial multiverse'), - 'filename': self.aptlistfile3, - 'keyid': "03683F77"}} + cfg = { + self.aptlistfile: { + "source": ( + "deb " + "http://ppa.launchpad.net/" + "smoser/cloud-init-test/ubuntu" + " xenial main" + ), + "keyid": "03683F77", + }, + "ignored": { + "source": ( + "deb " + "http://ppa.launchpad.net/" + "smoser/cloud-init-test/ubuntu" + " xenial universe" + ), + "keyid": "03683F77", + "filename": self.aptlistfile2, + }, + self.aptlistfile3: { + "source": ( + "deb " + "http://ppa.launchpad.net/" + "smoser/cloud-init-test/ubuntu" + " xenial multiverse" + ), + "filename": self.aptlistfile3, + "keyid": "03683F77", + }, + } self._apt_src_keyid(self.aptlistfile, cfg, 3) contents = util.load_file(self.aptlistfile2) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "universe"), - contents, flags=re.IGNORECASE)) + self.assertTrue( + re.search( + r"%s %s %s %s\n" + % ( + "deb", + "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu", + "xenial", + "universe", + ), + contents, + flags=re.IGNORECASE, + ) + ) contents = util.load_file(self.aptlistfile3) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "multiverse"), - contents, flags=re.IGNORECASE)) + self.assertTrue( + re.search( + r"%s %s %s %s\n" + % ( + "deb", + "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu", + "xenial", + "multiverse", + ), + contents, + flags=re.IGNORECASE, + ) + ) def test_apt_v3_src_key(self): """test_apt_v3_src_key - Test source + key""" params = self._get_default_params() - cfg = {self.aptlistfile: {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'filename': self.aptlistfile, - 'key': "fakekey 4321"}} - - with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - - calls = (call( - 'add', - output_file=pathlib.Path(self.aptlistfile).stem, - data='fakekey 4321', - hardened=False),) + cfg = { + self.aptlistfile: { + "source": ( + "deb " + "http://ppa.launchpad.net/" + "smoser/cloud-init-test/ubuntu" + " xenial main" + ), + "filename": self.aptlistfile, + "key": "fakekey 4321", + } + } + + with mock.patch.object(cc_apt_configure, "apt_key") as mockobj: + self._add_apt_sources( + cfg, TARGET, template_params=params, aa_repo_match=self.matcher + ) + + calls = ( + call( + "add", + 
output_file=pathlib.Path(self.aptlistfile).stem, + data="fakekey 4321", + hardened=False, + ), + ) mockobj.assert_has_calls(calls, any_order=True) self.assertTrue(os.path.isfile(self.aptlistfile)) contents = util.load_file(self.aptlistfile) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "main"), - contents, flags=re.IGNORECASE)) + self.assertTrue( + re.search( + r"%s %s %s %s\n" + % ( + "deb", + "http://ppa.launchpad.net/smoser/cloud-init-test/ubuntu", + "xenial", + "main", + ), + contents, + flags=re.IGNORECASE, + ) + ) def test_apt_v3_src_keyonly(self): """test_apt_v3_src_keyonly - Test key without source""" params = self._get_default_params() - cfg = {self.aptlistfile: {'key': "fakekey 4242"}} - - with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - - calls = (call( - 'add', - output_file=pathlib.Path(self.aptlistfile).stem, - data='fakekey 4242', - hardened=False),) + cfg = {self.aptlistfile: {"key": "fakekey 4242"}} + + with mock.patch.object(cc_apt_configure, "apt_key") as mockobj: + self._add_apt_sources( + cfg, TARGET, template_params=params, aa_repo_match=self.matcher + ) + + calls = ( + call( + "add", + output_file=pathlib.Path(self.aptlistfile).stem, + data="fakekey 4242", + hardened=False, + ), + ) mockobj.assert_has_calls(calls, any_order=True) # filename should be ignored on key only @@ -343,18 +475,26 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): def test_apt_v3_src_keyidonly(self): """test_apt_v3_src_keyidonly - Test keyid without source""" params = self._get_default_params() - cfg = {self.aptlistfile: {'keyid': "03683F77"}} - with mock.patch.object(subp, 'subp', - return_value=('fakekey 1212', '')): - with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - - calls = (call( - 'add', - output_file=pathlib.Path(self.aptlistfile).stem, - data='fakekey 1212', - hardened=False),) + cfg = {self.aptlistfile: {"keyid": "03683F77"}} + with mock.patch.object( + subp, "subp", return_value=("fakekey 1212", "") + ): + with mock.patch.object(cc_apt_configure, "apt_key") as mockobj: + self._add_apt_sources( + cfg, + TARGET, + template_params=params, + aa_repo_match=self.matcher, + ) + + calls = ( + call( + "add", + output_file=pathlib.Path(self.aptlistfile).stem, + data="fakekey 1212", + hardened=False, + ), + ) mockobj.assert_has_calls(calls, any_order=True) # filename should be ignored on key only @@ -368,21 +508,25 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): """ params = self._get_default_params() - with mock.patch.object(cc_apt_configure, 'add_apt_key_raw') as mockkey: - with mock.patch.object(gpg, 'getkeybyid', - return_value=expectedkey) as mockgetkey: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) + with mock.patch.object(cc_apt_configure, "add_apt_key_raw") as mockkey: + with mock.patch.object( + gpg, "getkeybyid", return_value=expectedkey + ) as mockgetkey: + self._add_apt_sources( + cfg, + TARGET, + template_params=params, + aa_repo_match=self.matcher, + ) keycfg = cfg[self.aptlistfile] - mockgetkey.assert_called_with(keycfg['keyid'], - keycfg.get('keyserver', - 'keyserver.ubuntu.com')) + mockgetkey.assert_called_with( + keycfg["keyid"], keycfg.get("keyserver", "keyserver.ubuntu.com") + ) if is_hardened is not 
None: mockkey.assert_called_with( - expectedkey, - keycfg['keyfile'], - hardened=is_hardened) + expectedkey, keycfg["keyfile"], hardened=is_hardened + ) # filename should be ignored on key only self.assertFalse(os.path.isfile(self.aptlistfile)) @@ -390,25 +534,27 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): def test_apt_v3_src_keyid_real(self): """test_apt_v3_src_keyid_real - Test keyid including key add""" keyid = "03683F77" - cfg = {self.aptlistfile: {'keyid': keyid, - 'keyfile': self.aptlistfile}} + cfg = {self.aptlistfile: {"keyid": keyid, "keyfile": self.aptlistfile}} self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) def test_apt_v3_src_longkeyid_real(self): """test_apt_v3_src_longkeyid_real Test long keyid including key add""" keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" - cfg = {self.aptlistfile: {'keyid': keyid, - 'keyfile': self.aptlistfile}} + cfg = {self.aptlistfile: {"keyid": keyid, "keyfile": self.aptlistfile}} self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) def test_apt_v3_src_longkeyid_ks_real(self): """test_apt_v3_src_longkeyid_ks_real Test long keyid from other ks""" keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" - cfg = {self.aptlistfile: {'keyid': keyid, - 'keyfile': self.aptlistfile, - 'keyserver': 'keys.gnupg.net'}} + cfg = { + self.aptlistfile: { + "keyid": keyid, + "keyfile": self.aptlistfile, + "keyserver": "keys.gnupg.net", + } + } self.apt_src_keyid_real(cfg, EXPECTEDKEY) @@ -416,21 +562,31 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): """test_apt_v3_src_keyid_keyserver - Test custom keyserver""" keyid = "03683F77" params = self._get_default_params() - cfg = {self.aptlistfile: {'keyid': keyid, - 'keyfile': self.aptlistfile, - 'keyserver': 'test.random.com'}} + cfg = { + self.aptlistfile: { + "keyid": keyid, + "keyfile": self.aptlistfile, + "keyserver": "test.random.com", + } + } # in some test environments only *.ubuntu.com is reachable # so mock the call and check if the config got there - with mock.patch.object(gpg, 'getkeybyid', - return_value="fakekey") as mockgetkey: - with mock.patch.object(cc_apt_configure, - 'add_apt_key_raw') as mockadd: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - - mockgetkey.assert_called_with('03683F77', 'test.random.com') - mockadd.assert_called_with('fakekey', self.aptlistfile, hardened=False) + with mock.patch.object( + gpg, "getkeybyid", return_value="fakekey" + ) as mockgetkey: + with mock.patch.object( + cc_apt_configure, "add_apt_key_raw" + ) as mockadd: + self._add_apt_sources( + cfg, + TARGET, + template_params=params, + aa_repo_match=self.matcher, + ) + + mockgetkey.assert_called_with("03683F77", "test.random.com") + mockadd.assert_called_with("fakekey", self.aptlistfile, hardened=False) # filename should be ignored on key only self.assertFalse(os.path.isfile(self.aptlistfile)) @@ -438,13 +594,15 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): def test_apt_v3_src_ppa(self): """test_apt_v3_src_ppa - Test specification of a ppa""" params = self._get_default_params() - cfg = {self.aptlistfile: {'source': 'ppa:smoser/cloud-init-test'}} + cfg = {self.aptlistfile: {"source": "ppa:smoser/cloud-init-test"}} with mock.patch("cloudinit.subp.subp") as mockobj: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - mockobj.assert_any_call(['add-apt-repository', - 'ppa:smoser/cloud-init-test'], target=TARGET) + self._add_apt_sources( + cfg, TARGET, 
template_params=params, aa_repo_match=self.matcher + ) + mockobj.assert_any_call( + ["add-apt-repository", "ppa:smoser/cloud-init-test"], target=TARGET + ) # adding ppa should ignore filename (uses add-apt-repository) self.assertFalse(os.path.isfile(self.aptlistfile)) @@ -452,19 +610,30 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): def test_apt_v3_src_ppa_tri(self): """test_apt_v3_src_ppa_tri - Test specification of multiple ppa's""" params = self._get_default_params() - cfg = {self.aptlistfile: {'source': 'ppa:smoser/cloud-init-test'}, - self.aptlistfile2: {'source': 'ppa:smoser/cloud-init-test2'}, - self.aptlistfile3: {'source': 'ppa:smoser/cloud-init-test3'}} + cfg = { + self.aptlistfile: {"source": "ppa:smoser/cloud-init-test"}, + self.aptlistfile2: {"source": "ppa:smoser/cloud-init-test2"}, + self.aptlistfile3: {"source": "ppa:smoser/cloud-init-test3"}, + } with mock.patch("cloudinit.subp.subp") as mockobj: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test'], - target=TARGET), - call(['add-apt-repository', 'ppa:smoser/cloud-init-test2'], - target=TARGET), - call(['add-apt-repository', 'ppa:smoser/cloud-init-test3'], - target=TARGET)] + self._add_apt_sources( + cfg, TARGET, template_params=params, aa_repo_match=self.matcher + ) + calls = [ + call( + ["add-apt-repository", "ppa:smoser/cloud-init-test"], + target=TARGET, + ), + call( + ["add-apt-repository", "ppa:smoser/cloud-init-test2"], + target=TARGET, + ), + call( + ["add-apt-repository", "ppa:smoser/cloud-init-test3"], + target=TARGET, + ), + ] mockobj.assert_has_calls(calls, any_order=True) # adding ppa should ignore all filenames (uses add-apt-repository) @@ -478,34 +647,46 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): pre = "/var/lib/apt/lists" # filenames are archive dependent - arch = 's390x' + arch = "s390x" m_get_dpkg_architecture.return_value = arch component = "ubuntu-ports" archive = "ports.ubuntu.com" - cfg = {'primary': [{'arches': ["default"], - 'uri': - 'http://test.ubuntu.com/%s/' % component}], - 'security': [{'arches': ["default"], - 'uri': - 'http://testsec.ubuntu.com/%s/' % component}]} - post = ("%s_dists_%s-updates_InRelease" % - (component, MOCK_LSB_RELEASE_DATA['codename'])) - fromfn = ("%s/%s_%s" % (pre, archive, post)) - tofn = ("%s/test.ubuntu.com_%s" % (pre, post)) + cfg = { + "primary": [ + { + "arches": ["default"], + "uri": "http://test.ubuntu.com/%s/" % component, + } + ], + "security": [ + { + "arches": ["default"], + "uri": "http://testsec.ubuntu.com/%s/" % component, + } + ], + } + post = "%s_dists_%s-updates_InRelease" % ( + component, + MOCK_LSB_RELEASE_DATA["codename"], + ) + fromfn = "%s/%s_%s" % (pre, archive, post) + tofn = "%s/test.ubuntu.com_%s" % (pre, post) mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch) - self.assertEqual(mirrors['MIRROR'], - "http://test.ubuntu.com/%s/" % component) - self.assertEqual(mirrors['PRIMARY'], - "http://test.ubuntu.com/%s/" % component) - self.assertEqual(mirrors['SECURITY'], - "http://testsec.ubuntu.com/%s/" % component) + self.assertEqual( + mirrors["MIRROR"], "http://test.ubuntu.com/%s/" % component + ) + self.assertEqual( + mirrors["PRIMARY"], "http://test.ubuntu.com/%s/" % component + ) + self.assertEqual( + mirrors["SECURITY"], "http://testsec.ubuntu.com/%s/" % component + ) - with mock.patch.object(os, 'rename') as mockren: - with mock.patch.object(glob, 'glob', - return_value=[fromfn]): 
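
rename_apt_lists is exercised without touching the filesystem: glob.glob is mocked to report a fake list file and os.rename is mocked so the test can assert on the computed destination. The technique in isolation, with made-up file names:

    import glob
    import os
    from unittest import mock

    def demo_rename_check():
        # Fake the directory listing, capture the rename, assert on the
        # computed destination -- no real files are created or moved.
        with mock.patch.object(os, "rename") as m_rename:
            with mock.patch.object(
                glob, "glob", return_value=["/var/lib/apt/lists/old_name"]
            ):
                for path in glob.glob("/var/lib/apt/lists/*"):
                    os.rename(path, path.replace("old_name", "new_name"))
        m_rename.assert_any_call(
            "/var/lib/apt/lists/old_name", "/var/lib/apt/lists/new_name"
        )

    demo_rename_check()
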
+ with mock.patch.object(os, "rename") as mockren: + with mock.patch.object(glob, "glob", return_value=[fromfn]): cc_apt_configure.rename_apt_lists(mirrors, TARGET, arch) mockren.assert_any_call(fromfn, tofn) @@ -515,13 +696,13 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): target = os.path.join(self.tmp, "rename_non_slash") apt_lists_d = os.path.join(target, "./" + cc_apt_configure.APT_LISTS) - arch = 'amd64' + arch = "amd64" m_get_dpkg_architecture.return_value = arch mirror_path = "some/random/path/" primary = "http://test.ubuntu.com/" + mirror_path security = "http://test-security.ubuntu.com/" + mirror_path - mirrors = {'PRIMARY': primary, 'SECURITY': security} + mirrors = {"PRIMARY": primary, "SECURITY": security} # these match default archive prefixes opri_pre = "archive.ubuntu.com_ubuntu_dists_xenial" @@ -559,203 +740,226 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): @staticmethod def test_apt_v3_proxy(): """test_apt_v3_proxy - Test apt_*proxy configuration""" - cfg = {"proxy": "foobar1", - "http_proxy": "foobar2", - "ftp_proxy": "foobar3", - "https_proxy": "foobar4"} + cfg = { + "proxy": "foobar1", + "http_proxy": "foobar2", + "ftp_proxy": "foobar3", + "https_proxy": "foobar4", + } - with mock.patch.object(util, 'write_file') as mockobj: + with mock.patch.object(util, "write_file") as mockobj: cc_apt_configure.apply_apt_config(cfg, "proxyfn", "notused") - mockobj.assert_called_with('proxyfn', - ('Acquire::http::Proxy "foobar1";\n' - 'Acquire::http::Proxy "foobar2";\n' - 'Acquire::ftp::Proxy "foobar3";\n' - 'Acquire::https::Proxy "foobar4";\n')) + mockobj.assert_called_with( + "proxyfn", + 'Acquire::http::Proxy "foobar1";\n' + 'Acquire::http::Proxy "foobar2";\n' + 'Acquire::ftp::Proxy "foobar3";\n' + 'Acquire::https::Proxy "foobar4";\n', + ) def test_apt_v3_mirror(self): """test_apt_v3_mirror - Test defining a mirror""" pmir = "http://us.archive.ubuntu.com/ubuntu/" smir = "http://security.ubuntu.com/ubuntu/" - cfg = {"primary": [{'arches': ["default"], - "uri": pmir}], - "security": [{'arches': ["default"], - "uri": smir}]} + cfg = { + "primary": [{"arches": ["default"], "uri": pmir}], + "security": [{"arches": ["default"], "uri": smir}], + } mirrors = cc_apt_configure.find_apt_mirror_info( - cfg, FakeCloud(), 'amd64') + cfg, FakeCloud(), "amd64" + ) - self.assertEqual(mirrors['MIRROR'], - pmir) - self.assertEqual(mirrors['PRIMARY'], - pmir) - self.assertEqual(mirrors['SECURITY'], - smir) + self.assertEqual(mirrors["MIRROR"], pmir) + self.assertEqual(mirrors["PRIMARY"], pmir) + self.assertEqual(mirrors["SECURITY"], smir) def test_apt_v3_mirror_default(self): """test_apt_v3_mirror_default - Test without defining a mirror""" - arch = 'amd64' + arch = "amd64" default_mirrors = cc_apt_configure.get_default_mirrors(arch) pmir = default_mirrors["PRIMARY"] smir = default_mirrors["SECURITY"] mycloud = get_cloud() mirrors = cc_apt_configure.find_apt_mirror_info({}, mycloud, arch) - self.assertEqual(mirrors['MIRROR'], - pmir) - self.assertEqual(mirrors['PRIMARY'], - pmir) - self.assertEqual(mirrors['SECURITY'], - smir) + self.assertEqual(mirrors["MIRROR"], pmir) + self.assertEqual(mirrors["PRIMARY"], pmir) + self.assertEqual(mirrors["SECURITY"], smir) def test_apt_v3_mirror_arches(self): """test_apt_v3_mirror_arches - Test arches selection of mirror""" pmir = "http://my-primary.ubuntu.com/ubuntu/" smir = "http://my-security.ubuntu.com/ubuntu/" - arch = 'ppc64el' - cfg = {"primary": [{'arches': ["default"], "uri": "notthis-primary"}, - {'arches': [arch], 
"uri": pmir}], - "security": [{'arches': ["default"], "uri": "nothis-security"}, - {'arches': [arch], "uri": smir}]} + arch = "ppc64el" + cfg = { + "primary": [ + {"arches": ["default"], "uri": "notthis-primary"}, + {"arches": [arch], "uri": pmir}, + ], + "security": [ + {"arches": ["default"], "uri": "nothis-security"}, + {"arches": [arch], "uri": smir}, + ], + } mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch) - self.assertEqual(mirrors['PRIMARY'], pmir) - self.assertEqual(mirrors['MIRROR'], pmir) - self.assertEqual(mirrors['SECURITY'], smir) + self.assertEqual(mirrors["PRIMARY"], pmir) + self.assertEqual(mirrors["MIRROR"], pmir) + self.assertEqual(mirrors["SECURITY"], smir) def test_apt_v3_mirror_arches_default(self): """test_apt_v3_mirror_arches - Test falling back to default arch""" pmir = "http://us.archive.ubuntu.com/ubuntu/" smir = "http://security.ubuntu.com/ubuntu/" - cfg = {"primary": [{'arches': ["default"], - "uri": pmir}, - {'arches': ["thisarchdoesntexist"], - "uri": "notthis"}], - "security": [{'arches': ["thisarchdoesntexist"], - "uri": "nothat"}, - {'arches': ["default"], - "uri": smir}]} + cfg = { + "primary": [ + {"arches": ["default"], "uri": pmir}, + {"arches": ["thisarchdoesntexist"], "uri": "notthis"}, + ], + "security": [ + {"arches": ["thisarchdoesntexist"], "uri": "nothat"}, + {"arches": ["default"], "uri": smir}, + ], + } mirrors = cc_apt_configure.find_apt_mirror_info( - cfg, FakeCloud(), 'amd64') + cfg, FakeCloud(), "amd64" + ) - self.assertEqual(mirrors['MIRROR'], - pmir) - self.assertEqual(mirrors['PRIMARY'], - pmir) - self.assertEqual(mirrors['SECURITY'], - smir) + self.assertEqual(mirrors["MIRROR"], pmir) + self.assertEqual(mirrors["PRIMARY"], pmir) + self.assertEqual(mirrors["SECURITY"], smir) @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture") def test_apt_v3_get_def_mir_non_intel_no_arch( self, m_get_dpkg_architecture ): - arch = 'ppc64el' + arch = "ppc64el" m_get_dpkg_architecture.return_value = arch - expected = {'PRIMARY': 'http://ports.ubuntu.com/ubuntu-ports', - 'SECURITY': 'http://ports.ubuntu.com/ubuntu-ports'} + expected = { + "PRIMARY": "http://ports.ubuntu.com/ubuntu-ports", + "SECURITY": "http://ports.ubuntu.com/ubuntu-ports", + } self.assertEqual(expected, cc_apt_configure.get_default_mirrors()) def test_apt_v3_get_default_mirrors_non_intel_with_arch(self): - found = cc_apt_configure.get_default_mirrors('ppc64el') + found = cc_apt_configure.get_default_mirrors("ppc64el") - expected = {'PRIMARY': 'http://ports.ubuntu.com/ubuntu-ports', - 'SECURITY': 'http://ports.ubuntu.com/ubuntu-ports'} + expected = { + "PRIMARY": "http://ports.ubuntu.com/ubuntu-ports", + "SECURITY": "http://ports.ubuntu.com/ubuntu-ports", + } self.assertEqual(expected, found) def test_apt_v3_mirror_arches_sysdefault(self): """test_apt_v3_mirror_arches - Test arches fallback to sys default""" - arch = 'amd64' + arch = "amd64" default_mirrors = cc_apt_configure.get_default_mirrors(arch) pmir = default_mirrors["PRIMARY"] smir = default_mirrors["SECURITY"] mycloud = get_cloud() - cfg = {"primary": [{'arches': ["thisarchdoesntexist_64"], - "uri": "notthis"}, - {'arches': ["thisarchdoesntexist"], - "uri": "notthiseither"}], - "security": [{'arches': ["thisarchdoesntexist"], - "uri": "nothat"}, - {'arches': ["thisarchdoesntexist_64"], - "uri": "nothateither"}]} + cfg = { + "primary": [ + {"arches": ["thisarchdoesntexist_64"], "uri": "notthis"}, + {"arches": ["thisarchdoesntexist"], "uri": "notthiseither"}, + ], + "security": [ + 
{"arches": ["thisarchdoesntexist"], "uri": "nothat"}, + {"arches": ["thisarchdoesntexist_64"], "uri": "nothateither"}, + ], + } mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch) - self.assertEqual(mirrors['MIRROR'], pmir) - self.assertEqual(mirrors['PRIMARY'], pmir) - self.assertEqual(mirrors['SECURITY'], smir) + self.assertEqual(mirrors["MIRROR"], pmir) + self.assertEqual(mirrors["PRIMARY"], pmir) + self.assertEqual(mirrors["SECURITY"], smir) def test_apt_v3_mirror_search(self): """test_apt_v3_mirror_search - Test searching mirrors in a list - mock checks to avoid relying on network connectivity""" + mock checks to avoid relying on network connectivity""" pmir = "http://us.archive.ubuntu.com/ubuntu/" smir = "http://security.ubuntu.com/ubuntu/" - cfg = {"primary": [{'arches': ["default"], - "search": ["pfailme", pmir]}], - "security": [{'arches': ["default"], - "search": ["sfailme", smir]}]} - - with mock.patch.object(cc_apt_configure.util, 'search_for_mirror', - side_effect=[pmir, smir]) as mocksearch: - mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), - 'amd64') - - calls = [call(["pfailme", pmir]), - call(["sfailme", smir])] + cfg = { + "primary": [{"arches": ["default"], "search": ["pfailme", pmir]}], + "security": [{"arches": ["default"], "search": ["sfailme", smir]}], + } + + with mock.patch.object( + cc_apt_configure.util, + "search_for_mirror", + side_effect=[pmir, smir], + ) as mocksearch: + mirrors = cc_apt_configure.find_apt_mirror_info( + cfg, FakeCloud(), "amd64" + ) + + calls = [call(["pfailme", pmir]), call(["sfailme", smir])] mocksearch.assert_has_calls(calls) - self.assertEqual(mirrors['MIRROR'], - pmir) - self.assertEqual(mirrors['PRIMARY'], - pmir) - self.assertEqual(mirrors['SECURITY'], - smir) + self.assertEqual(mirrors["MIRROR"], pmir) + self.assertEqual(mirrors["PRIMARY"], pmir) + self.assertEqual(mirrors["SECURITY"], smir) def test_apt_v3_mirror_search_many2(self): """test_apt_v3_mirror_search_many3 - Test both mirrors specs at once""" pmir = "http://us.archive.ubuntu.com/ubuntu/" smir = "http://security.ubuntu.com/ubuntu/" - cfg = {"primary": [{'arches': ["default"], - "uri": pmir, - "search": ["pfailme", "foo"]}], - "security": [{'arches': ["default"], - "uri": smir, - "search": ["sfailme", "bar"]}]} + cfg = { + "primary": [ + { + "arches": ["default"], + "uri": pmir, + "search": ["pfailme", "foo"], + } + ], + "security": [ + { + "arches": ["default"], + "uri": smir, + "search": ["sfailme", "bar"], + } + ], + } - arch = 'amd64' + arch = "amd64" # should be called only once per type, despite two mirror configs mycloud = None - with mock.patch.object(cc_apt_configure, 'get_mirror', - return_value="http://mocked/foo") as mockgm: + with mock.patch.object( + cc_apt_configure, "get_mirror", return_value="http://mocked/foo" + ) as mockgm: mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch) - calls = [call(cfg, 'primary', arch, mycloud), - call(cfg, 'security', arch, mycloud)] + calls = [ + call(cfg, "primary", arch, mycloud), + call(cfg, "security", arch, mycloud), + ] mockgm.assert_has_calls(calls) # should not be called, since primary is specified - with mock.patch.object(cc_apt_configure.util, - 'search_for_mirror') as mockse: + with mock.patch.object( + cc_apt_configure.util, "search_for_mirror" + ) as mockse: mirrors = cc_apt_configure.find_apt_mirror_info( - cfg, FakeCloud(), arch) + cfg, FakeCloud(), arch + ) mockse.assert_not_called() - self.assertEqual(mirrors['MIRROR'], - pmir) - 
self.assertEqual(mirrors['PRIMARY'], - pmir) - self.assertEqual(mirrors['SECURITY'], - smir) + self.assertEqual(mirrors["MIRROR"], pmir) + self.assertEqual(mirrors["PRIMARY"], pmir) + self.assertEqual(mirrors["SECURITY"], smir) def test_apt_v3_url_resolvable(self): """test_apt_v3_url_resolvable - Test resolving urls""" - with mock.patch.object(util, 'is_resolvable') as mockresolve: + with mock.patch.object(util, "is_resolvable") as mockresolve: util.is_resolvable_url("http://1.2.3.4/ubuntu") mockresolve.assert_called_with("1.2.3.4") - with mock.patch.object(util, 'is_resolvable') as mockresolve: + with mock.patch.object(util, "is_resolvable") as mockresolve: util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu") mockresolve.assert_called_with("us.archive.ubuntu.com") @@ -764,25 +968,27 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): util._DNS_REDIRECT_IP = None bad = [(None, None, None, "badname", ["10.3.2.1"])] good = [(None, None, None, "goodname", ["10.2.3.4"])] - with mock.patch.object(socket, 'getaddrinfo', - side_effect=[bad, bad, bad, good, - good]) as mocksock: + with mock.patch.object( + socket, "getaddrinfo", side_effect=[bad, bad, bad, good, good] + ) as mocksock: ret = util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu") ret2 = util.is_resolvable_url("http://1.2.3.4/ubuntu") - mocksock.assert_any_call('does-not-exist.example.com.', None, - 0, 0, 1, 2) - mocksock.assert_any_call('example.invalid.', None, 0, 0, 1, 2) - mocksock.assert_any_call('us.archive.ubuntu.com', None) - mocksock.assert_any_call('1.2.3.4', None) + mocksock.assert_any_call( + "does-not-exist.example.com.", None, 0, 0, 1, 2 + ) + mocksock.assert_any_call("example.invalid.", None, 0, 0, 1, 2) + mocksock.assert_any_call("us.archive.ubuntu.com", None) + mocksock.assert_any_call("1.2.3.4", None) self.assertTrue(ret) self.assertTrue(ret2) # side effect need only bad ret after initial call - with mock.patch.object(socket, 'getaddrinfo', - side_effect=[bad]) as mocksock: + with mock.patch.object( + socket, "getaddrinfo", side_effect=[bad] + ) as mocksock: ret3 = util.is_resolvable_url("http://failme.com/ubuntu") - calls = [call('failme.com', None)] + calls = [call("failme.com", None)] mocksock.assert_has_calls(calls) self.assertFalse(ret3) @@ -818,24 +1024,28 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""" # single disable other suite disabled = ["$RELEASE-updates"] - expect = ("""deb http://ubuntu.com//ubuntu xenial main + expect = ( + """deb http://ubuntu.com//ubuntu xenial main # suite disabled by cloud-init: deb http://ubuntu.com//ubuntu""" - """ xenial-updates main + """ xenial-updates main deb http://ubuntu.com//ubuntu xenial-security main deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""") +deb http://ubuntu.com/ubuntu/ xenial-proposed main""" + ) result = cc_apt_configure.disable_suites(disabled, orig, release) self.assertEqual(expect, result) # multi disable disabled = ["$RELEASE-updates", "$RELEASE-security"] - expect = ("""deb http://ubuntu.com//ubuntu xenial main + expect = ( + """deb http://ubuntu.com//ubuntu xenial main # suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """ - """xenial-updates main + """xenial-updates main # suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """ - """xenial-security main + """xenial-security main deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""") +deb http://ubuntu.com/ubuntu/ xenial-proposed 
main""" + ) result = cc_apt_configure.disable_suites(disabled, orig, release) self.assertEqual(expect, result) @@ -848,17 +1058,19 @@ deb-src http://ubuntu.com//ubuntu universe multiverse deb http://UBUNTU.com//ubuntu xenial-updates main deb http://UBUNTU.COM//ubuntu xenial-updates main deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - expect = ("""deb http://ubuntu.com//ubuntu xenial main + expect = ( + """deb http://ubuntu.com//ubuntu xenial main # suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """ - """xenial-updates main + """xenial-updates main # suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """ - """xenial-security main + """xenial-security main deb-src http://ubuntu.com//ubuntu universe multiverse # suite disabled by cloud-init: deb http://UBUNTU.com//ubuntu """ - """xenial-updates main + """xenial-updates main # suite disabled by cloud-init: deb http://UBUNTU.COM//ubuntu """ - """xenial-updates main -deb http://ubuntu.com/ubuntu/ xenial-proposed main""") + """xenial-updates main +deb http://ubuntu.com/ubuntu/ xenial-proposed main""" + ) result = cc_apt_configure.disable_suites(disabled, orig, release) self.assertEqual(expect, result) @@ -872,17 +1084,19 @@ deb-src http://ubuntu.com//ubuntu universe multiverse #deb http://UBUNTU.com//ubuntu xenial-updates main deb http://UBUNTU.COM//ubuntu xenial-updates main deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - expect = ("""deb http://ubuntu.com//ubuntu xenial main + expect = ( + """deb http://ubuntu.com//ubuntu xenial main # suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """ - """xenial-updates main + """xenial-updates main # suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """ - """xenial-security main + """xenial-security main deb-src http://ubuntu.com//ubuntu universe multiverse #foo #deb http://UBUNTU.com//ubuntu xenial-updates main # suite disabled by cloud-init: deb http://UBUNTU.COM//ubuntu """ - """xenial-updates main -deb http://ubuntu.com/ubuntu/ xenial-proposed main""") + """xenial-updates main +deb http://ubuntu.com/ubuntu/ xenial-proposed main""" + ) result = cc_apt_configure.disable_suites(disabled, orig, release) self.assertEqual(expect, result) @@ -919,12 +1133,14 @@ deb [a=b] http://ubu.com//ubu xenial-updates main deb http://ubuntu.com//ubuntu xenial-security main deb-src http://ubuntu.com//ubuntu universe multiverse deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - expect = ("""deb http://ubuntu.com//ubuntu xenial main + expect = ( + """deb http://ubuntu.com//ubuntu xenial main # suite disabled by cloud-init: deb [a=b] http://ubu.com//ubu """ - """xenial-updates main + """xenial-updates main deb http://ubuntu.com//ubuntu xenial-security main deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""") +deb http://ubuntu.com/ubuntu/ xenial-proposed main""" + ) result = cc_apt_configure.disable_suites(disabled, orig, release) self.assertEqual(expect, result) @@ -951,134 +1167,167 @@ deb [arch=foo] http://ubuntu.com//ubuntu xenial-updates main deb http://ubuntu.com//ubuntu xenial-security main deb-src http://ubuntu.com//ubuntu universe multiverse deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - expect = ("""deb http://ubuntu.com//ubuntu xenial main + expect = ( + """deb http://ubuntu.com//ubuntu xenial main deb [arch=foo] http://ubuntu.com//ubuntu xenial-updates main # suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """ - """xenial-security main + """xenial-security main deb-src 
http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""") +deb http://ubuntu.com/ubuntu/ xenial-proposed main""" + ) result = cc_apt_configure.disable_suites(disabled, orig, release) self.assertEqual(expect, result) def test_disable_suites_blank_lines(self): """test_disable_suites_blank_lines - ensure blank lines allowed""" - lines = ["deb %(repo)s %(rel)s main universe", - "", - "deb %(repo)s %(rel)s-updates main universe", - " # random comment", - "#comment here", - ""] + lines = [ + "deb %(repo)s %(rel)s main universe", + "", + "deb %(repo)s %(rel)s-updates main universe", + " # random comment", + "#comment here", + "", + ] rel = "trusty" - repo = 'http://example.com/mirrors/ubuntu' - orig = "\n".join(lines) % {'repo': repo, 'rel': rel} + repo = "http://example.com/mirrors/ubuntu" + orig = "\n".join(lines) % {"repo": repo, "rel": rel} self.assertEqual( - orig, cc_apt_configure.disable_suites(["proposed"], orig, rel)) + orig, cc_apt_configure.disable_suites(["proposed"], orig, rel) + ) - @mock.patch("cloudinit.util.get_hostname", return_value='abc.localdomain') + @mock.patch("cloudinit.util.get_hostname", return_value="abc.localdomain") def test_apt_v3_mirror_search_dns(self, m_get_hostname): """test_apt_v3_mirror_search_dns - Test searching dns patterns""" pmir = "phit" smir = "shit" - arch = 'amd64' - mycloud = get_cloud('ubuntu') - cfg = {"primary": [{'arches': ["default"], - "search_dns": True}], - "security": [{'arches': ["default"], - "search_dns": True}]} - - with mock.patch.object(cc_apt_configure, 'get_mirror', - return_value="http://mocked/foo") as mockgm: + arch = "amd64" + mycloud = get_cloud("ubuntu") + cfg = { + "primary": [{"arches": ["default"], "search_dns": True}], + "security": [{"arches": ["default"], "search_dns": True}], + } + + with mock.patch.object( + cc_apt_configure, "get_mirror", return_value="http://mocked/foo" + ) as mockgm: mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch) - calls = [call(cfg, 'primary', arch, mycloud), - call(cfg, 'security', arch, mycloud)] + calls = [ + call(cfg, "primary", arch, mycloud), + call(cfg, "security", arch, mycloud), + ] mockgm.assert_has_calls(calls) - with mock.patch.object(cc_apt_configure, 'search_for_mirror_dns', - return_value="http://mocked/foo") as mocksdns: + with mock.patch.object( + cc_apt_configure, + "search_for_mirror_dns", + return_value="http://mocked/foo", + ) as mocksdns: mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch) - calls = [call(True, 'primary', cfg, mycloud), - call(True, 'security', cfg, mycloud)] + calls = [ + call(True, "primary", cfg, mycloud), + call(True, "security", cfg, mycloud), + ] mocksdns.assert_has_calls(calls) # first return is for the non-dns call before - with mock.patch.object(cc_apt_configure.util, 'search_for_mirror', - side_effect=[None, pmir, None, smir]) as mockse: + with mock.patch.object( + cc_apt_configure.util, + "search_for_mirror", + side_effect=[None, pmir, None, smir], + ) as mockse: mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch) - calls = [call(None), - call(['http://ubuntu-mirror.localdomain/ubuntu', - 'http://ubuntu-mirror/ubuntu']), - call(None), - call(['http://ubuntu-security-mirror.localdomain/ubuntu', - 'http://ubuntu-security-mirror/ubuntu'])] + calls = [ + call(None), + call( + [ + "http://ubuntu-mirror.localdomain/ubuntu", + "http://ubuntu-mirror/ubuntu", + ] + ), + call(None), + call( + [ + "http://ubuntu-security-mirror.localdomain/ubuntu", + 
"http://ubuntu-security-mirror/ubuntu", + ] + ), + ] mockse.assert_has_calls(calls) - self.assertEqual(mirrors['MIRROR'], - pmir) - self.assertEqual(mirrors['PRIMARY'], - pmir) - self.assertEqual(mirrors['SECURITY'], - smir) + self.assertEqual(mirrors["MIRROR"], pmir) + self.assertEqual(mirrors["PRIMARY"], pmir) + self.assertEqual(mirrors["SECURITY"], smir) def test_apt_v3_add_mirror_keys(self): """test_apt_v3_add_mirror_keys - Test adding key for mirrors""" - arch = 'amd64' + arch = "amd64" cfg = { - 'primary': [ - {'arches': [arch], - 'uri': 'http://test.ubuntu.com/', - 'filename': 'primary', - 'key': 'fakekey_primary'}], - 'security': [ - {'arches': [arch], - 'uri': 'http://testsec.ubuntu.com/', - 'filename': 'security', - 'key': 'fakekey_security'}] + "primary": [ + { + "arches": [arch], + "uri": "http://test.ubuntu.com/", + "filename": "primary", + "key": "fakekey_primary", + } + ], + "security": [ + { + "arches": [arch], + "uri": "http://testsec.ubuntu.com/", + "filename": "security", + "key": "fakekey_security", + } + ], } - with mock.patch.object(cc_apt_configure, - 'add_apt_key_raw') as mockadd: + with mock.patch.object(cc_apt_configure, "add_apt_key_raw") as mockadd: cc_apt_configure.add_mirror_keys(cfg, TARGET) calls = [ - mock.call('fakekey_primary', 'primary', hardened=False), - mock.call('fakekey_security', 'security', hardened=False), + mock.call("fakekey_primary", "primary", hardened=False), + mock.call("fakekey_security", "security", hardened=False), ] mockadd.assert_has_calls(calls, any_order=True) class TestDebconfSelections(TestCase): - @mock.patch("cloudinit.config.cc_apt_configure.subp.subp") def test_set_sel_appends_newline_if_absent(self, m_subp): """Automatically append a newline to debconf-set-selections config.""" - selections = b'some/setting boolean true' + selections = b"some/setting boolean true" cc_apt_configure.debconf_set_selections(selections=selections) - cc_apt_configure.debconf_set_selections(selections=selections + b'\n') + cc_apt_configure.debconf_set_selections(selections=selections + b"\n") m_call = mock.call( - ['debconf-set-selections'], data=selections + b'\n', capture=True, - target=None) + ["debconf-set-selections"], + data=selections + b"\n", + capture=True, + target=None, + ) self.assertEqual([m_call, m_call], m_subp.call_args_list) @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections") def test_no_set_sel_if_none_to_set(self, m_set_sel): - cc_apt_configure.apply_debconf_selections({'foo': 'bar'}) + cc_apt_configure.apply_debconf_selections({"foo": "bar"}) m_set_sel.assert_not_called() - @mock.patch("cloudinit.config.cc_apt_configure." - "debconf_set_selections") - @mock.patch("cloudinit.config.cc_apt_configure." 
- "util.get_installed_packages") + @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections") + @mock.patch( + "cloudinit.config.cc_apt_configure.util.get_installed_packages" + ) def test_set_sel_call_has_expected_input(self, m_get_inst, m_set_sel): data = { - 'set1': 'pkga pkga/q1 mybool false', - 'set2': ('pkgb\tpkgb/b1\tstr\tthis is a string\n' - 'pkgc\tpkgc/ip\tstring\t10.0.0.1')} - lines = '\n'.join(data.values()).split('\n') + "set1": "pkga pkga/q1 mybool false", + "set2": ( + "pkgb\tpkgb/b1\tstr\tthis is a string\n" + "pkgc\tpkgc/ip\tstring\t10.0.0.1" + ), + } + lines = "\n".join(data.values()).split("\n") m_get_inst.return_value = ["adduser", "apparmor"] m_set_sel.return_value = None - cc_apt_configure.apply_debconf_selections({'debconf_selections': data}) + cc_apt_configure.apply_debconf_selections({"debconf_selections": data}) self.assertTrue(m_get_inst.called) self.assertEqual(m_set_sel.call_count, 1) @@ -1092,43 +1341,59 @@ class TestDebconfSelections(TestCase): @mock.patch("cloudinit.config.cc_apt_configure.dpkg_reconfigure") @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections") - @mock.patch("cloudinit.config.cc_apt_configure." - "util.get_installed_packages") - def test_reconfigure_if_intersection(self, m_get_inst, m_set_sel, - m_dpkg_r): + @mock.patch( + "cloudinit.config.cc_apt_configure.util.get_installed_packages" + ) + def test_reconfigure_if_intersection( + self, m_get_inst, m_set_sel, m_dpkg_r + ): data = { - 'set1': 'pkga pkga/q1 mybool false', - 'set2': ('pkgb\tpkgb/b1\tstr\tthis is a string\n' - 'pkgc\tpkgc/ip\tstring\t10.0.0.1'), - 'cloud-init': ('cloud-init cloud-init/datasources' - 'multiselect MAAS')} + "set1": "pkga pkga/q1 mybool false", + "set2": ( + "pkgb\tpkgb/b1\tstr\tthis is a string\n" + "pkgc\tpkgc/ip\tstring\t10.0.0.1" + ), + "cloud-init": "cloud-init cloud-init/datasourcesmultiselect MAAS", + } m_set_sel.return_value = None - m_get_inst.return_value = ["adduser", "apparmor", "pkgb", - "cloud-init", 'zdog'] + m_get_inst.return_value = [ + "adduser", + "apparmor", + "pkgb", + "cloud-init", + "zdog", + ] - cc_apt_configure.apply_debconf_selections({'debconf_selections': data}) + cc_apt_configure.apply_debconf_selections({"debconf_selections": data}) # reconfigure should be called with the intersection # of (packages in config, packages installed) self.assertEqual(m_dpkg_r.call_count, 1) # assumes called with *args (dpkg_reconfigure([a,b,c], target=)) packages = m_dpkg_r.call_args_list[0][0][0] - self.assertEqual(set(['cloud-init', 'pkgb']), set(packages)) + self.assertEqual(set(["cloud-init", "pkgb"]), set(packages)) @mock.patch("cloudinit.config.cc_apt_configure.dpkg_reconfigure") @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections") - @mock.patch("cloudinit.config.cc_apt_configure." 
- "util.get_installed_packages") - def test_reconfigure_if_no_intersection(self, m_get_inst, m_set_sel, - m_dpkg_r): - data = {'set1': 'pkga pkga/q1 mybool false'} - - m_get_inst.return_value = ["adduser", "apparmor", "pkgb", - "cloud-init", 'zdog'] + @mock.patch( + "cloudinit.config.cc_apt_configure.util.get_installed_packages" + ) + def test_reconfigure_if_no_intersection( + self, m_get_inst, m_set_sel, m_dpkg_r + ): + data = {"set1": "pkga pkga/q1 mybool false"} + + m_get_inst.return_value = [ + "adduser", + "apparmor", + "pkgb", + "cloud-init", + "zdog", + ] m_set_sel.return_value = None - cc_apt_configure.apply_debconf_selections({'debconf_selections': data}) + cc_apt_configure.apply_debconf_selections({"debconf_selections": data}) self.assertTrue(m_get_inst.called) self.assertEqual(m_dpkg_r.call_count, 0) @@ -1141,19 +1406,25 @@ class TestDebconfSelections(TestCase): # mocking clean_cloud_init directly does not work. So we mock # the CONFIG_CLEANERS dictionary and assert our cleaner is called. ci_cleaner = mock.MagicMock() - with mock.patch.dict(("cloudinit.config.cc_apt_configure." - "CONFIG_CLEANERS"), - values={'cloud-init': ci_cleaner}, clear=True): - cc_apt_configure.dpkg_reconfigure(['pkga', 'cloud-init'], - target=target) + with mock.patch.dict( + "cloudinit.config.cc_apt_configure.CONFIG_CLEANERS", + values={"cloud-init": ci_cleaner}, + clear=True, + ): + cc_apt_configure.dpkg_reconfigure( + ["pkga", "cloud-init"], target=target + ) # cloud-init is actually the only package we have a cleaner for # so for now, its the only one that should reconfigured self.assertTrue(m_subp.called) ci_cleaner.assert_called_with(target) self.assertEqual(m_subp.call_count, 1) found = m_subp.call_args_list[0][0][0] - expected = ['dpkg-reconfigure', '--frontend=noninteractive', - 'cloud-init'] + expected = [ + "dpkg-reconfigure", + "--frontend=noninteractive", + "cloud-init", + ] self.assertEqual(expected, found) @mock.patch("cloudinit.config.cc_apt_configure.subp.subp") @@ -1163,8 +1434,9 @@ class TestDebconfSelections(TestCase): @mock.patch("cloudinit.config.cc_apt_configure.subp.subp") def test_dpkg_reconfigure_not_done_if_no_cleaners(self, m_subp): - cc_apt_configure.dpkg_reconfigure(['pkgfoo', 'pkgbar']) + cc_apt_configure.dpkg_reconfigure(["pkgfoo", "pkgbar"]) m_subp.assert_not_called() + # # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_apk_configure.py b/tests/unittests/config/test_cc_apk_configure.py index 70139451..6fbc3dec 100644 --- a/tests/unittests/config/test_cc_apk_configure.py +++ b/tests/unittests/config/test_cc_apk_configure.py @@ -8,20 +8,19 @@ import logging import os import textwrap -from cloudinit import (cloud, helpers, util) - +from cloudinit import cloud, helpers, util from cloudinit.config import cc_apk_configure -from tests.unittests.helpers import (FilesystemMockingTestCase, mock) +from tests.unittests.helpers import FilesystemMockingTestCase, mock REPO_FILE = "/etc/apk/repositories" DEFAULT_MIRROR_URL = "https://alpine.global.ssl.fastly.net/alpine" -CC_APK = 'cloudinit.config.cc_apk_configure' +CC_APK = "cloudinit.config.cc_apk_configure" class TestNoConfig(FilesystemMockingTestCase): def setUp(self): super(TestNoConfig, self).setUp() - self.add_patch(CC_APK + '._write_repositories_file', 'm_write_repos') + self.add_patch(CC_APK + "._write_repositories_file", "m_write_repos") self.name = "apk-configure" self.cloud_init = None self.log = logging.getLogger("TestNoConfig") @@ -34,8 +33,9 @@ class TestNoConfig(FilesystemMockingTestCase): """ config = 
util.get_builtin_cfg() - cc_apk_configure.handle(self.name, config, self.cloud_init, - self.log, self.args) + cc_apk_configure.handle( + self.name, config, self.cloud_init, self.log, self.args + ) self.assertEqual(0, self.m_write_repos.call_count) @@ -45,15 +45,15 @@ class TestConfig(FilesystemMockingTestCase): super(TestConfig, self).setUp() self.new_root = self.tmp_dir() self.new_root = self.reRoot(root=self.new_root) - for dirname in ['tmp', 'etc/apk']: + for dirname in ["tmp", "etc/apk"]: util.ensure_dir(os.path.join(self.new_root, dirname)) - self.paths = helpers.Paths({'templates_dir': self.new_root}) + self.paths = helpers.Paths({"templates_dir": self.new_root}) self.name = "apk-configure" self.cloud = cloud.Cloud(None, self.paths, None, None, None) self.log = logging.getLogger("TestNoConfig") self.args = [] - @mock.patch(CC_APK + '._write_repositories_file') + @mock.patch(CC_APK + "._write_repositories_file") def test_no_repo_settings(self, m_write_repos): """ Test that nothing is written if the 'alpine-repo' key @@ -61,20 +61,22 @@ class TestConfig(FilesystemMockingTestCase): """ config = {"apk_repos": {}} - cc_apk_configure.handle(self.name, config, self.cloud, self.log, - self.args) + cc_apk_configure.handle( + self.name, config, self.cloud, self.log, self.args + ) self.assertEqual(0, m_write_repos.call_count) - @mock.patch(CC_APK + '._write_repositories_file') + @mock.patch(CC_APK + "._write_repositories_file") def test_empty_repo_settings(self, m_write_repos): """ Test that nothing is written if 'alpine_repo' list is empty. """ config = {"apk_repos": {"alpine_repo": []}} - cc_apk_configure.handle(self.name, config, self.cloud, self.log, - self.args) + cc_apk_configure.handle( + self.name, config, self.cloud, self.log, self.args + ) self.assertEqual(0, m_write_repos.call_count) @@ -82,19 +84,15 @@ class TestConfig(FilesystemMockingTestCase): """ Test when only details of main repo is written to file. """ - alpine_version = 'v3.12' - config = { - "apk_repos": { - "alpine_repo": { - "version": alpine_version - } - } - } + alpine_version = "v3.12" + config = {"apk_repos": {"alpine_repo": {"version": alpine_version}}} - cc_apk_configure.handle(self.name, config, self.cloud, self.log, - self.args) + cc_apk_configure.handle( + self.name, config, self.cloud, self.log, self.args + ) - expected_content = textwrap.dedent("""\ + expected_content = textwrap.dedent( + """\ # # Created by cloud-init # @@ -103,7 +101,10 @@ class TestConfig(FilesystemMockingTestCase): {0}/{1}/main - """.format(DEFAULT_MIRROR_URL, alpine_version)) + """.format( + DEFAULT_MIRROR_URL, alpine_version + ) + ) self.assertEqual(expected_content, util.load_file(REPO_FILE)) @@ -112,20 +113,22 @@ class TestConfig(FilesystemMockingTestCase): Test when only details of main and community repos are written to file. 
""" - alpine_version = 'edge' + alpine_version = "edge" config = { "apk_repos": { "alpine_repo": { "version": alpine_version, - "community_enabled": True + "community_enabled": True, } } } - cc_apk_configure.handle(self.name, config, self.cloud, self.log, - self.args) + cc_apk_configure.handle( + self.name, config, self.cloud, self.log, self.args + ) - expected_content = textwrap.dedent("""\ + expected_content = textwrap.dedent( + """\ # # Created by cloud-init # @@ -135,7 +138,10 @@ class TestConfig(FilesystemMockingTestCase): {0}/{1}/main {0}/{1}/community - """.format(DEFAULT_MIRROR_URL, alpine_version)) + """.format( + DEFAULT_MIRROR_URL, alpine_version + ) + ) self.assertEqual(expected_content, util.load_file(REPO_FILE)) @@ -144,21 +150,23 @@ class TestConfig(FilesystemMockingTestCase): Test when details of main, community and testing repos are written to file. """ - alpine_version = 'v3.12' + alpine_version = "v3.12" config = { "apk_repos": { "alpine_repo": { "version": alpine_version, "community_enabled": True, - "testing_enabled": True + "testing_enabled": True, } } } - cc_apk_configure.handle(self.name, config, self.cloud, self.log, - self.args) + cc_apk_configure.handle( + self.name, config, self.cloud, self.log, self.args + ) - expected_content = textwrap.dedent("""\ + expected_content = textwrap.dedent( + """\ # # Created by cloud-init # @@ -172,7 +180,10 @@ class TestConfig(FilesystemMockingTestCase): # {0}/edge/testing - """.format(DEFAULT_MIRROR_URL, alpine_version)) + """.format( + DEFAULT_MIRROR_URL, alpine_version + ) + ) self.assertEqual(expected_content, util.load_file(REPO_FILE)) @@ -181,21 +192,23 @@ class TestConfig(FilesystemMockingTestCase): Test when details of main, community and testing repos for Edge version of Alpine are written to file. """ - alpine_version = 'edge' + alpine_version = "edge" config = { "apk_repos": { "alpine_repo": { "version": alpine_version, "community_enabled": True, - "testing_enabled": True + "testing_enabled": True, } } } - cc_apk_configure.handle(self.name, config, self.cloud, self.log, - self.args) + cc_apk_configure.handle( + self.name, config, self.cloud, self.log, self.args + ) - expected_content = textwrap.dedent("""\ + expected_content = textwrap.dedent( + """\ # # Created by cloud-init # @@ -206,7 +219,10 @@ class TestConfig(FilesystemMockingTestCase): {0}/{1}/community {0}/{1}/testing - """.format(DEFAULT_MIRROR_URL, alpine_version)) + """.format( + DEFAULT_MIRROR_URL, alpine_version + ) + ) self.assertEqual(expected_content, util.load_file(REPO_FILE)) @@ -215,23 +231,25 @@ class TestConfig(FilesystemMockingTestCase): Test when details of main, community, testing and local repos are written to file. 
""" - alpine_version = 'v3.12' - local_repo_url = 'http://some.mirror/whereever' + alpine_version = "v3.12" + local_repo_url = "http://some.mirror/whereever" config = { "apk_repos": { "alpine_repo": { "version": alpine_version, "community_enabled": True, - "testing_enabled": True + "testing_enabled": True, }, - "local_repo_base_url": local_repo_url + "local_repo_base_url": local_repo_url, } } - cc_apk_configure.handle(self.name, config, self.cloud, self.log, - self.args) + cc_apk_configure.handle( + self.name, config, self.cloud, self.log, self.args + ) - expected_content = textwrap.dedent("""\ + expected_content = textwrap.dedent( + """\ # # Created by cloud-init # @@ -250,7 +268,10 @@ class TestConfig(FilesystemMockingTestCase): # {2}/{1} - """.format(DEFAULT_MIRROR_URL, alpine_version, local_repo_url)) + """.format( + DEFAULT_MIRROR_URL, alpine_version, local_repo_url + ) + ) self.assertEqual(expected_content, util.load_file(REPO_FILE)) @@ -259,23 +280,25 @@ class TestConfig(FilesystemMockingTestCase): Test when details of main, community, testing and local repos for Edge version of Alpine are written to file. """ - alpine_version = 'edge' - local_repo_url = 'http://some.mirror/whereever' + alpine_version = "edge" + local_repo_url = "http://some.mirror/whereever" config = { "apk_repos": { "alpine_repo": { "version": alpine_version, "community_enabled": True, - "testing_enabled": True + "testing_enabled": True, }, - "local_repo_base_url": local_repo_url + "local_repo_base_url": local_repo_url, } } - cc_apk_configure.handle(self.name, config, self.cloud, self.log, - self.args) + cc_apk_configure.handle( + self.name, config, self.cloud, self.log, self.args + ) - expected_content = textwrap.dedent("""\ + expected_content = textwrap.dedent( + """\ # # Created by cloud-init # @@ -291,7 +314,10 @@ class TestConfig(FilesystemMockingTestCase): # {2}/{1} - """.format(DEFAULT_MIRROR_URL, alpine_version, local_repo_url)) + """.format( + DEFAULT_MIRROR_URL, alpine_version, local_repo_url + ) + ) self.assertEqual(expected_content, util.load_file(REPO_FILE)) diff --git a/tests/unittests/config/test_cc_apt_pipelining.py b/tests/unittests/config/test_cc_apt_pipelining.py index d7589d35..b4497156 100644 --- a/tests/unittests/config/test_cc_apt_pipelining.py +++ b/tests/unittests/config/test_cc_apt_pipelining.py @@ -3,26 +3,26 @@ """Tests cc_apt_pipelining handler""" import cloudinit.config.cc_apt_pipelining as cc_apt_pipelining - from tests.unittests.helpers import CiTestCase, mock class TestAptPipelining(CiTestCase): - - @mock.patch('cloudinit.config.cc_apt_pipelining.util.write_file') + @mock.patch("cloudinit.config.cc_apt_pipelining.util.write_file") def test_not_disabled_by_default(self, m_write_file): """ensure that default behaviour is to not disable pipelining""" - cc_apt_pipelining.handle('foo', {}, None, mock.MagicMock(), None) + cc_apt_pipelining.handle("foo", {}, None, mock.MagicMock(), None) self.assertEqual(0, m_write_file.call_count) - @mock.patch('cloudinit.config.cc_apt_pipelining.util.write_file') + @mock.patch("cloudinit.config.cc_apt_pipelining.util.write_file") def test_false_disables_pipelining(self, m_write_file): """ensure that pipelining can be disabled with correct config""" cc_apt_pipelining.handle( - 'foo', {'apt_pipelining': 'false'}, None, mock.MagicMock(), None) + "foo", {"apt_pipelining": "false"}, None, mock.MagicMock(), None + ) self.assertEqual(1, m_write_file.call_count) args, _ = m_write_file.call_args self.assertEqual(cc_apt_pipelining.DEFAULT_FILE, args[0]) 
self.assertIn('Pipeline-Depth "0"', args[1]) + # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_bootcmd.py b/tests/unittests/config/test_cc_bootcmd.py index 6f38f12a..6d8793b9 100644 --- a/tests/unittests/config/test_cc_bootcmd.py +++ b/tests/unittests/config/test_cc_bootcmd.py @@ -2,11 +2,14 @@ import logging import tempfile +from cloudinit import subp, util from cloudinit.config.cc_bootcmd import handle, schema -from cloudinit import (subp, util) from tests.unittests.helpers import ( - CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema) - + CiTestCase, + SchemaTestCaseMixin, + mock, + skipUnlessJsonSchema, +) from tests.unittests.util import get_cloud LOG = logging.getLogger(__name__) @@ -16,7 +19,8 @@ class FakeExtendedTempFile(object): def __init__(self, suffix): self.suffix = suffix self.handle = tempfile.NamedTemporaryFile( - prefix="ci-%s." % self.__class__.__name__, delete=False) + prefix="ci-%s." % self.__class__.__name__, delete=False + ) def __enter__(self): return self.handle @@ -30,8 +34,9 @@ class TestBootcmd(CiTestCase): with_logs = True - _etmpfile_path = ('cloudinit.config.cc_bootcmd.temp_utils.' - 'ExtendedTemporaryFile') + _etmpfile_path = ( + "cloudinit.config.cc_bootcmd.temp_utils.ExtendedTemporaryFile" + ) def setUp(self): super(TestBootcmd, self).setUp() @@ -42,21 +47,23 @@ class TestBootcmd(CiTestCase): """When the provided config doesn't contain bootcmd, skip it.""" cfg = {} mycloud = get_cloud() - handle('notimportant', cfg, mycloud, LOG, None) + handle("notimportant", cfg, mycloud, LOG, None) self.assertIn( "Skipping module named notimportant, no 'bootcmd' key", - self.logs.getvalue()) + self.logs.getvalue(), + ) def test_handler_invalid_command_set(self): """Commands which can't be converted to shell will raise errors.""" - invalid_config = {'bootcmd': 1} + invalid_config = {"bootcmd": 1} cc = get_cloud() with self.assertRaises(TypeError) as context_manager: - handle('cc_bootcmd', invalid_config, cc, LOG, []) - self.assertIn('Failed to shellify bootcmd', self.logs.getvalue()) + handle("cc_bootcmd", invalid_config, cc, LOG, []) + self.assertIn("Failed to shellify bootcmd", self.logs.getvalue()) self.assertEqual( "Input to shellify was type 'int'. Expected list or tuple.", - str(context_manager.exception)) + str(context_manager.exception), + ) @skipUnlessJsonSchema() def test_handler_schema_validation_warns_non_array_type(self): @@ -65,14 +72,15 @@ class TestBootcmd(CiTestCase): Schema validation is not strict, so bootcmd attempts to shellify the invalid content. """ - invalid_config = {'bootcmd': 1} + invalid_config = {"bootcmd": 1} cc = get_cloud() with self.assertRaises(TypeError): - handle('cc_bootcmd', invalid_config, cc, LOG, []) + handle("cc_bootcmd", invalid_config, cc, LOG, []) self.assertIn( - 'Invalid config:\nbootcmd: 1 is not of type \'array\'', - self.logs.getvalue()) - self.assertIn('Failed to shellify', self.logs.getvalue()) + "Invalid config:\nbootcmd: 1 is not of type 'array'", + self.logs.getvalue(), + ) + self.assertIn("Failed to shellify", self.logs.getvalue()) @skipUnlessJsonSchema() def test_handler_schema_validation_warns_non_array_item_type(self): @@ -82,54 +90,58 @@ class TestBootcmd(CiTestCase): invalid content. 
""" invalid_config = { - 'bootcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]} + "bootcmd": ["ls /", 20, ["wget", "http://stuff/blah"], {"a": "n"}] + } cc = get_cloud() with self.assertRaises(TypeError) as context_manager: - handle('cc_bootcmd', invalid_config, cc, LOG, []) + handle("cc_bootcmd", invalid_config, cc, LOG, []) expected_warnings = [ - 'bootcmd.1: 20 is not valid under any of the given schemas', - 'bootcmd.3: {\'a\': \'n\'} is not valid under any of the given' - ' schema' + "bootcmd.1: 20 is not valid under any of the given schemas", + "bootcmd.3: {'a': 'n'} is not valid under any of the given schema", ] logs = self.logs.getvalue() for warning in expected_warnings: self.assertIn(warning, logs) - self.assertIn('Failed to shellify', logs) + self.assertIn("Failed to shellify", logs) self.assertEqual( - ("Unable to shellify type 'int'. Expected list, string, tuple. " - "Got: 20"), - str(context_manager.exception)) + "Unable to shellify type 'int'. Expected list, string, tuple. " + "Got: 20", + str(context_manager.exception), + ) def test_handler_creates_and_runs_bootcmd_script_with_instance_id(self): """Valid schema runs a bootcmd script with INSTANCE_ID in the env.""" cc = get_cloud() - out_file = self.tmp_path('bootcmd.out', self.new_root) + out_file = self.tmp_path("bootcmd.out", self.new_root) my_id = "b6ea0f59-e27d-49c6-9f87-79f19765a425" - valid_config = {'bootcmd': [ - 'echo {0} $INSTANCE_ID > {1}'.format(my_id, out_file)]} + valid_config = { + "bootcmd": ["echo {0} $INSTANCE_ID > {1}".format(my_id, out_file)] + } with mock.patch(self._etmpfile_path, FakeExtendedTempFile): - with self.allow_subp(['/bin/sh']): - handle('cc_bootcmd', valid_config, cc, LOG, []) - self.assertEqual(my_id + ' iid-datasource-none\n', - util.load_file(out_file)) + with self.allow_subp(["/bin/sh"]): + handle("cc_bootcmd", valid_config, cc, LOG, []) + self.assertEqual( + my_id + " iid-datasource-none\n", util.load_file(out_file) + ) def test_handler_runs_bootcmd_script_with_error(self): """When a valid script generates an error, that error is raised.""" cc = get_cloud() - valid_config = {'bootcmd': ['exit 1']} # Script with error + valid_config = {"bootcmd": ["exit 1"]} # Script with error with mock.patch(self._etmpfile_path, FakeExtendedTempFile): - with self.allow_subp(['/bin/sh']): + with self.allow_subp(["/bin/sh"]): with self.assertRaises(subp.ProcessExecutionError) as ctxt: - handle('does-not-matter', valid_config, cc, LOG, []) + handle("does-not-matter", valid_config, cc, LOG, []) self.assertIn( - 'Unexpected error while running command.\n' - "Command: ['/bin/sh',", - str(ctxt.exception)) + "Unexpected error while running command.\nCommand: ['/bin/sh',", + str(ctxt.exception), + ) self.assertIn( - 'Failed to run bootcmd module does-not-matter', - self.logs.getvalue()) + "Failed to run bootcmd module does-not-matter", + self.logs.getvalue(), + ) @skipUnlessJsonSchema() @@ -141,12 +153,14 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin): def test_duplicates_are_fine_array_array(self): """Duplicated commands array/array entries are allowed.""" self.assertSchemaValid( - ["byebye", "byebye"], 'command entries can be duplicate') + ["byebye", "byebye"], "command entries can be duplicate" + ) def test_duplicates_are_fine_array_string(self): """Duplicated commands array/string entries are allowed.""" self.assertSchemaValid( - ["echo bye", "echo bye"], "command entries can be duplicate.") + ["echo bye", "echo bye"], "command entries can be duplicate." 
+ ) # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_ca_certs.py b/tests/unittests/config/test_cc_ca_certs.py index 91b005d0..c49922e6 100644 --- a/tests/unittests/config/test_cc_ca_certs.py +++ b/tests/unittests/config/test_cc_ca_certs.py @@ -6,13 +6,9 @@ import unittest from contextlib import ExitStack from unittest import mock -from cloudinit import distros +from cloudinit import distros, helpers, subp, util from cloudinit.config import cc_ca_certs -from cloudinit import helpers -from cloudinit import subp -from cloudinit import util from tests.unittests.helpers import TestCase - from tests.unittests.util import get_cloud @@ -31,12 +27,15 @@ class TestNoConfig(unittest.TestCase): config = util.get_builtin_cfg() with ExitStack() as mocks: util_mock = mocks.enter_context( - mock.patch.object(util, 'write_file')) + mock.patch.object(util, "write_file") + ) certs_mock = mocks.enter_context( - mock.patch.object(cc_ca_certs, 'update_ca_certs')) + mock.patch.object(cc_ca_certs, "update_ca_certs") + ) - cc_ca_certs.handle(self.name, config, self.cloud_init, self.log, - self.args) + cc_ca_certs.handle( + self.name, config, self.cloud_init, self.log, self.args + ) self.assertEqual(util_mock.call_count, 0) self.assertEqual(certs_mock.call_count, 0) @@ -61,11 +60,14 @@ class TestConfig(TestCase): # Mock out the functions that actually modify the system self.mock_add = self.mocks.enter_context( - mock.patch.object(cc_ca_certs, 'add_ca_certs')) + mock.patch.object(cc_ca_certs, "add_ca_certs") + ) self.mock_update = self.mocks.enter_context( - mock.patch.object(cc_ca_certs, 'update_ca_certs')) + mock.patch.object(cc_ca_certs, "update_ca_certs") + ) self.mock_remove = self.mocks.enter_context( - mock.patch.object(cc_ca_certs, 'remove_default_ca_certs')) + mock.patch.object(cc_ca_certs, "remove_default_ca_certs") + ) def test_no_trusted_list(self): """ @@ -106,7 +108,7 @@ class TestConfig(TestCase): conf = cc_ca_certs._distro_ca_certs_configs(distro_name) cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.mock_add.assert_called_once_with(conf, ['CERT1']) + self.mock_add.assert_called_once_with(conf, ["CERT1"]) self.assertEqual(self.mock_update.call_count, 1) self.assertEqual(self.mock_remove.call_count, 0) @@ -120,7 +122,7 @@ class TestConfig(TestCase): conf = cc_ca_certs._distro_ca_certs_configs(distro_name) cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.mock_add.assert_called_once_with(conf, ['CERT1', 'CERT2']) + self.mock_add.assert_called_once_with(conf, ["CERT1", "CERT2"]) self.assertEqual(self.mock_update.call_count, 1) self.assertEqual(self.mock_remove.call_count, 0) @@ -160,20 +162,21 @@ class TestConfig(TestCase): conf = cc_ca_certs._distro_ca_certs_configs(distro_name) cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.mock_add.assert_called_once_with(conf, ['CERT1']) + self.mock_add.assert_called_once_with(conf, ["CERT1"]) self.assertEqual(self.mock_update.call_count, 1) self.assertEqual(self.mock_remove.call_count, 1) class TestAddCaCerts(TestCase): - def setUp(self): super(TestAddCaCerts, self).setUp() tmpdir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, tmpdir) - self.paths = helpers.Paths({ - 'cloud_dir': tmpdir, - }) + self.paths = helpers.Paths( + { + "cloud_dir": tmpdir, + } + ) self.add_patch("cloudinit.config.cc_ca_certs.os.stat", "m_stat") def _fetch_distro(self, kind): @@ -185,7 +188,7 @@ class TestAddCaCerts(TestCase): """Test that no certificate are written if not provided.""" for 
distro_name in cc_ca_certs.distros: conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - with mock.patch.object(util, 'write_file') as mockobj: + with mock.patch.object(util, "write_file") as mockobj: cc_ca_certs.add_ca_certs(conf, []) self.assertEqual(mockobj.call_count, 0) @@ -204,21 +207,28 @@ class TestAddCaCerts(TestCase): with ExitStack() as mocks: mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) + mock.patch.object(util, "write_file") + ) mock_load = mocks.enter_context( - mock.patch.object(util, 'load_file', - return_value=ca_certs_content)) + mock.patch.object( + util, "load_file", return_value=ca_certs_content + ) + ) cc_ca_certs.add_ca_certs(conf, [cert]) - mock_write.assert_has_calls([ - mock.call(conf['ca_cert_full_path'], - cert, mode=0o644)]) - if conf['ca_cert_config'] is not None: - mock_write.assert_has_calls([ - mock.call(conf['ca_cert_config'], - expected, omode="wb")]) - mock_load.assert_called_once_with(conf['ca_cert_config']) + mock_write.assert_has_calls( + [mock.call(conf["ca_cert_full_path"], cert, mode=0o644)] + ) + if conf["ca_cert_config"] is not None: + mock_write.assert_has_calls( + [ + mock.call( + conf["ca_cert_config"], expected, omode="wb" + ) + ] + ) + mock_load.assert_called_once_with(conf["ca_cert_config"]) def test_single_cert_no_trailing_cr(self): """Test adding a single certificate to the trusted CAs @@ -234,24 +244,32 @@ class TestAddCaCerts(TestCase): with ExitStack() as mocks: mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) + mock.patch.object(util, "write_file") + ) mock_load = mocks.enter_context( - mock.patch.object(util, 'load_file', - return_value=ca_certs_content)) + mock.patch.object( + util, "load_file", return_value=ca_certs_content + ) + ) cc_ca_certs.add_ca_certs(conf, [cert]) - mock_write.assert_has_calls([ - mock.call(conf['ca_cert_full_path'], - cert, mode=0o644)]) - if conf['ca_cert_config'] is not None: - mock_write.assert_has_calls([ - mock.call(conf['ca_cert_config'], - "%s\n%s\n" % (ca_certs_content, - conf['ca_cert_filename']), - omode="wb")]) - - mock_load.assert_called_once_with(conf['ca_cert_config']) + mock_write.assert_has_calls( + [mock.call(conf["ca_cert_full_path"], cert, mode=0o644)] + ) + if conf["ca_cert_config"] is not None: + mock_write.assert_has_calls( + [ + mock.call( + conf["ca_cert_config"], + "%s\n%s\n" + % (ca_certs_content, conf["ca_cert_filename"]), + omode="wb", + ) + ] + ) + + mock_load.assert_called_once_with(conf["ca_cert_config"]) def test_single_cert_to_empty_existing_ca_file(self): """Test adding a single certificate to the trusted CAs @@ -264,18 +282,23 @@ class TestAddCaCerts(TestCase): for distro_name in cc_ca_certs.distros: conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - with mock.patch.object(util, 'write_file', - autospec=True) as m_write: + with mock.patch.object( + util, "write_file", autospec=True + ) as m_write: cc_ca_certs.add_ca_certs(conf, [cert]) - m_write.assert_has_calls([ - mock.call(conf['ca_cert_full_path'], - cert, mode=0o644)]) - if conf['ca_cert_config'] is not None: - m_write.assert_has_calls([ - mock.call(conf['ca_cert_config'], - expected, omode="wb")]) + m_write.assert_has_calls( + [mock.call(conf["ca_cert_full_path"], cert, mode=0o644)] + ) + if conf["ca_cert_config"] is not None: + m_write.assert_has_calls( + [ + mock.call( + conf["ca_cert_config"], expected, omode="wb" + ) + ] + ) def test_multiple_certs(self): """Test adding multiple certificates to the trusted CAs.""" @@ -290,45 +313,61 @@ class 
TestAddCaCerts(TestCase): with ExitStack() as mocks: mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) + mock.patch.object(util, "write_file") + ) mock_load = mocks.enter_context( - mock.patch.object(util, 'load_file', - return_value=ca_certs_content)) + mock.patch.object( + util, "load_file", return_value=ca_certs_content + ) + ) cc_ca_certs.add_ca_certs(conf, certs) - mock_write.assert_has_calls([ - mock.call(conf['ca_cert_full_path'], - expected_cert_file, mode=0o644)]) - if conf['ca_cert_config'] is not None: - mock_write.assert_has_calls([ - mock.call(conf['ca_cert_config'], - "%s\n%s\n" % (ca_certs_content, - conf['ca_cert_filename']), - omode='wb')]) - - mock_load.assert_called_once_with(conf['ca_cert_config']) + mock_write.assert_has_calls( + [ + mock.call( + conf["ca_cert_full_path"], + expected_cert_file, + mode=0o644, + ) + ] + ) + if conf["ca_cert_config"] is not None: + mock_write.assert_has_calls( + [ + mock.call( + conf["ca_cert_config"], + "%s\n%s\n" + % (ca_certs_content, conf["ca_cert_filename"]), + omode="wb", + ) + ] + ) + + mock_load.assert_called_once_with(conf["ca_cert_config"]) class TestUpdateCaCerts(unittest.TestCase): def test_commands(self): for distro_name in cc_ca_certs.distros: conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - with mock.patch.object(subp, 'subp') as mockobj: + with mock.patch.object(subp, "subp") as mockobj: cc_ca_certs.update_ca_certs(conf) mockobj.assert_called_once_with( - conf['ca_cert_update_cmd'], capture=False) + conf["ca_cert_update_cmd"], capture=False + ) class TestRemoveDefaultCaCerts(TestCase): - def setUp(self): super(TestRemoveDefaultCaCerts, self).setUp() tmpdir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, tmpdir) - self.paths = helpers.Paths({ - 'cloud_dir': tmpdir, - }) + self.paths = helpers.Paths( + { + "cloud_dir": tmpdir, + } + ) def test_commands(self): for distro_name in cc_ca_certs.distros: @@ -336,26 +375,35 @@ class TestRemoveDefaultCaCerts(TestCase): with ExitStack() as mocks: mock_delete = mocks.enter_context( - mock.patch.object(util, 'delete_dir_contents')) + mock.patch.object(util, "delete_dir_contents") + ) mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) + mock.patch.object(util, "write_file") + ) mock_subp = mocks.enter_context( - mock.patch.object(subp, 'subp')) + mock.patch.object(subp, "subp") + ) cc_ca_certs.remove_default_ca_certs(distro_name, conf) - mock_delete.assert_has_calls([ - mock.call(conf['ca_cert_path']), - mock.call(conf['ca_cert_system_path'])]) + mock_delete.assert_has_calls( + [ + mock.call(conf["ca_cert_path"]), + mock.call(conf["ca_cert_system_path"]), + ] + ) - if conf['ca_cert_config'] is not None: + if conf["ca_cert_config"] is not None: mock_write.assert_called_once_with( - conf['ca_cert_config'], "", mode=0o644) + conf["ca_cert_config"], "", mode=0o644 + ) - if distro_name in ['debian', 'ubuntu']: + if distro_name in ["debian", "ubuntu"]: mock_subp.assert_called_once_with( - ('debconf-set-selections', '-'), - "ca-certificates \ -ca-certificates/trust_new_crts select no") + ("debconf-set-selections", "-"), + "ca-certificates ca-certificates/trust_new_crts" + " select no", + ) + # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_chef.py b/tests/unittests/config/test_cc_chef.py index 1c90a4fc..835974e5 100644 --- a/tests/unittests/config/test_cc_chef.py +++ b/tests/unittests/config/test_cc_chef.py @@ -1,21 +1,20 @@ # This file is part of cloud-init. See LICENSE file for license information. 
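
[Annotation: the import reordering in this hunk is isort's standard grouping: standard-library imports first, then third-party packages, then first-party code, with each group alphabetized and separated by a blank line. Schematically, the result for this file looks like:

    # standard library
    import json
    import logging
    import os

    # third party
    import httpretty

    # first party
    from cloudinit import util
    from cloudinit.config import cc_chef

httpretty is a third-party package, which is why it moves below the stdlib block instead of staying at the top of the file.]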
-import httpretty import json import logging import os -from cloudinit.config import cc_chef -from cloudinit import util +import httpretty +from cloudinit import util +from cloudinit.config import cc_chef from tests.unittests.helpers import ( - HttprettyTestCase, FilesystemMockingTestCase, + HttprettyTestCase, + cloud_init_project_dir, mock, skipIf, - cloud_init_project_dir, ) - from tests.unittests.util import get_cloud LOG = logging.getLogger(__name__) @@ -31,7 +30,6 @@ OMNIBUS_URL_HTTP = cc_chef.OMNIBUS_URL.replace("https:", "http:") class TestInstallChefOmnibus(HttprettyTestCase): - def setUp(self): super(TestInstallChefOmnibus, self).setUp() self.new_root = self.tmp_dir() @@ -41,70 +39,81 @@ class TestInstallChefOmnibus(HttprettyTestCase): """install_chef_from_omnibus calls subp_blob_in_tempfile.""" response = b'#!/bin/bash\necho "Hi Mom"' httpretty.register_uri( - httpretty.GET, cc_chef.OMNIBUS_URL, body=response, status=200) + httpretty.GET, cc_chef.OMNIBUS_URL, body=response, status=200 + ) ret = (None, None) # stdout, stderr but capture=False - with mock.patch("cloudinit.config.cc_chef.subp_blob_in_tempfile", - return_value=ret) as m_subp_blob: + with mock.patch( + "cloudinit.config.cc_chef.subp_blob_in_tempfile", return_value=ret + ) as m_subp_blob: cc_chef.install_chef_from_omnibus() # admittedly whitebox, but assuming subp_blob_in_tempfile works # this should be fine. self.assertEqual( - [mock.call(blob=response, args=[], basename='chef-omnibus-install', - capture=False)], - m_subp_blob.call_args_list) - - @mock.patch('cloudinit.config.cc_chef.url_helper.readurl') - @mock.patch('cloudinit.config.cc_chef.subp_blob_in_tempfile') + [ + mock.call( + blob=response, + args=[], + basename="chef-omnibus-install", + capture=False, + ) + ], + m_subp_blob.call_args_list, + ) + + @mock.patch("cloudinit.config.cc_chef.url_helper.readurl") + @mock.patch("cloudinit.config.cc_chef.subp_blob_in_tempfile") def test_install_chef_from_omnibus_retries_url(self, m_subp_blob, m_rdurl): """install_chef_from_omnibus retries OMNIBUS_URL upon failure.""" class FakeURLResponse(object): contents = '#!/bin/bash\necho "Hi Mom" > {0}/chef.out'.format( - self.new_root) + self.new_root + ) m_rdurl.return_value = FakeURLResponse() cc_chef.install_chef_from_omnibus() - expected_kwargs = {'retries': cc_chef.OMNIBUS_URL_RETRIES, - 'url': cc_chef.OMNIBUS_URL} + expected_kwargs = { + "retries": cc_chef.OMNIBUS_URL_RETRIES, + "url": cc_chef.OMNIBUS_URL, + } self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[0][1]) cc_chef.install_chef_from_omnibus(retries=10) - expected_kwargs = {'retries': 10, - 'url': cc_chef.OMNIBUS_URL} + expected_kwargs = {"retries": 10, "url": cc_chef.OMNIBUS_URL} self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[1][1]) expected_subp_kwargs = { - 'args': ['-v', '2.0'], - 'basename': 'chef-omnibus-install', - 'blob': m_rdurl.return_value.contents, - 'capture': False + "args": ["-v", "2.0"], + "basename": "chef-omnibus-install", + "blob": m_rdurl.return_value.contents, + "capture": False, } self.assertCountEqual( - expected_subp_kwargs, - m_subp_blob.call_args_list[0][1]) + expected_subp_kwargs, m_subp_blob.call_args_list[0][1] + ) @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP) - @mock.patch('cloudinit.config.cc_chef.subp_blob_in_tempfile') + @mock.patch("cloudinit.config.cc_chef.subp_blob_in_tempfile") def test_install_chef_from_omnibus_has_omnibus_version(self, m_subp_blob): """install_chef_from_omnibus provides version arg to OMNIBUS_URL.""" - 
chef_outfile = self.tmp_path('chef.out', self.new_root) + chef_outfile = self.tmp_path("chef.out", self.new_root) response = '#!/bin/bash\necho "Hi Mom" > {0}'.format(chef_outfile) httpretty.register_uri( - httpretty.GET, cc_chef.OMNIBUS_URL, body=response) - cc_chef.install_chef_from_omnibus(omnibus_version='2.0') + httpretty.GET, cc_chef.OMNIBUS_URL, body=response + ) + cc_chef.install_chef_from_omnibus(omnibus_version="2.0") called_kwargs = m_subp_blob.call_args_list[0][1] expected_kwargs = { - 'args': ['-v', '2.0'], - 'basename': 'chef-omnibus-install', - 'blob': response, - 'capture': False + "args": ["-v", "2.0"], + "basename": "chef-omnibus-install", + "blob": response, + "capture": False, } self.assertCountEqual(expected_kwargs, called_kwargs) class TestChef(FilesystemMockingTestCase): - def setUp(self): super(TestChef, self).setUp() self.tmp = self.tmp_dir() @@ -114,12 +123,13 @@ class TestChef(FilesystemMockingTestCase): self.patchOS(self.tmp) cfg = {} - cc_chef.handle('chef', cfg, get_cloud(), LOG, []) + cc_chef.handle("chef", cfg, get_cloud(), LOG, []) for d in cc_chef.CHEF_DIRS: self.assertFalse(os.path.isdir(d)) - @skipIf(not os.path.isfile(CLIENT_TEMPL), - CLIENT_TEMPL + " is not available") + @skipIf( + not os.path.isfile(CLIENT_TEMPL), CLIENT_TEMPL + " is not available" + ) def test_basic_config(self): """ test basic config looks sane @@ -147,26 +157,27 @@ class TestChef(FilesystemMockingTestCase): self.patchUtils(self.tmp) self.patchOS(self.tmp) - util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file) + util.write_file("/etc/cloud/templates/chef_client.rb.tmpl", tpl_file) cfg = { - 'chef': { - 'chef_license': "accept", - 'server_url': 'localhost', - 'validation_name': 'bob', - 'validation_key': "/etc/chef/vkey.pem", - 'validation_cert': "this is my cert", - 'encrypted_data_bag_secret': - '/etc/chef/encrypted_data_bag_secret' + "chef": { + "chef_license": "accept", + "server_url": "localhost", + "validation_name": "bob", + "validation_key": "/etc/chef/vkey.pem", + "validation_cert": "this is my cert", + "encrypted_data_bag_secret": ( + "/etc/chef/encrypted_data_bag_secret" + ), }, } - cc_chef.handle('chef', cfg, get_cloud(), LOG, []) + cc_chef.handle("chef", cfg, get_cloud(), LOG, []) for d in cc_chef.CHEF_DIRS: self.assertTrue(os.path.isdir(d)) c = util.load_file(cc_chef.CHEF_RB_PATH) # the content of these keys is not expected to be rendered to tmpl - unrendered_keys = ('validation_cert',) - for k, v in cfg['chef'].items(): + unrendered_keys = ("validation_cert",) + for k, v in cfg["chef"].items(): if k in unrendered_keys: continue self.assertIn(v, c) @@ -174,7 +185,7 @@ class TestChef(FilesystemMockingTestCase): if k in unrendered_keys: continue # the value from the cfg overrides that in the default - val = cfg['chef'].get(k, v) + val = cfg["chef"].get(k, v) if isinstance(val, str): self.assertIn(val, c) c = util.load_file(cc_chef.CHEF_FB_PATH) @@ -185,64 +196,68 @@ class TestChef(FilesystemMockingTestCase): self.patchOS(self.tmp) cfg = { - 'chef': { - 'server_url': 'localhost', - 'validation_name': 'bob', - 'run_list': ['a', 'b', 'c'], - 'initial_attributes': { - 'c': 'd', - } + "chef": { + "server_url": "localhost", + "validation_name": "bob", + "run_list": ["a", "b", "c"], + "initial_attributes": { + "c": "d", + }, }, } - cc_chef.handle('chef', cfg, get_cloud(), LOG, []) + cc_chef.handle("chef", cfg, get_cloud(), LOG, []) c = util.load_file(cc_chef.CHEF_FB_PATH) self.assertEqual( { - 'run_list': ['a', 'b', 'c'], - 'c': 'd', - }, json.loads(c)) + 
"run_list": ["a", "b", "c"], + "c": "d", + }, + json.loads(c), + ) - @skipIf(not os.path.isfile(CLIENT_TEMPL), - CLIENT_TEMPL + " is not available") + @skipIf( + not os.path.isfile(CLIENT_TEMPL), CLIENT_TEMPL + " is not available" + ) def test_template_deletes(self): tpl_file = util.load_file(CLIENT_TEMPL) self.patchUtils(self.tmp) self.patchOS(self.tmp) - util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file) + util.write_file("/etc/cloud/templates/chef_client.rb.tmpl", tpl_file) cfg = { - 'chef': { - 'server_url': 'localhost', - 'validation_name': 'bob', - 'json_attribs': None, - 'show_time': None, + "chef": { + "server_url": "localhost", + "validation_name": "bob", + "json_attribs": None, + "show_time": None, }, } - cc_chef.handle('chef', cfg, get_cloud(), LOG, []) + cc_chef.handle("chef", cfg, get_cloud(), LOG, []) c = util.load_file(cc_chef.CHEF_RB_PATH) - self.assertNotIn('json_attribs', c) - self.assertNotIn('Formatter.show_time', c) + self.assertNotIn("json_attribs", c) + self.assertNotIn("Formatter.show_time", c) - @skipIf(not os.path.isfile(CLIENT_TEMPL), - CLIENT_TEMPL + " is not available") + @skipIf( + not os.path.isfile(CLIENT_TEMPL), CLIENT_TEMPL + " is not available" + ) def test_validation_cert_and_validation_key(self): # test validation_cert content is written to validation_key path tpl_file = util.load_file(CLIENT_TEMPL) self.patchUtils(self.tmp) self.patchOS(self.tmp) - util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file) - v_path = '/etc/chef/vkey.pem' - v_cert = 'this is my cert' + util.write_file("/etc/cloud/templates/chef_client.rb.tmpl", tpl_file) + v_path = "/etc/chef/vkey.pem" + v_cert = "this is my cert" cfg = { - 'chef': { - 'server_url': 'localhost', - 'validation_name': 'bob', - 'validation_key': v_path, - 'validation_cert': v_cert + "chef": { + "server_url": "localhost", + "validation_name": "bob", + "validation_key": v_path, + "validation_cert": v_cert, }, } - cc_chef.handle('chef', cfg, get_cloud(), LOG, []) + cc_chef.handle("chef", cfg, get_cloud(), LOG, []) content = util.load_file(cc_chef.CHEF_RB_PATH) self.assertIn(v_path, content) util.load_file(v_path) @@ -254,23 +269,24 @@ class TestChef(FilesystemMockingTestCase): self.patchUtils(self.tmp) self.patchOS(self.tmp) - v_path = '/etc/chef/vkey.pem' + v_path = "/etc/chef/vkey.pem" v_cert = "system" expected_cert = "this is the system file certificate" cfg = { - 'chef': { - 'server_url': 'localhost', - 'validation_name': 'bob', - 'validation_key': v_path, - 'validation_cert': v_cert + "chef": { + "server_url": "localhost", + "validation_name": "bob", + "validation_key": v_path, + "validation_cert": v_cert, }, } - util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file) + util.write_file("/etc/cloud/templates/chef_client.rb.tmpl", tpl_file) util.write_file(v_path, expected_cert) - cc_chef.handle('chef', cfg, get_cloud(), LOG, []) + cc_chef.handle("chef", cfg, get_cloud(), LOG, []) content = util.load_file(cc_chef.CHEF_RB_PATH) self.assertIn(v_path, content) util.load_file(v_path) self.assertEqual(expected_cert, util.load_file(v_path)) + # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_debug.py b/tests/unittests/config/test_cc_debug.py index 174f772f..79a88561 100644 --- a/tests/unittests/config/test_cc_debug.py +++ b/tests/unittests/config/test_cc_debug.py @@ -7,14 +7,13 @@ import tempfile from cloudinit import util from cloudinit.config import cc_debug -from tests.unittests.helpers import (FilesystemMockingTestCase, mock) - +from 
tests.unittests.helpers import FilesystemMockingTestCase, mock from tests.unittests.util import get_cloud LOG = logging.getLogger(__name__) -@mock.patch('cloudinit.distros.debian.read_system_locale') +@mock.patch("cloudinit.distros.debian.read_system_locale") class TestDebug(FilesystemMockingTestCase): def setUp(self): super(TestDebug, self).setUp() @@ -23,37 +22,39 @@ class TestDebug(FilesystemMockingTestCase): self.patchUtils(self.new_root) def test_debug_write(self, m_locale): - m_locale.return_value = 'en_US.UTF-8' + m_locale.return_value = "en_US.UTF-8" cfg = { - 'abc': '123', - 'c': '\u20a0', - 'debug': { - 'verbose': True, + "abc": "123", + "c": "\u20a0", + "debug": { + "verbose": True, # Does not actually write here due to mocking... - 'output': '/var/log/cloud-init-debug.log', + "output": "/var/log/cloud-init-debug.log", }, } cc = get_cloud() - cc_debug.handle('cc_debug', cfg, cc, LOG, []) - contents = util.load_file('/var/log/cloud-init-debug.log') + cc_debug.handle("cc_debug", cfg, cc, LOG, []) + contents = util.load_file("/var/log/cloud-init-debug.log") # Some basic sanity tests... self.assertNotEqual(0, len(contents)) for k in cfg.keys(): self.assertIn(k, contents) def test_debug_no_write(self, m_locale): - m_locale.return_value = 'en_US.UTF-8' + m_locale.return_value = "en_US.UTF-8" cfg = { - 'abc': '123', - 'debug': { - 'verbose': False, + "abc": "123", + "debug": { + "verbose": False, # Does not actually write here due to mocking... - 'output': '/var/log/cloud-init-debug.log', + "output": "/var/log/cloud-init-debug.log", }, } cc = get_cloud() - cc_debug.handle('cc_debug', cfg, cc, LOG, []) - self.assertRaises(IOError, - util.load_file, '/var/log/cloud-init-debug.log') + cc_debug.handle("cc_debug", cfg, cc, LOG, []) + self.assertRaises( + IOError, util.load_file, "/var/log/cloud-init-debug.log" + ) + # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_disable_ec2_metadata.py b/tests/unittests/config/test_cc_disable_ec2_metadata.py index 7a794845..3c3313a7 100644 --- a/tests/unittests/config/test_cc_disable_ec2_metadata.py +++ b/tests/unittests/config/test_cc_disable_ec2_metadata.py @@ -2,47 +2,49 @@ """Tests cc_disable_ec2_metadata handler""" -import cloudinit.config.cc_disable_ec2_metadata as ec2_meta +import logging +import cloudinit.config.cc_disable_ec2_metadata as ec2_meta from tests.unittests.helpers import CiTestCase, mock -import logging - LOG = logging.getLogger(__name__) -DISABLE_CFG = {'disable_ec2_metadata': 'true'} +DISABLE_CFG = {"disable_ec2_metadata": "true"} class TestEC2MetadataRoute(CiTestCase): - - @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which') - @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp') + @mock.patch("cloudinit.config.cc_disable_ec2_metadata.subp.which") + @mock.patch("cloudinit.config.cc_disable_ec2_metadata.subp.subp") def test_disable_ifconfig(self, m_subp, m_which): """Set the route if ifconfig command is available""" - m_which.side_effect = lambda x: x if x == 'ifconfig' else None - ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None) + m_which.side_effect = lambda x: x if x == "ifconfig" else None + ec2_meta.handle("foo", DISABLE_CFG, None, LOG, None) m_subp.assert_called_with( - ['route', 'add', '-host', '169.254.169.254', 'reject'], - capture=False) + ["route", "add", "-host", "169.254.169.254", "reject"], + capture=False, + ) - @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which') - @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp') + 
@mock.patch("cloudinit.config.cc_disable_ec2_metadata.subp.which") + @mock.patch("cloudinit.config.cc_disable_ec2_metadata.subp.subp") def test_disable_ip(self, m_subp, m_which): """Set the route if ip command is available""" - m_which.side_effect = lambda x: x if x == 'ip' else None - ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None) + m_which.side_effect = lambda x: x if x == "ip" else None + ec2_meta.handle("foo", DISABLE_CFG, None, LOG, None) m_subp.assert_called_with( - ['ip', 'route', 'add', 'prohibit', '169.254.169.254'], - capture=False) + ["ip", "route", "add", "prohibit", "169.254.169.254"], + capture=False, + ) - @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which') - @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp') + @mock.patch("cloudinit.config.cc_disable_ec2_metadata.subp.which") + @mock.patch("cloudinit.config.cc_disable_ec2_metadata.subp.subp") def test_disable_no_tool(self, m_subp, m_which): """Log error when neither route nor ip commands are available""" m_which.return_value = None # Find neither ifconfig nor ip - ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None) + ec2_meta.handle("foo", DISABLE_CFG, None, LOG, None) self.assertEqual( - [mock.call('ip'), mock.call('ifconfig')], m_which.call_args_list) + [mock.call("ip"), mock.call("ifconfig")], m_which.call_args_list + ) m_subp.assert_not_called() + # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_disk_setup.py b/tests/unittests/config/test_cc_disk_setup.py index fa565559..8a8d7195 100644 --- a/tests/unittests/config/test_cc_disk_setup.py +++ b/tests/unittests/config/test_cc_disk_setup.py @@ -3,19 +3,20 @@ import random from cloudinit.config import cc_disk_setup -from tests.unittests.helpers import CiTestCase, ExitStack, mock, TestCase +from tests.unittests.helpers import CiTestCase, ExitStack, TestCase, mock class TestIsDiskUsed(TestCase): - def setUp(self): super(TestIsDiskUsed, self).setUp() self.patches = ExitStack() - mod_name = 'cloudinit.config.cc_disk_setup' + mod_name = "cloudinit.config.cc_disk_setup" self.enumerate_disk = self.patches.enter_context( - mock.patch('{0}.enumerate_disk'.format(mod_name))) + mock.patch("{0}.enumerate_disk".format(mod_name)) + ) self.check_fs = self.patches.enter_context( - mock.patch('{0}.check_fs'.format(mod_name))) + mock.patch("{0}.check_fs".format(mod_name)) + ) def tearDown(self): super(TestIsDiskUsed, self).tearDown() @@ -29,7 +30,10 @@ class TestIsDiskUsed(TestCase): def test_valid_filesystem_returns_true(self): self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1)) self.check_fs.return_value = ( - mock.MagicMock(), 'ext4', mock.MagicMock()) + mock.MagicMock(), + "ext4", + mock.MagicMock(), + ) self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock())) def test_one_child_nodes_and_no_fs_returns_false(self): @@ -39,12 +43,12 @@ class TestIsDiskUsed(TestCase): class TestGetMbrHddSize(TestCase): - def setUp(self): super(TestGetMbrHddSize, self).setUp() self.patches = ExitStack() self.subp = self.patches.enter_context( - mock.patch.object(cc_disk_setup.subp, 'subp')) + mock.patch.object(cc_disk_setup.subp, "subp") + ) def tearDown(self): super(TestGetMbrHddSize, self).tearDown() @@ -53,11 +57,11 @@ class TestGetMbrHddSize(TestCase): def _configure_subp_mock(self, hdd_size_in_bytes, sector_size_in_bytes): def _subp(cmd, *args, **kwargs): self.assertEqual(3, len(cmd)) - if '--getsize64' in cmd: + if "--getsize64" in cmd: return hdd_size_in_bytes, None - elif '--getss' in cmd: + elif "--getss" in cmd: return 
sector_size_in_bytes, None - raise Exception('Unexpected blockdev command called') + raise Exception("Unexpected blockdev command called") self.subp.side_effect = _subp @@ -65,8 +69,9 @@ class TestGetMbrHddSize(TestCase): size_in_bytes = random.randint(10000, 10000000) * 512 size_in_sectors = size_in_bytes / sector_size self._configure_subp_mock(size_in_bytes, sector_size) - self.assertEqual(size_in_sectors, - cc_disk_setup.get_hdd_size('/dev/sda1')) + self.assertEqual( + size_in_sectors, cc_disk_setup.get_hdd_size("/dev/sda1") + ) def test_size_for_512_byte_sectors(self): self._test_for_sector_size(512) @@ -82,98 +87,116 @@ class TestGetMbrHddSize(TestCase): class TestGetPartitionMbrLayout(TestCase): - def test_single_partition_using_boolean(self): - self.assertEqual('0,', - cc_disk_setup.get_partition_mbr_layout(1000, True)) + self.assertEqual( + "0,", cc_disk_setup.get_partition_mbr_layout(1000, True) + ) def test_single_partition_using_list(self): disk_size = random.randint(1000000, 1000000000000) self.assertEqual( - ',,83', - cc_disk_setup.get_partition_mbr_layout(disk_size, [100])) + ",,83", cc_disk_setup.get_partition_mbr_layout(disk_size, [100]) + ) def test_half_and_half(self): disk_size = random.randint(1000000, 1000000000000) expected_partition_size = int(float(disk_size) / 2) self.assertEqual( - ',{0},83\n,,83'.format(expected_partition_size), - cc_disk_setup.get_partition_mbr_layout(disk_size, [50, 50])) + ",{0},83\n,,83".format(expected_partition_size), + cc_disk_setup.get_partition_mbr_layout(disk_size, [50, 50]), + ) def test_thirds_with_different_partition_type(self): disk_size = random.randint(1000000, 1000000000000) expected_partition_size = int(float(disk_size) * 0.33) self.assertEqual( - ',{0},83\n,,82'.format(expected_partition_size), - cc_disk_setup.get_partition_mbr_layout(disk_size, [33, [66, 82]])) + ",{0},83\n,,82".format(expected_partition_size), + cc_disk_setup.get_partition_mbr_layout(disk_size, [33, [66, 82]]), + ) class TestUpdateFsSetupDevices(TestCase): def test_regression_1634678(self): # Cf. 
https://bugs.launchpad.net/cloud-init/+bug/1634678 fs_setup = { - 'partition': 'auto', - 'device': '/dev/xvdb1', - 'overwrite': False, - 'label': 'test', - 'filesystem': 'ext4' + "partition": "auto", + "device": "/dev/xvdb1", + "overwrite": False, + "label": "test", + "filesystem": "ext4", } - cc_disk_setup.update_fs_setup_devices([fs_setup], - lambda device: device) + cc_disk_setup.update_fs_setup_devices( + [fs_setup], lambda device: device + ) - self.assertEqual({ - '_origname': '/dev/xvdb1', - 'partition': 'auto', - 'device': '/dev/xvdb1', - 'overwrite': False, - 'label': 'test', - 'filesystem': 'ext4' - }, fs_setup) + self.assertEqual( + { + "_origname": "/dev/xvdb1", + "partition": "auto", + "device": "/dev/xvdb1", + "overwrite": False, + "label": "test", + "filesystem": "ext4", + }, + fs_setup, + ) def test_dotted_devname(self): fs_setup = { - 'partition': 'auto', - 'device': 'ephemeral0.0', - 'label': 'test2', - 'filesystem': 'xfs' + "partition": "auto", + "device": "ephemeral0.0", + "label": "test2", + "filesystem": "xfs", } - cc_disk_setup.update_fs_setup_devices([fs_setup], - lambda device: device) + cc_disk_setup.update_fs_setup_devices( + [fs_setup], lambda device: device + ) - self.assertEqual({ - '_origname': 'ephemeral0.0', - '_partition': 'auto', - 'partition': '0', - 'device': 'ephemeral0', - 'label': 'test2', - 'filesystem': 'xfs' - }, fs_setup) + self.assertEqual( + { + "_origname": "ephemeral0.0", + "_partition": "auto", + "partition": "0", + "device": "ephemeral0", + "label": "test2", + "filesystem": "xfs", + }, + fs_setup, + ) def test_dotted_devname_populates_partition(self): fs_setup = { - 'device': 'ephemeral0.1', - 'label': 'test2', - 'filesystem': 'xfs' + "device": "ephemeral0.1", + "label": "test2", + "filesystem": "xfs", } - cc_disk_setup.update_fs_setup_devices([fs_setup], - lambda device: device) - self.assertEqual({ - '_origname': 'ephemeral0.1', - 'device': 'ephemeral0', - 'partition': '1', - 'label': 'test2', - 'filesystem': 'xfs' - }, fs_setup) - - -@mock.patch('cloudinit.config.cc_disk_setup.assert_and_settle_device', - return_value=None) -@mock.patch('cloudinit.config.cc_disk_setup.find_device_node', - return_value=('/dev/xdb1', False)) -@mock.patch('cloudinit.config.cc_disk_setup.device_type', return_value=None) -@mock.patch('cloudinit.config.cc_disk_setup.subp.subp', return_value=('', '')) + cc_disk_setup.update_fs_setup_devices( + [fs_setup], lambda device: device + ) + self.assertEqual( + { + "_origname": "ephemeral0.1", + "device": "ephemeral0", + "partition": "1", + "label": "test2", + "filesystem": "xfs", + }, + fs_setup, + ) + + +@mock.patch( + "cloudinit.config.cc_disk_setup.assert_and_settle_device", + return_value=None, +) +@mock.patch( + "cloudinit.config.cc_disk_setup.find_device_node", + return_value=("/dev/xdb1", False), +) +@mock.patch("cloudinit.config.cc_disk_setup.device_type", return_value=None) +@mock.patch("cloudinit.config.cc_disk_setup.subp.subp", return_value=("", "")) class TestMkfsCommandHandling(CiTestCase): with_logs = True @@ -181,63 +204,84 @@ class TestMkfsCommandHandling(CiTestCase): def test_with_cmd(self, subp, *args): """mkfs honors cmd and logs warnings when extra_opts or overwrite are provided.""" - cc_disk_setup.mkfs({ - 'cmd': 'mkfs -t %(filesystem)s -L %(label)s %(device)s', - 'filesystem': 'ext4', - 'device': '/dev/xdb1', - 'label': 'with_cmd', - 'extra_opts': ['should', 'generate', 'warning'], - 'overwrite': 'should generate warning too' - }) + cc_disk_setup.mkfs( + { + "cmd": "mkfs -t %(filesystem)s -L 
%(label)s %(device)s", + "filesystem": "ext4", + "device": "/dev/xdb1", + "label": "with_cmd", + "extra_opts": ["should", "generate", "warning"], + "overwrite": "should generate warning too", + } + ) self.assertIn( - 'extra_opts ' + - 'ignored because cmd was specified: mkfs -t ext4 -L with_cmd ' + - '/dev/xdb1', - self.logs.getvalue()) + "extra_opts " + + "ignored because cmd was specified: mkfs -t ext4 -L with_cmd " + + "/dev/xdb1", + self.logs.getvalue(), + ) self.assertIn( - 'overwrite ' + - 'ignored because cmd was specified: mkfs -t ext4 -L with_cmd ' + - '/dev/xdb1', - self.logs.getvalue()) + "overwrite " + + "ignored because cmd was specified: mkfs -t ext4 -L with_cmd " + + "/dev/xdb1", + self.logs.getvalue(), + ) subp.assert_called_once_with( - 'mkfs -t ext4 -L with_cmd /dev/xdb1', shell=True) + "mkfs -t ext4 -L with_cmd /dev/xdb1", shell=True + ) - @mock.patch('cloudinit.config.cc_disk_setup.subp.which') + @mock.patch("cloudinit.config.cc_disk_setup.subp.which") def test_overwrite_and_extra_opts_without_cmd(self, m_which, subp, *args): """mkfs observes extra_opts and overwrite settings when cmd is not present.""" - m_which.side_effect = lambda p: {'mkfs.ext4': '/sbin/mkfs.ext4'}[p] - cc_disk_setup.mkfs({ - 'filesystem': 'ext4', - 'device': '/dev/xdb1', - 'label': 'without_cmd', - 'extra_opts': ['are', 'added'], - 'overwrite': True - }) + m_which.side_effect = lambda p: {"mkfs.ext4": "/sbin/mkfs.ext4"}[p] + cc_disk_setup.mkfs( + { + "filesystem": "ext4", + "device": "/dev/xdb1", + "label": "without_cmd", + "extra_opts": ["are", "added"], + "overwrite": True, + } + ) subp.assert_called_once_with( - ['/sbin/mkfs.ext4', '/dev/xdb1', - '-L', 'without_cmd', '-F', 'are', 'added'], - shell=False) - - @mock.patch('cloudinit.config.cc_disk_setup.subp.which') + [ + "/sbin/mkfs.ext4", + "/dev/xdb1", + "-L", + "without_cmd", + "-F", + "are", + "added", + ], + shell=False, + ) + + @mock.patch("cloudinit.config.cc_disk_setup.subp.which") def test_mkswap(self, m_which, subp, *args): """mkfs observes extra_opts and overwrite settings when cmd is not present.""" - m_which.side_effect = iter([None, '/sbin/mkswap']) - cc_disk_setup.mkfs({ - 'filesystem': 'swap', - 'device': '/dev/xdb1', - 'label': 'swap', - 'overwrite': True, - }) - - self.assertEqual([mock.call('mkfs.swap'), mock.call('mkswap')], - m_which.call_args_list) + m_which.side_effect = iter([None, "/sbin/mkswap"]) + cc_disk_setup.mkfs( + { + "filesystem": "swap", + "device": "/dev/xdb1", + "label": "swap", + "overwrite": True, + } + ) + + self.assertEqual( + [mock.call("mkfs.swap"), mock.call("mkswap")], + m_which.call_args_list, + ) subp.assert_called_once_with( - ['/sbin/mkswap', '/dev/xdb1', '-L', 'swap', '-f'], shell=False) + ["/sbin/mkswap", "/dev/xdb1", "-L", "swap", "-f"], shell=False + ) + # # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_growpart.py b/tests/unittests/config/test_cc_growpart.py index b007f24f..ba66f136 100644 --- a/tests/unittests/config/test_cc_growpart.py +++ b/tests/unittests/config/test_cc_growpart.py @@ -1,21 +1,18 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-from cloudinit import cloud -from cloudinit.config import cc_growpart -from cloudinit import subp -from cloudinit import temp_utils - -from tests.unittests.helpers import TestCase - import errno import logging import os -import shutil import re +import shutil +import stat import unittest from contextlib import ExitStack from unittest import mock -import stat + +from cloudinit import cloud, subp, temp_utils +from cloudinit.config import cc_growpart +from tests.unittests.helpers import TestCase # growpart: # mode: auto # off, on, auto, 'growpart' @@ -62,7 +59,8 @@ usage: gpart add -t type [-a alignment] [-b start] <SNIP> geom class Dir: - '''Stub object''' + """Stub object""" + def __init__(self, name): self.name = name self.st_mode = name @@ -75,9 +73,13 @@ class Dir: class Scanner: - '''Stub object''' + """Stub object""" + def __enter__(self): - return (Dir(''), Dir(''),) + return ( + Dir(""), + Dir(""), + ) def __exit__(self, *args): pass @@ -97,11 +99,12 @@ class TestDisabled(unittest.TestCase): # Test that nothing is done if mode is off. # this really only verifies that resizer_factory isn't called - config = {'growpart': {'mode': 'off'}} + config = {"growpart": {"mode": "off"}} - with mock.patch.object(cc_growpart, 'resizer_factory') as mockobj: - self.handle(self.name, config, self.cloud_init, self.log, - self.args) + with mock.patch.object(cc_growpart, "resizer_factory") as mockobj: + self.handle( + self.name, config, self.cloud_init, self.log, self.args + ) self.assertEqual(mockobj.call_count, 0) @@ -116,9 +119,9 @@ class TestConfig(TestCase): self.cloud_init = None self.handle = cc_growpart.handle - self.tmppath = '/tmp/cloudinit-test-file' - self.tmpdir = os.scandir('/tmp') - self.tmpfile = open(self.tmppath, 'w') + self.tmppath = "/tmp/cloudinit-test-file" + self.tmpdir = os.scandir("/tmp") + self.tmpfile = open(self.tmppath, "w") def tearDown(self): self.tmpfile.close() @@ -127,110 +130,143 @@ class TestConfig(TestCase): @mock.patch.dict("os.environ", clear=True) def test_no_resizers_auto_is_fine(self): with mock.patch.object( - subp, 'subp', - return_value=(HELP_GROWPART_NO_RESIZE, "")) as mockobj: - - config = {'growpart': {'mode': 'auto'}} - self.handle(self.name, config, self.cloud_init, self.log, - self.args) - - mockobj.assert_has_calls([ - mock.call(['growpart', '--help'], env={'LANG': 'C'}), - mock.call(['gpart', 'help'], env={'LANG': 'C'}, rcs=[0, 1])]) + subp, "subp", return_value=(HELP_GROWPART_NO_RESIZE, "") + ) as mockobj: + + config = {"growpart": {"mode": "auto"}} + self.handle( + self.name, config, self.cloud_init, self.log, self.args + ) + + mockobj.assert_has_calls( + [ + mock.call(["growpart", "--help"], env={"LANG": "C"}), + mock.call( + ["gpart", "help"], env={"LANG": "C"}, rcs=[0, 1] + ), + ] + ) @mock.patch.dict("os.environ", clear=True) def test_no_resizers_mode_growpart_is_exception(self): with mock.patch.object( - subp, 'subp', - return_value=(HELP_GROWPART_NO_RESIZE, "")) as mockobj: - config = {'growpart': {'mode': "growpart"}} + subp, "subp", return_value=(HELP_GROWPART_NO_RESIZE, "") + ) as mockobj: + config = {"growpart": {"mode": "growpart"}} self.assertRaises( - ValueError, self.handle, self.name, config, - self.cloud_init, self.log, self.args) + ValueError, + self.handle, + self.name, + config, + self.cloud_init, + self.log, + self.args, + ) mockobj.assert_called_once_with( - ['growpart', '--help'], env={'LANG': 'C'}) + ["growpart", "--help"], env={"LANG": "C"} + ) @mock.patch.dict("os.environ", clear=True) def 
test_mode_auto_prefers_growpart(self): with mock.patch.object( - subp, 'subp', - return_value=(HELP_GROWPART_RESIZE, "")) as mockobj: + subp, "subp", return_value=(HELP_GROWPART_RESIZE, "") + ) as mockobj: ret = cc_growpart.resizer_factory(mode="auto") self.assertIsInstance(ret, cc_growpart.ResizeGrowPart) mockobj.assert_called_once_with( - ['growpart', '--help'], env={'LANG': 'C'}) - - @mock.patch.dict("os.environ", {'LANG': 'cs_CZ.UTF-8'}, clear=True) - @mock.patch.object(temp_utils, 'mkdtemp', return_value='/tmp/much-random') - @mock.patch.object(stat, 'S_ISDIR', return_value=False) - @mock.patch.object(os.path, 'samestat', return_value=True) - @mock.patch.object(os.path, "join", return_value='/tmp') - @mock.patch.object(os, 'scandir', return_value=Scanner()) - @mock.patch.object(os, 'mkdir') - @mock.patch.object(os, 'unlink') - @mock.patch.object(os, 'rmdir') - @mock.patch.object(os, 'open', return_value=1) - @mock.patch.object(os, 'close') - @mock.patch.object(shutil, 'rmtree') - @mock.patch.object(os, 'lseek', return_value=1024) - @mock.patch.object(os, 'lstat', return_value='interesting metadata') + ["growpart", "--help"], env={"LANG": "C"} + ) + + @mock.patch.dict("os.environ", {"LANG": "cs_CZ.UTF-8"}, clear=True) + @mock.patch.object(temp_utils, "mkdtemp", return_value="/tmp/much-random") + @mock.patch.object(stat, "S_ISDIR", return_value=False) + @mock.patch.object(os.path, "samestat", return_value=True) + @mock.patch.object(os.path, "join", return_value="/tmp") + @mock.patch.object(os, "scandir", return_value=Scanner()) + @mock.patch.object(os, "mkdir") + @mock.patch.object(os, "unlink") + @mock.patch.object(os, "rmdir") + @mock.patch.object(os, "open", return_value=1) + @mock.patch.object(os, "close") + @mock.patch.object(shutil, "rmtree") + @mock.patch.object(os, "lseek", return_value=1024) + @mock.patch.object(os, "lstat", return_value="interesting metadata") def test_force_lang_check_tempfile(self, *args, **kwargs): with mock.patch.object( - subp, - 'subp', - return_value=(HELP_GROWPART_RESIZE, "")) as mockobj: + subp, "subp", return_value=(HELP_GROWPART_RESIZE, "") + ) as mockobj: ret = cc_growpart.resizer_factory(mode="auto") self.assertIsInstance(ret, cc_growpart.ResizeGrowPart) - diskdev = '/dev/sdb' + diskdev = "/dev/sdb" partnum = 1 - partdev = '/dev/sdb' + partdev = "/dev/sdb" ret.resize(diskdev, partnum, partdev) - mockobj.assert_has_calls([ - mock.call( - ["growpart", '--dry-run', diskdev, partnum], - env={'LANG': 'C', 'TMPDIR': '/tmp'}), - mock.call( - ["growpart", diskdev, partnum], - env={'LANG': 'C', 'TMPDIR': '/tmp'}), - ]) - - @mock.patch.dict("os.environ", {'LANG': 'cs_CZ.UTF-8'}, clear=True) + mockobj.assert_has_calls( + [ + mock.call( + ["growpart", "--dry-run", diskdev, partnum], + env={"LANG": "C", "TMPDIR": "/tmp"}, + ), + mock.call( + ["growpart", diskdev, partnum], + env={"LANG": "C", "TMPDIR": "/tmp"}, + ), + ] + ) + + @mock.patch.dict("os.environ", {"LANG": "cs_CZ.UTF-8"}, clear=True) def test_mode_auto_falls_back_to_gpart(self): with mock.patch.object( - subp, 'subp', - return_value=("", HELP_GPART)) as mockobj: + subp, "subp", return_value=("", HELP_GPART) + ) as mockobj: ret = cc_growpart.resizer_factory(mode="auto") self.assertIsInstance(ret, cc_growpart.ResizeGpart) - mockobj.assert_has_calls([ - mock.call(['growpart', '--help'], env={'LANG': 'C'}), - mock.call(['gpart', 'help'], env={'LANG': 'C'}, rcs=[0, 1])]) + mockobj.assert_has_calls( + [ + mock.call(["growpart", "--help"], env={"LANG": "C"}), + mock.call( + ["gpart", "help"], env={"LANG": 
"C"}, rcs=[0, 1] + ), + ] + ) def test_handle_with_no_growpart_entry(self): # if no 'growpart' entry in config, then mode=auto should be used myresizer = object() - retval = (("/", cc_growpart.RESIZE.CHANGED, "my-message",),) + retval = ( + ( + "/", + cc_growpart.RESIZE.CHANGED, + "my-message", + ), + ) with ExitStack() as mocks: factory = mocks.enter_context( - mock.patch.object(cc_growpart, 'resizer_factory', - return_value=myresizer)) + mock.patch.object( + cc_growpart, "resizer_factory", return_value=myresizer + ) + ) rsdevs = mocks.enter_context( - mock.patch.object(cc_growpart, 'resize_devices', - return_value=retval)) + mock.patch.object( + cc_growpart, "resize_devices", return_value=retval + ) + ) mocks.enter_context( - mock.patch.object(cc_growpart, 'RESIZERS', - (('mysizer', object),) - )) + mock.patch.object( + cc_growpart, "RESIZERS", (("mysizer", object),) + ) + ) self.handle(self.name, {}, self.cloud_init, self.log, self.args) - factory.assert_called_once_with('auto') - rsdevs.assert_called_once_with(myresizer, ['/']) + factory.assert_called_once_with("auto") + rsdevs.assert_called_once_with(myresizer, ["/"]) class TestResize(unittest.TestCase): @@ -244,9 +280,18 @@ class TestResize(unittest.TestCase): # this patches out devent2dev, os.stat, and device_part_info # so in the end, doesn't test a lot devs = ["/dev/XXda1", "/dev/YYda2"] - devstat_ret = Bunch(st_mode=25008, st_ino=6078, st_dev=5, - st_nlink=1, st_uid=0, st_gid=6, st_size=0, - st_atime=0, st_mtime=0, st_ctime=0) + devstat_ret = Bunch( + st_mode=25008, + st_ino=6078, + st_dev=5, + st_nlink=1, + st_uid=0, + st_gid=6, + st_size=0, + st_atime=0, + st_mtime=0, + st_ctime=0, + ) enoent = ["/dev/NOENT"] real_stat = os.stat resize_calls = [] @@ -280,12 +325,15 @@ class TestResize(unittest.TestCase): return f return None - self.assertEqual(cc_growpart.RESIZE.NOCHANGE, - find("/dev/XXda1", resized)[1]) - self.assertEqual(cc_growpart.RESIZE.CHANGED, - find("/dev/YYda2", resized)[1]) - self.assertEqual(cc_growpart.RESIZE.SKIPPED, - find(enoent[0], resized)[1]) + self.assertEqual( + cc_growpart.RESIZE.NOCHANGE, find("/dev/XXda1", resized)[1] + ) + self.assertEqual( + cc_growpart.RESIZE.CHANGED, find("/dev/YYda2", resized)[1] + ) + self.assertEqual( + cc_growpart.RESIZE.SKIPPED, find(enoent[0], resized)[1] + ) # self.assertEqual(resize_calls, # [("/dev/XXda", "1", "/dev/XXda1"), # ("/dev/YYda", "2", "/dev/YYda2")]) diff --git a/tests/unittests/config/test_cc_grub_dpkg.py b/tests/unittests/config/test_cc_grub_dpkg.py index 99c05bb5..5151a7b5 100644 --- a/tests/unittests/config/test_cc_grub_dpkg.py +++ b/tests/unittests/config/test_cc_grub_dpkg.py @@ -1,11 +1,12 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+from logging import Logger +from unittest import mock + import pytest -from unittest import mock -from logging import Logger -from cloudinit.subp import ProcessExecutionError from cloudinit.config.cc_grub_dpkg import fetch_idevs, handle +from cloudinit.subp import ProcessExecutionError class TestFetchIdevs: @@ -21,73 +22,78 @@ class TestFetchIdevs: ProcessExecutionError(reason=FileNotFoundError()), False, mock.call("'grub-probe' not found in $PATH"), - '', - '', + "", + "", ), # Inside a container, grub installed ( ProcessExecutionError(stderr="failed to get canonical path"), False, mock.call("grub-probe 'failed to get canonical path'"), - '', - '', + "", + "", ), # KVM Instance ( - ['/dev/vda'], + ["/dev/vda"], True, None, ( - '/dev/disk/by-path/pci-0000:00:00.0 ', - '/dev/disk/by-path/virtio-pci-0000:00:00.0 ' + "/dev/disk/by-path/pci-0000:00:00.0 ", + "/dev/disk/by-path/virtio-pci-0000:00:00.0 ", ), - '/dev/vda', + "/dev/vda", ), # Xen Instance ( - ['/dev/xvda'], + ["/dev/xvda"], True, None, - '', - '/dev/xvda', + "", + "/dev/xvda", ), # NVMe Hardware Instance ( - ['/dev/nvme1n1'], + ["/dev/nvme1n1"], True, None, ( - '/dev/disk/by-id/nvme-Company_hash000 ', - '/dev/disk/by-id/nvme-nvme.000-000-000-000-000 ', - '/dev/disk/by-path/pci-0000:00:00.0-nvme-0 ' + "/dev/disk/by-id/nvme-Company_hash000 ", + "/dev/disk/by-id/nvme-nvme.000-000-000-000-000 ", + "/dev/disk/by-path/pci-0000:00:00.0-nvme-0 ", ), - '/dev/disk/by-id/nvme-Company_hash000', + "/dev/disk/by-id/nvme-Company_hash000", ), # SCSI Hardware Instance ( - ['/dev/sda'], + ["/dev/sda"], True, None, ( - '/dev/disk/by-id/company-user-1 ', - '/dev/disk/by-id/scsi-0Company_user-1 ', - '/dev/disk/by-path/pci-0000:00:00.0-scsi-0:0:0:0 ' + "/dev/disk/by-id/company-user-1 ", + "/dev/disk/by-id/scsi-0Company_user-1 ", + "/dev/disk/by-path/pci-0000:00:00.0-scsi-0:0:0:0 ", ), - '/dev/disk/by-id/company-user-1', + "/dev/disk/by-id/company-user-1", ), ], ) @mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc") @mock.patch("cloudinit.config.cc_grub_dpkg.os.path.exists") @mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp") - def test_fetch_idevs(self, m_subp, m_exists, m_logexc, grub_output, - path_exists, expected_log_call, udevadm_output, - expected_idevs): + def test_fetch_idevs( + self, + m_subp, + m_exists, + m_logexc, + grub_output, + path_exists, + expected_log_call, + udevadm_output, + expected_idevs, + ): """Tests outputs from grub-probe and udevadm info against grub-dpkg""" - m_subp.side_effect = [ - grub_output, - ["".join(udevadm_output)] - ] + m_subp.side_effect = [grub_output, ["".join(udevadm_output)]] m_exists.return_value = path_exists log = mock.Mock(spec=Logger) idevs = fetch_idevs(log) @@ -106,67 +112,72 @@ class TestHandle: # No configuration None, None, - '/dev/disk/by-id/nvme-Company_hash000', + "/dev/disk/by-id/nvme-Company_hash000", ( "Setting grub debconf-set-selections with ", - "'/dev/disk/by-id/nvme-Company_hash000','false'" + "'/dev/disk/by-id/nvme-Company_hash000','false'", ), ), ( # idevs set, idevs_empty unset - '/dev/sda', + "/dev/sda", None, - '/dev/sda', + "/dev/sda", ( "Setting grub debconf-set-selections with ", - "'/dev/sda','false'" + "'/dev/sda','false'", ), ), ( # idevs unset, idevs_empty set None, - 'true', - '/dev/xvda', + "true", + "/dev/xvda", ( "Setting grub debconf-set-selections with ", - "'/dev/xvda','true'" + "'/dev/xvda','true'", ), ), ( # idevs set, idevs_empty set - '/dev/vda', - 'false', - '/dev/disk/by-id/company-user-1', + "/dev/vda", + "false", + "/dev/disk/by-id/company-user-1", ( 
"Setting grub debconf-set-selections with ", - "'/dev/vda','false'" + "'/dev/vda','false'", ), ), ( # idevs set, idevs_empty set # Respect what the user defines, even if its logically wrong - '/dev/nvme0n1', - 'true', - '', + "/dev/nvme0n1", + "true", + "", ( "Setting grub debconf-set-selections with ", - "'/dev/nvme0n1','true'" + "'/dev/nvme0n1','true'", ), - ) + ), ], ) @mock.patch("cloudinit.config.cc_grub_dpkg.fetch_idevs") @mock.patch("cloudinit.config.cc_grub_dpkg.util.get_cfg_option_str") @mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc") @mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp") - def test_handle(self, m_subp, m_logexc, m_get_cfg_str, m_fetch_idevs, - cfg_idevs, cfg_idevs_empty, fetch_idevs_output, - expected_log_output): + def test_handle( + self, + m_subp, + m_logexc, + m_get_cfg_str, + m_fetch_idevs, + cfg_idevs, + cfg_idevs_empty, + fetch_idevs_output, + expected_log_output, + ): """Test setting of correct debconf database entries""" - m_get_cfg_str.side_effect = [ - cfg_idevs, - cfg_idevs_empty - ] + m_get_cfg_str.side_effect = [cfg_idevs, cfg_idevs_empty] m_fetch_idevs.return_value = fetch_idevs_output log = mock.Mock(spec=Logger) handle(mock.Mock(), mock.Mock(), mock.Mock(), log, mock.Mock()) diff --git a/tests/unittests/config/test_cc_install_hotplug.py b/tests/unittests/config/test_cc_install_hotplug.py index 5d6b1e77..3bd44aba 100644 --- a/tests/unittests/config/test_cc_install_hotplug.py +++ b/tests/unittests/config/test_cc_install_hotplug.py @@ -5,28 +5,31 @@ from unittest import mock import pytest from cloudinit.config.cc_install_hotplug import ( - handle, HOTPLUG_UDEV_PATH, HOTPLUG_UDEV_RULES_TEMPLATE, + handle, ) from cloudinit.event import EventScope, EventType @pytest.yield_fixture() def mocks(): - m_update_enabled = mock.patch('cloudinit.stages.update_event_enabled') - m_write = mock.patch('cloudinit.util.write_file', autospec=True) - m_del = mock.patch('cloudinit.util.del_file', autospec=True) - m_subp = mock.patch('cloudinit.subp.subp') - m_which = mock.patch('cloudinit.subp.which', return_value=None) - m_path_exists = mock.patch('os.path.exists', return_value=False) + m_update_enabled = mock.patch("cloudinit.stages.update_event_enabled") + m_write = mock.patch("cloudinit.util.write_file", autospec=True) + m_del = mock.patch("cloudinit.util.del_file", autospec=True) + m_subp = mock.patch("cloudinit.subp.subp") + m_which = mock.patch("cloudinit.subp.which", return_value=None) + m_path_exists = mock.patch("os.path.exists", return_value=False) yield namedtuple( - 'Mocks', - 'm_update_enabled m_write m_del m_subp m_which m_path_exists' + "Mocks", "m_update_enabled m_write m_del m_subp m_which m_path_exists" )( - m_update_enabled.start(), m_write.start(), m_del.start(), - m_subp.start(), m_which.start(), m_path_exists.start() + m_update_enabled.start(), + m_write.start(), + m_del.start(), + m_subp.start(), + m_which.start(), + m_path_exists.start(), ) m_update_enabled.stop() @@ -38,11 +41,11 @@ def mocks(): class TestInstallHotplug: - @pytest.mark.parametrize('libexec_exists', [True, False]) + @pytest.mark.parametrize("libexec_exists", [True, False]) def test_rules_installed_when_supported_and_enabled( self, mocks, libexec_exists ): - mocks.m_which.return_value = 'udevadm' + mocks.m_which.return_value = "udevadm" mocks.m_update_enabled.return_value = True m_cloud = mock.MagicMock() m_cloud.datasource.get_supported_events.return_value = { @@ -53,16 +56,23 @@ class TestInstallHotplug: libexecdir = "/usr/libexec/cloud-init" else: libexecdir = 
"/usr/lib/cloud-init" - with mock.patch('os.path.exists', return_value=libexec_exists): + with mock.patch("os.path.exists", return_value=libexec_exists): handle(None, {}, m_cloud, mock.Mock(), None) mocks.m_write.assert_called_once_with( filename=HOTPLUG_UDEV_PATH, content=HOTPLUG_UDEV_RULES_TEMPLATE.format( - libexecdir=libexecdir), + libexecdir=libexecdir + ), ) - assert mocks.m_subp.call_args_list == [mock.call([ - 'udevadm', 'control', '--reload-rules', - ])] + assert mocks.m_subp.call_args_list == [ + mock.call( + [ + "udevadm", + "control", + "--reload-rules", + ] + ) + ] assert mocks.m_del.call_args_list == [] def test_rules_not_installed_when_unsupported(self, mocks): @@ -95,9 +105,15 @@ class TestInstallHotplug: handle(None, {}, m_cloud, mock.Mock(), None) mocks.m_del.assert_called_with(HOTPLUG_UDEV_PATH) - assert mocks.m_subp.call_args_list == [mock.call([ - 'udevadm', 'control', '--reload-rules', - ])] + assert mocks.m_subp.call_args_list == [ + mock.call( + [ + "udevadm", + "control", + "--reload-rules", + ] + ) + ] assert mocks.m_write.call_args_list == [] def test_rules_not_installed_when_no_udevadm(self, mocks): diff --git a/tests/unittests/config/test_cc_keys_to_console.py b/tests/unittests/config/test_cc_keys_to_console.py index 4083fc54..9efc2b48 100644 --- a/tests/unittests/config/test_cc_keys_to_console.py +++ b/tests/unittests/config/test_cc_keys_to_console.py @@ -16,12 +16,18 @@ class TestHandle: @mock.patch("cloudinit.config.cc_keys_to_console.util.multi_log") @mock.patch("cloudinit.config.cc_keys_to_console.os.path.exists") @mock.patch("cloudinit.config.cc_keys_to_console.subp.subp") - @pytest.mark.parametrize("cfg,subp_called", [ - ({}, True), # Default to emitting keys - ({"ssh": {}}, True), # Default even if we have the parent key - ({"ssh": {"emit_keys_to_console": True}}, True), # Explicitly enabled - ({"ssh": {"emit_keys_to_console": False}}, False), # Disabled - ]) + @pytest.mark.parametrize( + "cfg,subp_called", + [ + ({}, True), # Default to emitting keys + ({"ssh": {}}, True), # Default even if we have the parent key + ( + {"ssh": {"emit_keys_to_console": True}}, + True, + ), # Explicitly enabled + ({"ssh": {"emit_keys_to_console": False}}, False), # Disabled + ], + ) def test_emit_keys_to_console_config( self, m_subp, m_path_exists, _m_multi_log, cfg, subp_called ): diff --git a/tests/unittests/config/test_cc_landscape.py b/tests/unittests/config/test_cc_landscape.py index 07b3f899..efddc1b6 100644 --- a/tests/unittests/config/test_cc_landscape.py +++ b/tests/unittests/config/test_cc_landscape.py @@ -1,12 +1,15 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import logging
+
 from configobj import ConfigObj
-from cloudinit.config import cc_landscape
 from cloudinit import util
-from tests.unittests.helpers import (FilesystemMockingTestCase, mock,
-                                     wrap_and_call)
-
+from cloudinit.config import cc_landscape
+from tests.unittests.helpers import (
+    FilesystemMockingTestCase,
+    mock,
+    wrap_and_call,
+)
 from tests.unittests.util import get_cloud
 
 LOG = logging.getLogger(__name__)
@@ -19,108 +22,149 @@ class TestLandscape(FilesystemMockingTestCase):
     def setUp(self):
         super(TestLandscape, self).setUp()
         self.new_root = self.tmp_dir()
-        self.conf = self.tmp_path('client.conf', self.new_root)
-        self.default_file = self.tmp_path('default_landscape', self.new_root)
+        self.conf = self.tmp_path("client.conf", self.new_root)
+        self.default_file = self.tmp_path("default_landscape", self.new_root)
         self.patchUtils(self.new_root)
         self.add_patch(
-            'cloudinit.distros.ubuntu.Distro.install_packages',
-            'm_install_packages'
+            "cloudinit.distros.ubuntu.Distro.install_packages",
+            "m_install_packages",
         )
 
     def test_handler_skips_empty_landscape_cloudconfig(self):
         """Empty landscape cloud-config section does no work."""
-        mycloud = get_cloud('ubuntu')
+        mycloud = get_cloud("ubuntu")
         mycloud.distro = mock.MagicMock()
-        cfg = {'landscape': {}}
-        cc_landscape.handle('notimportant', cfg, mycloud, LOG, None)
+        cfg = {"landscape": {}}
+        cc_landscape.handle("notimportant", cfg, mycloud, LOG, None)
         self.assertFalse(mycloud.distro.install_packages.called)
 
     def test_handler_error_on_invalid_landscape_type(self):
         """Raise an error when landscape configuration option is invalid."""
-        mycloud = get_cloud('ubuntu')
-        cfg = {'landscape': 'wrongtype'}
+        mycloud = get_cloud("ubuntu")
+        cfg = {"landscape": "wrongtype"}
         with self.assertRaises(RuntimeError) as context_manager:
-            cc_landscape.handle('notimportant', cfg, mycloud, LOG, None)
+            cc_landscape.handle("notimportant", cfg, mycloud, LOG, None)
         self.assertIn(
             "'landscape' key existed in config, but not a dict",
-            str(context_manager.exception))
+            str(context_manager.exception),
+        )
 
-    @mock.patch('cloudinit.config.cc_landscape.subp')
+    @mock.patch("cloudinit.config.cc_landscape.subp")
     def test_handler_restarts_landscape_client(self, m_subp):
         """handler restarts landscape-client after install."""
-        mycloud = get_cloud('ubuntu')
-        cfg = {'landscape': {'client': {}}}
+        mycloud = get_cloud("ubuntu")
+        cfg = {"landscape": {"client": {}}}
         wrap_and_call(
-            'cloudinit.config.cc_landscape',
-            {'LSC_CLIENT_CFG_FILE': {'new': self.conf}},
-            cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None)
+            "cloudinit.config.cc_landscape",
+            {"LSC_CLIENT_CFG_FILE": {"new": self.conf}},
+            cc_landscape.handle,
+            "notimportant",
+            cfg,
+            mycloud,
+            LOG,
+            None,
+        )
         self.assertEqual(
-            [mock.call(['service', 'landscape-client', 'restart'])],
-            m_subp.subp.call_args_list)
+            [mock.call(["service", "landscape-client", "restart"])],
+            m_subp.subp.call_args_list,
+        )
 
     def test_handler_installs_client_and_creates_config_file(self):
         """Write landscape client.conf and install landscape-client."""
-        mycloud = get_cloud('ubuntu')
-        cfg = {'landscape': {'client': {}}}
-        expected = {'client': {
-            'log_level': 'info',
-            'url': 'https://landscape.canonical.com/message-system',
-            'ping_url': 'http://landscape.canonical.com/ping',
-            'data_path': '/var/lib/landscape/client'}}
+        mycloud = get_cloud("ubuntu")
+        cfg = {"landscape": {"client": {}}}
+        expected = {
+            "client": {
+                "log_level": "info",
+                "url": "https://landscape.canonical.com/message-system",
+                "ping_url": 
"http://landscape.canonical.com/ping", + "data_path": "/var/lib/landscape/client", + } + } mycloud.distro = mock.MagicMock() wrap_and_call( - 'cloudinit.config.cc_landscape', - {'LSC_CLIENT_CFG_FILE': {'new': self.conf}, - 'LS_DEFAULT_FILE': {'new': self.default_file}}, - cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None) + "cloudinit.config.cc_landscape", + { + "LSC_CLIENT_CFG_FILE": {"new": self.conf}, + "LS_DEFAULT_FILE": {"new": self.default_file}, + }, + cc_landscape.handle, + "notimportant", + cfg, + mycloud, + LOG, + None, + ) self.assertEqual( - [mock.call('landscape-client')], - mycloud.distro.install_packages.call_args) + [mock.call("landscape-client")], + mycloud.distro.install_packages.call_args, + ) self.assertEqual(expected, dict(ConfigObj(self.conf))) self.assertIn( - 'Wrote landscape config file to {0}'.format(self.conf), - self.logs.getvalue()) + "Wrote landscape config file to {0}".format(self.conf), + self.logs.getvalue(), + ) default_content = util.load_file(self.default_file) - self.assertEqual('RUN=1\n', default_content) + self.assertEqual("RUN=1\n", default_content) def test_handler_writes_merged_client_config_file_with_defaults(self): """Merge and write options from LSC_CLIENT_CFG_FILE with defaults.""" # Write existing sparse client.conf file - util.write_file(self.conf, '[client]\ncomputer_title = My PC\n') - mycloud = get_cloud('ubuntu') - cfg = {'landscape': {'client': {}}} - expected = {'client': { - 'log_level': 'info', - 'url': 'https://landscape.canonical.com/message-system', - 'ping_url': 'http://landscape.canonical.com/ping', - 'data_path': '/var/lib/landscape/client', - 'computer_title': 'My PC'}} + util.write_file(self.conf, "[client]\ncomputer_title = My PC\n") + mycloud = get_cloud("ubuntu") + cfg = {"landscape": {"client": {}}} + expected = { + "client": { + "log_level": "info", + "url": "https://landscape.canonical.com/message-system", + "ping_url": "http://landscape.canonical.com/ping", + "data_path": "/var/lib/landscape/client", + "computer_title": "My PC", + } + } wrap_and_call( - 'cloudinit.config.cc_landscape', - {'LSC_CLIENT_CFG_FILE': {'new': self.conf}}, - cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None) + "cloudinit.config.cc_landscape", + {"LSC_CLIENT_CFG_FILE": {"new": self.conf}}, + cc_landscape.handle, + "notimportant", + cfg, + mycloud, + LOG, + None, + ) self.assertEqual(expected, dict(ConfigObj(self.conf))) self.assertIn( - 'Wrote landscape config file to {0}'.format(self.conf), - self.logs.getvalue()) + "Wrote landscape config file to {0}".format(self.conf), + self.logs.getvalue(), + ) def test_handler_writes_merged_provided_cloudconfig_with_defaults(self): """Merge and write options from cloud-config options with defaults.""" # Write empty sparse client.conf file - util.write_file(self.conf, '') - mycloud = get_cloud('ubuntu') - cfg = {'landscape': {'client': {'computer_title': 'My PC'}}} - expected = {'client': { - 'log_level': 'info', - 'url': 'https://landscape.canonical.com/message-system', - 'ping_url': 'http://landscape.canonical.com/ping', - 'data_path': '/var/lib/landscape/client', - 'computer_title': 'My PC'}} + util.write_file(self.conf, "") + mycloud = get_cloud("ubuntu") + cfg = {"landscape": {"client": {"computer_title": "My PC"}}} + expected = { + "client": { + "log_level": "info", + "url": "https://landscape.canonical.com/message-system", + "ping_url": "http://landscape.canonical.com/ping", + "data_path": "/var/lib/landscape/client", + "computer_title": "My PC", + } + } wrap_and_call( - 
'cloudinit.config.cc_landscape', - {'LSC_CLIENT_CFG_FILE': {'new': self.conf}}, - cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None) + "cloudinit.config.cc_landscape", + {"LSC_CLIENT_CFG_FILE": {"new": self.conf}}, + cc_landscape.handle, + "notimportant", + cfg, + mycloud, + LOG, + None, + ) self.assertEqual(expected, dict(ConfigObj(self.conf))) self.assertIn( - 'Wrote landscape config file to {0}'.format(self.conf), - self.logs.getvalue()) + "Wrote landscape config file to {0}".format(self.conf), + self.logs.getvalue(), + ) diff --git a/tests/unittests/config/test_cc_locale.py b/tests/unittests/config/test_cc_locale.py index 6cd95a29..7190bc68 100644 --- a/tests/unittests/config/test_cc_locale.py +++ b/tests/unittests/config/test_cc_locale.py @@ -8,21 +8,19 @@ import os import shutil import tempfile from io import BytesIO -from configobj import ConfigObj from unittest import mock +from configobj import ConfigObj + from cloudinit import util from cloudinit.config import cc_locale from tests.unittests import helpers as t_help - from tests.unittests.util import get_cloud - LOG = logging.getLogger(__name__) class TestLocale(t_help.FilesystemMockingTestCase): - def setUp(self): super(TestLocale, self).setUp() self.new_root = tempfile.mkdtemp() @@ -30,35 +28,37 @@ class TestLocale(t_help.FilesystemMockingTestCase): self.patchUtils(self.new_root) def test_set_locale_arch(self): - locale = 'en_GB.UTF-8' - locale_configfile = '/etc/invalid-locale-path' + locale = "en_GB.UTF-8" + locale_configfile = "/etc/invalid-locale-path" cfg = { - 'locale': locale, - 'locale_configfile': locale_configfile, + "locale": locale, + "locale_configfile": locale_configfile, } - cc = get_cloud('arch') - - with mock.patch('cloudinit.distros.arch.subp.subp') as m_subp: - with mock.patch('cloudinit.distros.arch.LOG.warning') as m_LOG: - cc_locale.handle('cc_locale', cfg, cc, LOG, []) - m_LOG.assert_called_with('Invalid locale_configfile %s, ' - 'only supported value is ' - '/etc/locale.conf', - locale_configfile) + cc = get_cloud("arch") + + with mock.patch("cloudinit.distros.arch.subp.subp") as m_subp: + with mock.patch("cloudinit.distros.arch.LOG.warning") as m_LOG: + cc_locale.handle("cc_locale", cfg, cc, LOG, []) + m_LOG.assert_called_with( + "Invalid locale_configfile %s, " + "only supported value is " + "/etc/locale.conf", + locale_configfile, + ) contents = util.load_file(cc.distro.locale_gen_fn) - self.assertIn('%s UTF-8' % locale, contents) - m_subp.assert_called_with(['localectl', - 'set-locale', - locale], capture=False) + self.assertIn("%s UTF-8" % locale, contents) + m_subp.assert_called_with( + ["localectl", "set-locale", locale], capture=False + ) def test_set_locale_sles(self): cfg = { - 'locale': 'My.Locale', + "locale": "My.Locale", } - cc = get_cloud('sles') - cc_locale.handle('cc_locale', cfg, cc, LOG, []) + cc = get_cloud("sles") + cc_locale.handle("cc_locale", cfg, cc, LOG, []) if cc.distro.uses_systemd(): locale_conf = cc.distro.systemd_locale_conf_fn else: @@ -66,51 +66,58 @@ class TestLocale(t_help.FilesystemMockingTestCase): contents = util.load_file(locale_conf, decode=False) n_cfg = ConfigObj(BytesIO(contents)) if cc.distro.uses_systemd(): - self.assertEqual({'LANG': cfg['locale']}, dict(n_cfg)) + self.assertEqual({"LANG": cfg["locale"]}, dict(n_cfg)) else: - self.assertEqual({'RC_LANG': cfg['locale']}, dict(n_cfg)) + self.assertEqual({"RC_LANG": cfg["locale"]}, dict(n_cfg)) def test_set_locale_sles_default(self): cfg = {} - cc = get_cloud('sles') - cc_locale.handle('cc_locale', 
cfg, cc, LOG, [])
+        cc = get_cloud("sles")
+        cc_locale.handle("cc_locale", cfg, cc, LOG, [])
         if cc.distro.uses_systemd():
             locale_conf = cc.distro.systemd_locale_conf_fn
-            keyname = 'LANG'
+            keyname = "LANG"
         else:
             locale_conf = cc.distro.locale_conf_fn
-            keyname = 'RC_LANG'
+            keyname = "RC_LANG"
 
         contents = util.load_file(locale_conf, decode=False)
         n_cfg = ConfigObj(BytesIO(contents))
-        self.assertEqual({keyname: 'en_US.UTF-8'}, dict(n_cfg))
+        self.assertEqual({keyname: "en_US.UTF-8"}, dict(n_cfg))
 
     def test_locale_update_config_if_different_than_default(self):
         """Test cc_locale updates conf if different than default"""
         locale_conf = os.path.join(self.new_root, "etc/default/locale")
         util.write_file(locale_conf, 'LANG="en_US.UTF-8"\n')
-        cfg = {'locale': 'C.UTF-8'}
-        cc = get_cloud('ubuntu')
-        with mock.patch('cloudinit.distros.debian.subp.subp') as m_subp:
-            with mock.patch('cloudinit.distros.debian.LOCALE_CONF_FN',
-                            locale_conf):
-                cc_locale.handle('cc_locale', cfg, cc, LOG, [])
-                m_subp.assert_called_with(['update-locale',
-                                           '--locale-file=%s' % locale_conf,
-                                           'LANG=C.UTF-8'], capture=False)
+        cfg = {"locale": "C.UTF-8"}
+        cc = get_cloud("ubuntu")
+        with mock.patch("cloudinit.distros.debian.subp.subp") as m_subp:
+            with mock.patch(
+                "cloudinit.distros.debian.LOCALE_CONF_FN", locale_conf
+            ):
+                cc_locale.handle("cc_locale", cfg, cc, LOG, [])
+                m_subp.assert_called_with(
+                    [
+                        "update-locale",
+                        "--locale-file=%s" % locale_conf,
+                        "LANG=C.UTF-8",
+                    ],
+                    capture=False,
+                )
 
     def test_locale_rhel_defaults_en_us_utf8(self):
         """Test cc_locale gets en_US.UTF-8 from distro get_locale fallback"""
         cfg = {}
-        cc = get_cloud('rhel')
-        update_sysconfig = 'cloudinit.distros.rhel_util.update_sysconfig_file'
-        with mock.patch.object(cc.distro, 'uses_systemd') as m_use_sd:
+        cc = get_cloud("rhel")
+        update_sysconfig = "cloudinit.distros.rhel_util.update_sysconfig_file"
+        with mock.patch.object(cc.distro, "uses_systemd") as m_use_sd:
             m_use_sd.return_value = True
             with mock.patch(update_sysconfig) as m_update_syscfg:
-                cc_locale.handle('cc_locale', cfg, cc, LOG, [])
-                m_update_syscfg.assert_called_with('/etc/locale.conf',
-                                                   {'LANG': 'en_US.UTF-8'})
+                cc_locale.handle("cc_locale", cfg, cc, LOG, [])
+                m_update_syscfg.assert_called_with(
+                    "/etc/locale.conf", {"LANG": "en_US.UTF-8"}
+                )
 
 
 # vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_lxd.py b/tests/unittests/config/test_cc_lxd.py
index 887987c0..720274d6 100644
--- a/tests/unittests/config/test_cc_lxd.py
+++ b/tests/unittests/config/test_cc_lxd.py
@@ -3,7 +3,6 @@ from unittest import mock
 
 from cloudinit.config import cc_lxd
 from tests.unittests import helpers as t_help
-
 from tests.unittests.util import get_cloud
 
 
@@ -12,11 +11,11 @@ class TestLxd(t_help.CiTestCase):
     with_logs = True
 
     lxd_cfg = {
-        'lxd': {
-            'init': {
-                'network_address': '0.0.0.0',
-                'storage_backend': 'zfs',
-                'storage_pool': 'poolname',
+        "lxd": {
+            "init": {
+                "network_address": "0.0.0.0",
+                "storage_backend": "zfs",
+                "storage_pool": "poolname",
             }
         }
     }
@@ -27,16 +26,26 @@ class TestLxd(t_help.CiTestCase):
         cc = get_cloud()
         mock_subp.which.return_value = True
         m_maybe_clean.return_value = None
-        cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
+        cc_lxd.handle("cc_lxd", self.lxd_cfg, cc, self.logger, [])
         self.assertTrue(mock_subp.which.called)
         # no bridge config, so maybe_cleanup should not be called.
self.assertFalse(m_maybe_clean.called) self.assertEqual( - [mock.call(['lxd', 'waitready', '--timeout=300']), - mock.call( - ['lxd', 'init', '--auto', '--network-address=0.0.0.0', - '--storage-backend=zfs', '--storage-pool=poolname'])], - mock_subp.subp.call_args_list) + [ + mock.call(["lxd", "waitready", "--timeout=300"]), + mock.call( + [ + "lxd", + "init", + "--auto", + "--network-address=0.0.0.0", + "--storage-backend=zfs", + "--storage-pool=poolname", + ] + ), + ], + mock_subp.subp.call_args_list, + ) @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.subp") @@ -44,20 +53,20 @@ class TestLxd(t_help.CiTestCase): cc = get_cloud() cc.distro = mock.MagicMock() mock_subp.which.return_value = None - cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, []) - self.assertNotIn('WARN', self.logs.getvalue()) + cc_lxd.handle("cc_lxd", self.lxd_cfg, cc, self.logger, []) + self.assertNotIn("WARN", self.logs.getvalue()) self.assertTrue(cc.distro.install_packages.called) - cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, []) + cc_lxd.handle("cc_lxd", self.lxd_cfg, cc, self.logger, []) self.assertFalse(m_maybe_clean.called) install_pkg = cc.distro.install_packages.call_args_list[0][0][0] - self.assertEqual(sorted(install_pkg), ['lxd', 'zfsutils-linux']) + self.assertEqual(sorted(install_pkg), ["lxd", "zfsutils-linux"]) @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.subp") def test_no_init_does_nothing(self, mock_subp, m_maybe_clean): cc = get_cloud() cc.distro = mock.MagicMock() - cc_lxd.handle('cc_lxd', {'lxd': {}}, cc, self.logger, []) + cc_lxd.handle("cc_lxd", {"lxd": {}}, cc, self.logger, []) self.assertFalse(cc.distro.install_packages.called) self.assertFalse(mock_subp.subp.called) self.assertFalse(m_maybe_clean.called) @@ -67,118 +76,150 @@ class TestLxd(t_help.CiTestCase): def test_no_lxd_does_nothing(self, mock_subp, m_maybe_clean): cc = get_cloud() cc.distro = mock.MagicMock() - cc_lxd.handle('cc_lxd', {'package_update': True}, cc, self.logger, []) + cc_lxd.handle("cc_lxd", {"package_update": True}, cc, self.logger, []) self.assertFalse(cc.distro.install_packages.called) self.assertFalse(mock_subp.subp.called) self.assertFalse(m_maybe_clean.called) def test_lxd_debconf_new_full(self): - data = {"mode": "new", - "name": "testbr0", - "ipv4_address": "10.0.8.1", - "ipv4_netmask": "24", - "ipv4_dhcp_first": "10.0.8.2", - "ipv4_dhcp_last": "10.0.8.254", - "ipv4_dhcp_leases": "250", - "ipv4_nat": "true", - "ipv6_address": "fd98:9e0:3744::1", - "ipv6_netmask": "64", - "ipv6_nat": "true", - "domain": "lxd"} + data = { + "mode": "new", + "name": "testbr0", + "ipv4_address": "10.0.8.1", + "ipv4_netmask": "24", + "ipv4_dhcp_first": "10.0.8.2", + "ipv4_dhcp_last": "10.0.8.254", + "ipv4_dhcp_leases": "250", + "ipv4_nat": "true", + "ipv6_address": "fd98:9e0:3744::1", + "ipv6_netmask": "64", + "ipv6_nat": "true", + "domain": "lxd", + } self.assertEqual( cc_lxd.bridge_to_debconf(data), - {"lxd/setup-bridge": "true", - "lxd/bridge-name": "testbr0", - "lxd/bridge-ipv4": "true", - "lxd/bridge-ipv4-address": "10.0.8.1", - "lxd/bridge-ipv4-netmask": "24", - "lxd/bridge-ipv4-dhcp-first": "10.0.8.2", - "lxd/bridge-ipv4-dhcp-last": "10.0.8.254", - "lxd/bridge-ipv4-dhcp-leases": "250", - "lxd/bridge-ipv4-nat": "true", - "lxd/bridge-ipv6": "true", - "lxd/bridge-ipv6-address": "fd98:9e0:3744::1", - "lxd/bridge-ipv6-netmask": "64", - "lxd/bridge-ipv6-nat": "true", - "lxd/bridge-domain": "lxd"}) + { + 
"lxd/setup-bridge": "true", + "lxd/bridge-name": "testbr0", + "lxd/bridge-ipv4": "true", + "lxd/bridge-ipv4-address": "10.0.8.1", + "lxd/bridge-ipv4-netmask": "24", + "lxd/bridge-ipv4-dhcp-first": "10.0.8.2", + "lxd/bridge-ipv4-dhcp-last": "10.0.8.254", + "lxd/bridge-ipv4-dhcp-leases": "250", + "lxd/bridge-ipv4-nat": "true", + "lxd/bridge-ipv6": "true", + "lxd/bridge-ipv6-address": "fd98:9e0:3744::1", + "lxd/bridge-ipv6-netmask": "64", + "lxd/bridge-ipv6-nat": "true", + "lxd/bridge-domain": "lxd", + }, + ) def test_lxd_debconf_new_partial(self): - data = {"mode": "new", - "ipv6_address": "fd98:9e0:3744::1", - "ipv6_netmask": "64", - "ipv6_nat": "true"} + data = { + "mode": "new", + "ipv6_address": "fd98:9e0:3744::1", + "ipv6_netmask": "64", + "ipv6_nat": "true", + } self.assertEqual( cc_lxd.bridge_to_debconf(data), - {"lxd/setup-bridge": "true", - "lxd/bridge-ipv6": "true", - "lxd/bridge-ipv6-address": "fd98:9e0:3744::1", - "lxd/bridge-ipv6-netmask": "64", - "lxd/bridge-ipv6-nat": "true"}) + { + "lxd/setup-bridge": "true", + "lxd/bridge-ipv6": "true", + "lxd/bridge-ipv6-address": "fd98:9e0:3744::1", + "lxd/bridge-ipv6-netmask": "64", + "lxd/bridge-ipv6-nat": "true", + }, + ) def test_lxd_debconf_existing(self): - data = {"mode": "existing", - "name": "testbr0"} + data = {"mode": "existing", "name": "testbr0"} self.assertEqual( cc_lxd.bridge_to_debconf(data), - {"lxd/setup-bridge": "false", - "lxd/use-existing-bridge": "true", - "lxd/bridge-name": "testbr0"}) + { + "lxd/setup-bridge": "false", + "lxd/use-existing-bridge": "true", + "lxd/bridge-name": "testbr0", + }, + ) def test_lxd_debconf_none(self): data = {"mode": "none"} self.assertEqual( cc_lxd.bridge_to_debconf(data), - {"lxd/setup-bridge": "false", - "lxd/bridge-name": ""}) + {"lxd/setup-bridge": "false", "lxd/bridge-name": ""}, + ) def test_lxd_cmd_new_full(self): - data = {"mode": "new", - "name": "testbr0", - "ipv4_address": "10.0.8.1", - "ipv4_netmask": "24", - "ipv4_dhcp_first": "10.0.8.2", - "ipv4_dhcp_last": "10.0.8.254", - "ipv4_dhcp_leases": "250", - "ipv4_nat": "true", - "ipv6_address": "fd98:9e0:3744::1", - "ipv6_netmask": "64", - "ipv6_nat": "true", - "domain": "lxd"} + data = { + "mode": "new", + "name": "testbr0", + "ipv4_address": "10.0.8.1", + "ipv4_netmask": "24", + "ipv4_dhcp_first": "10.0.8.2", + "ipv4_dhcp_last": "10.0.8.254", + "ipv4_dhcp_leases": "250", + "ipv4_nat": "true", + "ipv6_address": "fd98:9e0:3744::1", + "ipv6_netmask": "64", + "ipv6_nat": "true", + "domain": "lxd", + } self.assertEqual( cc_lxd.bridge_to_cmd(data), - (["network", "create", "testbr0", - "ipv4.address=10.0.8.1/24", "ipv4.nat=true", - "ipv4.dhcp.ranges=10.0.8.2-10.0.8.254", - "ipv6.address=fd98:9e0:3744::1/64", - "ipv6.nat=true", "dns.domain=lxd"], - ["network", "attach-profile", - "testbr0", "default", "eth0"])) + ( + [ + "network", + "create", + "testbr0", + "ipv4.address=10.0.8.1/24", + "ipv4.nat=true", + "ipv4.dhcp.ranges=10.0.8.2-10.0.8.254", + "ipv6.address=fd98:9e0:3744::1/64", + "ipv6.nat=true", + "dns.domain=lxd", + ], + ["network", "attach-profile", "testbr0", "default", "eth0"], + ), + ) def test_lxd_cmd_new_partial(self): - data = {"mode": "new", - "ipv6_address": "fd98:9e0:3744::1", - "ipv6_netmask": "64", - "ipv6_nat": "true"} + data = { + "mode": "new", + "ipv6_address": "fd98:9e0:3744::1", + "ipv6_netmask": "64", + "ipv6_nat": "true", + } self.assertEqual( cc_lxd.bridge_to_cmd(data), - (["network", "create", "lxdbr0", "ipv4.address=none", - "ipv6.address=fd98:9e0:3744::1/64", "ipv6.nat=true"], - ["network", 
"attach-profile", - "lxdbr0", "default", "eth0"])) + ( + [ + "network", + "create", + "lxdbr0", + "ipv4.address=none", + "ipv6.address=fd98:9e0:3744::1/64", + "ipv6.nat=true", + ], + ["network", "attach-profile", "lxdbr0", "default", "eth0"], + ), + ) def test_lxd_cmd_existing(self): - data = {"mode": "existing", - "name": "testbr0"} + data = {"mode": "existing", "name": "testbr0"} self.assertEqual( cc_lxd.bridge_to_cmd(data), - (None, ["network", "attach-profile", - "testbr0", "default", "eth0"])) + ( + None, + ["network", "attach-profile", "testbr0", "default", "eth0"], + ), + ) def test_lxd_cmd_none(self): data = {"mode": "none"} - self.assertEqual( - cc_lxd.bridge_to_cmd(data), - (None, None)) + self.assertEqual(cc_lxd.bridge_to_cmd(data), (None, None)) class TestLxdMaybeCleanupDefault(t_help.CiTestCase): @@ -190,21 +231,24 @@ class TestLxdMaybeCleanupDefault(t_help.CiTestCase): def test_network_other_than_default_not_deleted(self, m_lxc): """deletion or removal should only occur if bridge is default.""" cc_lxd.maybe_cleanup_default( - net_name="lxdbr1", did_init=True, create=True, attach=True) + net_name="lxdbr1", did_init=True, create=True, attach=True + ) m_lxc.assert_not_called() @mock.patch("cloudinit.config.cc_lxd._lxc") def test_did_init_false_does_not_delete(self, m_lxc): """deletion or removal should only occur if did_init is True.""" cc_lxd.maybe_cleanup_default( - net_name=self.defnet, did_init=False, create=True, attach=True) + net_name=self.defnet, did_init=False, create=True, attach=True + ) m_lxc.assert_not_called() @mock.patch("cloudinit.config.cc_lxd._lxc") def test_network_deleted_if_create_true(self, m_lxc): """deletion of network should occur if create is True.""" cc_lxd.maybe_cleanup_default( - net_name=self.defnet, did_init=True, create=True, attach=False) + net_name=self.defnet, did_init=True, create=True, attach=False + ) m_lxc.assert_called_with(["network", "delete", self.defnet]) @mock.patch("cloudinit.config.cc_lxd._lxc") @@ -213,10 +257,16 @@ class TestLxdMaybeCleanupDefault(t_help.CiTestCase): nic_name = "my_nic" profile = "my_profile" cc_lxd.maybe_cleanup_default( - net_name=self.defnet, did_init=True, create=False, attach=True, - profile=profile, nic_name=nic_name) + net_name=self.defnet, + did_init=True, + create=False, + attach=True, + profile=profile, + nic_name=nic_name, + ) m_lxc.assert_called_once_with( - ["profile", "device", "remove", profile, nic_name]) + ["profile", "device", "remove", profile, nic_name] + ) # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_mcollective.py b/tests/unittests/config/test_cc_mcollective.py index fff777b6..5cbdeb76 100644 --- a/tests/unittests/config/test_cc_mcollective.py +++ b/tests/unittests/config/test_cc_mcollective.py @@ -1,15 +1,15 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-import configobj import logging import os import shutil import tempfile from io import BytesIO -from cloudinit import (util) +import configobj + +from cloudinit import util from cloudinit.config import cc_mcollective from tests.unittests import helpers as t_help - from tests.unittests.util import get_cloud LOG = logging.getLogger(__name__) @@ -47,83 +47,92 @@ class TestConfig(t_help.FilesystemMockingTestCase): self.addCleanup(shutil.rmtree, self.tmp) # "./": make os.path.join behave correctly with abs path as second arg self.server_cfg = os.path.join( - self.tmp, "./" + cc_mcollective.SERVER_CFG) + self.tmp, "./" + cc_mcollective.SERVER_CFG + ) self.pubcert_file = os.path.join( - self.tmp, "./" + cc_mcollective.PUBCERT_FILE) + self.tmp, "./" + cc_mcollective.PUBCERT_FILE + ) self.pricert_file = os.path.join( - self.tmp, self.tmp, "./" + cc_mcollective.PRICERT_FILE) + self.tmp, self.tmp, "./" + cc_mcollective.PRICERT_FILE + ) def test_basic_config(self): cfg = { - 'mcollective': { - 'conf': { - 'loglevel': 'debug', - 'connector': 'rabbitmq', - 'logfile': '/var/log/mcollective.log', - 'ttl': '4294957', - 'collectives': 'mcollective', - 'main_collective': 'mcollective', - 'securityprovider': 'psk', - 'daemonize': '1', - 'factsource': 'yaml', - 'direct_addressing': '1', - 'plugin.psk': 'unset', - 'libdir': '/usr/share/mcollective/plugins', - 'identity': '1', + "mcollective": { + "conf": { + "loglevel": "debug", + "connector": "rabbitmq", + "logfile": "/var/log/mcollective.log", + "ttl": "4294957", + "collectives": "mcollective", + "main_collective": "mcollective", + "securityprovider": "psk", + "daemonize": "1", + "factsource": "yaml", + "direct_addressing": "1", + "plugin.psk": "unset", + "libdir": "/usr/share/mcollective/plugins", + "identity": "1", }, }, } - expected = cfg['mcollective']['conf'] + expected = cfg["mcollective"]["conf"] self.patchUtils(self.tmp) - cc_mcollective.configure(cfg['mcollective']['conf']) + cc_mcollective.configure(cfg["mcollective"]["conf"]) contents = util.load_file(cc_mcollective.SERVER_CFG, decode=False) contents = configobj.ConfigObj(BytesIO(contents)) self.assertEqual(expected, dict(contents)) def test_existing_config_is_saved(self): - cfg = {'loglevel': 'warn'} + cfg = {"loglevel": "warn"} util.write_file(self.server_cfg, STOCK_CONFIG) cc_mcollective.configure(config=cfg, server_cfg=self.server_cfg) self.assertTrue(os.path.exists(self.server_cfg)) self.assertTrue(os.path.exists(self.server_cfg + ".old")) - self.assertEqual(util.load_file(self.server_cfg + ".old"), - STOCK_CONFIG) + self.assertEqual( + util.load_file(self.server_cfg + ".old"), STOCK_CONFIG + ) def test_existing_updated(self): - cfg = {'loglevel': 'warn'} + cfg = {"loglevel": "warn"} util.write_file(self.server_cfg, STOCK_CONFIG) cc_mcollective.configure(config=cfg, server_cfg=self.server_cfg) cfgobj = configobj.ConfigObj(self.server_cfg) - self.assertEqual(cfg['loglevel'], cfgobj['loglevel']) + self.assertEqual(cfg["loglevel"], cfgobj["loglevel"]) def test_certificats_written(self): # check public-cert and private-cert keys in config get written - cfg = {'loglevel': 'debug', - 'public-cert': "this is my public-certificate", - 'private-cert': "secret private certificate"} + cfg = { + "loglevel": "debug", + "public-cert": "this is my public-certificate", + "private-cert": "secret private certificate", + } cc_mcollective.configure( - config=cfg, server_cfg=self.server_cfg, - pricert_file=self.pricert_file, pubcert_file=self.pubcert_file) + config=cfg, + server_cfg=self.server_cfg, + 
pricert_file=self.pricert_file, + pubcert_file=self.pubcert_file, + ) found = configobj.ConfigObj(self.server_cfg) # make sure these didn't get written in - self.assertFalse('public-cert' in found) - self.assertFalse('private-cert' in found) + self.assertFalse("public-cert" in found) + self.assertFalse("private-cert" in found) # these need updating to the specified paths - self.assertEqual(found['plugin.ssl_server_public'], self.pubcert_file) - self.assertEqual(found['plugin.ssl_server_private'], self.pricert_file) + self.assertEqual(found["plugin.ssl_server_public"], self.pubcert_file) + self.assertEqual(found["plugin.ssl_server_private"], self.pricert_file) # and the security provider should be ssl - self.assertEqual(found['securityprovider'], 'ssl') + self.assertEqual(found["securityprovider"], "ssl") self.assertEqual( - util.load_file(self.pricert_file), cfg['private-cert']) - self.assertEqual( - util.load_file(self.pubcert_file), cfg['public-cert']) + util.load_file(self.pricert_file), cfg["private-cert"] + ) + self.assertEqual(util.load_file(self.pubcert_file), cfg["public-cert"]) class TestHandler(t_help.TestCase): @@ -133,14 +142,17 @@ class TestHandler(t_help.TestCase): cc = get_cloud() cc.distro = t_help.mock.MagicMock() mock_util.load_file.return_value = b"" - mycfg = {'mcollective': {'conf': {'loglevel': 'debug'}}} - cc_mcollective.handle('cc_mcollective', mycfg, cc, LOG, []) + mycfg = {"mcollective": {"conf": {"loglevel": "debug"}}} + cc_mcollective.handle("cc_mcollective", mycfg, cc, LOG, []) self.assertTrue(cc.distro.install_packages.called) install_pkg = cc.distro.install_packages.call_args_list[0][0][0] - self.assertEqual(install_pkg, ('mcollective',)) + self.assertEqual(install_pkg, ("mcollective",)) self.assertTrue(mock_subp.subp.called) - self.assertEqual(mock_subp.subp.call_args_list[0][0][0], - ['service', 'mcollective', 'restart']) + self.assertEqual( + mock_subp.subp.call_args_list[0][0][0], + ["service", "mcollective", "restart"], + ) + # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_mounts.py b/tests/unittests/config/test_cc_mounts.py index fc65f108..084faacd 100644 --- a/tests/unittests/config/test_cc_mounts.py +++ b/tests/unittests/config/test_cc_mounts.py @@ -1,302 +1,363 @@ # This file is part of cloud-init. See LICENSE file for license information. -import pytest import os.path from unittest import mock -from tests.unittests import helpers as test_helpers +import pytest + from cloudinit.config import cc_mounts from cloudinit.config.cc_mounts import create_swapfile from cloudinit.subp import ProcessExecutionError +from tests.unittests import helpers as test_helpers -M_PATH = 'cloudinit.config.cc_mounts.' +M_PATH = "cloudinit.config.cc_mounts."
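# The TestSanitizeDevname cases that follow pin down how
# cc_mounts.sanitize_devname resolves device names against a mocked /dev and
# /sys tree. Roughly, based on the assertions below (illustrative paraphrase
# of the expected behavior, not the cc_mounts source):
#
#     sanitize_devname("sda", lambda x: None, mock.Mock())
#     # -> "/dev/sda": a bare disk name gains a /dev/ prefix
#     sanitize_devname("ephemeral0.1", lambda x: "/dev/sda", mock.Mock())
#     # -> "/dev/sda1": the metadata name is resolved via the transformer,
#     #    then the requested partition suffix is appended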
class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase): - def setUp(self): super(TestSanitizeDevname, self).setUp() self.new_root = self.tmp_dir() self.patchOS(self.new_root) def _touch(self, path): - path = os.path.join(self.new_root, path.lstrip('/')) + path = os.path.join(self.new_root, path.lstrip("/")) basedir = os.path.dirname(path) if not os.path.exists(basedir): os.makedirs(basedir) - open(path, 'a').close() + open(path, "a").close() def _makedirs(self, directory): - directory = os.path.join(self.new_root, directory.lstrip('/')) + directory = os.path.join(self.new_root, directory.lstrip("/")) if not os.path.exists(directory): os.makedirs(directory) def mock_existence_of_disk(self, disk_path): self._touch(disk_path) - self._makedirs(os.path.join('/sys/block', disk_path.split('/')[-1])) + self._makedirs(os.path.join("/sys/block", disk_path.split("/")[-1])) def mock_existence_of_partition(self, disk_path, partition_number): self.mock_existence_of_disk(disk_path) self._touch(disk_path + str(partition_number)) - disk_name = disk_path.split('/')[-1] - self._makedirs(os.path.join('/sys/block', - disk_name, - disk_name + str(partition_number))) + disk_name = disk_path.split("/")[-1] + self._makedirs( + os.path.join( + "/sys/block", disk_name, disk_name + str(partition_number) + ) + ) def test_existent_full_disk_path_is_returned(self): - disk_path = '/dev/sda' + disk_path = "/dev/sda" self.mock_existence_of_disk(disk_path) - self.assertEqual(disk_path, - cc_mounts.sanitize_devname(disk_path, - lambda x: None, - mock.Mock())) + self.assertEqual( + disk_path, + cc_mounts.sanitize_devname(disk_path, lambda x: None, mock.Mock()), + ) def test_existent_disk_name_returns_full_path(self): - disk_name = 'sda' - disk_path = '/dev/' + disk_name + disk_name = "sda" + disk_path = "/dev/" + disk_name self.mock_existence_of_disk(disk_path) - self.assertEqual(disk_path, - cc_mounts.sanitize_devname(disk_name, - lambda x: None, - mock.Mock())) + self.assertEqual( + disk_path, + cc_mounts.sanitize_devname(disk_name, lambda x: None, mock.Mock()), + ) def test_existent_meta_disk_is_returned(self): - actual_disk_path = '/dev/sda' + actual_disk_path = "/dev/sda" self.mock_existence_of_disk(actual_disk_path) self.assertEqual( actual_disk_path, - cc_mounts.sanitize_devname('ephemeral0', - lambda x: actual_disk_path, - mock.Mock())) + cc_mounts.sanitize_devname( + "ephemeral0", lambda x: actual_disk_path, mock.Mock() + ), + ) def test_existent_meta_partition_is_returned(self): - disk_name, partition_part = '/dev/sda', '1' + disk_name, partition_part = "/dev/sda", "1" actual_partition_path = disk_name + partition_part self.mock_existence_of_partition(disk_name, partition_part) self.assertEqual( actual_partition_path, - cc_mounts.sanitize_devname('ephemeral0.1', - lambda x: disk_name, - mock.Mock())) + cc_mounts.sanitize_devname( + "ephemeral0.1", lambda x: disk_name, mock.Mock() + ), + ) def test_existent_meta_partition_with_p_is_returned(self): - disk_name, partition_part = '/dev/sda', 'p1' + disk_name, partition_part = "/dev/sda", "p1" actual_partition_path = disk_name + partition_part self.mock_existence_of_partition(disk_name, partition_part) self.assertEqual( actual_partition_path, - cc_mounts.sanitize_devname('ephemeral0.1', - lambda x: disk_name, - mock.Mock())) + cc_mounts.sanitize_devname( + "ephemeral0.1", lambda x: disk_name, mock.Mock() + ), + ) def test_first_partition_returned_if_existent_disk_is_partitioned(self): - disk_name, partition_part = '/dev/sda', '1' + disk_name, partition_part = 
"/dev/sda", "1" actual_partition_path = disk_name + partition_part self.mock_existence_of_partition(disk_name, partition_part) self.assertEqual( actual_partition_path, - cc_mounts.sanitize_devname('ephemeral0', - lambda x: disk_name, - mock.Mock())) + cc_mounts.sanitize_devname( + "ephemeral0", lambda x: disk_name, mock.Mock() + ), + ) def test_nth_partition_returned_if_requested(self): - disk_name, partition_part = '/dev/sda', '3' + disk_name, partition_part = "/dev/sda", "3" actual_partition_path = disk_name + partition_part self.mock_existence_of_partition(disk_name, partition_part) self.assertEqual( actual_partition_path, - cc_mounts.sanitize_devname('ephemeral0.3', - lambda x: disk_name, - mock.Mock())) + cc_mounts.sanitize_devname( + "ephemeral0.3", lambda x: disk_name, mock.Mock() + ), + ) def test_transformer_returning_none_returns_none(self): self.assertIsNone( cc_mounts.sanitize_devname( - 'ephemeral0', lambda x: None, mock.Mock())) + "ephemeral0", lambda x: None, mock.Mock() + ) + ) def test_missing_device_returns_none(self): self.assertIsNone( - cc_mounts.sanitize_devname('/dev/sda', None, mock.Mock())) + cc_mounts.sanitize_devname("/dev/sda", None, mock.Mock()) + ) def test_missing_sys_returns_none(self): - disk_path = '/dev/sda' + disk_path = "/dev/sda" self._makedirs(disk_path) self.assertIsNone( - cc_mounts.sanitize_devname(disk_path, None, mock.Mock())) + cc_mounts.sanitize_devname(disk_path, None, mock.Mock()) + ) def test_existent_disk_but_missing_partition_returns_none(self): - disk_path = '/dev/sda' + disk_path = "/dev/sda" self.mock_existence_of_disk(disk_path) self.assertIsNone( cc_mounts.sanitize_devname( - 'ephemeral0.1', lambda x: disk_path, mock.Mock())) + "ephemeral0.1", lambda x: disk_path, mock.Mock() + ) + ) def test_network_device_returns_network_device(self): - disk_path = 'netdevice:/path' + disk_path = "netdevice:/path" self.assertEqual( - disk_path, - cc_mounts.sanitize_devname(disk_path, None, mock.Mock())) + disk_path, cc_mounts.sanitize_devname(disk_path, None, mock.Mock()) + ) def test_device_aliases_remapping(self): - disk_path = '/dev/sda' + disk_path = "/dev/sda" self.mock_existence_of_disk(disk_path) - self.assertEqual(disk_path, - cc_mounts.sanitize_devname('mydata', - lambda x: None, - mock.Mock(), - {'mydata': disk_path})) + self.assertEqual( + disk_path, + cc_mounts.sanitize_devname( + "mydata", lambda x: None, mock.Mock(), {"mydata": disk_path} + ), + ) class TestSwapFileCreation(test_helpers.FilesystemMockingTestCase): - def setUp(self): super(TestSwapFileCreation, self).setUp() self.new_root = self.tmp_dir() self.patchOS(self.new_root) - self.fstab_path = os.path.join(self.new_root, 'etc/fstab') - self.swap_path = os.path.join(self.new_root, 'swap.img') - self._makedirs('/etc') + self.fstab_path = os.path.join(self.new_root, "etc/fstab") + self.swap_path = os.path.join(self.new_root, "swap.img") + self._makedirs("/etc") - self.add_patch('cloudinit.config.cc_mounts.FSTAB_PATH', - 'mock_fstab_path', - self.fstab_path, - autospec=False) - - self.add_patch('cloudinit.config.cc_mounts.subp.subp', - 'm_subp_subp') + self.add_patch( + "cloudinit.config.cc_mounts.FSTAB_PATH", + "mock_fstab_path", + self.fstab_path, + autospec=False, + ) - self.add_patch('cloudinit.config.cc_mounts.util.mounts', - 'mock_util_mounts', - return_value={ - '/dev/sda1': {'fstype': 'ext4', - 'mountpoint': '/', - 'opts': 'rw,relatime,discard' - }}) + self.add_patch("cloudinit.config.cc_mounts.subp.subp", "m_subp_subp") + + self.add_patch( + 
"cloudinit.config.cc_mounts.util.mounts", + "mock_util_mounts", + return_value={ + "/dev/sda1": { + "fstype": "ext4", + "mountpoint": "/", + "opts": "rw,relatime,discard", + } + }, + ) self.mock_cloud = mock.Mock() self.mock_log = mock.Mock() self.mock_cloud.device_name_to_device = self.device_name_to_device self.cc = { - 'swap': { - 'filename': self.swap_path, - 'size': '512', - 'maxsize': '512'}} + "swap": { + "filename": self.swap_path, + "size": "512", + "maxsize": "512", + } + } def _makedirs(self, directory): - directory = os.path.join(self.new_root, directory.lstrip('/')) + directory = os.path.join(self.new_root, directory.lstrip("/")) if not os.path.exists(directory): os.makedirs(directory) def device_name_to_device(self, path): - if path == 'swap': + if path == "swap": return self.swap_path else: dev = None return dev - @mock.patch('cloudinit.util.get_mount_info') - @mock.patch('cloudinit.util.kernel_version') - def test_swap_creation_method_fallocate_on_xfs(self, m_kernel_version, - m_get_mount_info): + @mock.patch("cloudinit.util.get_mount_info") + @mock.patch("cloudinit.util.kernel_version") + def test_swap_creation_method_fallocate_on_xfs( + self, m_kernel_version, m_get_mount_info + ): m_kernel_version.return_value = (4, 20) m_get_mount_info.return_value = ["", "xfs"] cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, []) - self.m_subp_subp.assert_has_calls([ - mock.call(['fallocate', '-l', '0M', self.swap_path], capture=True), - mock.call(['mkswap', self.swap_path]), - mock.call(['swapon', '-a'])]) - - @mock.patch('cloudinit.util.get_mount_info') - @mock.patch('cloudinit.util.kernel_version') - def test_swap_creation_method_xfs(self, m_kernel_version, - m_get_mount_info): + self.m_subp_subp.assert_has_calls( + [ + mock.call( + ["fallocate", "-l", "0M", self.swap_path], capture=True + ), + mock.call(["mkswap", self.swap_path]), + mock.call(["swapon", "-a"]), + ] + ) + + @mock.patch("cloudinit.util.get_mount_info") + @mock.patch("cloudinit.util.kernel_version") + def test_swap_creation_method_xfs( + self, m_kernel_version, m_get_mount_info + ): m_kernel_version.return_value = (3, 18) m_get_mount_info.return_value = ["", "xfs"] cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, []) - self.m_subp_subp.assert_has_calls([ - mock.call(['dd', 'if=/dev/zero', - 'of=' + self.swap_path, - 'bs=1M', 'count=0'], capture=True), - mock.call(['mkswap', self.swap_path]), - mock.call(['swapon', '-a'])]) - - @mock.patch('cloudinit.util.get_mount_info') - @mock.patch('cloudinit.util.kernel_version') - def test_swap_creation_method_btrfs(self, m_kernel_version, - m_get_mount_info): + self.m_subp_subp.assert_has_calls( + [ + mock.call( + [ + "dd", + "if=/dev/zero", + "of=" + self.swap_path, + "bs=1M", + "count=0", + ], + capture=True, + ), + mock.call(["mkswap", self.swap_path]), + mock.call(["swapon", "-a"]), + ] + ) + + @mock.patch("cloudinit.util.get_mount_info") + @mock.patch("cloudinit.util.kernel_version") + def test_swap_creation_method_btrfs( + self, m_kernel_version, m_get_mount_info + ): m_kernel_version.return_value = (4, 20) m_get_mount_info.return_value = ["", "btrfs"] cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, []) - self.m_subp_subp.assert_has_calls([ - mock.call(['dd', 'if=/dev/zero', - 'of=' + self.swap_path, - 'bs=1M', 'count=0'], capture=True), - mock.call(['mkswap', self.swap_path]), - mock.call(['swapon', '-a'])]) - - @mock.patch('cloudinit.util.get_mount_info') - @mock.patch('cloudinit.util.kernel_version') - def 
test_swap_creation_method_ext4(self, m_kernel_version, - m_get_mount_info): + self.m_subp_subp.assert_has_calls( + [ + mock.call( + [ + "dd", + "if=/dev/zero", + "of=" + self.swap_path, + "bs=1M", + "count=0", + ], + capture=True, + ), + mock.call(["mkswap", self.swap_path]), + mock.call(["swapon", "-a"]), + ] + ) + + @mock.patch("cloudinit.util.get_mount_info") + @mock.patch("cloudinit.util.kernel_version") + def test_swap_creation_method_ext4( + self, m_kernel_version, m_get_mount_info + ): m_kernel_version.return_value = (5, 14) m_get_mount_info.return_value = ["", "ext4"] cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, []) - self.m_subp_subp.assert_has_calls([ - mock.call(['fallocate', '-l', '0M', self.swap_path], capture=True), - mock.call(['mkswap', self.swap_path]), - mock.call(['swapon', '-a'])]) + self.m_subp_subp.assert_has_calls( + [ + mock.call( + ["fallocate", "-l", "0M", self.swap_path], capture=True + ), + mock.call(["mkswap", self.swap_path]), + mock.call(["swapon", "-a"]), + ] + ) class TestFstabHandling(test_helpers.FilesystemMockingTestCase): - swap_path = '/dev/sdb1' + swap_path = "/dev/sdb1" def setUp(self): super(TestFstabHandling, self).setUp() self.new_root = self.tmp_dir() self.patchOS(self.new_root) - self.fstab_path = os.path.join(self.new_root, 'etc/fstab') - self._makedirs('/etc') - - self.add_patch('cloudinit.config.cc_mounts.FSTAB_PATH', - 'mock_fstab_path', - self.fstab_path, - autospec=False) + self.fstab_path = os.path.join(self.new_root, "etc/fstab") + self._makedirs("/etc") - self.add_patch('cloudinit.config.cc_mounts._is_block_device', - 'mock_is_block_device', - return_value=True) + self.add_patch( + "cloudinit.config.cc_mounts.FSTAB_PATH", + "mock_fstab_path", + self.fstab_path, + autospec=False, + ) - self.add_patch('cloudinit.config.cc_mounts.subp.subp', - 'm_subp_subp') + self.add_patch( + "cloudinit.config.cc_mounts._is_block_device", + "mock_is_block_device", + return_value=True, + ) - self.add_patch('cloudinit.config.cc_mounts.util.mounts', - 'mock_util_mounts', - return_value={ - '/dev/sda1': {'fstype': 'ext4', - 'mountpoint': '/', - 'opts': 'rw,relatime,discard' - }}) + self.add_patch("cloudinit.config.cc_mounts.subp.subp", "m_subp_subp") + + self.add_patch( + "cloudinit.config.cc_mounts.util.mounts", + "mock_util_mounts", + return_value={ + "/dev/sda1": { + "fstype": "ext4", + "mountpoint": "/", + "opts": "rw,relatime,discard", + } + }, + ) self.mock_cloud = mock.Mock() self.mock_log = mock.Mock() self.mock_cloud.device_name_to_device = self.device_name_to_device def _makedirs(self, directory): - directory = os.path.join(self.new_root, directory.lstrip('/')) + directory = os.path.join(self.new_root, directory.lstrip("/")) if not os.path.exists(directory): os.makedirs(directory) def device_name_to_device(self, path): - if path == 'swap': + if path == "swap": return self.swap_path else: dev = None @@ -304,127 +365,126 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase): return dev def test_no_fstab(self): - """ Handle images which do not include an fstab. 
""" + """Handle images which do not include an fstab.""" self.assertFalse(os.path.exists(cc_mounts.FSTAB_PATH)) fstab_expected_content = ( - '%s\tnone\tswap\tsw,comment=cloudconfig\t' - '0\t0\n' % (self.swap_path,) + "%s\tnone\tswap\tsw,comment=cloudconfig\t0\t0\n" + % (self.swap_path,) ) cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, []) - with open(cc_mounts.FSTAB_PATH, 'r') as fd: + with open(cc_mounts.FSTAB_PATH, "r") as fd: fstab_new_content = fd.read() self.assertEqual(fstab_expected_content, fstab_new_content) def test_swap_integrity(self): - '''Ensure that the swap file is correctly created and can + """Ensure that the swap file is correctly created and can swapon successfully. Fixing the corner case of: - kernel: swapon: swapfile has holes''' + kernel: swapon: swapfile has holes""" - fstab = '/swap.img swap swap defaults 0 0\n' + fstab = "/swap.img swap swap defaults 0 0\n" - with open(cc_mounts.FSTAB_PATH, 'w') as fd: + with open(cc_mounts.FSTAB_PATH, "w") as fd: fd.write(fstab) - cc = {'swap': ['filename: /swap.img', 'size: 512', 'maxsize: 512']} + cc = {"swap": ["filename: /swap.img", "size: 512", "maxsize: 512"]} cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, []) def test_fstab_no_swap_device(self): - '''Ensure that cloud-init adds a discovered swap partition - to /etc/fstab.''' + """Ensure that cloud-init adds a discovered swap partition + to /etc/fstab.""" - fstab_original_content = '' + fstab_original_content = "" fstab_expected_content = ( - '%s\tnone\tswap\tsw,comment=cloudconfig\t' - '0\t0\n' % (self.swap_path,) + "%s\tnone\tswap\tsw,comment=cloudconfig\t0\t0\n" + % (self.swap_path,) ) - with open(cc_mounts.FSTAB_PATH, 'w') as fd: + with open(cc_mounts.FSTAB_PATH, "w") as fd: fd.write(fstab_original_content) cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, []) - with open(cc_mounts.FSTAB_PATH, 'r') as fd: + with open(cc_mounts.FSTAB_PATH, "r") as fd: fstab_new_content = fd.read() self.assertEqual(fstab_expected_content, fstab_new_content) def test_fstab_same_swap_device_already_configured(self): - '''Ensure that cloud-init will not add a swap device if the same - device already exists in /etc/fstab.''' + """Ensure that cloud-init will not add a swap device if the same + device already exists in /etc/fstab.""" - fstab_original_content = '%s swap swap defaults 0 0\n' % ( - self.swap_path,) + fstab_original_content = "%s swap swap defaults 0 0\n" % ( + self.swap_path, + ) fstab_expected_content = fstab_original_content - with open(cc_mounts.FSTAB_PATH, 'w') as fd: + with open(cc_mounts.FSTAB_PATH, "w") as fd: fd.write(fstab_original_content) cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, []) - with open(cc_mounts.FSTAB_PATH, 'r') as fd: + with open(cc_mounts.FSTAB_PATH, "r") as fd: fstab_new_content = fd.read() self.assertEqual(fstab_expected_content, fstab_new_content) def test_fstab_alternate_swap_device_already_configured(self): - '''Ensure that cloud-init will add a discovered swap device to + """Ensure that cloud-init will add a discovered swap device to /etc/fstab even when there exists a swap definition on another - device.''' + device.""" - fstab_original_content = '/dev/sdc1 swap swap defaults 0 0\n' + fstab_original_content = "/dev/sdc1 swap swap defaults 0 0\n" fstab_expected_content = ( - fstab_original_content + - '%s\tnone\tswap\tsw,comment=cloudconfig\t' - '0\t0\n' % (self.swap_path,) + fstab_original_content + + "%s\tnone\tswap\tsw,comment=cloudconfig\t0\t0\n" + % (self.swap_path,) ) - with 
open(cc_mounts.FSTAB_PATH, 'w') as fd: + with open(cc_mounts.FSTAB_PATH, "w") as fd: fd.write(fstab_original_content) cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, []) - with open(cc_mounts.FSTAB_PATH, 'r') as fd: + with open(cc_mounts.FSTAB_PATH, "r") as fd: fstab_new_content = fd.read() self.assertEqual(fstab_expected_content, fstab_new_content) def test_no_change_fstab_sets_needs_mount_all(self): - '''verify unchanged fstab entries are mounted if not call mount -a''' + """verify unchanged fstab entries are mounted if not call mount -a""" fstab_original_content = ( - 'LABEL=cloudimg-rootfs / ext4 defaults 0 0\n' - 'LABEL=UEFI /boot/efi vfat defaults 0 0\n' - '/dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2\n' + "LABEL=cloudimg-rootfs / ext4 defaults 0 0\n" + "LABEL=UEFI /boot/efi vfat defaults 0 0\n" + "/dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2\n" ) fstab_expected_content = fstab_original_content - cc = { - 'mounts': [ - ['/dev/vdb', '/mnt', 'auto', 'defaults,noexec'] - ] - } - with open(cc_mounts.FSTAB_PATH, 'w') as fd: + cc = {"mounts": [["/dev/vdb", "/mnt", "auto", "defaults,noexec"]]} + with open(cc_mounts.FSTAB_PATH, "w") as fd: fd.write(fstab_original_content) - with open(cc_mounts.FSTAB_PATH, 'r') as fd: + with open(cc_mounts.FSTAB_PATH, "r") as fd: fstab_new_content = fd.read() self.assertEqual(fstab_expected_content, fstab_new_content) cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, []) - self.m_subp_subp.assert_has_calls([ - mock.call(['mount', '-a']), - mock.call(['systemctl', 'daemon-reload'])]) + self.m_subp_subp.assert_has_calls( + [ + mock.call(["mount", "-a"]), + mock.call(["systemctl", "daemon-reload"]), + ] + ) class TestCreateSwapfile: - - @pytest.mark.parametrize('fstype', ('xfs', 'btrfs', 'ext4', 'other')) - @mock.patch(M_PATH + 'util.get_mount_info') - @mock.patch(M_PATH + 'subp.subp') + @pytest.mark.parametrize("fstype", ("xfs", "btrfs", "ext4", "other")) + @mock.patch(M_PATH + "util.get_mount_info") + @mock.patch(M_PATH + "subp.subp") def test_happy_path(self, m_subp, m_get_mount_info, fstype, tmpdir): swap_file = tmpdir.join("swap-file") fname = str(swap_file) # Some of the calls to subp.subp should create the swap file; this # roughly approximates that - m_subp.side_effect = lambda *args, **kwargs: swap_file.write('') + m_subp.side_effect = lambda *args, **kwargs: swap_file.write("") m_get_mount_info.return_value = (mock.ANY, fstype) - create_swapfile(fname, '') - assert mock.call(['mkswap', fname]) in m_subp.call_args_list + create_swapfile(fname, "") + assert mock.call(["mkswap", fname]) in m_subp.call_args_list @mock.patch(M_PATH + "util.get_mount_info") @mock.patch(M_PATH + "subp.subp") @@ -458,4 +518,5 @@ class TestCreateSwapfile: msg = "fallocate swap creation failed, will attempt with dd" assert msg in caplog.text + # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_ntp.py b/tests/unittests/config/test_cc_ntp.py index 3426533a..7da82cee 100644 --- a/tests/unittests/config/test_cc_ntp.py +++ b/tests/unittests/config/test_cc_ntp.py @@ -5,14 +5,16 @@ import shutil from functools import partial from os.path import dirname -from cloudinit import (helpers, util) +from cloudinit import helpers, util from cloudinit.config import cc_ntp from tests.unittests.helpers import ( - CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema) - + CiTestCase, + FilesystemMockingTestCase, + mock, + skipUnlessJsonSchema, +) from tests.unittests.util import get_cloud - NTP_TEMPLATE = """\ ## template: 
jinja servers {{servers}} @@ -35,18 +37,18 @@ class TestNtp(FilesystemMockingTestCase): def setUp(self): super(TestNtp, self).setUp() self.new_root = self.tmp_dir() - self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy') + self.add_patch("cloudinit.util.system_is_snappy", "m_snappy") self.m_snappy.return_value = False self.new_root = self.reRoot() self._get_cloud = partial( - get_cloud, - paths=helpers.Paths({'templates_dir': self.new_root}) + get_cloud, paths=helpers.Paths({"templates_dir": self.new_root}) ) def _get_template_path(self, template_name, distro, basepath=None): # ntp.conf.{distro} -> ntp.conf.debian.tmpl - template_fn = '{0}.tmpl'.format( - template_name.replace('{distro}', distro)) + template_fn = "{0}.tmpl".format( + template_name.replace("{distro}", distro) + ) if not basepath: basepath = self.new_root path = os.path.join(basepath, template_fn) @@ -55,25 +57,25 @@ class TestNtp(FilesystemMockingTestCase): def _generate_template(self, template=None): if not template: template = NTP_TEMPLATE - confpath = os.path.join(self.new_root, 'client.conf') - template_fn = os.path.join(self.new_root, 'client.conf.tmpl') + confpath = os.path.join(self.new_root, "client.conf") + template_fn = os.path.join(self.new_root, "client.conf.tmpl") util.write_file(template_fn, content=template) return (confpath, template_fn) def _mock_ntp_client_config(self, client=None, distro=None): if not client: - client = 'ntp' + client = "ntp" if not distro: - distro = 'ubuntu' + distro = "ubuntu" dcfg = cc_ntp.distro_ntp_client_configs(distro) - if client == 'systemd-timesyncd': + if client == "systemd-timesyncd": template = TIMESYNCD_TEMPLATE else: template = NTP_TEMPLATE (confpath, _template_fn) = self._generate_template(template=template) ntpconfig = copy.deepcopy(dcfg[client]) - ntpconfig['confpath'] = confpath - ntpconfig['template_name'] = os.path.basename(confpath) + ntpconfig["confpath"] = confpath + ntpconfig["template_name"] = os.path.basename(confpath) return ntpconfig @mock.patch("cloudinit.config.cc_ntp.subp") @@ -81,19 +83,21 @@ class TestNtp(FilesystemMockingTestCase): """ntp_install_client runs install_func when check_exe is absent.""" mock_subp.which.return_value = None # check_exe not found. install_func = mock.MagicMock() - cc_ntp.install_ntp_client(install_func, - packages=['ntpx'], check_exe='ntpdx') - mock_subp.which.assert_called_with('ntpdx') - install_func.assert_called_once_with(['ntpx']) + cc_ntp.install_ntp_client( + install_func, packages=["ntpx"], check_exe="ntpdx" + ) + mock_subp.which.assert_called_with("ntpdx") + install_func.assert_called_once_with(["ntpx"]) @mock.patch("cloudinit.config.cc_ntp.subp") def test_ntp_install_not_needed(self, mock_subp): """ntp_install_client doesn't install when check_exe is found.""" - client = 'chrony' + client = "chrony" mock_subp.which.return_value = [client] # check_exe found. 
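# The three install tests in this block pin down the contract of
# cc_ntp.install_ntp_client: consult subp.which(check_exe) and call
# install_func only when the executable is absent. A rough paraphrase
# (an assumption drawn from these assertions, not the actual cc_ntp source):
#
#     if not subp.which(check_exe):
#         install_func(packages)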
install_func = mock.MagicMock() - cc_ntp.install_ntp_client(install_func, packages=[client], - check_exe=client) + cc_ntp.install_ntp_client( + install_func, packages=[client], check_exe=client + ) install_func.assert_not_called() @mock.patch("cloudinit.config.cc_ntp.subp") @@ -101,8 +105,9 @@ class TestNtp(FilesystemMockingTestCase): """ntp_install_client runs install_func with empty list""" mock_subp.which.return_value = None # check_exe not found install_func = mock.MagicMock() - cc_ntp.install_ntp_client(install_func, packages=[], - check_exe='timesyncd') + cc_ntp.install_ntp_client( + install_func, packages=[], check_exe="timesyncd" + ) install_func.assert_called_once_with([]) def test_ntp_rename_ntp_conf(self): @@ -124,18 +129,22 @@ class TestNtp(FilesystemMockingTestCase): def test_write_ntp_config_template_uses_ntp_conf_distro_no_servers(self): """write_ntp_config_template reads from $client.conf.distro.tmpl""" servers = [] - pools = ['10.0.0.1', '10.0.0.2'] + pools = ["10.0.0.1", "10.0.0.2"] (confpath, template_fn) = self._generate_template() - mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR' + mock_path = "cloudinit.config.cc_ntp.temp_utils._TMPDIR" with mock.patch(mock_path, self.new_root): - cc_ntp.write_ntp_config_template('ubuntu', - servers=servers, pools=pools, - path=confpath, - template_fn=template_fn, - template=None) + cc_ntp.write_ntp_config_template( + "ubuntu", + servers=servers, + pools=pools, + path=confpath, + template_fn=template_fn, + template=None, + ) self.assertEqual( "servers []\npools ['10.0.0.1', '10.0.0.2']\n", - util.load_file(confpath)) + util.load_file(confpath), + ) def test_write_ntp_config_template_defaults_pools_w_empty_lists(self): """write_ntp_config_template defaults pools servers upon empty config. @@ -143,20 +152,23 @@ class TestNtp(FilesystemMockingTestCase): When both pools and servers are empty, default NR_POOL_SERVERS get configured. """ - distro = 'ubuntu' + distro = "ubuntu" pools = cc_ntp.generate_server_names(distro) servers = [] (confpath, template_fn) = self._generate_template() - mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR' + mock_path = "cloudinit.config.cc_ntp.temp_utils._TMPDIR" with mock.patch(mock_path, self.new_root): - cc_ntp.write_ntp_config_template(distro, - servers=servers, pools=pools, - path=confpath, - template_fn=template_fn, - template=None) + cc_ntp.write_ntp_config_template( + distro, + servers=servers, + pools=pools, + path=confpath, + template_fn=template_fn, + template=None, + ) self.assertEqual( - "servers []\npools {0}\n".format(pools), - util.load_file(confpath)) + "servers []\npools {0}\n".format(pools), util.load_file(confpath) + ) def test_defaults_pools_empty_lists_sles(self): """write_ntp_config_template defaults opensuse pools upon empty config. @@ -164,39 +176,50 @@ class TestNtp(FilesystemMockingTestCase): When both pools and servers are empty, default NR_POOL_SERVERS get configured. 
""" - distro = 'sles' + distro = "sles" default_pools = cc_ntp.generate_server_names(distro) (confpath, template_fn) = self._generate_template() - cc_ntp.write_ntp_config_template(distro, - servers=[], pools=[], - path=confpath, - template_fn=template_fn, - template=None) + cc_ntp.write_ntp_config_template( + distro, + servers=[], + pools=[], + path=confpath, + template_fn=template_fn, + template=None, + ) for pool in default_pools: - self.assertIn('opensuse', pool) + self.assertIn("opensuse", pool) self.assertEqual( "servers []\npools {0}\n".format(default_pools), - util.load_file(confpath)) + util.load_file(confpath), + ) self.assertIn( "Adding distro default ntp pool servers: {0}".format( - ",".join(default_pools)), - self.logs.getvalue()) + ",".join(default_pools) + ), + self.logs.getvalue(), + ) def test_timesyncd_template(self): """Test timesycnd template is correct""" - pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org'] - servers = ['192.168.23.3', '192.168.23.4'] + pools = ["0.mycompany.pool.ntp.org", "3.mycompany.pool.ntp.org"] + servers = ["192.168.23.3", "192.168.23.4"] (confpath, template_fn) = self._generate_template( - template=TIMESYNCD_TEMPLATE) - cc_ntp.write_ntp_config_template('ubuntu', - servers=servers, pools=pools, - path=confpath, - template_fn=template_fn, - template=None) + template=TIMESYNCD_TEMPLATE + ) + cc_ntp.write_ntp_config_template( + "ubuntu", + servers=servers, + pools=pools, + path=confpath, + template_fn=template_fn, + template=None, + ) self.assertEqual( "[Time]\nNTP=%s %s \n" % (" ".join(servers), " ".join(pools)), - util.load_file(confpath)) + util.load_file(confpath), + ) def test_distro_ntp_client_configs(self): """Test we have updated ntp client configs on different distros""" @@ -213,55 +236,62 @@ class TestNtp(FilesystemMockingTestCase): result = cc_ntp.distro_ntp_client_configs(distro) for client in delta[distro].keys(): for key in delta[distro][client].keys(): - self.assertEqual(delta[distro][client][key], - result[client][key]) + self.assertEqual( + delta[distro][client][key], result[client][key] + ) def _get_expected_pools(self, pools, distro, client): - if client in ['ntp', 'chrony']: - if client == 'ntp' and distro == 'alpine': + if client in ["ntp", "chrony"]: + if client == "ntp" and distro == "alpine": # NTP for Alpine Linux is Busybox's ntp which does not # support 'pool' lines in its configuration file. expected_pools = [] else: expected_pools = [ - 'pool {0} iburst'.format(pool) for pool in pools] - elif client == 'systemd-timesyncd': + "pool {0} iburst".format(pool) for pool in pools + ] + elif client == "systemd-timesyncd": expected_pools = " ".join(pools) return expected_pools def _get_expected_servers(self, servers, distro, client): - if client in ['ntp', 'chrony']: - if client == 'ntp' and distro == 'alpine': + if client in ["ntp", "chrony"]: + if client == "ntp" and distro == "alpine": # NTP for Alpine Linux is Busybox's ntp which only supports # 'server' lines without iburst option. 
expected_servers = [ - 'server {0}'.format(srv) for srv in servers] + "server {0}".format(srv) for srv in servers + ] else: expected_servers = [ - 'server {0} iburst'.format(srv) for srv in servers] - elif client == 'systemd-timesyncd': + "server {0} iburst".format(srv) for srv in servers + ] + elif client == "systemd-timesyncd": expected_servers = " ".join(servers) return expected_servers def test_ntp_handler_real_distro_ntp_templates(self): """Test ntp handler renders the shipped distro ntp client templates.""" - pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org'] - servers = ['192.168.23.3', '192.168.23.4'] - for client in ['ntp', 'systemd-timesyncd', 'chrony']: + pools = ["0.mycompany.pool.ntp.org", "3.mycompany.pool.ntp.org"] + servers = ["192.168.23.3", "192.168.23.4"] + for client in ["ntp", "systemd-timesyncd", "chrony"]: for distro in cc_ntp.distros: distro_cfg = cc_ntp.distro_ntp_client_configs(distro) ntpclient = distro_cfg[client] - confpath = ( - os.path.join(self.new_root, ntpclient.get('confpath')[1:])) - template = ntpclient.get('template_name') + confpath = os.path.join( + self.new_root, ntpclient.get("confpath")[1:] + ) + template = ntpclient.get("template_name") # find sourcetree template file root_dir = ( - dirname(dirname(os.path.realpath(util.__file__))) + - '/templates') - source_fn = self._get_template_path(template, distro, - basepath=root_dir) + dirname(dirname(os.path.realpath(util.__file__))) + + "/templates" + ) + source_fn = self._get_template_path( + template, distro, basepath=root_dir + ) template_fn = self._get_template_path(template, distro) # don't fail if cloud-init doesn't have a template for # a distro,client pair @@ -269,64 +299,77 @@ class TestNtp(FilesystemMockingTestCase): continue # Create a copy in our tmp_dir shutil.copy(source_fn, template_fn) - cc_ntp.write_ntp_config_template(distro, servers=servers, - pools=pools, path=confpath, - template_fn=template_fn) + cc_ntp.write_ntp_config_template( + distro, + servers=servers, + pools=pools, + path=confpath, + template_fn=template_fn, + ) content = util.load_file(confpath) - if client in ['ntp', 'chrony']: + if client in ["ntp", "chrony"]: content_lines = content.splitlines() - expected_servers = self._get_expected_servers(servers, - distro, - client) - print('distro=%s client=%s' % (distro, client)) + expected_servers = self._get_expected_servers( + servers, distro, client + ) + print("distro=%s client=%s" % (distro, client)) for sline in expected_servers: - self.assertIn(sline, content_lines, - ('failed to render {0} conf' - ' for distro:{1}'.format(client, - distro))) - expected_pools = self._get_expected_pools(pools, distro, - client) + self.assertIn( + sline, + content_lines, + "failed to render {0} conf for distro:{1}".format( + client, distro + ), + ) + expected_pools = self._get_expected_pools( + pools, distro, client + ) if expected_pools != []: for pline in expected_pools: - self.assertIn(pline, content_lines, - ('failed to render {0} conf' - ' for distro:{1}'.format(client, - distro))) - elif client == 'systemd-timesyncd': - expected_servers = self._get_expected_servers(servers, - distro, - client) - expected_pools = self._get_expected_pools(pools, - distro, - client) + self.assertIn( + pline, + content_lines, + "failed to render {0} conf" + " for distro:{1}".format(client, distro), + ) + elif client == "systemd-timesyncd": + expected_servers = self._get_expected_servers( + servers, distro, client + ) + expected_pools = self._get_expected_pools( + pools, distro, client + ) 
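# For the systemd-timesyncd branch below, the rendered configuration is a
# single [Time] section. With the servers and pools used in this test it
# would look roughly like (illustrative only):
#
#     [Time]
#     NTP=192.168.23.3 192.168.23.4 0.mycompany.pool.ntp.org 3.mycompany.pool.ntp.org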
expected_content = ( - "# cloud-init generated file\n" + - "# See timesyncd.conf(5) for details.\n\n" + - "[Time]\nNTP=%s %s \n" % (expected_servers, - expected_pools)) + "# cloud-init generated file\n" + + "# See timesyncd.conf(5) for details.\n\n" + + "[Time]\nNTP=%s %s \n" + % (expected_servers, expected_pools) + ) self.assertEqual(expected_content, content) def test_no_ntpcfg_does_nothing(self): """When no ntp section is defined handler logs a warning and noops.""" - cc_ntp.handle('cc_ntp', {}, None, None, []) + cc_ntp.handle("cc_ntp", {}, None, None, []) self.assertEqual( - 'DEBUG: Skipping module named cc_ntp, ' - 'not present or disabled by cfg\n', - self.logs.getvalue()) + "DEBUG: Skipping module named cc_ntp, " + "not present or disabled by cfg\n", + self.logs.getvalue(), + ) - @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') - def test_ntp_handler_schema_validation_allows_empty_ntp_config(self, - m_select): + @mock.patch("cloudinit.config.cc_ntp.select_ntp_client") + def test_ntp_handler_schema_validation_allows_empty_ntp_config( + self, m_select + ): """Ntp schema validation allows for an empty ntp: configuration.""" - valid_empty_configs = [{'ntp': {}}, {'ntp': None}] + valid_empty_configs = [{"ntp": {}}, {"ntp": None}] for valid_empty_config in valid_empty_configs: for distro in cc_ntp.distros: mycloud = self._get_cloud(distro) ntpconfig = self._mock_ntp_client_config(distro=distro) - confpath = ntpconfig['confpath'] + confpath = ntpconfig["confpath"] m_select.return_value = ntpconfig - cc_ntp.handle('cc_ntp', valid_empty_config, mycloud, None, []) - if distro == 'alpine': + cc_ntp.handle("cc_ntp", valid_empty_config, mycloud, None, []) + if distro == "alpine": # _mock_ntp_client_config call above did not specify a # client value and so it defaults to "ntp" which on # Alpine Linux only supports servers and not pools. @@ -334,217 +377,240 @@ servers = cc_ntp.generate_server_names(mycloud.distro.name) self.assertEqual( "servers {0}\npools []\n".format(servers), - util.load_file(confpath)) + util.load_file(confpath), + ) else: pools = cc_ntp.generate_server_names(mycloud.distro.name) self.assertEqual( "servers []\npools {0}\n".format(pools), - util.load_file(confpath)) - self.assertNotIn('Invalid config:', self.logs.getvalue()) + util.load_file(confpath), + ) + self.assertNotIn("Invalid config:", self.logs.getvalue()) @skipUnlessJsonSchema() - @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') - def test_ntp_handler_schema_validation_warns_non_string_item_type(self, - m_sel): + @mock.patch("cloudinit.config.cc_ntp.select_ntp_client") + def test_ntp_handler_schema_validation_warns_non_string_item_type( + self, m_sel + ): """Ntp schema validation warns of non-strings in pools or servers. Schema validation is not strict, so ntp config is still rendered.
""" - invalid_config = {'ntp': {'pools': [123], 'servers': ['valid', None]}} + invalid_config = {"ntp": {"pools": [123], "servers": ["valid", None]}} for distro in cc_ntp.distros: mycloud = self._get_cloud(distro) ntpconfig = self._mock_ntp_client_config(distro=distro) - confpath = ntpconfig['confpath'] + confpath = ntpconfig["confpath"] m_sel.return_value = ntpconfig - cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) + cc_ntp.handle("cc_ntp", invalid_config, mycloud, None, []) self.assertIn( "Invalid config:\nntp.pools.0: 123 is not of type 'string'\n" "ntp.servers.1: None is not of type 'string'", - self.logs.getvalue()) - self.assertEqual("servers ['valid', None]\npools [123]\n", - util.load_file(confpath)) + self.logs.getvalue(), + ) + self.assertEqual( + "servers ['valid', None]\npools [123]\n", + util.load_file(confpath), + ) @skipUnlessJsonSchema() - @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') - def test_ntp_handler_schema_validation_warns_of_non_array_type(self, - m_select): + @mock.patch("cloudinit.config.cc_ntp.select_ntp_client") + def test_ntp_handler_schema_validation_warns_of_non_array_type( + self, m_select + ): """Ntp schema validation warns of non-array pools or servers types. Schema validation is not strict, so ntp config is still be rendered. """ - invalid_config = {'ntp': {'pools': 123, 'servers': 'non-array'}} + invalid_config = {"ntp": {"pools": 123, "servers": "non-array"}} for distro in cc_ntp.distros: mycloud = self._get_cloud(distro) ntpconfig = self._mock_ntp_client_config(distro=distro) - confpath = ntpconfig['confpath'] + confpath = ntpconfig["confpath"] m_select.return_value = ntpconfig - cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) + cc_ntp.handle("cc_ntp", invalid_config, mycloud, None, []) self.assertIn( "Invalid config:\nntp.pools: 123 is not of type 'array'\n" "ntp.servers: 'non-array' is not of type 'array'", - self.logs.getvalue()) - self.assertEqual("servers non-array\npools 123\n", - util.load_file(confpath)) + self.logs.getvalue(), + ) + self.assertEqual( + "servers non-array\npools 123\n", util.load_file(confpath) + ) @skipUnlessJsonSchema() - @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') - def test_ntp_handler_schema_validation_warns_invalid_key_present(self, - m_select): + @mock.patch("cloudinit.config.cc_ntp.select_ntp_client") + def test_ntp_handler_schema_validation_warns_invalid_key_present( + self, m_select + ): """Ntp schema validation warns of invalid keys present in ntp config. Schema validation is not strict, so ntp config is still be rendered. 
""" invalid_config = { - 'ntp': {'invalidkey': 1, 'pools': ['0.mycompany.pool.ntp.org']}} + "ntp": {"invalidkey": 1, "pools": ["0.mycompany.pool.ntp.org"]} + } for distro in cc_ntp.distros: - if distro != 'alpine': + if distro != "alpine": mycloud = self._get_cloud(distro) ntpconfig = self._mock_ntp_client_config(distro=distro) - confpath = ntpconfig['confpath'] + confpath = ntpconfig["confpath"] m_select.return_value = ntpconfig - cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) + cc_ntp.handle("cc_ntp", invalid_config, mycloud, None, []) self.assertIn( "Invalid config:\nntp: Additional properties are not " "allowed ('invalidkey' was unexpected)", - self.logs.getvalue()) + self.logs.getvalue(), + ) self.assertEqual( "servers []\npools ['0.mycompany.pool.ntp.org']\n", - util.load_file(confpath)) + util.load_file(confpath), + ) @skipUnlessJsonSchema() - @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + @mock.patch("cloudinit.config.cc_ntp.select_ntp_client") def test_ntp_handler_schema_validation_warns_of_duplicates(self, m_select): """Ntp schema validation warns of duplicates in servers or pools. Schema validation is not strict, so ntp config is still be rendered. """ invalid_config = { - 'ntp': {'pools': ['0.mypool.org', '0.mypool.org'], - 'servers': ['10.0.0.1', '10.0.0.1']}} + "ntp": { + "pools": ["0.mypool.org", "0.mypool.org"], + "servers": ["10.0.0.1", "10.0.0.1"], + } + } for distro in cc_ntp.distros: mycloud = self._get_cloud(distro) ntpconfig = self._mock_ntp_client_config(distro=distro) - confpath = ntpconfig['confpath'] + confpath = ntpconfig["confpath"] m_select.return_value = ntpconfig - cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) + cc_ntp.handle("cc_ntp", invalid_config, mycloud, None, []) self.assertIn( "Invalid config:\nntp.pools: ['0.mypool.org', '0.mypool.org']" " has non-unique elements\nntp.servers: " "['10.0.0.1', '10.0.0.1'] has non-unique elements", - self.logs.getvalue()) + self.logs.getvalue(), + ) self.assertEqual( "servers ['10.0.0.1', '10.0.0.1']\n" "pools ['0.mypool.org', '0.mypool.org']\n", - util.load_file(confpath)) + util.load_file(confpath), + ) - @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + @mock.patch("cloudinit.config.cc_ntp.select_ntp_client") def test_ntp_handler_timesyncd(self, m_select): """Test ntp handler configures timesyncd""" - servers = ['192.168.2.1', '192.168.2.2'] - pools = ['0.mypool.org'] - cfg = {'ntp': {'servers': servers, 'pools': pools}} - client = 'systemd-timesyncd' + servers = ["192.168.2.1", "192.168.2.2"] + pools = ["0.mypool.org"] + cfg = {"ntp": {"servers": servers, "pools": pools}} + client = "systemd-timesyncd" for distro in cc_ntp.distros: mycloud = self._get_cloud(distro) - ntpconfig = self._mock_ntp_client_config(distro=distro, - client=client) - confpath = ntpconfig['confpath'] + ntpconfig = self._mock_ntp_client_config( + distro=distro, client=client + ) + confpath = ntpconfig["confpath"] m_select.return_value = ntpconfig - cc_ntp.handle('cc_ntp', cfg, mycloud, None, []) + cc_ntp.handle("cc_ntp", cfg, mycloud, None, []) self.assertEqual( "[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n", - util.load_file(confpath)) + util.load_file(confpath), + ) - @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + @mock.patch("cloudinit.config.cc_ntp.select_ntp_client") def test_ntp_handler_enabled_false(self, m_select): - """Test ntp handler does not run if enabled: false """ - cfg = {'ntp': {'enabled': False}} + """Test ntp handler does not run if enabled: false""" + cfg = 
{"ntp": {"enabled": False}} for distro in cc_ntp.distros: mycloud = self._get_cloud(distro) - cc_ntp.handle('notimportant', cfg, mycloud, None, None) + cc_ntp.handle("notimportant", cfg, mycloud, None, None) self.assertEqual(0, m_select.call_count) @mock.patch("cloudinit.distros.subp") @mock.patch("cloudinit.config.cc_ntp.subp") - @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + @mock.patch("cloudinit.config.cc_ntp.select_ntp_client") @mock.patch("cloudinit.distros.Distro.uses_systemd") def test_ntp_the_whole_package(self, m_sysd, m_select, m_subp, m_dsubp): - """Test enabled config renders template, and restarts service """ - cfg = {'ntp': {'enabled': True}} + """Test enabled config renders template, and restarts service""" + cfg = {"ntp": {"enabled": True}} for distro in cc_ntp.distros: mycloud = self._get_cloud(distro) ntpconfig = self._mock_ntp_client_config(distro=distro) - confpath = ntpconfig['confpath'] - service_name = ntpconfig['service_name'] + confpath = ntpconfig["confpath"] + service_name = ntpconfig["service_name"] m_select.return_value = ntpconfig hosts = cc_ntp.generate_server_names(mycloud.distro.name) uses_systemd = True - expected_service_call = ['systemctl', 'reload-or-restart', - service_name] + expected_service_call = [ + "systemctl", + "reload-or-restart", + service_name, + ] expected_content = "servers []\npools {0}\n".format(hosts) - if distro == 'alpine': + if distro == "alpine": uses_systemd = False - expected_service_call = ['rc-service', service_name, 'restart'] + expected_service_call = ["rc-service", service_name, "restart"] # _mock_ntp_client_config call above did not specify a client # value and so it defaults to "ntp" which on Alpine Linux only # supports servers and not pools. expected_content = "servers {0}\npools []\n".format(hosts) m_sysd.return_value = uses_systemd - with mock.patch('cloudinit.config.cc_ntp.util') as m_util: + with mock.patch("cloudinit.config.cc_ntp.util") as m_util: # allow use of util.mergemanydict m_util.mergemanydict.side_effect = util.mergemanydict # default client is present m_subp.which.return_value = True # use the config 'enabled' value m_util.is_false.return_value = util.is_false( - cfg['ntp']['enabled']) - cc_ntp.handle('notimportant', cfg, mycloud, None, None) + cfg["ntp"]["enabled"] + ) + cc_ntp.handle("notimportant", cfg, mycloud, None, None) m_dsubp.subp.assert_called_with( - expected_service_call, capture=True) + expected_service_call, capture=True + ) self.assertEqual(expected_content, util.load_file(confpath)) - @mock.patch('cloudinit.util.system_info') + @mock.patch("cloudinit.util.system_info") def test_opensuse_picks_chrony(self, m_sysinfo): """Test opensuse picks chrony or ntp on certain distro versions""" # < 15.0 => ntp - m_sysinfo.return_value = { - 'dist': ('openSUSE', '13.2', 'Harlequin') - } - mycloud = self._get_cloud('opensuse') + m_sysinfo.return_value = {"dist": ("openSUSE", "13.2", "Harlequin")} + mycloud = self._get_cloud("opensuse") expected_client = mycloud.distro.preferred_ntp_clients[0] - self.assertEqual('ntp', expected_client) + self.assertEqual("ntp", expected_client) # >= 15.0 and not openSUSE => chrony m_sysinfo.return_value = { - 'dist': ('SLES', '15.0', 'SUSE Linux Enterprise Server 15') + "dist": ("SLES", "15.0", "SUSE Linux Enterprise Server 15") } - mycloud = self._get_cloud('sles') + mycloud = self._get_cloud("sles") expected_client = mycloud.distro.preferred_ntp_clients[0] - self.assertEqual('chrony', expected_client) + self.assertEqual("chrony", expected_client) # >= 15.0 
and openSUSE and ver != 42 => chrony m_sysinfo.return_value = { - 'dist': ('openSUSE Tumbleweed', '20180326', 'timbleweed') + "dist": ("openSUSE Tumbleweed", "20180326", "timbleweed") } - mycloud = self._get_cloud('opensuse') + mycloud = self._get_cloud("opensuse") expected_client = mycloud.distro.preferred_ntp_clients[0] - self.assertEqual('chrony', expected_client) + self.assertEqual("chrony", expected_client) - @mock.patch('cloudinit.util.system_info') + @mock.patch("cloudinit.util.system_info") def test_ubuntu_xenial_picks_ntp(self, m_sysinfo): """Test Ubuntu picks ntp on xenial release""" - m_sysinfo.return_value = {'dist': ('Ubuntu', '16.04', 'xenial')} - mycloud = self._get_cloud('ubuntu') + m_sysinfo.return_value = {"dist": ("Ubuntu", "16.04", "xenial")} + mycloud = self._get_cloud("ubuntu") expected_client = mycloud.distro.preferred_ntp_clients[0] - self.assertEqual('ntp', expected_client) + self.assertEqual("ntp", expected_client) - @mock.patch('cloudinit.config.cc_ntp.subp.which') + @mock.patch("cloudinit.config.cc_ntp.subp.which") def test_snappy_system_picks_timesyncd(self, m_which): """Test snappy systems prefer installed clients""" @@ -552,26 +618,27 @@ class TestNtp(FilesystemMockingTestCase): self.m_snappy.return_value = True # ubuntu core systems will have timesyncd installed - m_which.side_effect = iter([None, '/lib/systemd/systemd-timesyncd', - None, None, None]) - distro = 'ubuntu' + m_which.side_effect = iter( + [None, "/lib/systemd/systemd-timesyncd", None, None, None] + ) + distro = "ubuntu" mycloud = self._get_cloud(distro) distro_configs = cc_ntp.distro_ntp_client_configs(distro) - expected_client = 'systemd-timesyncd' + expected_client = "systemd-timesyncd" expected_cfg = distro_configs[expected_client] expected_calls = [] # we only get to timesyncd for client in mycloud.distro.preferred_ntp_clients[0:2]: cfg = distro_configs[client] - expected_calls.append(mock.call(cfg['check_exe'])) + expected_calls.append(mock.call(cfg["check_exe"])) result = cc_ntp.select_ntp_client(None, mycloud.distro) m_which.assert_has_calls(expected_calls) self.assertEqual(sorted(expected_cfg), sorted(cfg)) self.assertEqual(sorted(expected_cfg), sorted(result)) - @mock.patch('cloudinit.config.cc_ntp.subp.which') + @mock.patch("cloudinit.config.cc_ntp.subp.which") def test_ntp_distro_searches_all_preferred_clients(self, m_which): - """Test select_ntp_client search all distro preferred clients """ + """Test select_ntp_client search all distro preferred clients""" # nothing is installed m_which.return_value = None for distro in cc_ntp.distros: @@ -582,12 +649,12 @@ class TestNtp(FilesystemMockingTestCase): expected_calls = [] for client in mycloud.distro.preferred_ntp_clients: cfg = distro_configs[client] - expected_calls.append(mock.call(cfg['check_exe'])) + expected_calls.append(mock.call(cfg["check_exe"])) cc_ntp.select_ntp_client({}, mycloud.distro) m_which.assert_has_calls(expected_calls) self.assertEqual(sorted(expected_cfg), sorted(cfg)) - @mock.patch('cloudinit.config.cc_ntp.subp.which') + @mock.patch("cloudinit.config.cc_ntp.subp.which") def test_user_cfg_ntp_client_auto_uses_distro_clients(self, m_which): """Test user_cfg.ntp_client='auto' defaults to distro search""" # nothing is installed @@ -600,34 +667,36 @@ class TestNtp(FilesystemMockingTestCase): expected_calls = [] for client in mycloud.distro.preferred_ntp_clients: cfg = distro_configs[client] - expected_calls.append(mock.call(cfg['check_exe'])) - cc_ntp.select_ntp_client('auto', mycloud.distro) + 
expected_calls.append(mock.call(cfg["check_exe"])) + cc_ntp.select_ntp_client("auto", mycloud.distro) m_which.assert_has_calls(expected_calls) self.assertEqual(sorted(expected_cfg), sorted(cfg)) - @mock.patch('cloudinit.config.cc_ntp.write_ntp_config_template') - @mock.patch('cloudinit.cloud.Cloud.get_template_filename') - @mock.patch('cloudinit.config.cc_ntp.subp.which') - def test_ntp_custom_client_overrides_installed_clients(self, m_which, - m_tmpfn, m_write): - """Test user client is installed despite other clients present """ - client = 'ntpdate' - cfg = {'ntp': {'ntp_client': client}} + @mock.patch("cloudinit.config.cc_ntp.write_ntp_config_template") + @mock.patch("cloudinit.cloud.Cloud.get_template_filename") + @mock.patch("cloudinit.config.cc_ntp.subp.which") + def test_ntp_custom_client_overrides_installed_clients( + self, m_which, m_tmpfn, m_write + ): + """Test user client is installed despite other clients present""" + client = "ntpdate" + cfg = {"ntp": {"ntp_client": client}} for distro in cc_ntp.distros: # client is not installed m_which.side_effect = iter([None]) mycloud = self._get_cloud(distro) - with mock.patch.object(mycloud.distro, - 'install_packages') as m_install: - cc_ntp.handle('notimportant', cfg, mycloud, None, None) + with mock.patch.object( + mycloud.distro, "install_packages" + ) as m_install: + cc_ntp.handle("notimportant", cfg, mycloud, None, None) m_install.assert_called_with([client]) m_which.assert_called_with(client) - @mock.patch('cloudinit.config.cc_ntp.subp.which') + @mock.patch("cloudinit.config.cc_ntp.subp.which") def test_ntp_system_config_overrides_distro_builtin_clients(self, m_which): """Test distro system_config overrides builtin preferred ntp clients""" - system_client = 'chrony' - sys_cfg = {'ntp_client': system_client} + system_client = "chrony" + sys_cfg = {"ntp_client": system_client} # no clients installed m_which.return_value = None for distro in cc_ntp.distros: @@ -638,12 +707,12 @@ class TestNtp(FilesystemMockingTestCase): self.assertEqual(sorted(expected_cfg), sorted(result)) m_which.assert_has_calls([]) - @mock.patch('cloudinit.config.cc_ntp.subp.which') + @mock.patch("cloudinit.config.cc_ntp.subp.which") def test_ntp_user_config_overrides_system_cfg(self, m_which): """Test user-data overrides system_config ntp_client""" - system_client = 'chrony' - sys_cfg = {'ntp_client': system_client} - user_client = 'systemd-timesyncd' + system_client = "chrony" + sys_cfg = {"ntp_client": system_client} + user_client = "systemd-timesyncd" # no clients installed m_which.return_value = None for distro in cc_ntp.distros: @@ -654,112 +723,145 @@ class TestNtp(FilesystemMockingTestCase): self.assertEqual(sorted(expected_cfg), sorted(result)) m_which.assert_has_calls([]) - @mock.patch('cloudinit.config.cc_ntp.install_ntp_client') + @mock.patch("cloudinit.config.cc_ntp.install_ntp_client") def test_ntp_user_provided_config_with_template(self, m_install): - custom = r'\n#MyCustomTemplate' + custom = r"\n#MyCustomTemplate" user_template = NTP_TEMPLATE + custom - confpath = os.path.join(self.new_root, 'etc/myntp/myntp.conf') + confpath = os.path.join(self.new_root, "etc/myntp/myntp.conf") cfg = { - 'ntp': { - 'pools': ['mypool.org'], - 'ntp_client': 'myntpd', - 'config': { - 'check_exe': 'myntpd', - 'confpath': confpath, - 'packages': ['myntp'], - 'service_name': 'myntp', - 'template': user_template, - } + "ntp": { + "pools": ["mypool.org"], + "ntp_client": "myntpd", + "config": { + "check_exe": "myntpd", + "confpath": confpath, + "packages": ["myntp"], + 
"service_name": "myntp", + "template": user_template, + }, } } for distro in cc_ntp.distros: mycloud = self._get_cloud(distro) - mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR' + mock_path = "cloudinit.config.cc_ntp.temp_utils._TMPDIR" with mock.patch(mock_path, self.new_root): - cc_ntp.handle('notimportant', cfg, mycloud, None, None) + cc_ntp.handle("notimportant", cfg, mycloud, None, None) self.assertEqual( "servers []\npools ['mypool.org']\n%s" % custom, - util.load_file(confpath)) - - @mock.patch('cloudinit.config.cc_ntp.supplemental_schema_validation') - @mock.patch('cloudinit.config.cc_ntp.install_ntp_client') - @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') - def test_ntp_user_provided_config_template_only(self, m_select, m_install, - m_schema): + util.load_file(confpath), + ) + + @mock.patch("cloudinit.config.cc_ntp.supplemental_schema_validation") + @mock.patch("cloudinit.config.cc_ntp.install_ntp_client") + @mock.patch("cloudinit.config.cc_ntp.select_ntp_client") + def test_ntp_user_provided_config_template_only( + self, m_select, m_install, m_schema + ): """Test custom template for default client""" - custom = r'\n#MyCustomTemplate' + custom = r"\n#MyCustomTemplate" user_template = NTP_TEMPLATE + custom - client = 'chrony' + client = "chrony" cfg = { - 'pools': ['mypool.org'], - 'ntp_client': client, - 'config': { - 'template': user_template, - } + "pools": ["mypool.org"], + "ntp_client": client, + "config": { + "template": user_template, + }, } expected_merged_cfg = { - 'check_exe': 'chronyd', - 'confpath': '{tmpdir}/client.conf'.format(tmpdir=self.new_root), - 'template_name': 'client.conf', 'template': user_template, - 'service_name': 'chrony', 'packages': ['chrony']} + "check_exe": "chronyd", + "confpath": "{tmpdir}/client.conf".format(tmpdir=self.new_root), + "template_name": "client.conf", + "template": user_template, + "service_name": "chrony", + "packages": ["chrony"], + } for distro in cc_ntp.distros: mycloud = self._get_cloud(distro) - ntpconfig = self._mock_ntp_client_config(client=client, - distro=distro) - confpath = ntpconfig['confpath'] + ntpconfig = self._mock_ntp_client_config( + client=client, distro=distro + ) + confpath = ntpconfig["confpath"] m_select.return_value = ntpconfig - mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR' + mock_path = "cloudinit.config.cc_ntp.temp_utils._TMPDIR" with mock.patch(mock_path, self.new_root): - cc_ntp.handle('notimportant', - {'ntp': cfg}, mycloud, None, None) + cc_ntp.handle( + "notimportant", {"ntp": cfg}, mycloud, None, None + ) self.assertEqual( "servers []\npools ['mypool.org']\n%s" % custom, - util.load_file(confpath)) + util.load_file(confpath), + ) m_schema.assert_called_with(expected_merged_cfg) class TestSupplementalSchemaValidation(CiTestCase): - def test_error_on_missing_keys(self): """ValueError raised reporting any missing required ntp:config keys""" cfg = {} - match = (r'Invalid ntp configuration:\\nMissing required ntp:config' - ' keys: check_exe, confpath, packages, service_name') + match = ( + r"Invalid ntp configuration:\\nMissing required ntp:config" + " keys: check_exe, confpath, packages, service_name" + ) with self.assertRaisesRegex(ValueError, match): cc_ntp.supplemental_schema_validation(cfg) def test_error_requiring_either_template_or_template_name(self): """ValueError raised if both template not template_name are None.""" - cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '', - 'template': None, 'template_name': None, 'packages': []} - match = (r'Invalid 
ntp configuration:\\nEither ntp:config:template' - ' or ntp:config:template_name values are required') + cfg = { + "confpath": "someconf", + "check_exe": "", + "service_name": "", + "template": None, + "template_name": None, + "packages": [], + } + match = ( + r"Invalid ntp configuration:\\nEither ntp:config:template" + " or ntp:config:template_name values are required" + ) with self.assertRaisesRegex(ValueError, match): cc_ntp.supplemental_schema_validation(cfg) def test_error_on_non_list_values(self): """ValueError raised when packages is not of type list.""" - cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '', - 'template': 'asdf', 'template_name': None, 'packages': 'NOPE'} - match = (r'Invalid ntp configuration:\\nExpected a list of required' - ' package names for ntp:config:packages. Found \\(NOPE\\)') + cfg = { + "confpath": "someconf", + "check_exe": "", + "service_name": "", + "template": "asdf", + "template_name": None, + "packages": "NOPE", + } + match = ( + r"Invalid ntp configuration:\\nExpected a list of required" + " package names for ntp:config:packages. Found \\(NOPE\\)" + ) with self.assertRaisesRegex(ValueError, match): cc_ntp.supplemental_schema_validation(cfg) def test_error_on_non_string_values(self): """ValueError raised for any values expected as string type.""" - cfg = {'confpath': 1, 'check_exe': 2, 'service_name': 3, - 'template': 4, 'template_name': 5, 'packages': []} + cfg = { + "confpath": 1, + "check_exe": 2, + "service_name": 3, + "template": 4, + "template_name": 5, + "packages": [], + } errors = [ - 'Expected a config file path ntp:config:confpath. Found (1)', - 'Expected a string type for ntp:config:check_exe. Found (2)', - 'Expected a string type for ntp:config:service_name. Found (3)', - 'Expected a string type for ntp:config:template. Found (4)', - 'Expected a string type for ntp:config:template_name. Found (5)'] + "Expected a config file path ntp:config:confpath. Found (1)", + "Expected a string type for ntp:config:check_exe. Found (2)", + "Expected a string type for ntp:config:service_name. Found (3)", + "Expected a string type for ntp:config:template. Found (4)", + "Expected a string type for ntp:config:template_name. 
Found (5)", + ] with self.assertRaises(ValueError) as context_mgr: cc_ntp.supplemental_schema_validation(cfg) error_msg = str(context_mgr.exception) for error in errors: self.assertIn(error, error_msg) + # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_power_state_change.py b/tests/unittests/config/test_cc_power_state_change.py index e699f424..47eb0d58 100644 --- a/tests/unittests/config/test_cc_power_state_change.py +++ b/tests/unittests/config/test_cc_power_state_change.py @@ -2,11 +2,8 @@ import sys +from cloudinit import distros, helpers from cloudinit.config import cc_power_state_change as psc - -from cloudinit import distros -from cloudinit import helpers - from tests.unittests import helpers as t_help from tests.unittests.helpers import mock @@ -14,9 +11,9 @@ from tests.unittests.helpers import mock class TestLoadPowerState(t_help.TestCase): def setUp(self): super(TestLoadPowerState, self).setUp() - cls = distros.fetch('ubuntu') + cls = distros.fetch("ubuntu") paths = helpers.Paths({}) - self.dist = cls('ubuntu', {}, paths) + self.dist = cls("ubuntu", {}, paths) def test_no_config(self): # completely empty config should mean do nothing @@ -25,85 +22,86 @@ class TestLoadPowerState(t_help.TestCase): def test_irrelevant_config(self): # no power_state field in config should return None for cmd - (cmd, _timeout, _condition) = psc.load_power_state({'foo': 'bar'}, - self.dist) + (cmd, _timeout, _condition) = psc.load_power_state( + {"foo": "bar"}, self.dist + ) self.assertIsNone(cmd) def test_invalid_mode(self): - cfg = {'power_state': {'mode': 'gibberish'}} + cfg = {"power_state": {"mode": "gibberish"}} self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist) - cfg = {'power_state': {'mode': ''}} + cfg = {"power_state": {"mode": ""}} self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist) def test_empty_mode(self): - cfg = {'power_state': {'message': 'goodbye'}} + cfg = {"power_state": {"message": "goodbye"}} self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist) def test_valid_modes(self): - cfg = {'power_state': {}} - for mode in ('halt', 'poweroff', 'reboot'): - cfg['power_state']['mode'] = mode + cfg = {"power_state": {}} + for mode in ("halt", "poweroff", "reboot"): + cfg["power_state"]["mode"] = mode check_lps_ret(psc.load_power_state(cfg, self.dist), mode=mode) def test_invalid_delay(self): - cfg = {'power_state': {'mode': 'poweroff', 'delay': 'goodbye'}} + cfg = {"power_state": {"mode": "poweroff", "delay": "goodbye"}} self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist) def test_valid_delay(self): - cfg = {'power_state': {'mode': 'poweroff', 'delay': ''}} + cfg = {"power_state": {"mode": "poweroff", "delay": ""}} for delay in ("now", "+1", "+30"): - cfg['power_state']['delay'] = delay + cfg["power_state"]["delay"] = delay check_lps_ret(psc.load_power_state(cfg, self.dist)) def test_message_present(self): - cfg = {'power_state': {'mode': 'poweroff', 'message': 'GOODBYE'}} + cfg = {"power_state": {"mode": "poweroff", "message": "GOODBYE"}} ret = psc.load_power_state(cfg, self.dist) check_lps_ret(psc.load_power_state(cfg, self.dist)) - self.assertIn(cfg['power_state']['message'], ret[0]) + self.assertIn(cfg["power_state"]["message"], ret[0]) def test_no_message(self): # if message is not present, then no argument should be passed for it - cfg = {'power_state': {'mode': 'poweroff'}} + cfg = {"power_state": {"mode": "poweroff"}} (cmd, _timeout, _condition) = psc.load_power_state(cfg, self.dist) self.assertNotIn("", cmd) 
check_lps_ret(psc.load_power_state(cfg, self.dist)) self.assertTrue(len(cmd) == 3) def test_condition_null_raises(self): - cfg = {'power_state': {'mode': 'poweroff', 'condition': None}} + cfg = {"power_state": {"mode": "poweroff", "condition": None}} self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist) def test_condition_default_is_true(self): - cfg = {'power_state': {'mode': 'poweroff'}} + cfg = {"power_state": {"mode": "poweroff"}} _cmd, _timeout, cond = psc.load_power_state(cfg, self.dist) self.assertEqual(cond, True) def test_freebsd_poweroff_uses_lowercase_p(self): - cls = distros.fetch('freebsd') + cls = distros.fetch("freebsd") paths = helpers.Paths({}) - freebsd = cls('freebsd', {}, paths) - cfg = {'power_state': {'mode': 'poweroff'}} + freebsd = cls("freebsd", {}, paths) + cfg = {"power_state": {"mode": "poweroff"}} ret = psc.load_power_state(cfg, freebsd) - self.assertIn('-p', ret[0]) + self.assertIn("-p", ret[0]) def test_alpine_delay(self): # alpine takes delay in seconds. - cls = distros.fetch('alpine') + cls = distros.fetch("alpine") paths = helpers.Paths({}) - alpine = cls('alpine', {}, paths) - cfg = {'power_state': {'mode': 'poweroff', 'delay': ''}} - for delay, value in (('now', 0), ("+1", 60), ("+30", 1800)): - cfg['power_state']['delay'] = delay + alpine = cls("alpine", {}, paths) + cfg = {"power_state": {"mode": "poweroff", "delay": ""}} + for delay, value in (("now", 0), ("+1", 60), ("+30", 1800)): + cfg["power_state"]["delay"] = delay ret = psc.load_power_state(cfg, alpine) - self.assertEqual('-d', ret[0][1]) + self.assertEqual("-d", ret[0][1]) self.assertEqual(str(value), ret[0][2]) class TestCheckCondition(t_help.TestCase): def cmd_with_exit(self, rc): - return([sys.executable, '-c', 'import sys; sys.exit(%s)' % rc]) + return [sys.executable, "-c", "import sys; sys.exit(%s)" % rc] def test_true_is_true(self): self.assertEqual(psc.check_condition(True), True) @@ -120,7 +118,8 @@ class TestCheckCondition(t_help.TestCase): def test_cmd_exit_nonzero_warns(self): mocklog = mock.Mock() self.assertEqual( - psc.check_condition(self.cmd_with_exit(2), mocklog), False) + psc.check_condition(self.cmd_with_exit(2), mocklog), False + ) self.assertEqual(mocklog.warning.call_count, 1) @@ -133,14 +132,14 @@ def check_lps_ret(psc_return, mode=None): timeout = psc_return[1] condition = psc_return[2] - if 'shutdown' not in psc_return[0][0]: + if "shutdown" not in psc_return[0][0]: errs.append("string 'shutdown' not in cmd") if condition is None: errs.append("condition was not returned") if mode is not None: - opt = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}[mode] + opt = {"halt": "-H", "poweroff": "-P", "reboot": "-r"}[mode] if opt not in psc_return[0]: errs.append("opt '%s' not in cmd: %s" % (opt, cmd)) @@ -154,6 +153,7 @@ def check_lps_ret(psc_return, mode=None): if len(errs): lines = ["Errors in result: %s" % str(psc_return)] + errs - raise Exception('\n'.join(lines)) + raise Exception("\n".join(lines)) + # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_puppet.py b/tests/unittests/config/test_cc_puppet.py index 1f67dc4c..2c4481da 100644 --- a/tests/unittests/config/test_cc_puppet.py +++ b/tests/unittests/config/test_cc_puppet.py @@ -2,58 +2,71 @@ import logging import textwrap -from cloudinit.config import cc_puppet from cloudinit import util +from cloudinit.config import cc_puppet from tests.unittests.helpers import CiTestCase, HttprettyTestCase, mock - from tests.unittests.util import get_cloud LOG = logging.getLogger(__name__) 
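A note on the pattern exercised by the TestAutostartPuppet cases that follow: _autostart_puppet probes for whichever service-management entry point exists on the system and enables puppet through it. A simplified sketch of that probe-and-dispatch flow, assuming the same three probe paths and commands the tests fake (this is an illustration, not the module's actual code, and the probe ordering is an assumption the tests do not pin down):

import os
import subprocess

def autostart_puppet():
    """Enable puppet at boot via whichever mechanism is present."""
    if os.path.exists("/etc/default/puppet"):
        # Debian-style default file: flip START=... to yes in place.
        cmd = ["sed", "-i", "-e", "s/^START=.*/START=yes/",
               "/etc/default/puppet"]
    elif os.path.exists("/bin/systemctl"):
        # systemd hosts enable the unit directly.
        cmd = ["/bin/systemctl", "enable", "puppet.service"]
    elif os.path.exists("/sbin/chkconfig"):
        # SysV/chkconfig hosts toggle the service on.
        cmd = ["/sbin/chkconfig", "puppet", "on"]
    else:
        return  # no known init tooling; nothing to enable
    subprocess.run(cmd, check=True)

Each test below fakes exactly one of those paths as existing and asserts the corresponding subp call, which is why os.path.exists is mocked with a side_effect rather than a fixed return value.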
-@mock.patch('cloudinit.config.cc_puppet.subp.subp') -@mock.patch('cloudinit.config.cc_puppet.os') +@mock.patch("cloudinit.config.cc_puppet.subp.subp") +@mock.patch("cloudinit.config.cc_puppet.os") class TestAutostartPuppet(CiTestCase): - def test_wb_autostart_puppet_updates_puppet_default(self, m_os, m_subp): """Update /etc/default/puppet to autostart if it exists.""" def _fake_exists(path): - return path == '/etc/default/puppet' + return path == "/etc/default/puppet" m_os.path.exists.side_effect = _fake_exists cc_puppet._autostart_puppet(LOG) self.assertEqual( - [mock.call(['sed', '-i', '-e', 's/^START=.*/START=yes/', - '/etc/default/puppet'], capture=False)], - m_subp.call_args_list) + [ + mock.call( + [ + "sed", + "-i", + "-e", + "s/^START=.*/START=yes/", + "/etc/default/puppet", + ], + capture=False, + ) + ], + m_subp.call_args_list, + ) def test_wb_autostart_pupppet_enables_puppet_systemctl(self, m_os, m_subp): """If systemctl is present, enable puppet via systemctl.""" def _fake_exists(path): - return path == '/bin/systemctl' + return path == "/bin/systemctl" m_os.path.exists.side_effect = _fake_exists cc_puppet._autostart_puppet(LOG) - expected_calls = [mock.call( - ['/bin/systemctl', 'enable', 'puppet.service'], capture=False)] + expected_calls = [ + mock.call( + ["/bin/systemctl", "enable", "puppet.service"], capture=False + ) + ] self.assertEqual(expected_calls, m_subp.call_args_list) def test_wb_autostart_pupppet_enables_puppet_chkconfig(self, m_os, m_subp): """If chkconfig is present, enable puppet via checkcfg.""" def _fake_exists(path): - return path == '/sbin/chkconfig' + return path == "/sbin/chkconfig" m_os.path.exists.side_effect = _fake_exists cc_puppet._autostart_puppet(LOG) - expected_calls = [mock.call( - ['/sbin/chkconfig', 'puppet', 'on'], capture=False)] + expected_calls = [ + mock.call(["/sbin/chkconfig", "puppet", "on"], capture=False) + ] self.assertEqual(expected_calls, m_subp.call_args_list) -@mock.patch('cloudinit.config.cc_puppet._autostart_puppet') +@mock.patch("cloudinit.config.cc_puppet._autostart_puppet") class TestPuppetHandle(CiTestCase): with_logs = True @@ -61,145 +74,164 @@ class TestPuppetHandle(CiTestCase): def setUp(self): super(TestPuppetHandle, self).setUp() self.new_root = self.tmp_dir() - self.conf = self.tmp_path('puppet.conf') - self.csr_attributes_path = self.tmp_path( - 'csr_attributes.yaml') + self.conf = self.tmp_path("puppet.conf") + self.csr_attributes_path = self.tmp_path("csr_attributes.yaml") self.cloud = get_cloud() def test_skips_missing_puppet_key_in_cloudconfig(self, m_auto): """Cloud-config containing no 'puppet' key is skipped.""" cfg = {} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - self.assertIn( - "no 'puppet' configuration found", self.logs.getvalue()) + cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None) + self.assertIn("no 'puppet' configuration found", self.logs.getvalue()) self.assertEqual(0, m_auto.call_count) - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", "")) def test_puppet_config_starts_puppet_service(self, m_subp, m_auto): """Cloud-config 'puppet' configuration starts puppet.""" - cfg = {'puppet': {'install': False}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) + cfg = {"puppet": {"install": False}} + cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None) self.assertEqual(1, m_auto.call_count) self.assertIn( - [mock.call(['service', 'puppet', 'start'], 
capture=False)], - m_subp.call_args_list) + [mock.call(["service", "puppet", "start"], capture=False)], + m_subp.call_args_list, + ) - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", "")) def test_empty_puppet_config_installs_puppet(self, m_subp, m_auto): """Cloud-config empty 'puppet' configuration installs latest puppet.""" self.cloud.distro = mock.MagicMock() - cfg = {'puppet': {}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) + cfg = {"puppet": {}} + cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None) self.assertEqual( - [mock.call(('puppet', None))], - self.cloud.distro.install_packages.call_args_list) + [mock.call(("puppet", None))], + self.cloud.distro.install_packages.call_args_list, + ) - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", "")) def test_puppet_config_installs_puppet_on_true(self, m_subp, _): """Cloud-config with 'puppet' key installs when 'install' is True.""" self.cloud.distro = mock.MagicMock() - cfg = {'puppet': {'install': True}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) + cfg = {"puppet": {"install": True}} + cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None) self.assertEqual( - [mock.call(('puppet', None))], - self.cloud.distro.install_packages.call_args_list) + [mock.call(("puppet", None))], + self.cloud.distro.install_packages.call_args_list, + ) - @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True) - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + @mock.patch("cloudinit.config.cc_puppet.install_puppet_aio", autospec=True) + @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", "")) def test_puppet_config_installs_puppet_aio(self, m_subp, m_aio, _): """Cloud-config with 'puppet' key installs when 'install_type' is 'aio'.""" self.cloud.distro = mock.MagicMock() - cfg = {'puppet': {'install': True, 'install_type': 'aio'}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - m_aio.assert_called_with( - cc_puppet.AIO_INSTALL_URL, - None, None, True) - - @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True) - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_config_installs_puppet_aio_with_version(self, - m_subp, m_aio, _): + cfg = {"puppet": {"install": True, "install_type": "aio"}} + cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None) + m_aio.assert_called_with(cc_puppet.AIO_INSTALL_URL, None, None, True) + + @mock.patch("cloudinit.config.cc_puppet.install_puppet_aio", autospec=True) + @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", "")) + def test_puppet_config_installs_puppet_aio_with_version( + self, m_subp, m_aio, _ + ): """Cloud-config with 'puppet' key installs when 'install_type' is 'aio' and 'version' is specified.""" self.cloud.distro = mock.MagicMock() - cfg = {'puppet': {'install': True, - 'version': '6.24.0', 'install_type': 'aio'}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) + cfg = { + "puppet": { + "install": True, + "version": "6.24.0", + "install_type": "aio", + } + } + cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None) m_aio.assert_called_with( - cc_puppet.AIO_INSTALL_URL, - '6.24.0', None, True) - - @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True) - 
@mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_config_installs_puppet_aio_with_collection(self, - m_subp, - m_aio, _): + cc_puppet.AIO_INSTALL_URL, "6.24.0", None, True + ) + + @mock.patch("cloudinit.config.cc_puppet.install_puppet_aio", autospec=True) + @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", "")) + def test_puppet_config_installs_puppet_aio_with_collection( + self, m_subp, m_aio, _ + ): """Cloud-config with 'puppet' key installs when 'install_type' is 'aio' and 'collection' is specified.""" self.cloud.distro = mock.MagicMock() - cfg = {'puppet': {'install': True, - 'collection': 'puppet6', 'install_type': 'aio'}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) + cfg = { + "puppet": { + "install": True, + "collection": "puppet6", + "install_type": "aio", + } + } + cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None) m_aio.assert_called_with( - cc_puppet.AIO_INSTALL_URL, - None, 'puppet6', True) - - @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True) - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_config_installs_puppet_aio_with_custom_url(self, - m_subp, - m_aio, _): + cc_puppet.AIO_INSTALL_URL, None, "puppet6", True + ) + + @mock.patch("cloudinit.config.cc_puppet.install_puppet_aio", autospec=True) + @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", "")) + def test_puppet_config_installs_puppet_aio_with_custom_url( + self, m_subp, m_aio, _ + ): """Cloud-config with 'puppet' key installs when 'install_type' is 'aio' and 'aio_install_url' is specified.""" self.cloud.distro = mock.MagicMock() - cfg = {'puppet': - {'install': True, - 'aio_install_url': 'http://test.url/path/to/script.sh', - 'install_type': 'aio'}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) + cfg = { + "puppet": { + "install": True, + "aio_install_url": "http://test.url/path/to/script.sh", + "install_type": "aio", + } + } + cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None) m_aio.assert_called_with( - 'http://test.url/path/to/script.sh', None, None, True) - - @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True) - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_config_installs_puppet_aio_without_cleanup(self, - m_subp, - m_aio, _): + "http://test.url/path/to/script.sh", None, None, True + ) + + @mock.patch("cloudinit.config.cc_puppet.install_puppet_aio", autospec=True) + @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", "")) + def test_puppet_config_installs_puppet_aio_without_cleanup( + self, m_subp, m_aio, _ + ): """Cloud-config with 'puppet' key installs when 'install_type' is 'aio' and no cleanup.""" self.cloud.distro = mock.MagicMock() - cfg = {'puppet': {'install': True, - 'cleanup': False, 'install_type': 'aio'}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - m_aio.assert_called_with( - cc_puppet.AIO_INSTALL_URL, - None, None, False) + cfg = { + "puppet": { + "install": True, + "cleanup": False, + "install_type": "aio", + } + } + cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None) + m_aio.assert_called_with(cc_puppet.AIO_INSTALL_URL, None, None, False) - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", "")) def test_puppet_config_installs_puppet_version(self, m_subp, _): """Cloud-config 'puppet' 
configuration can specify a version.""" self.cloud.distro = mock.MagicMock() - cfg = {'puppet': {'version': '3.8'}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) + cfg = {"puppet": {"version": "3.8"}} + cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None) self.assertEqual( - [mock.call(('puppet', '3.8'))], - self.cloud.distro.install_packages.call_args_list) - - @mock.patch('cloudinit.config.cc_puppet.get_config_value') - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_config_updates_puppet_conf(self, - m_subp, m_default, m_auto): + [mock.call(("puppet", "3.8"))], + self.cloud.distro.install_packages.call_args_list, + ) + + @mock.patch("cloudinit.config.cc_puppet.get_config_value") + @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", "")) + def test_puppet_config_updates_puppet_conf( + self, m_subp, m_default, m_auto + ): """When 'conf' is provided update values in PUPPET_CONF_PATH.""" def _fake_get_config_value(puppet_bin, setting): @@ -208,22 +240,24 @@ class TestPuppetHandle(CiTestCase): m_default.side_effect = _fake_get_config_value cfg = { - 'puppet': { - 'conf': {'agent': {'server': 'puppetserver.example.org'}}}} - util.write_file( - self.conf, '[agent]\nserver = origpuppet\nother = 3') + "puppet": { + "conf": {"agent": {"server": "puppetserver.example.org"}} + } + } + util.write_file(self.conf, "[agent]\nserver = origpuppet\nother = 3") self.cloud.distro = mock.MagicMock() - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) + cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None) content = util.load_file(self.conf) - expected = '[agent]\nserver = puppetserver.example.org\nother = 3\n\n' + expected = "[agent]\nserver = puppetserver.example.org\nother = 3\n\n" self.assertEqual(expected, content) - @mock.patch('cloudinit.config.cc_puppet.get_config_value') - @mock.patch('cloudinit.config.cc_puppet.subp.subp') - def test_puppet_writes_csr_attributes_file(self, - m_subp, m_default, m_auto): + @mock.patch("cloudinit.config.cc_puppet.get_config_value") + @mock.patch("cloudinit.config.cc_puppet.subp.subp") + def test_puppet_writes_csr_attributes_file( + self, m_subp, m_default, m_auto + ): """When csr_attributes is provided - creates file in PUPPET_CSR_ATTRIBUTES_PATH.""" + creates file in PUPPET_CSR_ATTRIBUTES_PATH.""" def _fake_get_config_value(puppet_bin, setting): return self.csr_attributes_path @@ -232,105 +266,131 @@ class TestPuppetHandle(CiTestCase): self.cloud.distro = mock.MagicMock() cfg = { - 'puppet': { - 'csr_attributes': { - 'custom_attributes': { - '1.2.840.113549.1.9.7': - '342thbjkt82094y0uthhor289jnqthpc2290' + "puppet": { + "csr_attributes": { + "custom_attributes": { + "1.2.840.113549.1.9.7": ( + "342thbjkt82094y0uthhor289jnqthpc2290" + ) + }, + "extension_requests": { + "pp_uuid": "ED803750-E3C7-44F5-BB08-41A04433FE2E", + "pp_image_name": "my_ami_image", + "pp_preshared_key": ( + "342thbjkt82094y0uthhor289jnqthpc2290" + ), }, - 'extension_requests': { - 'pp_uuid': 'ED803750-E3C7-44F5-BB08-41A04433FE2E', - 'pp_image_name': 'my_ami_image', - 'pp_preshared_key': - '342thbjkt82094y0uthhor289jnqthpc2290' - } } } } - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) + cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None) content = util.load_file(self.csr_attributes_path) - expected = textwrap.dedent("""\ + expected = textwrap.dedent( + """\ custom_attributes: 1.2.840.113549.1.9.7: 342thbjkt82094y0uthhor289jnqthpc2290 extension_requests: pp_image_name: 
my_ami_image pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290 pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E - """) + """ + ) self.assertEqual(expected, content) - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", "")) def test_puppet_runs_puppet_if_requested(self, m_subp, m_auto): """Run puppet with default args if 'exec' is set to True.""" - cfg = {'puppet': {'exec': True}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) + cfg = {"puppet": {"exec": True}} + cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None) self.assertEqual(1, m_auto.call_count) self.assertIn( - [mock.call(['puppet', 'agent', '--test'], capture=False)], - m_subp.call_args_list) + [mock.call(["puppet", "agent", "--test"], capture=False)], + m_subp.call_args_list, + ) - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", "")) def test_puppet_starts_puppetd(self, m_subp, m_auto): """Run puppet with default args if 'exec' is set to True.""" - cfg = {'puppet': {}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) + cfg = {"puppet": {}} + cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None) self.assertEqual(1, m_auto.call_count) self.assertIn( - [mock.call(['service', 'puppet', 'start'], capture=False)], - m_subp.call_args_list) + [mock.call(["service", "puppet", "start"], capture=False)], + m_subp.call_args_list, + ) - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", "")) def test_puppet_skips_puppetd(self, m_subp, m_auto): """Run puppet with default args if 'exec' is set to True.""" - cfg = {'puppet': {'start_service': False}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) + cfg = {"puppet": {"start_service": False}} + cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None) self.assertEqual(0, m_auto.call_count) self.assertNotIn( - [mock.call(['service', 'puppet', 'start'], capture=False)], - m_subp.call_args_list) - - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_runs_puppet_with_args_list_if_requested(self, - m_subp, m_auto): + [mock.call(["service", "puppet", "start"], capture=False)], + m_subp.call_args_list, + ) + + @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", "")) + def test_puppet_runs_puppet_with_args_list_if_requested( + self, m_subp, m_auto + ): """Run puppet with 'exec_args' list if 'exec' is set to True.""" - cfg = {'puppet': {'exec': True, 'exec_args': [ - '--onetime', '--detailed-exitcodes']}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) + cfg = { + "puppet": { + "exec": True, + "exec_args": ["--onetime", "--detailed-exitcodes"], + } + } + cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None) self.assertEqual(1, m_auto.call_count) self.assertIn( - [mock.call( - ['puppet', 'agent', '--onetime', '--detailed-exitcodes'], - capture=False)], - m_subp.call_args_list) - - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_runs_puppet_with_args_string_if_requested(self, - m_subp, m_auto): + [ + mock.call( + ["puppet", "agent", "--onetime", "--detailed-exitcodes"], + capture=False, + ) + ], + m_subp.call_args_list, + ) + + @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", "")) + def 
test_puppet_runs_puppet_with_args_string_if_requested( + self, m_subp, m_auto + ): """Run puppet with 'exec_args' string if 'exec' is set to True.""" - cfg = {'puppet': {'exec': True, - 'exec_args': '--onetime --detailed-exitcodes'}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) + cfg = { + "puppet": { + "exec": True, + "exec_args": "--onetime --detailed-exitcodes", + } + } + cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None) self.assertEqual(1, m_auto.call_count) self.assertIn( - [mock.call( - ['puppet', 'agent', '--onetime', '--detailed-exitcodes'], - capture=False)], - m_subp.call_args_list) + [ + mock.call( + ["puppet", "agent", "--onetime", "--detailed-exitcodes"], + capture=False, + ) + ], + m_subp.call_args_list, + ) URL_MOCK = mock.Mock() URL_MOCK.contents = b'#!/bin/bash\necho "Hi Mom"' -@mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=(None, None)) +@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=(None, None)) @mock.patch( - 'cloudinit.config.cc_puppet.url_helper.readurl', - return_value=URL_MOCK, autospec=True, + "cloudinit.config.cc_puppet.url_helper.readurl", + return_value=URL_MOCK, + autospec=True, ) class TestInstallPuppetAio(HttprettyTestCase): def test_install_with_default_arguments(self, m_readurl, m_subp): @@ -338,43 +398,53 @@ class TestInstallPuppetAio(HttprettyTestCase): cc_puppet.install_puppet_aio() self.assertEqual( - [mock.call([mock.ANY, '--cleanup'], capture=False)], - m_subp.call_args_list) + [mock.call([mock.ANY, "--cleanup"], capture=False)], + m_subp.call_args_list, + ) def test_install_with_custom_url(self, m_readurl, m_subp): """Install AIO from custom URL""" - cc_puppet.install_puppet_aio('http://custom.url/path/to/script.sh') + cc_puppet.install_puppet_aio("http://custom.url/path/to/script.sh") m_readurl.assert_called_with( - url='http://custom.url/path/to/script.sh', - retries=5) + url="http://custom.url/path/to/script.sh", retries=5 + ) self.assertEqual( - [mock.call([mock.ANY, '--cleanup'], capture=False)], - m_subp.call_args_list) + [mock.call([mock.ANY, "--cleanup"], capture=False)], + m_subp.call_args_list, + ) def test_install_with_version(self, m_readurl, m_subp): """Install AIO with specific version""" - cc_puppet.install_puppet_aio(cc_puppet.AIO_INSTALL_URL, '7.6.0') + cc_puppet.install_puppet_aio(cc_puppet.AIO_INSTALL_URL, "7.6.0") self.assertEqual( - [mock.call([mock.ANY, '-v', '7.6.0', '--cleanup'], capture=False)], - m_subp.call_args_list) + [mock.call([mock.ANY, "-v", "7.6.0", "--cleanup"], capture=False)], + m_subp.call_args_list, + ) def test_install_with_collection(self, m_readurl, m_subp): """Install AIO with specific collection""" cc_puppet.install_puppet_aio( - cc_puppet.AIO_INSTALL_URL, None, 'puppet6-nightly') + cc_puppet.AIO_INSTALL_URL, None, "puppet6-nightly" + ) self.assertEqual( - [mock.call([mock.ANY, '-c', 'puppet6-nightly', '--cleanup'], - capture=False)], - m_subp.call_args_list) + [ + mock.call( + [mock.ANY, "-c", "puppet6-nightly", "--cleanup"], + capture=False, + ) + ], + m_subp.call_args_list, + ) def test_install_with_no_cleanup(self, m_readurl, m_subp): """Install AIO with no cleanup""" cc_puppet.install_puppet_aio( - cc_puppet.AIO_INSTALL_URL, None, None, False) + cc_puppet.AIO_INSTALL_URL, None, None, False + ) self.assertEqual( - [mock.call([mock.ANY], capture=False)], - m_subp.call_args_list) + [mock.call([mock.ANY], capture=False)], m_subp.call_args_list + ) diff --git a/tests/unittests/config/test_cc_refresh_rmc_and_interface.py 
b/tests/unittests/config/test_cc_refresh_rmc_and_interface.py index 522de23d..e038f814 100644 --- a/tests/unittests/config/test_cc_refresh_rmc_and_interface.py +++ b/tests/unittests/config/test_cc_refresh_rmc_and_interface.py @@ -1,51 +1,83 @@ -from cloudinit.config import cc_refresh_rmc_and_interface as ccrmci +import logging +from textwrap import dedent from cloudinit import util - +from cloudinit.config import cc_refresh_rmc_and_interface as ccrmci from tests.unittests import helpers as t_help from tests.unittests.helpers import mock -from textwrap import dedent -import logging - LOG = logging.getLogger(__name__) MPATH = "cloudinit.config.cc_refresh_rmc_and_interface" NET_INFO = { - 'lo': {'ipv4': [{'ip': '127.0.0.1', - 'bcast': '', 'mask': '255.0.0.0', - 'scope': 'host'}], - 'ipv6': [{'ip': '::1/128', - 'scope6': 'host'}], 'hwaddr': '', - 'up': 'True'}, - 'env2': {'ipv4': [{'ip': '8.0.0.19', - 'bcast': '8.0.0.255', 'mask': '255.255.255.0', - 'scope': 'global'}], - 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8220/64', - 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:20', - 'up': 'True'}, - 'env3': {'ipv4': [{'ip': '90.0.0.14', - 'bcast': '90.0.0.255', 'mask': '255.255.255.0', - 'scope': 'global'}], - 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8221/64', - 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:21', - 'up': 'True'}, - 'env4': {'ipv4': [{'ip': '9.114.23.7', - 'bcast': '9.114.23.255', 'mask': '255.255.255.0', - 'scope': 'global'}], - 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8222/64', - 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:22', - 'up': 'True'}, - 'env5': {'ipv4': [], - 'ipv6': [{'ip': 'fe80::9c26:c3ff:fea4:62c8/64', - 'scope6': 'link'}], 'hwaddr': '42:20:86:df:fa:4c', - 'up': 'True'}} + "lo": { + "ipv4": [ + { + "ip": "127.0.0.1", + "bcast": "", + "mask": "255.0.0.0", + "scope": "host", + } + ], + "ipv6": [{"ip": "::1/128", "scope6": "host"}], + "hwaddr": "", + "up": "True", + }, + "env2": { + "ipv4": [ + { + "ip": "8.0.0.19", + "bcast": "8.0.0.255", + "mask": "255.255.255.0", + "scope": "global", + } + ], + "ipv6": [{"ip": "fe80::f896:c2ff:fe81:8220/64", "scope6": "link"}], + "hwaddr": "fa:96:c2:81:82:20", + "up": "True", + }, + "env3": { + "ipv4": [ + { + "ip": "90.0.0.14", + "bcast": "90.0.0.255", + "mask": "255.255.255.0", + "scope": "global", + } + ], + "ipv6": [{"ip": "fe80::f896:c2ff:fe81:8221/64", "scope6": "link"}], + "hwaddr": "fa:96:c2:81:82:21", + "up": "True", + }, + "env4": { + "ipv4": [ + { + "ip": "9.114.23.7", + "bcast": "9.114.23.255", + "mask": "255.255.255.0", + "scope": "global", + } + ], + "ipv6": [{"ip": "fe80::f896:c2ff:fe81:8222/64", "scope6": "link"}], + "hwaddr": "fa:96:c2:81:82:22", + "up": "True", + }, + "env5": { + "ipv4": [], + "ipv6": [{"ip": "fe80::9c26:c3ff:fea4:62c8/64", "scope6": "link"}], + "hwaddr": "42:20:86:df:fa:4c", + "up": "True", + }, +} class TestRsctNodeFile(t_help.CiTestCase): def test_disable_ipv6_interface(self): """test parsing of iface files.""" fname = self.tmp_path("iface-eth5") - util.write_file(fname, dedent("""\ + util.write_file( + fname, + dedent( + """\ BOOTPROTO=static DEVICE=eth5 HWADDR=42:20:86:df:fa:4c @@ -57,10 +89,14 @@ class TestRsctNodeFile(t_help.CiTestCase): STARTMODE=auto TYPE=Ethernet USERCTL=no - """)) + """ + ), + ) ccrmci.disable_ipv6(fname) - self.assertEqual(dedent("""\ + self.assertEqual( + dedent( + """\ BOOTPROTO=static DEVICE=eth5 HWADDR=42:20:86:df:fa:4c @@ -69,41 +105,53 @@ class TestRsctNodeFile(t_help.CiTestCase): TYPE=Ethernet USERCTL=no NM_CONTROLLED=no - """), util.load_file(fname)) + """ + ), + 
util.load_file(fname), + ) - @mock.patch(MPATH + '.refresh_rmc') - @mock.patch(MPATH + '.restart_network_manager') - @mock.patch(MPATH + '.disable_ipv6') - @mock.patch(MPATH + '.refresh_ipv6') - @mock.patch(MPATH + '.netinfo.netdev_info') - @mock.patch(MPATH + '.subp.which') - def test_handle(self, m_refresh_rmc, - m_netdev_info, m_refresh_ipv6, m_disable_ipv6, - m_restart_nm, m_which): + @mock.patch(MPATH + ".refresh_rmc") + @mock.patch(MPATH + ".restart_network_manager") + @mock.patch(MPATH + ".disable_ipv6") + @mock.patch(MPATH + ".refresh_ipv6") + @mock.patch(MPATH + ".netinfo.netdev_info") + @mock.patch(MPATH + ".subp.which") + def test_handle( + self, + m_refresh_rmc, + m_netdev_info, + m_refresh_ipv6, + m_disable_ipv6, + m_restart_nm, + m_which, + ): """Basic test of handle.""" m_netdev_info.return_value = NET_INFO - m_which.return_value = '/opt/rsct/bin/rmcctrl' - ccrmci.handle( - "refresh_rmc_and_interface", None, None, None, None) + m_which.return_value = "/opt/rsct/bin/rmcctrl" + ccrmci.handle("refresh_rmc_and_interface", None, None, None, None) self.assertEqual(1, m_netdev_info.call_count) - m_refresh_ipv6.assert_called_with('env5') + m_refresh_ipv6.assert_called_with("env5") m_disable_ipv6.assert_called_with( - '/etc/sysconfig/network-scripts/ifcfg-env5') + "/etc/sysconfig/network-scripts/ifcfg-env5" + ) self.assertEqual(1, m_restart_nm.call_count) self.assertEqual(1, m_refresh_rmc.call_count) - @mock.patch(MPATH + '.netinfo.netdev_info') + @mock.patch(MPATH + ".netinfo.netdev_info") def test_find_ipv6(self, m_netdev_info): """find_ipv6_ifaces parses netdev_info returning those with ipv6""" m_netdev_info.return_value = NET_INFO found = ccrmci.find_ipv6_ifaces() - self.assertEqual(['env5'], found) + self.assertEqual(["env5"], found) - @mock.patch(MPATH + '.subp.subp') + @mock.patch(MPATH + ".subp.subp") def test_refresh_ipv6(self, m_subp): """refresh_ipv6 should ip down and up the interface.""" iface = "myeth0" ccrmci.refresh_ipv6(iface) - m_subp.assert_has_calls([ - mock.call(['ip', 'link', 'set', iface, 'down']), - mock.call(['ip', 'link', 'set', iface, 'up'])]) + m_subp.assert_has_calls( + [ + mock.call(["ip", "link", "set", iface, "down"]), + mock.call(["ip", "link", "set", iface, "up"]), + ] + ) diff --git a/tests/unittests/config/test_cc_resizefs.py b/tests/unittests/config/test_cc_resizefs.py index 1f9e24da..228f1e45 100644 --- a/tests/unittests/config/test_cc_resizefs.py +++ b/tests/unittests/config/test_cc_resizefs.py @@ -1,16 +1,26 @@ # This file is part of cloud-init. See LICENSE file for license information. 
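The resizefs tests in this file assert an exact command tuple per filesystem type, so the dispatch they imply can be sketched as a small table of command builders. The names, signatures, and return values below are taken directly from the assertions in the tests; the one-line bodies and the RESIZERS table are an illustrative reconstruction, not the module's actual source:

def _resize_ext(mount_point, devpth):
    return ("resize2fs", devpth)          # ext grows via the device path

def _resize_xfs(mount_point, devpth):
    return ("xfs_growfs", mount_point)    # xfs grows via the mount point

def _resize_ufs(mount_point, devpth):
    return ("growfs", "-y", mount_point)  # -y auto-answers growfs prompts

def _resize_zfs(zpool, devpth):
    return ("zpool", "online", "-e", zpool, devpth)  # expand the pool vdev

# Hypothetical fs-type -> builder table; a caller would execute the
# returned argv (the tests capture it via the mocked do_resize).
RESIZERS = {
    "ext": _resize_ext,
    "xfs": _resize_xfs,
    "ufs": _resize_ufs,
    "zfs": _resize_zfs,
}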
-from cloudinit.config.cc_resizefs import ( - can_skip_resize, handle, maybe_get_writable_device_path, _resize_btrfs, - _resize_zfs, _resize_xfs, _resize_ext, _resize_ufs) - -from collections import namedtuple import logging +from collections import namedtuple +from cloudinit.config.cc_resizefs import ( + _resize_btrfs, + _resize_ext, + _resize_ufs, + _resize_xfs, + _resize_zfs, + can_skip_resize, + handle, + maybe_get_writable_device_path, +) from cloudinit.subp import ProcessExecutionError from tests.unittests.helpers import ( - CiTestCase, mock, skipUnlessJsonSchema, util, wrap_and_call) - + CiTestCase, + mock, + skipUnlessJsonSchema, + util, + wrap_and_call, +) LOG = logging.getLogger(__name__) @@ -22,32 +32,34 @@ class TestResizefs(CiTestCase): super(TestResizefs, self).setUp() self.name = "resizefs" - @mock.patch('cloudinit.subp.subp') + @mock.patch("cloudinit.subp.subp") def test_skip_ufs_resize(self, m_subp): fs_type = "ufs" resize_what = "/" devpth = "/dev/da0p2" - err = ("growfs: requested size 2.0GB is not larger than the " - "current filesystem size 2.0GB\n") + err = ( + "growfs: requested size 2.0GB is not larger than the " + "current filesystem size 2.0GB\n" + ) exception = ProcessExecutionError(stderr=err, exit_code=1) m_subp.side_effect = exception res = can_skip_resize(fs_type, resize_what, devpth) self.assertTrue(res) - @mock.patch('cloudinit.subp.subp') + @mock.patch("cloudinit.subp.subp") def test_cannot_skip_ufs_resize(self, m_subp): fs_type = "ufs" resize_what = "/" devpth = "/dev/da0p2" m_subp.return_value = ( - ("stdout: super-block backups (for fsck_ffs -b #) at:\n\n"), - ("growfs: no room to allocate last cylinder group; " - "leaving 364KB unused\n") + "stdout: super-block backups (for fsck_ffs -b #) at:\n\n", + "growfs: no room to allocate last cylinder group; " + "leaving 364KB unused\n", ) res = can_skip_resize(fs_type, resize_what, devpth) self.assertFalse(res) - @mock.patch('cloudinit.subp.subp') + @mock.patch("cloudinit.subp.subp") def test_cannot_skip_ufs_growfs_exception(self, m_subp): fs_type = "ufs" resize_what = "/" @@ -59,15 +71,16 @@ class TestResizefs(CiTestCase): can_skip_resize(fs_type, resize_what, devpth) def test_can_skip_resize_ext(self): - self.assertFalse(can_skip_resize('ext', '/', '/dev/sda1')) + self.assertFalse(can_skip_resize("ext", "/", "/dev/sda1")) def test_handle_noops_on_disabled(self): """The handle function logs when the configuration disables resize.""" - cfg = {'resize_rootfs': False} - handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[]) + cfg = {"resize_rootfs": False} + handle("cc_resizefs", cfg, _cloud=None, log=LOG, args=[]) self.assertIn( - 'DEBUG: Skipping module named cc_resizefs, resizing disabled\n', - self.logs.getvalue()) + "DEBUG: Skipping module named cc_resizefs, resizing disabled\n", + self.logs.getvalue(), + ) @skipUnlessJsonSchema() def test_handle_schema_validation_logs_invalid_resize_rootfs_value(self): @@ -75,164 +88,189 @@ class TestResizefs(CiTestCase): Invalid values for resize_rootfs result in disabling the module. 
""" - cfg = {'resize_rootfs': 'junk'} - handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[]) + cfg = {"resize_rootfs": "junk"} + handle("cc_resizefs", cfg, _cloud=None, log=LOG, args=[]) logs = self.logs.getvalue() self.assertIn( "WARNING: Invalid config:\nresize_rootfs: 'junk' is not one of" " [True, False, 'noblock']", - logs) + logs, + ) self.assertIn( - 'DEBUG: Skipping module named cc_resizefs, resizing disabled\n', - logs) + "DEBUG: Skipping module named cc_resizefs, resizing disabled\n", + logs, + ) - @mock.patch('cloudinit.config.cc_resizefs.util.get_mount_info') + @mock.patch("cloudinit.config.cc_resizefs.util.get_mount_info") def test_handle_warns_on_unknown_mount_info(self, m_get_mount_info): """handle warns when get_mount_info sees unknown filesystem for /.""" m_get_mount_info.return_value = None - cfg = {'resize_rootfs': True} - handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[]) + cfg = {"resize_rootfs": True} + handle("cc_resizefs", cfg, _cloud=None, log=LOG, args=[]) logs = self.logs.getvalue() self.assertNotIn("WARNING: Invalid config:\nresize_rootfs:", logs) self.assertIn( - 'WARNING: Could not determine filesystem type of /\n', - logs) + "WARNING: Could not determine filesystem type of /\n", logs + ) self.assertEqual( - [mock.call('/', LOG)], - m_get_mount_info.call_args_list) + [mock.call("/", LOG)], m_get_mount_info.call_args_list + ) def test_handle_warns_on_undiscoverable_root_path_in_commandline(self): """handle noops when the root path is not found on the commandline.""" - cfg = {'resize_rootfs': True} - exists_mock_path = 'cloudinit.config.cc_resizefs.os.path.exists' + cfg = {"resize_rootfs": True} + exists_mock_path = "cloudinit.config.cc_resizefs.os.path.exists" def fake_mount_info(path, log): - self.assertEqual('/', path) + self.assertEqual("/", path) self.assertEqual(LOG, log) - return ('/dev/root', 'ext4', '/') + return ("/dev/root", "ext4", "/") with mock.patch(exists_mock_path) as m_exists: m_exists.return_value = False wrap_and_call( - 'cloudinit.config.cc_resizefs.util', - {'is_container': {'return_value': False}, - 'get_mount_info': {'side_effect': fake_mount_info}, - 'get_cmdline': {'return_value': 'BOOT_IMAGE=/vmlinuz.efi'}}, - handle, 'cc_resizefs', cfg, _cloud=None, log=LOG, - args=[]) + "cloudinit.config.cc_resizefs.util", + { + "is_container": {"return_value": False}, + "get_mount_info": {"side_effect": fake_mount_info}, + "get_cmdline": {"return_value": "BOOT_IMAGE=/vmlinuz.efi"}, + }, + handle, + "cc_resizefs", + cfg, + _cloud=None, + log=LOG, + args=[], + ) logs = self.logs.getvalue() self.assertIn("WARNING: Unable to find device '/dev/root'", logs) def test_resize_zfs_cmd_return(self): - zpool = 'zroot' - devpth = 'gpt/system' - self.assertEqual(('zpool', 'online', '-e', zpool, devpth), - _resize_zfs(zpool, devpth)) + zpool = "zroot" + devpth = "gpt/system" + self.assertEqual( + ("zpool", "online", "-e", zpool, devpth), + _resize_zfs(zpool, devpth), + ) def test_resize_xfs_cmd_return(self): - mount_point = '/mnt/test' - devpth = '/dev/sda1' - self.assertEqual(('xfs_growfs', mount_point), - _resize_xfs(mount_point, devpth)) + mount_point = "/mnt/test" + devpth = "/dev/sda1" + self.assertEqual( + ("xfs_growfs", mount_point), _resize_xfs(mount_point, devpth) + ) def test_resize_ext_cmd_return(self): - mount_point = '/' - devpth = '/dev/sdb1' - self.assertEqual(('resize2fs', devpth), - _resize_ext(mount_point, devpth)) + mount_point = "/" + devpth = "/dev/sdb1" + self.assertEqual( + ("resize2fs", devpth), _resize_ext(mount_point, devpth) 
+ ) def test_resize_ufs_cmd_return(self): - mount_point = '/' - devpth = '/dev/sda2' - self.assertEqual(('growfs', '-y', mount_point), - _resize_ufs(mount_point, devpth)) - - @mock.patch('cloudinit.util.is_container', return_value=False) - @mock.patch('cloudinit.util.parse_mount') - @mock.patch('cloudinit.util.get_device_info_from_zpool') - @mock.patch('cloudinit.util.get_mount_info') - def test_handle_zfs_root(self, mount_info, zpool_info, parse_mount, - is_container): - devpth = 'vmzroot/ROOT/freebsd' - disk = 'gpt/system' - fs_type = 'zfs' - mount_point = '/' + mount_point = "/" + devpth = "/dev/sda2" + self.assertEqual( + ("growfs", "-y", mount_point), _resize_ufs(mount_point, devpth) + ) + + @mock.patch("cloudinit.util.is_container", return_value=False) + @mock.patch("cloudinit.util.parse_mount") + @mock.patch("cloudinit.util.get_device_info_from_zpool") + @mock.patch("cloudinit.util.get_mount_info") + def test_handle_zfs_root( + self, mount_info, zpool_info, parse_mount, is_container + ): + devpth = "vmzroot/ROOT/freebsd" + disk = "gpt/system" + fs_type = "zfs" + mount_point = "/" mount_info.return_value = (devpth, fs_type, mount_point) zpool_info.return_value = disk parse_mount.return_value = (devpth, fs_type, mount_point) - cfg = {'resize_rootfs': True} + cfg = {"resize_rootfs": True} - with mock.patch('cloudinit.config.cc_resizefs.do_resize') as dresize: - handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[]) + with mock.patch("cloudinit.config.cc_resizefs.do_resize") as dresize: + handle("cc_resizefs", cfg, _cloud=None, log=LOG, args=[]) ret = dresize.call_args[0][0] - self.assertEqual(('zpool', 'online', '-e', 'vmzroot', disk), ret) + self.assertEqual(("zpool", "online", "-e", "vmzroot", disk), ret) - @mock.patch('cloudinit.util.is_container', return_value=False) - @mock.patch('cloudinit.util.get_mount_info') - @mock.patch('cloudinit.util.get_device_info_from_zpool') - @mock.patch('cloudinit.util.parse_mount') - def test_handle_modern_zfsroot(self, mount_info, zpool_info, parse_mount, - is_container): - devpth = 'zroot/ROOT/default' - disk = 'da0p3' - fs_type = 'zfs' - mount_point = '/' + @mock.patch("cloudinit.util.is_container", return_value=False) + @mock.patch("cloudinit.util.get_mount_info") + @mock.patch("cloudinit.util.get_device_info_from_zpool") + @mock.patch("cloudinit.util.parse_mount") + def test_handle_modern_zfsroot( + self, mount_info, zpool_info, parse_mount, is_container + ): + devpth = "zroot/ROOT/default" + disk = "da0p3" + fs_type = "zfs" + mount_point = "/" mount_info.return_value = (devpth, fs_type, mount_point) zpool_info.return_value = disk parse_mount.return_value = (devpth, fs_type, mount_point) - cfg = {'resize_rootfs': True} + cfg = {"resize_rootfs": True} def fake_stat(devpath): if devpath == disk: raise OSError("not here") FakeStat = namedtuple( - 'FakeStat', ['st_mode', 'st_size', 'st_mtime']) # minimal stat + "FakeStat", ["st_mode", "st_size", "st_mtime"] + ) # minimal stat return FakeStat(25008, 0, 1) # fake char block device - with mock.patch('cloudinit.config.cc_resizefs.do_resize') as dresize: - with mock.patch('cloudinit.config.cc_resizefs.os.stat') as m_stat: + with mock.patch("cloudinit.config.cc_resizefs.do_resize") as dresize: + with mock.patch("cloudinit.config.cc_resizefs.os.stat") as m_stat: m_stat.side_effect = fake_stat - handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[]) + handle("cc_resizefs", cfg, _cloud=None, log=LOG, args=[]) - self.assertEqual(('zpool', 'online', '-e', 'zroot', '/dev/' + disk), - 
dresize.call_args[0][0]) + self.assertEqual( + ("zpool", "online", "-e", "zroot", "/dev/" + disk), + dresize.call_args[0][0], + ) class TestRootDevFromCmdline(CiTestCase): - def test_rootdev_from_cmdline_with_no_root(self): """Return None from rootdev_from_cmdline when root is not present.""" invalid_cases = [ - 'BOOT_IMAGE=/adsf asdfa werasef root adf', 'BOOT_IMAGE=/adsf', ''] + "BOOT_IMAGE=/adsf asdfa werasef root adf", + "BOOT_IMAGE=/adsf", + "", + ] for case in invalid_cases: self.assertIsNone(util.rootdev_from_cmdline(case)) def test_rootdev_from_cmdline_with_root_startswith_dev(self): """Return the cmdline root when the path starts with /dev.""" self.assertEqual( - '/dev/this', util.rootdev_from_cmdline('asdf root=/dev/this')) + "/dev/this", util.rootdev_from_cmdline("asdf root=/dev/this") + ) def test_rootdev_from_cmdline_with_root_without_dev_prefix(self): """Add /dev prefix to cmdline root when the path lacks the prefix.""" self.assertEqual( - '/dev/this', util.rootdev_from_cmdline('asdf root=this')) + "/dev/this", util.rootdev_from_cmdline("asdf root=this") + ) def test_rootdev_from_cmdline_with_root_with_label(self): """When cmdline root contains a LABEL, our root is disk/by-label.""" self.assertEqual( - '/dev/disk/by-label/unique', - util.rootdev_from_cmdline('asdf root=LABEL=unique')) + "/dev/disk/by-label/unique", + util.rootdev_from_cmdline("asdf root=LABEL=unique"), + ) def test_rootdev_from_cmdline_with_root_with_uuid(self): """When cmdline root contains a UUID, our root is disk/by-uuid.""" self.assertEqual( - '/dev/disk/by-uuid/adsfdsaf-adsf', - util.rootdev_from_cmdline('asdf root=UUID=adsfdsaf-adsf')) + "/dev/disk/by-uuid/adsfdsaf-adsf", + util.rootdev_from_cmdline("asdf root=UUID=adsfdsaf-adsf"), + ) class TestMaybeGetDevicePathAsWritableBlock(CiTestCase): @@ -241,158 +279,210 @@ class TestMaybeGetDevicePathAsWritableBlock(CiTestCase): def test_maybe_get_writable_device_path_none_on_overlayroot(self): """When devpath is overlayroot (on MAAS), is_dev_writable is False.""" - info = 'does not matter' + info = "does not matter" devpath = wrap_and_call( - 'cloudinit.config.cc_resizefs.util', - {'is_container': {'return_value': False}}, - maybe_get_writable_device_path, 'overlayroot', info, LOG) + "cloudinit.config.cc_resizefs.util", + {"is_container": {"return_value": False}}, + maybe_get_writable_device_path, + "overlayroot", + info, + LOG, + ) self.assertIsNone(devpath) self.assertIn( "Not attempting to resize devpath 'overlayroot'", - self.logs.getvalue()) + self.logs.getvalue(), + ) def test_maybe_get_writable_device_path_warns_missing_cmdline_root(self): """When root does not exist isn't in the cmdline, log warning.""" - info = 'does not matter' + info = "does not matter" def fake_mount_info(path, log): - self.assertEqual('/', path) + self.assertEqual("/", path) self.assertEqual(LOG, log) - return ('/dev/root', 'ext4', '/') + return ("/dev/root", "ext4", "/") - exists_mock_path = 'cloudinit.config.cc_resizefs.os.path.exists' + exists_mock_path = "cloudinit.config.cc_resizefs.os.path.exists" with mock.patch(exists_mock_path) as m_exists: m_exists.return_value = False devpath = wrap_and_call( - 'cloudinit.config.cc_resizefs.util', - {'is_container': {'return_value': False}, - 'get_mount_info': {'side_effect': fake_mount_info}, - 'get_cmdline': {'return_value': 'BOOT_IMAGE=/vmlinuz.efi'}}, - maybe_get_writable_device_path, '/dev/root', info, LOG) + "cloudinit.config.cc_resizefs.util", + { + "is_container": {"return_value": False}, + "get_mount_info": {"side_effect": 
+ "get_cmdline": {"return_value": "BOOT_IMAGE=/vmlinuz.efi"},
+ },
+ maybe_get_writable_device_path,
+ "/dev/root",
+ info,
+ LOG,
+ )
self.assertIsNone(devpath)
logs = self.logs.getvalue()
self.assertIn("WARNING: Unable to find device '/dev/root'", logs)
def test_maybe_get_writable_device_path_does_not_exist(self):
"""When devpath does not exist, a warning is logged."""
- info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none'
+ info = "dev=/dev/I/dont/exist mnt_point=/ path=/dev/none"
devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs.util',
- {'is_container': {'return_value': False}},
- maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG)
+ "cloudinit.config.cc_resizefs.util",
+ {"is_container": {"return_value": False}},
+ maybe_get_writable_device_path,
+ "/dev/I/dont/exist",
+ info,
+ LOG,
+ )
self.assertIsNone(devpath)
self.assertIn(
"WARNING: Device '/dev/I/dont/exist' did not exist."
- ' cannot resize: %s' % info,
- self.logs.getvalue())
+ " cannot resize: %s" % info,
+ self.logs.getvalue(),
+ )
def test_maybe_get_writable_device_path_does_not_exist_in_container(self):
"""When devpath does not exist in a container, log a debug message."""
- info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none'
+ info = "dev=/dev/I/dont/exist mnt_point=/ path=/dev/none"
devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs.util',
- {'is_container': {'return_value': True}},
- maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG)
+ "cloudinit.config.cc_resizefs.util",
+ {"is_container": {"return_value": True}},
+ maybe_get_writable_device_path,
+ "/dev/I/dont/exist",
+ info,
+ LOG,
+ )
self.assertIsNone(devpath)
self.assertIn(
"DEBUG: Device '/dev/I/dont/exist' did not exist in container."
- ' cannot resize: %s' % info,
- self.logs.getvalue())
+ " cannot resize: %s" % info,
+ self.logs.getvalue(),
+ )
def test_maybe_get_writable_device_path_raises_oserror(self):
"""When an unexpected OSError is raised by os.stat it is reraised."""
- info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none'
+ info = "dev=/dev/I/dont/exist mnt_point=/ path=/dev/none"
with self.assertRaises(OSError) as context_manager:
wrap_and_call(
- 'cloudinit.config.cc_resizefs',
- {'util.is_container': {'return_value': True},
- 'os.stat': {'side_effect': OSError('Something unexpected')}},
- maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG)
+ "cloudinit.config.cc_resizefs",
+ {
+ "util.is_container": {"return_value": True},
+ "os.stat": {
+ "side_effect": OSError("Something unexpected")
+ },
+ },
+ maybe_get_writable_device_path,
+ "/dev/I/dont/exist",
+ info,
+ LOG,
+ )
self.assertEqual(
- 'Something unexpected', str(context_manager.exception))
+ "Something unexpected", str(context_manager.exception)
+ )
def test_maybe_get_writable_device_path_non_block(self):
"""When device is not a block device, emit a warning and return None."""
- fake_devpath = self.tmp_path('dev/readwrite')
- util.write_file(fake_devpath, '', mode=0o600)  # read-write
- info = 'dev=/dev/root mnt_point=/ path={0}'.format(fake_devpath)
+ fake_devpath = self.tmp_path("dev/readwrite")
+ util.write_file(fake_devpath, "", mode=0o600)  # read-write
+ info = "dev=/dev/root mnt_point=/ path={0}".format(fake_devpath)
devpath = wrap_and_call(
- 'cloudinit.config.cc_resizefs.util',
- {'is_container': {'return_value': False}},
- maybe_get_writable_device_path, fake_devpath, info, LOG)
+ "cloudinit.config.cc_resizefs.util",
+ {"is_container": {"return_value": False}},
+ maybe_get_writable_device_path,
+ fake_devpath,
+ info, + LOG, + ) self.assertIsNone(devpath) self.assertIn( "WARNING: device '{0}' not a block device. cannot resize".format( - fake_devpath), - self.logs.getvalue()) + fake_devpath + ), + self.logs.getvalue(), + ) def test_maybe_get_writable_device_path_non_block_on_container(self): """When device is non-block device in container, emit debug log.""" - fake_devpath = self.tmp_path('dev/readwrite') - util.write_file(fake_devpath, '', mode=0o600) # read-write - info = 'dev=/dev/root mnt_point=/ path={0}'.format(fake_devpath) + fake_devpath = self.tmp_path("dev/readwrite") + util.write_file(fake_devpath, "", mode=0o600) # read-write + info = "dev=/dev/root mnt_point=/ path={0}".format(fake_devpath) devpath = wrap_and_call( - 'cloudinit.config.cc_resizefs.util', - {'is_container': {'return_value': True}}, - maybe_get_writable_device_path, fake_devpath, info, LOG) + "cloudinit.config.cc_resizefs.util", + {"is_container": {"return_value": True}}, + maybe_get_writable_device_path, + fake_devpath, + info, + LOG, + ) self.assertIsNone(devpath) self.assertIn( "DEBUG: device '{0}' not a block device in container." - ' cannot resize'.format(fake_devpath), - self.logs.getvalue()) + " cannot resize".format(fake_devpath), + self.logs.getvalue(), + ) def test_maybe_get_writable_device_path_returns_cmdline_root(self): """When root device is UUID in kernel commandline, update devpath.""" # XXX Long-term we want to use FilesystemMocking test to avoid # touching os.stat. FakeStat = namedtuple( - 'FakeStat', ['st_mode', 'st_size', 'st_mtime']) # minimal def. - info = 'dev=/dev/root mnt_point=/ path=/does/not/matter' + "FakeStat", ["st_mode", "st_size", "st_mtime"] + ) # minimal def. + info = "dev=/dev/root mnt_point=/ path=/does/not/matter" devpath = wrap_and_call( - 'cloudinit.config.cc_resizefs', - {'util.get_cmdline': {'return_value': 'asdf root=UUID=my-uuid'}, - 'util.is_container': False, - 'os.path.exists': False, # /dev/root doesn't exist - 'os.stat': { - 'return_value': FakeStat(25008, 0, 1)} # char block device - }, - maybe_get_writable_device_path, '/dev/root', info, LOG) - self.assertEqual('/dev/disk/by-uuid/my-uuid', devpath) + "cloudinit.config.cc_resizefs", + { + "util.get_cmdline": {"return_value": "asdf root=UUID=my-uuid"}, + "util.is_container": False, + "os.path.exists": False, # /dev/root doesn't exist + "os.stat": { + "return_value": FakeStat(25008, 0, 1) + }, # char block device + }, + maybe_get_writable_device_path, + "/dev/root", + info, + LOG, + ) + self.assertEqual("/dev/disk/by-uuid/my-uuid", devpath) self.assertIn( "DEBUG: Converted /dev/root to '/dev/disk/by-uuid/my-uuid'" " per kernel cmdline", - self.logs.getvalue()) + self.logs.getvalue(), + ) - @mock.patch('cloudinit.util.mount_is_read_write') - @mock.patch('cloudinit.config.cc_resizefs.os.path.isdir') + @mock.patch("cloudinit.util.mount_is_read_write") + @mock.patch("cloudinit.config.cc_resizefs.os.path.isdir") def test_resize_btrfs_mount_is_ro(self, m_is_dir, m_is_rw): """Do not resize / directly if it is read-only. 
(LP: #1734787)."""
m_is_rw.return_value = False
m_is_dir.return_value = True
self.assertEqual(
- ('btrfs', 'filesystem', 'resize', 'max', '//.snapshots'),
- _resize_btrfs("/", "/dev/sda1"))
+ ("btrfs", "filesystem", "resize", "max", "//.snapshots"),
+ _resize_btrfs("/", "/dev/sda1"),
+ )
- @mock.patch('cloudinit.util.mount_is_read_write')
- @mock.patch('cloudinit.config.cc_resizefs.os.path.isdir')
+ @mock.patch("cloudinit.util.mount_is_read_write")
+ @mock.patch("cloudinit.config.cc_resizefs.os.path.isdir")
def test_resize_btrfs_mount_is_rw(self, m_is_dir, m_is_rw):
"""Resize / directly if it is mounted read-write. (LP: #1734787)."""
m_is_rw.return_value = True
m_is_dir.return_value = True
self.assertEqual(
- ('btrfs', 'filesystem', 'resize', 'max', '/'),
- _resize_btrfs("/", "/dev/sda1"))
+ ("btrfs", "filesystem", "resize", "max", "/"),
+ _resize_btrfs("/", "/dev/sda1"),
+ )
- @mock.patch('cloudinit.util.is_container', return_value=True)
- @mock.patch('cloudinit.util.is_FreeBSD')
- def test_maybe_get_writable_device_path_zfs_freebsd(self, freebsd,
- m_is_container):
+ @mock.patch("cloudinit.util.is_container", return_value=True)
+ @mock.patch("cloudinit.util.is_FreeBSD")
+ def test_maybe_get_writable_device_path_zfs_freebsd(
+ self, freebsd, m_is_container
+ ):
freebsd.return_value = True
- info = 'dev=gpt/system mnt_point=/ path=/'
- devpth = maybe_get_writable_device_path('gpt/system', info, LOG)
- self.assertEqual('gpt/system', devpth)
+ info = "dev=gpt/system mnt_point=/ path=/"
+ devpth = maybe_get_writable_device_path("gpt/system", info, LOG)
+ self.assertEqual("gpt/system", devpth)
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_resolv_conf.py b/tests/unittests/config/test_cc_resolv_conf.py
index ab2de17a..8896a4e8 100644
--- a/tests/unittests/config/test_cc_resolv_conf.py
+++ b/tests/unittests/config/test_cc_resolv_conf.py
@@ -4,19 +4,16 @@
import logging
import os
import shutil
import tempfile
-import pytest
from copy import deepcopy
from unittest import mock
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import util
+import pytest
-from tests.unittests import helpers as t_help
-from tests.unittests.util import MockDistro
+from cloudinit import cloud, distros, helpers, util
from cloudinit.config import cc_resolv_conf
from cloudinit.config.cc_resolv_conf import generate_resolv_conf
+from tests.unittests import helpers as t_help
+from tests.unittests.util import MockDistro
LOG = logging.getLogger(__name__)
EXPECTED_HEADER = """\
@@ -29,17 +26,17 @@ EXPECTED_HEADER = """\
class TestResolvConf(t_help.FilesystemMockingTestCase):
with_logs = True
- cfg = {'manage_resolv_conf': True, 'resolv_conf': {}}
+ cfg = {"manage_resolv_conf": True, "resolv_conf": {}}
def setUp(self):
super(TestResolvConf, self).setUp()
self.tmp = tempfile.mkdtemp()
- util.ensure_dir(os.path.join(self.tmp, 'data'))
+ util.ensure_dir(os.path.join(self.tmp, "data"))
self.addCleanup(shutil.rmtree, self.tmp)
def _fetch_distro(self, kind, conf=None):
cls = distros.fetch(kind)
- paths = helpers.Paths({'cloud_dir': self.tmp})
+ paths = helpers.Paths({"cloud_dir": self.tmp})
conf = {} if conf is None else conf
return cls(kind, conf, paths)
@@ -47,67 +44,73 @@ class TestResolvConf(t_help.FilesystemMockingTestCase):
if not cc:
ds = None
distro = self._fetch_distro(distro_name, conf)
- paths = helpers.Paths({'cloud_dir': self.tmp})
+ paths = helpers.Paths({"cloud_dir": self.tmp})
cc = cloud.Cloud(ds, paths, {}, distro, None)
-
cc_resolv_conf.handle('cc_resolv_conf', conf, cc, LOG, []) + cc_resolv_conf.handle("cc_resolv_conf", conf, cc, LOG, []) @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") def test_resolv_conf_systemd_resolved(self, m_render_to_file): - self.call_resolv_conf_handler('photon', self.cfg) + self.call_resolv_conf_handler("photon", self.cfg) assert [ - mock.call(mock.ANY, '/etc/systemd/resolved.conf', mock.ANY) + mock.call(mock.ANY, "/etc/systemd/resolved.conf", mock.ANY) ] == m_render_to_file.call_args_list @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") def test_resolv_conf_no_param(self, m_render_to_file): tmp = deepcopy(self.cfg) self.logs.truncate(0) - tmp.pop('resolv_conf') - self.call_resolv_conf_handler('photon', tmp) + tmp.pop("resolv_conf") + self.call_resolv_conf_handler("photon", tmp) - self.assertIn('manage_resolv_conf True but no parameters provided', - self.logs.getvalue()) + self.assertIn( + "manage_resolv_conf True but no parameters provided", + self.logs.getvalue(), + ) assert [ - mock.call(mock.ANY, '/etc/systemd/resolved.conf', mock.ANY) + mock.call(mock.ANY, "/etc/systemd/resolved.conf", mock.ANY) ] not in m_render_to_file.call_args_list @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") def test_resolv_conf_manage_resolv_conf_false(self, m_render_to_file): tmp = deepcopy(self.cfg) self.logs.truncate(0) - tmp['manage_resolv_conf'] = False - self.call_resolv_conf_handler('photon', tmp) - self.assertIn("'manage_resolv_conf' present but set to False", - self.logs.getvalue()) + tmp["manage_resolv_conf"] = False + self.call_resolv_conf_handler("photon", tmp) + self.assertIn( + "'manage_resolv_conf' present but set to False", + self.logs.getvalue(), + ) assert [ - mock.call(mock.ANY, '/etc/systemd/resolved.conf', mock.ANY) + mock.call(mock.ANY, "/etc/systemd/resolved.conf", mock.ANY) ] not in m_render_to_file.call_args_list @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") def test_resolv_conf_etc_resolv_conf(self, m_render_to_file): - self.call_resolv_conf_handler('rhel', self.cfg) + self.call_resolv_conf_handler("rhel", self.cfg) assert [ - mock.call(mock.ANY, '/etc/resolv.conf', mock.ANY) + mock.call(mock.ANY, "/etc/resolv.conf", mock.ANY) ] == m_render_to_file.call_args_list @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") def test_resolv_conf_invalid_resolve_conf_fn(self, m_render_to_file): ds = None - distro = self._fetch_distro('rhel', self.cfg) - paths = helpers.Paths({'cloud_dir': self.tmp}) + distro = self._fetch_distro("rhel", self.cfg) + paths = helpers.Paths({"cloud_dir": self.tmp}) cc = cloud.Cloud(ds, paths, {}, distro, None) - cc.distro.resolve_conf_fn = 'bla' + cc.distro.resolve_conf_fn = "bla" self.logs.truncate(0) - self.call_resolv_conf_handler('rhel', self.cfg, cc) + self.call_resolv_conf_handler("rhel", self.cfg, cc) - self.assertIn('No template found, not rendering resolve configs', - self.logs.getvalue()) + self.assertIn( + "No template found, not rendering resolve configs", + self.logs.getvalue(), + ) assert [ - mock.call(mock.ANY, '/etc/resolv.conf', mock.ANY) + mock.call(mock.ANY, "/etc/resolv.conf", mock.ANY) ] not in m_render_to_file.call_args_list @@ -119,9 +122,9 @@ class TestGenerateResolvConf: @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") def test_dist_resolv_conf_fn(self, m_render_to_file): self.dist.resolve_conf_fn = "/tmp/resolv-test.conf" - generate_resolv_conf(self.tmpl_fn, - mock.MagicMock(), - 
self.dist.resolve_conf_fn) + generate_resolv_conf( + self.tmpl_fn, mock.MagicMock(), self.dist.resolve_conf_fn + ) assert [ mock.call(mock.ANY, self.dist.resolve_conf_fn, mock.ANY) @@ -190,4 +193,5 @@ class TestGenerateResolvConf: mock.call(mock.ANY, expected_content, mode=mock.ANY) ] == m_write_file.call_args_list + # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_rh_subscription.py b/tests/unittests/config/test_cc_rh_subscription.py index bd7ebc98..fcc7db34 100644 --- a/tests/unittests/config/test_cc_rh_subscription.py +++ b/tests/unittests/config/test_cc_rh_subscription.py @@ -5,13 +5,12 @@ import copy import logging -from cloudinit.config import cc_rh_subscription from cloudinit import subp - +from cloudinit.config import cc_rh_subscription from tests.unittests.helpers import CiTestCase, mock SUBMGR = cc_rh_subscription.SubscriptionManager -SUB_MAN_CLI = 'cloudinit.config.cc_rh_subscription._sub_man_cli' +SUB_MAN_CLI = "cloudinit.config.cc_rh_subscription._sub_man_cli" @mock.patch(SUB_MAN_CLI) @@ -26,87 +25,115 @@ class GoodTests(CiTestCase): self.args = [] self.handle = cc_rh_subscription.handle - self.config = {'rh_subscription': - {'username': 'scooby@do.com', - 'password': 'scooby-snacks' - }} - self.config_full = {'rh_subscription': - {'username': 'scooby@do.com', - 'password': 'scooby-snacks', - 'auto-attach': True, - 'service-level': 'self-support', - 'add-pool': ['pool1', 'pool2', 'pool3'], - 'enable-repo': ['repo1', 'repo2', 'repo3'], - 'disable-repo': ['repo4', 'repo5'] - }} + self.config = { + "rh_subscription": { + "username": "scooby@do.com", + "password": "scooby-snacks", + } + } + self.config_full = { + "rh_subscription": { + "username": "scooby@do.com", + "password": "scooby-snacks", + "auto-attach": True, + "service-level": "self-support", + "add-pool": ["pool1", "pool2", "pool3"], + "enable-repo": ["repo1", "repo2", "repo3"], + "disable-repo": ["repo4", "repo5"], + } + } def test_already_registered(self, m_sman_cli): - ''' + """ Emulates a system that is already registered. 
Ensure it gets a non-ProcessExecution error from is_registered() - ''' - self.handle(self.name, self.config, self.cloud_init, - self.log, self.args) + """ + self.handle( + self.name, self.config, self.cloud_init, self.log, self.args + ) self.assertEqual(m_sman_cli.call_count, 1) - self.assertIn('System is already registered', self.logs.getvalue()) + self.assertIn("System is already registered", self.logs.getvalue()) def test_simple_registration(self, m_sman_cli): - ''' + """ Simple registration with username and password - ''' - reg = "The system has been registered with ID:" \ - " 12345678-abde-abcde-1234-1234567890abc" - m_sman_cli.side_effect = [subp.ProcessExecutionError, (reg, 'bar')] - self.handle(self.name, self.config, self.cloud_init, - self.log, self.args) - self.assertIn(mock.call(['identity']), m_sman_cli.call_args_list) - self.assertIn(mock.call(['register', '--username=scooby@do.com', - '--password=scooby-snacks'], - logstring_val=True), - m_sman_cli.call_args_list) - self.assertIn('rh_subscription plugin completed successfully', - self.logs.getvalue()) + """ + reg = ( + "The system has been registered with ID:" + " 12345678-abde-abcde-1234-1234567890abc" + ) + m_sman_cli.side_effect = [subp.ProcessExecutionError, (reg, "bar")] + self.handle( + self.name, self.config, self.cloud_init, self.log, self.args + ) + self.assertIn(mock.call(["identity"]), m_sman_cli.call_args_list) + self.assertIn( + mock.call( + [ + "register", + "--username=scooby@do.com", + "--password=scooby-snacks", + ], + logstring_val=True, + ), + m_sman_cli.call_args_list, + ) + self.assertIn( + "rh_subscription plugin completed successfully", + self.logs.getvalue(), + ) self.assertEqual(m_sman_cli.call_count, 2) @mock.patch.object(cc_rh_subscription.SubscriptionManager, "_getRepos") def test_update_repos_disable_with_none(self, m_get_repos, m_sman_cli): cfg = copy.deepcopy(self.config) - m_get_repos.return_value = ([], ['repo1']) - cfg['rh_subscription'].update( - {'enable-repo': ['repo1'], 'disable-repo': None}) + m_get_repos.return_value = ([], ["repo1"]) + cfg["rh_subscription"].update( + {"enable-repo": ["repo1"], "disable-repo": None} + ) mysm = cc_rh_subscription.SubscriptionManager(cfg) self.assertEqual(True, mysm.update_repos()) m_get_repos.assert_called_with() - self.assertEqual(m_sman_cli.call_args_list, - [mock.call(['repos', '--enable=repo1'])]) + self.assertEqual( + m_sman_cli.call_args_list, [mock.call(["repos", "--enable=repo1"])] + ) def test_full_registration(self, m_sman_cli): - ''' + """ Registration with auto-attach, service-level, adding pools, and enabling and disabling yum repos - ''' + """ call_lists = [] - call_lists.append(['attach', '--pool=pool1', '--pool=pool3']) - call_lists.append(['repos', '--disable=repo5', '--enable=repo2', - '--enable=repo3']) - call_lists.append(['attach', '--auto', '--servicelevel=self-support']) - reg = "The system has been registered with ID:" \ - " 12345678-abde-abcde-1234-1234567890abc" + call_lists.append(["attach", "--pool=pool1", "--pool=pool3"]) + call_lists.append( + ["repos", "--disable=repo5", "--enable=repo2", "--enable=repo3"] + ) + call_lists.append(["attach", "--auto", "--servicelevel=self-support"]) + reg = ( + "The system has been registered with ID:" + " 12345678-abde-abcde-1234-1234567890abc" + ) m_sman_cli.side_effect = [ subp.ProcessExecutionError, - (reg, 'bar'), - ('Service level set to: self-support', ''), - ('pool1\npool3\n', ''), ('pool2\n', ''), ('', ''), - ('Repo ID: repo1\nRepo ID: repo5\n', ''), - ('Repo ID: repo2\nRepo ID: 
repo3\nRepo ID: repo4', ''), - ('', '')] - self.handle(self.name, self.config_full, self.cloud_init, - self.log, self.args) + (reg, "bar"), + ("Service level set to: self-support", ""), + ("pool1\npool3\n", ""), + ("pool2\n", ""), + ("", ""), + ("Repo ID: repo1\nRepo ID: repo5\n", ""), + ("Repo ID: repo2\nRepo ID: repo3\nRepo ID: repo4", ""), + ("", ""), + ] + self.handle( + self.name, self.config_full, self.cloud_init, self.log, self.args + ) self.assertEqual(m_sman_cli.call_count, 9) for call in call_lists: self.assertIn(mock.call(call), m_sman_cli.call_args_list) - self.assertIn("rh_subscription plugin completed successfully", - self.logs.getvalue()) + self.assertIn( + "rh_subscription plugin completed successfully", + self.logs.getvalue(), + ) @mock.patch(SUB_MAN_CLI) @@ -117,38 +144,48 @@ class TestBadInput(CiTestCase): log = logging.getLogger("bad_tests") args = [] SM = cc_rh_subscription.SubscriptionManager - reg = "The system has been registered with ID:" \ - " 12345678-abde-abcde-1234-1234567890abc" - - config_no_password = {'rh_subscription': - {'username': 'scooby@do.com' - }} - - config_no_key = {'rh_subscription': - {'activation-key': '1234abcde', - }} - - config_service = {'rh_subscription': - {'username': 'scooby@do.com', - 'password': 'scooby-snacks', - 'service-level': 'self-support' - }} - - config_badpool = {'rh_subscription': - {'username': 'scooby@do.com', - 'password': 'scooby-snacks', - 'add-pool': 'not_a_list' - }} - config_badrepo = {'rh_subscription': - {'username': 'scooby@do.com', - 'password': 'scooby-snacks', - 'enable-repo': 'not_a_list' - }} - config_badkey = {'rh_subscription': - {'activation-key': 'abcdef1234', - 'fookey': 'bar', - 'org': '123', - }} + reg = ( + "The system has been registered with ID:" + " 12345678-abde-abcde-1234-1234567890abc" + ) + + config_no_password = {"rh_subscription": {"username": "scooby@do.com"}} + + config_no_key = { + "rh_subscription": { + "activation-key": "1234abcde", + } + } + + config_service = { + "rh_subscription": { + "username": "scooby@do.com", + "password": "scooby-snacks", + "service-level": "self-support", + } + } + + config_badpool = { + "rh_subscription": { + "username": "scooby@do.com", + "password": "scooby-snacks", + "add-pool": "not_a_list", + } + } + config_badrepo = { + "rh_subscription": { + "username": "scooby@do.com", + "password": "scooby-snacks", + "enable-repo": "not_a_list", + } + } + config_badkey = { + "rh_subscription": { + "activation-key": "abcdef1234", + "fookey": "bar", + "org": "123", + } + } def setUp(self): super(TestBadInput, self).setUp() @@ -160,75 +197,124 @@ class TestBadInput(CiTestCase): self.assertEqual([], missing, "Missing expected warnings.") def test_no_password(self, m_sman_cli): - '''Attempt to register without the password key/value.''' - m_sman_cli.side_effect = [subp.ProcessExecutionError, - (self.reg, 'bar')] - self.handle(self.name, self.config_no_password, self.cloud_init, - self.log, self.args) + """Attempt to register without the password key/value.""" + m_sman_cli.side_effect = [ + subp.ProcessExecutionError, + (self.reg, "bar"), + ] + self.handle( + self.name, + self.config_no_password, + self.cloud_init, + self.log, + self.args, + ) self.assertEqual(m_sman_cli.call_count, 0) def test_no_org(self, m_sman_cli): - '''Attempt to register without the org key/value.''' + """Attempt to register without the org key/value.""" m_sman_cli.side_effect = [subp.ProcessExecutionError] - self.handle(self.name, self.config_no_key, self.cloud_init, - self.log, self.args) - 
m_sman_cli.assert_called_with(['identity']) + self.handle( + self.name, self.config_no_key, self.cloud_init, self.log, self.args + ) + m_sman_cli.assert_called_with(["identity"]) self.assertEqual(m_sman_cli.call_count, 1) - self.assert_logged_warnings(( - 'Unable to register system due to incomplete information.', - 'Use either activationkey and org *or* userid and password', - 'Registration failed or did not run completely', - 'rh_subscription plugin did not complete successfully')) + self.assert_logged_warnings( + ( + "Unable to register system due to incomplete information.", + "Use either activationkey and org *or* userid and password", + "Registration failed or did not run completely", + "rh_subscription plugin did not complete successfully", + ) + ) def test_service_level_without_auto(self, m_sman_cli): - '''Attempt to register using service-level without auto-attach key.''' - m_sman_cli.side_effect = [subp.ProcessExecutionError, - (self.reg, 'bar')] - self.handle(self.name, self.config_service, self.cloud_init, - self.log, self.args) + """Attempt to register using service-level without auto-attach key.""" + m_sman_cli.side_effect = [ + subp.ProcessExecutionError, + (self.reg, "bar"), + ] + self.handle( + self.name, + self.config_service, + self.cloud_init, + self.log, + self.args, + ) self.assertEqual(m_sman_cli.call_count, 1) - self.assert_logged_warnings(( - 'The service-level key must be used in conjunction with ', - 'rh_subscription plugin did not complete successfully')) + self.assert_logged_warnings( + ( + "The service-level key must be used in conjunction with ", + "rh_subscription plugin did not complete successfully", + ) + ) def test_pool_not_a_list(self, m_sman_cli): - ''' + """ Register with pools that are not in the format of a list - ''' - m_sman_cli.side_effect = [subp.ProcessExecutionError, - (self.reg, 'bar')] - self.handle(self.name, self.config_badpool, self.cloud_init, - self.log, self.args) + """ + m_sman_cli.side_effect = [ + subp.ProcessExecutionError, + (self.reg, "bar"), + ] + self.handle( + self.name, + self.config_badpool, + self.cloud_init, + self.log, + self.args, + ) self.assertEqual(m_sman_cli.call_count, 2) - self.assert_logged_warnings(( - 'Pools must in the format of a list', - 'rh_subscription plugin did not complete successfully')) + self.assert_logged_warnings( + ( + "Pools must in the format of a list", + "rh_subscription plugin did not complete successfully", + ) + ) def test_repo_not_a_list(self, m_sman_cli): - ''' + """ Register with repos that are not in the format of a list - ''' - m_sman_cli.side_effect = [subp.ProcessExecutionError, - (self.reg, 'bar')] - self.handle(self.name, self.config_badrepo, self.cloud_init, - self.log, self.args) + """ + m_sman_cli.side_effect = [ + subp.ProcessExecutionError, + (self.reg, "bar"), + ] + self.handle( + self.name, + self.config_badrepo, + self.cloud_init, + self.log, + self.args, + ) self.assertEqual(m_sman_cli.call_count, 2) - self.assert_logged_warnings(( - 'Repo IDs must in the format of a list.', - 'Unable to add or remove repos', - 'rh_subscription plugin did not complete successfully')) + self.assert_logged_warnings( + ( + "Repo IDs must in the format of a list.", + "Unable to add or remove repos", + "rh_subscription plugin did not complete successfully", + ) + ) def test_bad_key_value(self, m_sman_cli): - ''' + """ Attempt to register with a key that we don't know - ''' - m_sman_cli.side_effect = [subp.ProcessExecutionError, - (self.reg, 'bar')] - self.handle(self.name, self.config_badkey, 
self.cloud_init, - self.log, self.args) + """ + m_sman_cli.side_effect = [ + subp.ProcessExecutionError, + (self.reg, "bar"), + ] + self.handle( + self.name, self.config_badkey, self.cloud_init, self.log, self.args + ) self.assertEqual(m_sman_cli.call_count, 1) - self.assert_logged_warnings(( - 'fookey is not a valid key for rh_subscription. Valid keys are:', - 'rh_subscription plugin did not complete successfully')) + self.assert_logged_warnings( + ( + "fookey is not a valid key for rh_subscription. Valid keys" + " are:", + "rh_subscription plugin did not complete successfully", + ) + ) + # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_rsyslog.py b/tests/unittests/config/test_cc_rsyslog.py index bc147dac..e5d06ca2 100644 --- a/tests/unittests/config/test_cc_rsyslog.py +++ b/tests/unittests/config/test_cc_rsyslog.py @@ -4,11 +4,16 @@ import os import shutil import tempfile -from cloudinit.config.cc_rsyslog import ( - apply_rsyslog_changes, DEF_DIR, DEF_FILENAME, DEF_RELOAD, load_config, - parse_remotes_line, remotes_to_rsyslog_cfg) from cloudinit import util - +from cloudinit.config.cc_rsyslog import ( + DEF_DIR, + DEF_FILENAME, + DEF_RELOAD, + apply_rsyslog_changes, + load_config, + parse_remotes_line, + remotes_to_rsyslog_cfg, +) from tests.unittests import helpers as t_help @@ -16,43 +21,46 @@ class TestLoadConfig(t_help.TestCase): def setUp(self): super(TestLoadConfig, self).setUp() self.basecfg = { - 'config_filename': DEF_FILENAME, - 'config_dir': DEF_DIR, - 'service_reload_command': DEF_RELOAD, - 'configs': [], - 'remotes': {}, + "config_filename": DEF_FILENAME, + "config_dir": DEF_DIR, + "service_reload_command": DEF_RELOAD, + "configs": [], + "remotes": {}, } def test_legacy_full(self): - found = load_config({ - 'rsyslog': ['*.* @192.168.1.1'], - 'rsyslog_dir': "mydir", - 'rsyslog_filename': "myfilename"}) - self.basecfg.update({ - 'configs': ['*.* @192.168.1.1'], - 'config_dir': "mydir", - 'config_filename': 'myfilename', - 'service_reload_command': 'auto'} + found = load_config( + { + "rsyslog": ["*.* @192.168.1.1"], + "rsyslog_dir": "mydir", + "rsyslog_filename": "myfilename", + } + ) + self.basecfg.update( + { + "configs": ["*.* @192.168.1.1"], + "config_dir": "mydir", + "config_filename": "myfilename", + "service_reload_command": "auto", + } ) self.assertEqual(found, self.basecfg) def test_legacy_defaults(self): - found = load_config({ - 'rsyslog': ['*.* @192.168.1.1']}) - self.basecfg.update({ - 'configs': ['*.* @192.168.1.1']}) + found = load_config({"rsyslog": ["*.* @192.168.1.1"]}) + self.basecfg.update({"configs": ["*.* @192.168.1.1"]}) self.assertEqual(found, self.basecfg) def test_new_defaults(self): self.assertEqual(load_config({}), self.basecfg) def test_new_configs(self): - cfgs = ['*.* myhost', '*.* my2host'] - self.basecfg.update({'configs': cfgs}) + cfgs = ["*.* myhost", "*.* my2host"] + self.basecfg.update({"configs": cfgs}) self.assertEqual( - load_config({'rsyslog': {'configs': cfgs}}), - self.basecfg) + load_config({"rsyslog": {"configs": cfgs}}), self.basecfg + ) class TestApplyChanges(t_help.TestCase): @@ -63,27 +71,29 @@ class TestApplyChanges(t_help.TestCase): def test_simple(self): cfgline = "*.* foohost" changed = apply_rsyslog_changes( - configs=[cfgline], def_fname="foo.cfg", cfg_dir=self.tmp) + configs=[cfgline], def_fname="foo.cfg", cfg_dir=self.tmp + ) fname = os.path.join(self.tmp, "foo.cfg") self.assertEqual([fname], changed) - self.assertEqual( - util.load_file(fname), cfgline + "\n") + self.assertEqual(util.load_file(fname), 
cfgline + "\n") def test_multiple_files(self): configs = [ - '*.* foohost', - {'content': 'abc', 'filename': 'my.cfg'}, - {'content': 'filefoo-content', - 'filename': os.path.join(self.tmp, 'mydir/mycfg')}, + "*.* foohost", + {"content": "abc", "filename": "my.cfg"}, + { + "content": "filefoo-content", + "filename": os.path.join(self.tmp, "mydir/mycfg"), + }, ] changed = apply_rsyslog_changes( - configs=configs, def_fname="default.cfg", cfg_dir=self.tmp) + configs=configs, def_fname="default.cfg", cfg_dir=self.tmp + ) expected = [ - (os.path.join(self.tmp, "default.cfg"), - "*.* foohost\n"), + (os.path.join(self.tmp, "default.cfg"), "*.* foohost\n"), (os.path.join(self.tmp, "my.cfg"), "abc\n"), (os.path.join(self.tmp, "mydir/mycfg"), "filefoo-content\n"), ] @@ -91,30 +101,37 @@ class TestApplyChanges(t_help.TestCase): actual = [] for fname, _content in expected: util.load_file(fname) - actual.append((fname, util.load_file(fname),)) + actual.append( + ( + fname, + util.load_file(fname), + ) + ) self.assertEqual(expected, actual) def test_repeat_def(self): - configs = ['*.* foohost', "*.warn otherhost"] + configs = ["*.* foohost", "*.warn otherhost"] changed = apply_rsyslog_changes( - configs=configs, def_fname="default.cfg", cfg_dir=self.tmp) + configs=configs, def_fname="default.cfg", cfg_dir=self.tmp + ) fname = os.path.join(self.tmp, "default.cfg") self.assertEqual([fname], changed) - expected_content = '\n'.join([c for c in configs]) + '\n' + expected_content = "\n".join([c for c in configs]) + "\n" found_content = util.load_file(fname) self.assertEqual(expected_content, found_content) def test_multiline_content(self): - configs = ['line1', 'line2\nline3\n'] + configs = ["line1", "line2\nline3\n"] apply_rsyslog_changes( - configs=configs, def_fname="default.cfg", cfg_dir=self.tmp) + configs=configs, def_fname="default.cfg", cfg_dir=self.tmp + ) fname = os.path.join(self.tmp, "default.cfg") - expected_content = '\n'.join([c for c in configs]) + expected_content = "\n".join([c for c in configs]) found_content = util.load_file(fname) self.assertEqual(expected_content, found_content) @@ -152,7 +169,7 @@ class TestRemotesToSyslog(t_help.TestCase): # str rendered line must appear in remotes_to_ryslog_cfg return mycfg = "*.* myhost" myline = str(parse_remotes_line(mycfg, name="myname")) - r = remotes_to_rsyslog_cfg({'myname': mycfg}) + r = remotes_to_rsyslog_cfg({"myname": mycfg}) lines = r.splitlines() self.assertEqual(1, len(lines)) self.assertTrue(myline in r.splitlines()) @@ -161,7 +178,8 @@ class TestRemotesToSyslog(t_help.TestCase): header = "#foo head" footer = "#foo foot" r = remotes_to_rsyslog_cfg( - {'myname': "*.* myhost"}, header=header, footer=footer) + {"myname": "*.* myhost"}, header=header, footer=footer + ) lines = r.splitlines() self.assertTrue(header, lines[0]) self.assertTrue(footer, lines[-1]) @@ -170,9 +188,11 @@ class TestRemotesToSyslog(t_help.TestCase): mycfg = "*.* myhost" myline = str(parse_remotes_line(mycfg, name="myname")) r = remotes_to_rsyslog_cfg( - {'myname': mycfg, 'removed': None, 'removed2': ""}) + {"myname": mycfg, "removed": None, "removed2": ""} + ) lines = r.splitlines() self.assertEqual(1, len(lines)) self.assertTrue(myline in r.splitlines()) + # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_runcmd.py b/tests/unittests/config/test_cc_runcmd.py index 01de6af0..34b3fb77 100644 --- a/tests/unittests/config/test_cc_runcmd.py +++ b/tests/unittests/config/test_cc_runcmd.py @@ -4,12 +4,14 @@ import os import stat from unittest.mock import 
patch +from cloudinit import helpers, subp, util from cloudinit.config.cc_runcmd import handle, schema -from cloudinit import (helpers, subp, util) from tests.unittests.helpers import ( - CiTestCase, FilesystemMockingTestCase, SchemaTestCaseMixin, - skipUnlessJsonSchema) - + CiTestCase, + FilesystemMockingTestCase, + SchemaTestCaseMixin, + skipUnlessJsonSchema, +) from tests.unittests.util import get_cloud LOG = logging.getLogger(__name__) @@ -24,38 +26,40 @@ class TestRuncmd(FilesystemMockingTestCase): self.subp = subp.subp self.new_root = self.tmp_dir() self.patchUtils(self.new_root) - self.paths = helpers.Paths({'scripts': self.new_root}) + self.paths = helpers.Paths({"scripts": self.new_root}) def test_handler_skip_if_no_runcmd(self): """When the provided config doesn't contain runcmd, skip it.""" cfg = {} mycloud = get_cloud(paths=self.paths) - handle('notimportant', cfg, mycloud, LOG, None) + handle("notimportant", cfg, mycloud, LOG, None) self.assertIn( "Skipping module named notimportant, no 'runcmd' key", - self.logs.getvalue()) + self.logs.getvalue(), + ) - @patch('cloudinit.util.shellify') + @patch("cloudinit.util.shellify") def test_runcmd_shellify_fails(self, cls): """When shellify fails throw exception""" cls.side_effect = TypeError("patched shellify") - valid_config = {'runcmd': ['echo 42']} + valid_config = {"runcmd": ["echo 42"]} cc = get_cloud(paths=self.paths) with self.assertRaises(TypeError) as cm: - with self.allow_subp(['/bin/sh']): - handle('cc_runcmd', valid_config, cc, LOG, None) + with self.allow_subp(["/bin/sh"]): + handle("cc_runcmd", valid_config, cc, LOG, None) self.assertIn("Failed to shellify", str(cm.exception)) def test_handler_invalid_command_set(self): """Commands which can't be converted to shell will raise errors.""" - invalid_config = {'runcmd': 1} + invalid_config = {"runcmd": 1} cc = get_cloud(paths=self.paths) with self.assertRaises(TypeError) as cm: - handle('cc_runcmd', invalid_config, cc, LOG, []) + handle("cc_runcmd", invalid_config, cc, LOG, []) self.assertIn( - 'Failed to shellify 1 into file' - ' /var/lib/cloud/instances/iid-datasource-none/scripts/runcmd', - str(cm.exception)) + "Failed to shellify 1 into file" + " /var/lib/cloud/instances/iid-datasource-none/scripts/runcmd", + str(cm.exception), + ) @skipUnlessJsonSchema() def test_handler_schema_validation_warns_non_array_type(self): @@ -64,14 +68,15 @@ class TestRuncmd(FilesystemMockingTestCase): Schema validation is not strict, so runcmd attempts to shellify the invalid content. """ - invalid_config = {'runcmd': 1} + invalid_config = {"runcmd": 1} cc = get_cloud(paths=self.paths) with self.assertRaises(TypeError) as cm: - handle('cc_runcmd', invalid_config, cc, LOG, []) + handle("cc_runcmd", invalid_config, cc, LOG, []) self.assertIn( - 'Invalid config:\nruncmd: 1 is not of type \'array\'', - self.logs.getvalue()) - self.assertIn('Failed to shellify', str(cm.exception)) + "Invalid config:\nruncmd: 1 is not of type 'array'", + self.logs.getvalue(), + ) + self.assertIn("Failed to shellify", str(cm.exception)) @skipUnlessJsonSchema() def test_handler_schema_validation_warns_non_array_item_type(self): @@ -81,28 +86,29 @@ class TestRuncmd(FilesystemMockingTestCase): invalid content. 
""" invalid_config = { - 'runcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]} + "runcmd": ["ls /", 20, ["wget", "http://stuff/blah"], {"a": "n"}] + } cc = get_cloud(paths=self.paths) with self.assertRaises(TypeError) as cm: - handle('cc_runcmd', invalid_config, cc, LOG, []) + handle("cc_runcmd", invalid_config, cc, LOG, []) expected_warnings = [ - 'runcmd.1: 20 is not valid under any of the given schemas', - 'runcmd.3: {\'a\': \'n\'} is not valid under any of the given' - ' schema' + "runcmd.1: 20 is not valid under any of the given schemas", + "runcmd.3: {'a': 'n'} is not valid under any of the given schema", ] logs = self.logs.getvalue() for warning in expected_warnings: self.assertIn(warning, logs) - self.assertIn('Failed to shellify', str(cm.exception)) + self.assertIn("Failed to shellify", str(cm.exception)) def test_handler_write_valid_runcmd_schema_to_file(self): """Valid runcmd schema is written to a runcmd shell script.""" - valid_config = {'runcmd': [['ls', '/']]} + valid_config = {"runcmd": [["ls", "/"]]} cc = get_cloud(paths=self.paths) - handle('cc_runcmd', valid_config, cc, LOG, []) + handle("cc_runcmd", valid_config, cc, LOG, []) runcmd_file = os.path.join( self.new_root, - 'var/lib/cloud/instances/iid-datasource-none/scripts/runcmd') + "var/lib/cloud/instances/iid-datasource-none/scripts/runcmd", + ) self.assertEqual("#!/bin/sh\n'ls' '/'\n", util.load_file(runcmd_file)) file_stat = os.stat(runcmd_file) self.assertEqual(0o700, stat.S_IMODE(file_stat.st_mode)) @@ -118,12 +124,14 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin): """Duplicated commands array/array entries are allowed.""" self.assertSchemaValid( [["echo", "bye"], ["echo", "bye"]], - "command entries can be duplicate.") + "command entries can be duplicate.", + ) def test_duplicates_are_fine_array_string(self): """Duplicated commands array/string entries are allowed.""" self.assertSchemaValid( - ["echo bye", "echo bye"], - "command entries can be duplicate.") + ["echo bye", "echo bye"], "command entries can be duplicate." 
+ ) + # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_seed_random.py b/tests/unittests/config/test_cc_seed_random.py index cfd67dce..8b2fdcdd 100644 --- a/tests/unittests/config/test_cc_seed_random.py +++ b/tests/unittests/config/test_cc_seed_random.py @@ -12,11 +12,9 @@ import logging import tempfile from io import BytesIO -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util from cloudinit.config import cc_seed_random from tests.unittests import helpers as t_help - from tests.unittests.util import get_cloud LOG = logging.getLogger(__name__) @@ -29,8 +27,8 @@ class TestRandomSeed(t_help.TestCase): self.unapply = [] # by default 'which' has nothing in its path - self.apply_patches([(subp, 'which', self._which)]) - self.apply_patches([(subp, 'subp', self._subp)]) + self.apply_patches([(subp, "which", self._which)]) + self.apply_patches([(subp, "subp", self._subp)]) self.subp_called = [] self.whichdata = {} @@ -47,149 +45,166 @@ class TestRandomSeed(t_help.TestCase): def _subp(self, *args, **kwargs): # supports subp calling with cmd as args or kwargs - if 'args' not in kwargs: - kwargs['args'] = args[0] + if "args" not in kwargs: + kwargs["args"] = args[0] self.subp_called.append(kwargs) return def _compress(self, text): contents = BytesIO() - gz_fh = gzip.GzipFile(mode='wb', fileobj=contents) + gz_fh = gzip.GzipFile(mode="wb", fileobj=contents) gz_fh.write(text) gz_fh.close() return contents.getvalue() def test_append_random(self): cfg = { - 'random_seed': { - 'file': self._seed_file, - 'data': 'tiny-tim-was-here', + "random_seed": { + "file": self._seed_file, + "data": "tiny-tim-was-here", } } - cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) + cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), LOG, []) contents = util.load_file(self._seed_file) self.assertEqual("tiny-tim-was-here", contents) def test_append_random_unknown_encoding(self): data = self._compress(b"tiny-toe") cfg = { - 'random_seed': { - 'file': self._seed_file, - 'data': data, - 'encoding': 'special_encoding', + "random_seed": { + "file": self._seed_file, + "data": data, + "encoding": "special_encoding", } } - self.assertRaises(IOError, cc_seed_random.handle, 'test', cfg, - get_cloud('ubuntu'), LOG, []) + self.assertRaises( + IOError, + cc_seed_random.handle, + "test", + cfg, + get_cloud("ubuntu"), + LOG, + [], + ) def test_append_random_gzip(self): data = self._compress(b"tiny-toe") cfg = { - 'random_seed': { - 'file': self._seed_file, - 'data': data, - 'encoding': 'gzip', + "random_seed": { + "file": self._seed_file, + "data": data, + "encoding": "gzip", } } - cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) + cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), LOG, []) contents = util.load_file(self._seed_file) self.assertEqual("tiny-toe", contents) def test_append_random_gz(self): data = self._compress(b"big-toe") cfg = { - 'random_seed': { - 'file': self._seed_file, - 'data': data, - 'encoding': 'gz', + "random_seed": { + "file": self._seed_file, + "data": data, + "encoding": "gz", } } - cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) + cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), LOG, []) contents = util.load_file(self._seed_file) self.assertEqual("big-toe", contents) def test_append_random_base64(self): - data = util.b64e('bubbles') + data = util.b64e("bubbles") cfg = { - 'random_seed': { - 'file': self._seed_file, - 'data': data, - 'encoding': 'base64', + "random_seed": { + "file": 
self._seed_file, + "data": data, + "encoding": "base64", } } - cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) + cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), LOG, []) contents = util.load_file(self._seed_file) self.assertEqual("bubbles", contents) def test_append_random_b64(self): - data = util.b64e('kit-kat') + data = util.b64e("kit-kat") cfg = { - 'random_seed': { - 'file': self._seed_file, - 'data': data, - 'encoding': 'b64', + "random_seed": { + "file": self._seed_file, + "data": data, + "encoding": "b64", } } - cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) + cc_seed_random.handle("test", cfg, get_cloud("ubuntu"), LOG, []) contents = util.load_file(self._seed_file) self.assertEqual("kit-kat", contents) def test_append_random_metadata(self): cfg = { - 'random_seed': { - 'file': self._seed_file, - 'data': 'tiny-tim-was-here', + "random_seed": { + "file": self._seed_file, + "data": "tiny-tim-was-here", } } - c = get_cloud('ubuntu', metadata={'random_seed': '-so-was-josh'}) - cc_seed_random.handle('test', cfg, c, LOG, []) + c = get_cloud("ubuntu", metadata={"random_seed": "-so-was-josh"}) + cc_seed_random.handle("test", cfg, c, LOG, []) contents = util.load_file(self._seed_file) - self.assertEqual('tiny-tim-was-here-so-was-josh', contents) + self.assertEqual("tiny-tim-was-here-so-was-josh", contents) def test_seed_command_provided_and_available(self): - c = get_cloud('ubuntu') - self.whichdata = {'pollinate': '/usr/bin/pollinate'} - cfg = {'random_seed': {'command': ['pollinate', '-q']}} - cc_seed_random.handle('test', cfg, c, LOG, []) + c = get_cloud("ubuntu") + self.whichdata = {"pollinate": "/usr/bin/pollinate"} + cfg = {"random_seed": {"command": ["pollinate", "-q"]}} + cc_seed_random.handle("test", cfg, c, LOG, []) - subp_args = [f['args'] for f in self.subp_called] - self.assertIn(['pollinate', '-q'], subp_args) + subp_args = [f["args"] for f in self.subp_called] + self.assertIn(["pollinate", "-q"], subp_args) def test_seed_command_not_provided(self): - c = get_cloud('ubuntu') + c = get_cloud("ubuntu") self.whichdata = {} - cc_seed_random.handle('test', {}, c, LOG, []) + cc_seed_random.handle("test", {}, c, LOG, []) # subp should not have been called as which would say not available self.assertFalse(self.subp_called) def test_unavailable_seed_command_and_required_raises_error(self): - c = get_cloud('ubuntu') + c = get_cloud("ubuntu") self.whichdata = {} - cfg = {'random_seed': {'command': ['THIS_NO_COMMAND'], - 'command_required': True}} - self.assertRaises(ValueError, cc_seed_random.handle, - 'test', cfg, c, LOG, []) + cfg = { + "random_seed": { + "command": ["THIS_NO_COMMAND"], + "command_required": True, + } + } + self.assertRaises( + ValueError, cc_seed_random.handle, "test", cfg, c, LOG, [] + ) def test_seed_command_and_required(self): - c = get_cloud('ubuntu') - self.whichdata = {'foo': 'foo'} - cfg = {'random_seed': {'command_required': True, 'command': ['foo']}} - cc_seed_random.handle('test', cfg, c, LOG, []) + c = get_cloud("ubuntu") + self.whichdata = {"foo": "foo"} + cfg = {"random_seed": {"command_required": True, "command": ["foo"]}} + cc_seed_random.handle("test", cfg, c, LOG, []) - self.assertIn(['foo'], [f['args'] for f in self.subp_called]) + self.assertIn(["foo"], [f["args"] for f in self.subp_called]) def test_file_in_environment_for_command(self): - c = get_cloud('ubuntu') - self.whichdata = {'foo': 'foo'} - cfg = {'random_seed': {'command_required': True, 'command': ['foo'], - 'file': self._seed_file}} - 
cc_seed_random.handle('test', cfg, c, LOG, [])
+ c = get_cloud("ubuntu")
+ self.whichdata = {"foo": "foo"}
+ cfg = {
+ "random_seed": {
+ "command_required": True,
+ "command": ["foo"],
+ "file": self._seed_file,
+ }
+ }
+ cc_seed_random.handle("test", cfg, c, LOG, [])
# this just insists that the first time subp was called,
# RANDOM_SEED_FILE was in the environment set up correctly
- subp_env = [f['env'] for f in self.subp_called]
- self.assertEqual(subp_env[0].get('RANDOM_SEED_FILE'), self._seed_file)
+ subp_env = [f["env"] for f in self.subp_called]
+ self.assertEqual(subp_env[0].get("RANDOM_SEED_FILE"), self._seed_file)
def apply_patches(patches):
@@ -202,4 +217,5 @@ def apply_patches(patches):
ret.append((ref, name, orig))
return ret
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_set_hostname.py b/tests/unittests/config/test_cc_set_hostname.py
index b9a783a7..fd994c4e 100644
--- a/tests/unittests/config/test_cc_set_hostname.py
+++ b/tests/unittests/config/test_cc_set_hostname.py
@@ -1,15 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.config import cc_set_hostname
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import util
-
-from tests.unittests import helpers as t_help
-
-from configobj import ConfigObj
import logging
import os
import shutil
@@ -17,6 +7,12 @@
import tempfile
from io import BytesIO
from unittest import mock
+from configobj import ConfigObj
+
+from cloudinit import cloud, distros, helpers, util
+from cloudinit.config import cc_set_hostname
+from tests.unittests import helpers as t_help
+
LOG = logging.getLogger(__name__)
@@ -27,181 +23,186 @@ class TestHostname(t_help.FilesystemMockingTestCase):
def setUp(self):
super(TestHostname, self).setUp()
self.tmp = tempfile.mkdtemp()
- util.ensure_dir(os.path.join(self.tmp, 'data'))
+ util.ensure_dir(os.path.join(self.tmp, "data"))
self.addCleanup(shutil.rmtree, self.tmp)
def _fetch_distro(self, kind, conf=None):
cls = distros.fetch(kind)
- paths = helpers.Paths({'cloud_dir': self.tmp})
+ paths = helpers.Paths({"cloud_dir": self.tmp})
conf = {} if conf is None else conf
return cls(kind, conf, paths)
def test_debian_write_hostname_prefer_fqdn(self):
cfg = {
- 'hostname': 'blah',
- 'prefer_fqdn_over_hostname': True,
- 'fqdn': 'blah.yahoo.com',
+ "hostname": "blah",
+ "prefer_fqdn_over_hostname": True,
+ "fqdn": "blah.yahoo.com",
}
- distro = self._fetch_distro('debian', cfg)
- paths = helpers.Paths({'cloud_dir': self.tmp})
+ distro = self._fetch_distro("debian", cfg)
+ paths = helpers.Paths({"cloud_dir": self.tmp})
ds = None
cc = cloud.Cloud(ds, paths, {}, distro, None)
self.patchUtils(self.tmp)
- cc_set_hostname.handle('cc_set_hostname',
- cfg, cc, LOG, [])
+ cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, [])
contents = util.load_file("/etc/hostname")
- self.assertEqual('blah.yahoo.com', contents.strip())
+ self.assertEqual("blah.yahoo.com", contents.strip())
- @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False)
+ @mock.patch("cloudinit.distros.Distro.uses_systemd", return_value=False)
def test_rhel_write_hostname_prefer_hostname(self, m_uses_systemd):
cfg = {
- 'hostname': 'blah',
- 'prefer_fqdn_over_hostname': False,
- 'fqdn': 'blah.yahoo.com',
+ "hostname": "blah",
+ "prefer_fqdn_over_hostname": False,
+ "fqdn": "blah.yahoo.com",
}
- distro = self._fetch_distro('rhel',
cfg) + paths = helpers.Paths({"cloud_dir": self.tmp}) ds = None cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) - cc_set_hostname.handle('cc_set_hostname', - cfg, cc, LOG, []) + cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, []) contents = util.load_file("/etc/sysconfig/network", decode=False) n_cfg = ConfigObj(BytesIO(contents)) - self.assertEqual( - {'HOSTNAME': 'blah'}, - dict(n_cfg)) + self.assertEqual({"HOSTNAME": "blah"}, dict(n_cfg)) - @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False) + @mock.patch("cloudinit.distros.Distro.uses_systemd", return_value=False) def test_write_hostname_rhel(self, m_uses_systemd): - cfg = { - 'hostname': 'blah', - 'fqdn': 'blah.blah.blah.yahoo.com' - } - distro = self._fetch_distro('rhel') - paths = helpers.Paths({'cloud_dir': self.tmp}) + cfg = {"hostname": "blah", "fqdn": "blah.blah.blah.yahoo.com"} + distro = self._fetch_distro("rhel") + paths = helpers.Paths({"cloud_dir": self.tmp}) ds = None cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) - cc_set_hostname.handle('cc_set_hostname', - cfg, cc, LOG, []) + cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, []) contents = util.load_file("/etc/sysconfig/network", decode=False) n_cfg = ConfigObj(BytesIO(contents)) - self.assertEqual( - {'HOSTNAME': 'blah.blah.blah.yahoo.com'}, - dict(n_cfg)) + self.assertEqual({"HOSTNAME": "blah.blah.blah.yahoo.com"}, dict(n_cfg)) def test_write_hostname_debian(self): cfg = { - 'hostname': 'blah', - 'fqdn': 'blah.blah.blah.yahoo.com', + "hostname": "blah", + "fqdn": "blah.blah.blah.yahoo.com", } - distro = self._fetch_distro('debian') - paths = helpers.Paths({'cloud_dir': self.tmp}) + distro = self._fetch_distro("debian") + paths = helpers.Paths({"cloud_dir": self.tmp}) ds = None cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) - cc_set_hostname.handle('cc_set_hostname', - cfg, cc, LOG, []) + cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, []) contents = util.load_file("/etc/hostname") - self.assertEqual('blah', contents.strip()) + self.assertEqual("blah", contents.strip()) - @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False) + @mock.patch("cloudinit.distros.Distro.uses_systemd", return_value=False) def test_write_hostname_sles(self, m_uses_systemd): cfg = { - 'hostname': 'blah.blah.blah.suse.com', + "hostname": "blah.blah.blah.suse.com", } - distro = self._fetch_distro('sles') - paths = helpers.Paths({'cloud_dir': self.tmp}) + distro = self._fetch_distro("sles") + paths = helpers.Paths({"cloud_dir": self.tmp}) ds = None cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) - cc_set_hostname.handle('cc_set_hostname', cfg, cc, LOG, []) + cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, []) contents = util.load_file(distro.hostname_conf_fn) - self.assertEqual('blah', contents.strip()) + self.assertEqual("blah", contents.strip()) - @mock.patch('cloudinit.distros.photon.subp.subp') + @mock.patch("cloudinit.distros.photon.subp.subp") def test_photon_hostname(self, m_subp): cfg1 = { - 'hostname': 'photon', - 'prefer_fqdn_over_hostname': True, - 'fqdn': 'test1.vmware.com', + "hostname": "photon", + "prefer_fqdn_over_hostname": True, + "fqdn": "test1.vmware.com", } cfg2 = { - 'hostname': 'photon', - 'prefer_fqdn_over_hostname': False, - 'fqdn': 'test2.vmware.com', + "hostname": "photon", + "prefer_fqdn_over_hostname": False, + "fqdn": "test2.vmware.com", } ds = None m_subp.return_value = (None, None) - distro = 
self._fetch_distro('photon', cfg1) - paths = helpers.Paths({'cloud_dir': self.tmp}) + distro = self._fetch_distro("photon", cfg1) + paths = helpers.Paths({"cloud_dir": self.tmp}) cc = cloud.Cloud(ds, paths, {}, distro, None) for c in [cfg1, cfg2]: - cc_set_hostname.handle('cc_set_hostname', c, cc, LOG, []) + cc_set_hostname.handle("cc_set_hostname", c, cc, LOG, []) print("\n", m_subp.call_args_list) - if c['prefer_fqdn_over_hostname']: + if c["prefer_fqdn_over_hostname"]: assert [ - mock.call(['hostnamectl', 'set-hostname', c['fqdn']], - capture=True) + mock.call( + ["hostnamectl", "set-hostname", c["fqdn"]], + capture=True, + ) ] in m_subp.call_args_list assert [ - mock.call(['hostnamectl', 'set-hostname', c['hostname']], - capture=True) + mock.call( + ["hostnamectl", "set-hostname", c["hostname"]], + capture=True, + ) ] not in m_subp.call_args_list else: assert [ - mock.call(['hostnamectl', 'set-hostname', c['hostname']], - capture=True) + mock.call( + ["hostnamectl", "set-hostname", c["hostname"]], + capture=True, + ) ] in m_subp.call_args_list assert [ - mock.call(['hostnamectl', 'set-hostname', c['fqdn']], - capture=True) + mock.call( + ["hostnamectl", "set-hostname", c["fqdn"]], + capture=True, + ) ] not in m_subp.call_args_list def test_multiple_calls_skips_unchanged_hostname(self): """Only new hostname or fqdn values will generate a hostname call.""" - distro = self._fetch_distro('debian') - paths = helpers.Paths({'cloud_dir': self.tmp}) + distro = self._fetch_distro("debian") + paths = helpers.Paths({"cloud_dir": self.tmp}) ds = None cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) cc_set_hostname.handle( - 'cc_set_hostname', {'hostname': 'hostname1.me.com'}, cc, LOG, []) + "cc_set_hostname", {"hostname": "hostname1.me.com"}, cc, LOG, [] + ) contents = util.load_file("/etc/hostname") - self.assertEqual('hostname1', contents.strip()) + self.assertEqual("hostname1", contents.strip()) cc_set_hostname.handle( - 'cc_set_hostname', {'hostname': 'hostname1.me.com'}, cc, LOG, []) + "cc_set_hostname", {"hostname": "hostname1.me.com"}, cc, LOG, [] + ) self.assertIn( - 'DEBUG: No hostname changes. Skipping set-hostname\n', - self.logs.getvalue()) + "DEBUG: No hostname changes. 
Skipping set-hostname\n", + self.logs.getvalue(), + ) cc_set_hostname.handle( - 'cc_set_hostname', {'hostname': 'hostname2.me.com'}, cc, LOG, []) + "cc_set_hostname", {"hostname": "hostname2.me.com"}, cc, LOG, [] + ) contents = util.load_file("/etc/hostname") - self.assertEqual('hostname2', contents.strip()) + self.assertEqual("hostname2", contents.strip()) self.assertIn( - 'Non-persistently setting the system hostname to hostname2', - self.logs.getvalue()) + "Non-persistently setting the system hostname to hostname2", + self.logs.getvalue(), + ) def test_error_on_distro_set_hostname_errors(self): """Raise SetHostnameError on exceptions from distro.set_hostname.""" - distro = self._fetch_distro('debian') + distro = self._fetch_distro("debian") def set_hostname_error(hostname, fqdn): raise Exception("OOPS on: %s" % fqdn) distro.set_hostname = set_hostname_error - paths = helpers.Paths({'cloud_dir': self.tmp}) + paths = helpers.Paths({"cloud_dir": self.tmp}) ds = None cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) with self.assertRaises(cc_set_hostname.SetHostnameError) as ctx_mgr: cc_set_hostname.handle( - 'somename', {'hostname': 'hostname1.me.com'}, cc, LOG, []) + "somename", {"hostname": "hostname1.me.com"}, cc, LOG, [] + ) self.assertEqual( - 'Failed to set the hostname to hostname1.me.com (hostname1):' - ' OOPS on: hostname1.me.com', - str(ctx_mgr.exception)) + "Failed to set the hostname to hostname1.me.com (hostname1):" + " OOPS on: hostname1.me.com", + str(ctx_mgr.exception), + ) + # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_set_passwords.py b/tests/unittests/config/test_cc_set_passwords.py index 9bcd0439..bc81214b 100644 --- a/tests/unittests/config/test_cc_set_passwords.py +++ b/tests/unittests/config/test_cc_set_passwords.py @@ -2,9 +2,9 @@ from unittest import mock +from cloudinit import util from cloudinit.config import cc_set_passwords as setpass from tests.unittests.helpers import CiTestCase -from cloudinit import util MODPATH = "cloudinit.config.cc_set_passwords." 
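The import hunk above is the isort half of this change: imports are regrouped into standard-library, third-party, and first-party blocks and alphabetized within each block, which is why "from cloudinit import util" moves ahead of the cc_set_passwords import. A minimal sketch of the ordering isort enforces across these test files (assuming a Black-compatible isort profile; the repository's actual isort settings are not shown in this diff):

    # Group 1: standard library, alphabetized.
    import logging
    import os

    # Group 2: third-party packages.
    import pytest

    # Group 3: first-party code and local test helpers; the names inside
    # a single "from x import a, b" statement are also sorted.
    from cloudinit import cloud, distros, helpers, util
    from tests.unittests import helpers as t_help

The same three-block shape appears in every file of this commit, so any hunk that only reshuffles import lines can be read as pure isort output with no behavioral change.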
diff --git a/tests/unittests/config/test_cc_set_passwords.py b/tests/unittests/config/test_cc_set_passwords.py
index 9bcd0439..bc81214b 100644
--- a/tests/unittests/config/test_cc_set_passwords.py
+++ b/tests/unittests/config/test_cc_set_passwords.py
@@ -2,9 +2,9 @@
from unittest import mock
+from cloudinit import util
from cloudinit.config import cc_set_passwords as setpass
from tests.unittests.helpers import CiTestCase
-from cloudinit import util
MODPATH = "cloudinit.config.cc_set_passwords."
@@ -16,27 +16,29 @@ class TestHandleSshPwauth(CiTestCase):
@mock.patch("cloudinit.distros.subp.subp")
def test_unknown_value_logs_warning(self, m_subp):
- cloud = self.tmp_cloud(distro='ubuntu')
+ cloud = self.tmp_cloud(distro="ubuntu")
setpass.handle_ssh_pwauth("floo", cloud.distro)
- self.assertIn("Unrecognized value: ssh_pwauth=floo",
- self.logs.getvalue())
+ self.assertIn(
+ "Unrecognized value: ssh_pwauth=floo", self.logs.getvalue()
+ )
m_subp.assert_not_called()
@mock.patch(MODPATH + "update_ssh_config", return_value=True)
@mock.patch("cloudinit.distros.subp.subp")
def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config):
"""If systemctl in service cmd: systemctl restart name."""
- cloud = self.tmp_cloud(distro='ubuntu')
- cloud.distro.init_cmd = ['systemctl']
+ cloud = self.tmp_cloud(distro="ubuntu")
+ cloud.distro.init_cmd = ["systemctl"]
setpass.handle_ssh_pwauth(True, cloud.distro)
m_subp.assert_called_with(
- ["systemctl", "restart", "ssh"], capture=True)
+ ["systemctl", "restart", "ssh"], capture=True
+ )
@mock.patch(MODPATH + "update_ssh_config", return_value=False)
@mock.patch("cloudinit.distros.subp.subp")
def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config):
"""If config is not updated, then no system restart should be done."""
- cloud = self.tmp_cloud(distro='ubuntu')
+ cloud = self.tmp_cloud(distro="ubuntu")
setpass.handle_ssh_pwauth(True, cloud.distro)
m_subp.assert_not_called()
self.assertIn("No need to restart SSH", self.logs.getvalue())
@@ -45,7 +47,7 @@ class TestHandleSshPwauth(CiTestCase):
@mock.patch("cloudinit.distros.subp.subp")
def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config):
"""If 'unchanged', then no updates to config and no restart."""
- cloud = self.tmp_cloud(distro='ubuntu')
+ cloud = self.tmp_cloud(distro="ubuntu")
setpass.handle_ssh_pwauth("unchanged", cloud.distro)
m_update_ssh_config.assert_not_called()
m_subp.assert_not_called()
@@ -53,7 +55,7 @@ class TestHandleSshPwauth(CiTestCase):
@mock.patch("cloudinit.distros.subp.subp")
def test_valid_change_values(self, m_subp):
"""If value is a valid change value, then update should be called."""
- cloud = self.tmp_cloud(distro='ubuntu')
+ cloud = self.tmp_cloud(distro="ubuntu")
upname = MODPATH + "update_ssh_config"
optname = "PasswordAuthentication"
for value in util.FALSE_STRINGS + util.TRUE_STRINGS:
@@ -71,52 +73,65 @@ class TestSetPasswordsHandle(CiTestCase):
def test_handle_on_empty_config(self, *args):
"""handle logs that no password has changed when config is empty."""
- cloud = self.tmp_cloud(distro='ubuntu')
+ cloud = self.tmp_cloud(distro="ubuntu")
setpass.handle(
- 'IGNORED', cfg={}, cloud=cloud, log=self.logger, args=[])
+ "IGNORED", cfg={}, cloud=cloud, log=self.logger, args=[]
+ )
self.assertEqual(
"DEBUG: Leaving SSH config 'PasswordAuthentication' unchanged. "
- 'ssh_pwauth=None\n',
- self.logs.getvalue())
+ "ssh_pwauth=None\n",
+ self.logs.getvalue(),
+ )
def test_handle_on_chpasswd_list_parses_common_hashes(self):
"""handle parses command password hashes."""
- cloud = self.tmp_cloud(distro='ubuntu')
+ cloud = self.tmp_cloud(distro="ubuntu")
valid_hashed_pwds = [
- 'root:$2y$10$8BQjxjVByHA/Ee.O1bCXtO8S7Y5WojbXWqnqYpUW.BrPx/'
- 'Dlew1Va',
- 'ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoakMMC7dR52q'
- 'SDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx3oo1']
- cfg = {'chpasswd': {'list': valid_hashed_pwds}}
- with mock.patch.object(setpass, 'chpasswd') as chpasswd:
+ "root:$2y$10$8BQjxjVByHA/Ee.O1bCXtO8S7Y5WojbXWqnqYpUW.BrPx/"
+ "Dlew1Va",
+ "ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoakMMC7dR52q"
+ "SDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx3oo1",
+ ]
+ cfg = {"chpasswd": {"list": valid_hashed_pwds}}
+ with mock.patch.object(setpass, "chpasswd") as chpasswd:
setpass.handle(
- 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
+ "IGNORED", cfg=cfg, cloud=cloud, log=self.logger, args=[]
+ )
self.assertIn(
- 'DEBUG: Handling input for chpasswd as list.',
- self.logs.getvalue())
+ "DEBUG: Handling input for chpasswd as list.", self.logs.getvalue()
+ )
self.assertIn(
"DEBUG: Setting hashed password for ['root', 'ubuntu']",
- self.logs.getvalue())
- valid = '\n'.join(valid_hashed_pwds) + '\n'
+ self.logs.getvalue(),
+ )
+ valid = "\n".join(valid_hashed_pwds) + "\n"
called = chpasswd.call_args[0][1]
self.assertEqual(valid, called)
@mock.patch(MODPATH + "util.is_BSD")
@mock.patch(MODPATH + "subp.subp")
def test_bsd_calls_custom_pw_cmds_to_set_and_expire_passwords(
- self, m_subp, m_is_bsd):
+ self, m_subp, m_is_bsd
+ ):
"""BSD doesn't use chpasswd"""
m_is_bsd.return_value = True
- cloud = self.tmp_cloud(distro='freebsd')
- valid_pwds = ['ubuntu:passw0rd']
- cfg = {'chpasswd': {'list': valid_pwds}}
+ cloud = self.tmp_cloud(distro="freebsd")
+ valid_pwds = ["ubuntu:passw0rd"]
+ cfg = {"chpasswd": {"list": valid_pwds}}
setpass.handle(
- 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
- self.assertEqual([
- mock.call(['pw', 'usermod', 'ubuntu', '-h', '0'], data='passw0rd',
- logstring="chpasswd for ubuntu"),
- mock.call(['pw', 'usermod', 'ubuntu', '-p', '01-Jan-1970'])],
- m_subp.call_args_list)
+ "IGNORED", cfg=cfg, cloud=cloud, log=self.logger, args=[]
+ )
+ self.assertEqual(
+ [
+ mock.call(
+ ["pw", "usermod", "ubuntu", "-h", "0"],
+ data="passw0rd",
+ logstring="chpasswd for ubuntu",
+ ),
+ mock.call(["pw", "usermod", "ubuntu", "-p", "01-Jan-1970"]),
+ ],
+ m_subp.call_args_list,
+ )
@mock.patch(MODPATH + "util.multi_log")
@mock.patch(MODPATH + "subp.subp")
@@ -124,29 +139,29 @@ class TestSetPasswordsHandle(CiTestCase):
self, m_subp, m_multi_log
):
"""handle parses command set random passwords."""
- cloud = self.tmp_cloud(distro='ubuntu')
- valid_random_pwds = [
- 'root:R',
- 'ubuntu:RANDOM']
- cfg = {'chpasswd': {'expire': 'false', 'list': valid_random_pwds}}
- with mock.patch.object(setpass, 'chpasswd') as chpasswd:
+ cloud = self.tmp_cloud(distro="ubuntu")
+ valid_random_pwds = ["root:R", "ubuntu:RANDOM"]
+ cfg = {"chpasswd": {"expire": "false", "list": valid_random_pwds}}
+ with mock.patch.object(setpass, "chpasswd") as chpasswd:
setpass.handle(
- 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
+ "IGNORED", cfg=cfg, cloud=cloud, log=self.logger, args=[]
+ )
self.assertIn(
- 'DEBUG: Handling input for chpasswd as list.',
- self.logs.getvalue())
+ "DEBUG: Handling input for chpasswd as list.", self.logs.getvalue()
+ )
self.assertEqual(1, chpasswd.call_count)
passwords, _ = chpasswd.call_args
user_pass = {
user: password
- for user, password
- in (line.split(":") for line in passwords[1].splitlines())
+ for user, password in (
+ line.split(":") for line in passwords[1].splitlines()
+ )
}
self.assertEqual(1, m_multi_log.call_count)
self.assertEqual(
mock.call(mock.ANY, stderr=False, fallback_to_stdout=False),
- m_multi_log.call_args
+ m_multi_log.call_args,
)
self.assertEqual(set(["root", "ubuntu"]), set(user_pass.keys()))
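The import reshuffles at the top of each file are isort's contribution rather than Black's: imports are grouped into sections (standard library, third-party, first-party, local) and sorted alphabetically within each section, which is why `from cloudinit import util` now precedes `from cloudinit.config import cc_set_passwords`. Recreating the header hunk above as a plain before/after pair:

    # Before isort: first-party imports in arbitrary order
    from unittest import mock

    from cloudinit.config import cc_set_passwords as setpass
    from tests.unittests.helpers import CiTestCase
    from cloudinit import util

    # After isort: alphabetical within the first-party section
    from unittest import mock

    from cloudinit import util
    from cloudinit.config import cc_set_passwords as setpass
    from tests.unittests.helpers import CiTestCase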
diff --git a/tests/unittests/config/test_cc_snap.py b/tests/unittests/config/test_cc_snap.py
index e8113eca..f7e66ad2 100644
--- a/tests/unittests/config/test_cc_snap.py
+++ b/tests/unittests/config/test_cc_snap.py
@@ -3,14 +3,23 @@
import re
from io import StringIO
+from cloudinit import util
from cloudinit.config.cc_snap import (
- ASSERTIONS_FILE, add_assertions, handle, maybe_install_squashfuse,
- run_commands, schema)
+ ASSERTIONS_FILE,
+ add_assertions,
+ handle,
+ maybe_install_squashfuse,
+ run_commands,
+ schema,
+)
from cloudinit.config.schema import validate_cloudconfig_schema
-from cloudinit import util
from tests.unittests.helpers import (
- CiTestCase, SchemaTestCaseMixin, mock, wrap_and_call, skipUnlessJsonSchema)
-
+ CiTestCase,
+ SchemaTestCaseMixin,
+ mock,
+ skipUnlessJsonSchema,
+ wrap_and_call,
+)
SYSTEM_USER_ASSERTION = """\
type: system-user
@@ -92,11 +101,11 @@ class TestAddAssertions(CiTestCase):
super(TestAddAssertions, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('cloudinit.config.cc_snap.subp.subp')
+ @mock.patch("cloudinit.config.cc_snap.subp.subp")
def test_add_assertions_on_empty_list(self, m_subp):
"""When provided with an empty list, add_assertions does nothing."""
add_assertions([])
- self.assertEqual('', self.logs.getvalue())
+ self.assertEqual("", self.logs.getvalue())
m_subp.assert_not_called()
def test_add_assertions_on_non_list_or_dict(self):
@@ -105,58 +114,72 @@ class TestAddAssertions(CiTestCase):
add_assertions(assertions="I'm Not Valid")
self.assertEqual(
"assertion parameter was not a list or dict: I'm Not Valid",
- str(context_manager.exception))
+ str(context_manager.exception),
+ )
- @mock.patch('cloudinit.config.cc_snap.subp.subp')
+ @mock.patch("cloudinit.config.cc_snap.subp.subp")
def test_add_assertions_adds_assertions_as_list(self, m_subp):
"""When provided with a list, add_assertions adds all assertions."""
self.assertEqual(
- ASSERTIONS_FILE, '/var/lib/cloud/instance/snapd.assertions')
- assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
+ ASSERTIONS_FILE, "/var/lib/cloud/instance/snapd.assertions"
+ )
+ assert_file = self.tmp_path("snapd.assertions", dir=self.tmp)
assertions = [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION]
wrap_and_call(
- 'cloudinit.config.cc_snap',
- {'ASSERTIONS_FILE': {'new': assert_file}},
- add_assertions, assertions)
+ "cloudinit.config.cc_snap",
+ {"ASSERTIONS_FILE": {"new": assert_file}},
+ add_assertions,
+ assertions,
+ )
self.assertIn(
- 'Importing user-provided snap assertions', self.logs.getvalue())
- self.assertIn(
- 'sertions', self.logs.getvalue())
+ "Importing user-provided snap assertions", self.logs.getvalue()
+ )
+ self.assertIn("sertions", self.logs.getvalue())
self.assertEqual(
- [mock.call(['snap', 'ack', assert_file], capture=True)],
- m_subp.call_args_list)
- compare_file = self.tmp_path('comparison', dir=self.tmp)
- util.write_file(compare_file, '\n'.join(assertions).encode('utf-8'))
+ [mock.call(["snap", "ack", assert_file], capture=True)],
+ m_subp.call_args_list,
+ )
+ compare_file = self.tmp_path("comparison", dir=self.tmp)
+ util.write_file(compare_file, "\n".join(assertions).encode("utf-8"))
self.assertEqual(
- util.load_file(compare_file), util.load_file(assert_file))
+ util.load_file(compare_file), util.load_file(assert_file)
+ )
- @mock.patch('cloudinit.config.cc_snap.subp.subp')
+ @mock.patch("cloudinit.config.cc_snap.subp.subp")
def test_add_assertions_adds_assertions_as_dict(self, m_subp):
"""When provided with a dict, add_assertions adds all assertions."""
self.assertEqual(
- ASSERTIONS_FILE, '/var/lib/cloud/instance/snapd.assertions')
- assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
- assertions = {'00': SYSTEM_USER_ASSERTION, '01': ACCOUNT_ASSERTION}
+ ASSERTIONS_FILE, "/var/lib/cloud/instance/snapd.assertions"
+ )
+ assert_file = self.tmp_path("snapd.assertions", dir=self.tmp)
+ assertions = {"00": SYSTEM_USER_ASSERTION, "01": ACCOUNT_ASSERTION}
wrap_and_call(
- 'cloudinit.config.cc_snap',
- {'ASSERTIONS_FILE': {'new': assert_file}},
- add_assertions, assertions)
+ "cloudinit.config.cc_snap",
+ {"ASSERTIONS_FILE": {"new": assert_file}},
+ add_assertions,
+ assertions,
+ )
self.assertIn(
- 'Importing user-provided snap assertions', self.logs.getvalue())
+ "Importing user-provided snap assertions", self.logs.getvalue()
+ )
self.assertIn(
"DEBUG: Snap acking: ['type: system-user', 'authority-id: Lqv",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
self.assertIn(
"DEBUG: Snap acking: ['type: account-key', 'authority-id: canonic",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
self.assertEqual(
- [mock.call(['snap', 'ack', assert_file], capture=True)],
- m_subp.call_args_list)
- compare_file = self.tmp_path('comparison', dir=self.tmp)
- combined = '\n'.join(assertions.values())
- util.write_file(compare_file, combined.encode('utf-8'))
+ [mock.call(["snap", "ack", assert_file], capture=True)],
+ m_subp.call_args_list,
+ )
+ compare_file = self.tmp_path("comparison", dir=self.tmp)
+ combined = "\n".join(assertions.values())
+ util.write_file(compare_file, combined.encode("utf-8"))
self.assertEqual(
- util.load_file(compare_file), util.load_file(assert_file))
+ util.load_file(compare_file), util.load_file(assert_file)
+ )
class TestRunCommands(CiTestCase):
@@ -168,11 +191,11 @@ class TestRunCommands(CiTestCase):
super(TestRunCommands, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('cloudinit.config.cc_snap.subp.subp')
+ @mock.patch("cloudinit.config.cc_snap.subp.subp")
def test_run_commands_on_empty_list(self, m_subp):
"""When provided with an empty list, run_commands does nothing."""
run_commands([])
- self.assertEqual('', self.logs.getvalue())
+ self.assertEqual("", self.logs.getvalue())
m_subp.assert_not_called()
def test_run_commands_on_non_list_or_dict(self):
@@ -181,68 +204,74 @@ class TestRunCommands(CiTestCase):
run_commands(commands="I'm Not Valid")
self.assertEqual(
"commands parameter was not a list or dict: I'm Not Valid",
- str(context_manager.exception))
+ str(context_manager.exception),
+ )
def test_run_command_logs_commands_and_exit_codes_to_stderr(self):
"""All exit codes are logged to stderr."""
- outfile = self.tmp_path('output.log', dir=self.tmp)
+ outfile = self.tmp_path("output.log", dir=self.tmp)
cmd1 = 'echo "HI" >> %s' % outfile
- cmd2 = 'bogus command'
+ cmd2 = "bogus command"
cmd3 = 'echo "MOM" >> %s' % outfile
commands = [cmd1, cmd2, cmd3]
- mock_path = 'cloudinit.config.cc_snap.sys.stderr'
+ mock_path = "cloudinit.config.cc_snap.sys.stderr"
with mock.patch(mock_path, new_callable=StringIO) as m_stderr:
with self.assertRaises(RuntimeError) as context_manager:
run_commands(commands=commands)
self.assertIsNotNone(
- re.search(r'bogus: (command )?not found',
- str(context_manager.exception)),
- msg='Expected bogus command not found')
- expected_stderr_log = '\n'.join([
- 'Begin run command: {cmd}'.format(cmd=cmd1),
- 'End run command: exit(0)',
- 'Begin run command: {cmd}'.format(cmd=cmd2),
- 'ERROR: End run command: exit(127)',
- 'Begin run command: {cmd}'.format(cmd=cmd3),
- 'End run command: exit(0)\n'])
+ re.search(
+ r"bogus: (command )?not found", str(context_manager.exception)
+ ),
+ msg="Expected bogus command not found",
+ )
+ expected_stderr_log = "\n".join(
+ [
+ "Begin run command: {cmd}".format(cmd=cmd1),
+ "End run command: exit(0)",
+ "Begin run command: {cmd}".format(cmd=cmd2),
+ "ERROR: End run command: exit(127)",
+ "Begin run command: {cmd}".format(cmd=cmd3),
+ "End run command: exit(0)\n",
+ ]
+ )
self.assertEqual(expected_stderr_log, m_stderr.getvalue())
def test_run_command_as_lists(self):
"""When commands are specified as a list, run them in order."""
- outfile = self.tmp_path('output.log', dir=self.tmp)
+ outfile = self.tmp_path("output.log", dir=self.tmp)
cmd1 = 'echo "HI" >> %s' % outfile
cmd2 = 'echo "MOM" >> %s' % outfile
commands = [cmd1, cmd2]
- mock_path = 'cloudinit.config.cc_snap.sys.stderr'
+ mock_path = "cloudinit.config.cc_snap.sys.stderr"
with mock.patch(mock_path, new_callable=StringIO):
run_commands(commands=commands)
self.assertIn(
- 'DEBUG: Running user-provided snap commands',
- self.logs.getvalue())
- self.assertEqual('HI\nMOM\n', util.load_file(outfile))
+ "DEBUG: Running user-provided snap commands", self.logs.getvalue()
+ )
+ self.assertEqual("HI\nMOM\n", util.load_file(outfile))
self.assertIn(
- 'WARNING: Non-snap commands in snap config:', self.logs.getvalue())
+ "WARNING: Non-snap commands in snap config:", self.logs.getvalue()
+ )
def test_run_command_dict_sorted_as_command_script(self):
"""When commands are a dict, sort them and run."""
- outfile = self.tmp_path('output.log', dir=self.tmp)
+ outfile = self.tmp_path("output.log", dir=self.tmp)
cmd1 = 'echo "HI" >> %s' % outfile
cmd2 = 'echo "MOM" >> %s' % outfile
- commands = {'02': cmd1, '01': cmd2}
- mock_path = 'cloudinit.config.cc_snap.sys.stderr'
+ commands = {"02": cmd1, "01": cmd2}
+ mock_path = "cloudinit.config.cc_snap.sys.stderr"
with mock.patch(mock_path, new_callable=StringIO):
run_commands(commands=commands)
- expected_messages = [
- 'DEBUG: Running user-provided snap commands']
+ expected_messages = ["DEBUG: Running user-provided snap commands"]
for message in expected_messages:
self.assertIn(message, self.logs.getvalue())
- self.assertEqual('MOM\nHI\n', util.load_file(outfile))
+ self.assertEqual("MOM\nHI\n", util.load_file(outfile))
@skipUnlessJsonSchema()
@@ -253,70 +282,72 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin):
def test_schema_warns_on_snap_not_as_dict(self):
"""If the snap configuration is not a dict, emit a warning."""
- validate_cloudconfig_schema({'snap': 'wrong type'}, schema)
+ validate_cloudconfig_schema({"snap": "wrong type"}, schema)
self.assertEqual(
"WARNING: Invalid config:\nsnap: 'wrong type' is not of type"
" 'object'\n",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
- @mock.patch('cloudinit.config.cc_snap.run_commands')
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
def test_schema_disallows_unknown_keys(self, _):
"""Unknown keys in the snap configuration emit warnings."""
validate_cloudconfig_schema(
- {'snap': {'commands': ['ls'], 'invalid-key': ''}}, schema)
+ {"snap": {"commands": ["ls"], "invalid-key": ""}}, schema
+ )
self.assertIn(
- 'WARNING: Invalid config:\nsnap: Additional properties are not'
+ "WARNING: Invalid config:\nsnap: Additional properties are not"
" allowed ('invalid-key' was unexpected)",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
def test_warn_schema_requires_either_commands_or_assertions(self):
"""Warn when snap configuration lacks both commands and assertions."""
- validate_cloudconfig_schema(
- {'snap': {}}, schema)
+ validate_cloudconfig_schema({"snap": {}}, schema)
self.assertIn(
- 'WARNING: Invalid config:\nsnap: {} does not have enough'
- ' properties',
- self.logs.getvalue())
+ "WARNING: Invalid config:\nsnap: {} does not have enough"
+ " properties",
+ self.logs.getvalue(),
+ )
- @mock.patch('cloudinit.config.cc_snap.run_commands')
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
def test_warn_schema_commands_is_not_list_or_dict(self, _):
"""Warn when snap:commands config is not a list or dict."""
- validate_cloudconfig_schema(
- {'snap': {'commands': 'broken'}}, schema)
+ validate_cloudconfig_schema({"snap": {"commands": "broken"}}, schema)
self.assertEqual(
"WARNING: Invalid config:\nsnap.commands: 'broken' is not of type"
" 'object', 'array'\n",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
- @mock.patch('cloudinit.config.cc_snap.run_commands')
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
def test_warn_schema_when_commands_is_empty(self, _):
"""Emit warnings when snap:commands is an empty list or dict."""
- validate_cloudconfig_schema(
- {'snap': {'commands': []}}, schema)
- validate_cloudconfig_schema(
- {'snap': {'commands': {}}}, schema)
+ validate_cloudconfig_schema({"snap": {"commands": []}}, schema)
+ validate_cloudconfig_schema({"snap": {"commands": {}}}, schema)
self.assertEqual(
"WARNING: Invalid config:\nsnap.commands: [] is too short\n"
"WARNING: Invalid config:\nsnap.commands: {} does not have enough"
" properties\n",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
- @mock.patch('cloudinit.config.cc_snap.run_commands')
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
def test_schema_when_commands_are_list_or_dict(self, _):
"""No warnings when snap:commands are either a list or dict."""
+ validate_cloudconfig_schema({"snap": {"commands": ["valid"]}}, schema)
validate_cloudconfig_schema(
- {'snap': {'commands': ['valid']}}, schema)
- validate_cloudconfig_schema(
- {'snap': {'commands': {'01': 'also valid'}}}, schema)
- self.assertEqual('', self.logs.getvalue())
+ {"snap": {"commands": {"01": "also valid"}}}, schema
+ )
+ self.assertEqual("", self.logs.getvalue())
- @mock.patch('cloudinit.config.cc_snap.run_commands')
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
def test_schema_when_commands_values_are_invalid_type(self, _):
"""Warnings when snap:commands values are invalid type (e.g. int)"""
+ validate_cloudconfig_schema({"snap": {"commands": [123]}}, schema)
validate_cloudconfig_schema(
- {'snap': {'commands': [123]}}, schema)
- validate_cloudconfig_schema(
- {'snap': {'commands': {'01': 123}}}, schema)
+ {"snap": {"commands": {"01": 123}}}, schema
+ )
self.assertEqual(
"WARNING: Invalid config:\n"
"snap.commands.0: 123 is not valid under any of the given"
@@ -324,15 +355,18 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin):
"WARNING: Invalid config:\n"
"snap.commands.01: 123 is not valid under any of the given"
" schemas\n",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
- @mock.patch('cloudinit.config.cc_snap.run_commands')
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
def test_schema_when_commands_list_values_are_invalid_type(self, _):
"""Warnings when snap:commands list values are wrong type (e.g. int)"""
validate_cloudconfig_schema(
- {'snap': {'commands': [["snap", "install", 123]]}}, schema)
+ {"snap": {"commands": [["snap", "install", 123]]}}, schema
+ )
validate_cloudconfig_schema(
- {'snap': {'commands': {'01': ["snap", "install", 123]}}}, schema)
+ {"snap": {"commands": {"01": ["snap", "install", 123]}}}, schema
+ )
self.assertEqual(
"WARNING: Invalid config:\n"
"snap.commands.0: ['snap', 'install', 123] is not valid under any"
@@ -340,77 +374,84 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin):
"WARNING: Invalid config:\n"
"snap.commands.0: ['snap', 'install', 123] is not valid under any"
" of the given schemas\n",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
- @mock.patch('cloudinit.config.cc_snap.run_commands')
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
def test_schema_when_assertions_values_are_invalid_type(self, _):
"""Warnings when snap:assertions values are invalid type (e.g. int)"""
+ validate_cloudconfig_schema({"snap": {"assertions": [123]}}, schema)
validate_cloudconfig_schema(
- {'snap': {'assertions': [123]}}, schema)
- validate_cloudconfig_schema(
- {'snap': {'assertions': {'01': 123}}}, schema)
+ {"snap": {"assertions": {"01": 123}}}, schema
+ )
self.assertEqual(
"WARNING: Invalid config:\n"
"snap.assertions.0: 123 is not of type 'string'\n"
"WARNING: Invalid config:\n"
"snap.assertions.01: 123 is not of type 'string'\n",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
- @mock.patch('cloudinit.config.cc_snap.add_assertions')
+ @mock.patch("cloudinit.config.cc_snap.add_assertions")
def test_warn_schema_assertions_is_not_list_or_dict(self, _):
"""Warn when snap:assertions config is not a list or dict."""
- validate_cloudconfig_schema(
- {'snap': {'assertions': 'broken'}}, schema)
+ validate_cloudconfig_schema({"snap": {"assertions": "broken"}}, schema)
self.assertEqual(
"WARNING: Invalid config:\nsnap.assertions: 'broken' is not of"
" type 'object', 'array'\n",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
- @mock.patch('cloudinit.config.cc_snap.add_assertions')
+ @mock.patch("cloudinit.config.cc_snap.add_assertions")
def test_warn_schema_when_assertions_is_empty(self, _):
"""Emit warnings when snap:assertions is an empty list or dict."""
- validate_cloudconfig_schema(
- {'snap': {'assertions': []}}, schema)
- validate_cloudconfig_schema(
- {'snap': {'assertions': {}}}, schema)
+ validate_cloudconfig_schema({"snap": {"assertions": []}}, schema)
+ validate_cloudconfig_schema({"snap": {"assertions": {}}}, schema)
self.assertEqual(
"WARNING: Invalid config:\nsnap.assertions: [] is too short\n"
"WARNING: Invalid config:\nsnap.assertions: {} does not have"
" enough properties\n",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
- @mock.patch('cloudinit.config.cc_snap.add_assertions')
+ @mock.patch("cloudinit.config.cc_snap.add_assertions")
def test_schema_when_assertions_are_list_or_dict(self, _):
"""No warnings when snap:assertions are a list or dict."""
validate_cloudconfig_schema(
- {'snap': {'assertions': ['valid']}}, schema)
+ {"snap": {"assertions": ["valid"]}}, schema
+ )
validate_cloudconfig_schema(
- {'snap': {'assertions': {'01': 'also valid'}}}, schema)
- self.assertEqual('', self.logs.getvalue())
+ {"snap": {"assertions": {"01": "also valid"}}}, schema
+ )
+ self.assertEqual("", self.logs.getvalue())
def test_duplicates_are_fine_array_array(self):
"""Duplicated commands array/array entries are allowed."""
self.assertSchemaValid(
- {'commands': [["echo", "bye"], ["echo", "bye"]]},
- "command entries can be duplicate.")
+ {"commands": [["echo", "bye"], ["echo", "bye"]]},
+ "command entries can be duplicate.",
+ )
def test_duplicates_are_fine_array_string(self):
"""Duplicated commands array/string entries are allowed."""
self.assertSchemaValid(
- {'commands': ["echo bye", "echo bye"]},
- "command entries can be duplicate.")
+ {"commands": ["echo bye", "echo bye"]},
+ "command entries can be duplicate.",
+ )
def test_duplicates_are_fine_dict_array(self):
"""Duplicated commands dict/array entries are allowed."""
self.assertSchemaValid(
- {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}},
- "command entries can be duplicate.")
+ {"commands": {"00": ["echo", "bye"], "01": ["echo", "bye"]}},
+ "command entries can be duplicate.",
+ )
def test_duplicates_are_fine_dict_string(self):
"""Duplicated commands dict/string entries are allowed."""
self.assertSchemaValid(
- {'commands': {'00': "echo bye", '01': "echo bye"}},
- "command entries can be duplicate.")
+ {"commands": {"00": "echo bye", "01": "echo bye"}},
+ "command entries can be duplicate.",
+ )
class TestHandle(CiTestCase):
@@ -421,92 +462,122 @@ class TestHandle(CiTestCase):
super(TestHandle, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('cloudinit.config.cc_snap.run_commands')
- @mock.patch('cloudinit.config.cc_snap.add_assertions')
- @mock.patch('cloudinit.config.cc_snap.validate_cloudconfig_schema')
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
+ @mock.patch("cloudinit.config.cc_snap.add_assertions")
+ @mock.patch("cloudinit.config.cc_snap.validate_cloudconfig_schema")
def test_handle_no_config(self, m_schema, m_add, m_run):
"""When no snap-related configuration is provided, nothing happens."""
cfg = {}
- handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None)
+ handle("snap", cfg=cfg, cloud=None, log=self.logger, args=None)
self.assertIn(
"DEBUG: Skipping module named snap, no 'snap' key in config",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
m_schema.assert_not_called()
m_add.assert_not_called()
m_run.assert_not_called()
- @mock.patch('cloudinit.config.cc_snap.run_commands')
- @mock.patch('cloudinit.config.cc_snap.add_assertions')
- @mock.patch('cloudinit.config.cc_snap.maybe_install_squashfuse')
- def test_handle_skips_squashfuse_when_unconfigured(self, m_squash, m_add,
- m_run):
+ @mock.patch("cloudinit.config.cc_snap.run_commands")
+ @mock.patch("cloudinit.config.cc_snap.add_assertions")
+ @mock.patch("cloudinit.config.cc_snap.maybe_install_squashfuse")
+ def test_handle_skips_squashfuse_when_unconfigured(
+ self, m_squash, m_add, m_run
+ ):
"""When squashfuse_in_container is unset, don't attempt to install."""
handle(
- 'snap', cfg={'snap': {}}, cloud=None, log=self.logger, args=None)
+ "snap", cfg={"snap": {}}, cloud=None, log=self.logger, args=None
+ )
handle(
- 'snap', cfg={'snap': {'squashfuse_in_container': None}},
- cloud=None, log=self.logger, args=None)
+ "snap",
+ cfg={"snap": {"squashfuse_in_container": None}},
+ cloud=None,
+ log=self.logger,
+ args=None,
+ )
handle(
- 'snap', cfg={'snap': {'squashfuse_in_container': False}},
- cloud=None, log=self.logger, args=None)
+ "snap",
+ cfg={"snap": {"squashfuse_in_container": False}},
+ cloud=None,
+ log=self.logger,
+ args=None,
+ )
self.assertEqual([], m_squash.call_args_list)  # No calls
# snap configuration missing assertions and commands will default to []
self.assertIn(mock.call([]), m_add.call_args_list)
self.assertIn(mock.call([]), m_run.call_args_list)
- @mock.patch('cloudinit.config.cc_snap.maybe_install_squashfuse')
+ @mock.patch("cloudinit.config.cc_snap.maybe_install_squashfuse")
def test_handle_tries_to_install_squashfuse(self, m_squash):
"""If squashfuse_in_container is True, try installing squashfuse."""
- cfg = {'snap': {'squashfuse_in_container': True}}
+ cfg = {"snap": {"squashfuse_in_container": True}}
mycloud = FakeCloud(None)
- handle('snap', cfg=cfg, cloud=mycloud, log=self.logger, args=None)
- self.assertEqual(
- [mock.call(mycloud)], m_squash.call_args_list)
+ handle("snap", cfg=cfg, cloud=mycloud, log=self.logger, args=None)
+ self.assertEqual([mock.call(mycloud)], m_squash.call_args_list)
def test_handle_runs_commands_provided(self):
"""If commands are specified as a list, run them."""
- outfile = self.tmp_path('output.log', dir=self.tmp)
+ outfile = self.tmp_path("output.log", dir=self.tmp)
cfg = {
- 'snap': {'commands': ['echo "HI" >> %s' % outfile,
- 'echo "MOM" >> %s' % outfile]}}
- mock_path = 'cloudinit.config.cc_snap.sys.stderr'
+ "snap": {
+ "commands": [
+ 'echo "HI" >> %s' % outfile,
+ 'echo "MOM" >> %s' % outfile,
+ ]
+ }
+ }
+ mock_path = "cloudinit.config.cc_snap.sys.stderr"
with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]):
with mock.patch(mock_path, new_callable=StringIO):
- handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None)
+ handle("snap", cfg=cfg, cloud=None, log=self.logger, args=None)
- self.assertEqual('HI\nMOM\n', util.load_file(outfile))
+ self.assertEqual("HI\nMOM\n", util.load_file(outfile))
- @mock.patch('cloudinit.config.cc_snap.subp.subp')
+ @mock.patch("cloudinit.config.cc_snap.subp.subp")
def test_handle_adds_assertions(self, m_subp):
"""Any configured snap assertions are provided to add_assertions."""
- assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
- compare_file = self.tmp_path('comparison', dir=self.tmp)
+ assert_file = self.tmp_path("snapd.assertions", dir=self.tmp)
+ compare_file = self.tmp_path("comparison", dir=self.tmp)
cfg = {
- 'snap': {'assertions': [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION]}}
+ "snap": {"assertions": [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION]}
+ }
wrap_and_call(
- 'cloudinit.config.cc_snap',
- {'ASSERTIONS_FILE': {'new': assert_file}},
- handle, 'snap', cfg=cfg, cloud=None, log=self.logger, args=None)
- content = '\n'.join(cfg['snap']['assertions'])
- util.write_file(compare_file, content.encode('utf-8'))
+ "cloudinit.config.cc_snap",
+ {"ASSERTIONS_FILE": {"new": assert_file}},
+ handle,
+ "snap",
+ cfg=cfg,
+ cloud=None,
+ log=self.logger,
+ args=None,
+ )
+ content = "\n".join(cfg["snap"]["assertions"])
+ util.write_file(compare_file, content.encode("utf-8"))
self.assertEqual(
- util.load_file(compare_file), util.load_file(assert_file))
+ util.load_file(compare_file), util.load_file(assert_file)
+ )
- @mock.patch('cloudinit.config.cc_snap.subp.subp')
+ @mock.patch("cloudinit.config.cc_snap.subp.subp")
@skipUnlessJsonSchema()
def test_handle_validates_schema(self, m_subp):
"""Any provided configuration is run through validate_cloudconfig_schema."""
- assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
- cfg = {'snap': {'invalid': ''}}  # Generates schema warning
+ assert_file = self.tmp_path("snapd.assertions", dir=self.tmp)
+ cfg = {"snap": {"invalid": ""}}  # Generates schema warning
wrap_and_call(
- 'cloudinit.config.cc_snap',
- {'ASSERTIONS_FILE': {'new': assert_file}},
- handle, 'snap', cfg=cfg, cloud=None, log=self.logger, args=None)
+ "cloudinit.config.cc_snap",
+ {"ASSERTIONS_FILE": {"new": assert_file}},
+ handle,
+ "snap",
+ cfg=cfg,
+ cloud=None,
+ log=self.logger,
+ args=None,
+ )
self.assertEqual(
"WARNING: Invalid config:\nsnap: Additional properties are not"
" allowed ('invalid' was unexpected)\n",
- self.logs.getvalue())
+ self.logs.getvalue(),
+ )
class TestMaybeInstallSquashFuse(CiTestCase):
@@ -517,48 +588,52 @@ class TestMaybeInstallSquashFuse(CiTestCase):
super(TestMaybeInstallSquashFuse, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('cloudinit.config.cc_snap.util.is_container')
+ @mock.patch("cloudinit.config.cc_snap.util.is_container")
def test_maybe_install_squashfuse_skips_non_containers(self, m_container):
"""maybe_install_squashfuse does nothing when not on a container."""
m_container.return_value = False
maybe_install_squashfuse(cloud=FakeCloud(None))
self.assertEqual([mock.call()], m_container.call_args_list)
- self.assertEqual('', self.logs.getvalue())
+ self.assertEqual("", self.logs.getvalue())
- @mock.patch('cloudinit.config.cc_snap.util.is_container')
+ @mock.patch("cloudinit.config.cc_snap.util.is_container")
def test_maybe_install_squashfuse_raises_install_errors(self, m_container):
"""maybe_install_squashfuse logs and raises package install errors."""
m_container.return_value = True
distro = mock.MagicMock()
distro.update_package_sources.side_effect = RuntimeError(
- 'Some apt error')
+ "Some apt error"
+ )
with self.assertRaises(RuntimeError) as context_manager:
maybe_install_squashfuse(cloud=FakeCloud(distro))
- self.assertEqual('Some apt error', str(context_manager.exception))
- self.assertIn('Package update failed\nTraceback', self.logs.getvalue())
+ self.assertEqual("Some apt error", str(context_manager.exception))
+ self.assertIn("Package update failed\nTraceback", self.logs.getvalue())
- @mock.patch('cloudinit.config.cc_snap.util.is_container')
+ @mock.patch("cloudinit.config.cc_snap.util.is_container")
def test_maybe_install_squashfuse_raises_update_errors(self, m_container):
"""maybe_install_squashfuse logs and raises package update errors."""
m_container.return_value = True
distro = mock.MagicMock()
distro.update_package_sources.side_effect = RuntimeError(
- 'Some apt error')
+ "Some apt error"
+ )
with self.assertRaises(RuntimeError) as context_manager:
maybe_install_squashfuse(cloud=FakeCloud(distro))
- self.assertEqual('Some apt error', str(context_manager.exception))
- self.assertIn('Package update failed\nTraceback', self.logs.getvalue())
+ self.assertEqual("Some apt error", str(context_manager.exception))
+ self.assertIn("Package update failed\nTraceback", self.logs.getvalue())
- @mock.patch('cloudinit.config.cc_snap.util.is_container')
+ @mock.patch("cloudinit.config.cc_snap.util.is_container")
def test_maybe_install_squashfuse_happy_path(self, m_container):
"""maybe_install_squashfuse installs squashfuse when in a container."""
m_container.return_value = True
distro = mock.MagicMock()  # No errors raised
maybe_install_squashfuse(cloud=FakeCloud(distro))
self.assertEqual(
- [mock.call()], distro.update_package_sources.call_args_list)
+ [mock.call()], distro.update_package_sources.call_args_list
+ )
self.assertEqual(
- [mock.call(['squashfuse'])],
- distro.install_packages.call_args_list)
+ [mock.call(["squashfuse"])], distro.install_packages.call_args_list
+ )
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_spacewalk.py b/tests/unittests/config/test_cc_spacewalk.py
index 96efccf0..e1f42968 100644
--- a/tests/unittests/config/test_cc_spacewalk.py
+++ b/tests/unittests/config/test_cc_spacewalk.py
@@ -1,21 +1,20 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.config import cc_spacewalk
-from cloudinit import subp
-
-from tests.unittests import helpers
-
import logging
from unittest import mock
+from cloudinit import subp
+from cloudinit.config import cc_spacewalk
+from tests.unittests import helpers
+
LOG = logging.getLogger(__name__)
class TestSpacewalk(helpers.TestCase):
space_cfg = {
- 'spacewalk': {
- 'server': 'localhost',
- 'profile_name': 'test',
+ "spacewalk": {
+ "server": "localhost",
+ "profile_name": "test",
}
}
@@ -31,12 +30,19 @@ class TestSpacewalk(helpers.TestCase):
@mock.patch("cloudinit.config.cc_spacewalk.subp.subp")
def test_do_register(self, mock_subp):
- cc_spacewalk.do_register(**self.space_cfg['spacewalk'])
- mock_subp.assert_called_with([
- 'rhnreg_ks',
- '--serverUrl', 'https://localhost/XMLRPC',
- '--profilename', 'test',
- '--sslCACert', cc_spacewalk.def_ca_cert_path,
- ], capture=False)
+ cc_spacewalk.do_register(**self.space_cfg["spacewalk"])
+ mock_subp.assert_called_with(
+ [
+ "rhnreg_ks",
+ "--serverUrl",
+ "https://localhost/XMLRPC",
+ "--profilename",
+ "test",
+ "--sslCACert",
+ cc_spacewalk.def_ca_cert_path,
+ ],
+ capture=False,
+ )
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_ssh.py b/tests/unittests/config/test_cc_ssh.py
index ba179bbf..d66cc4cb 100644
--- a/tests/unittests/config/test_cc_ssh.py
+++ b/tests/unittests/config/test_cc_ssh.py
@@ -1,17 +1,18 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import logging
import os.path
-from cloudinit.config import cc_ssh
from cloudinit import ssh_util
+from cloudinit.config import cc_ssh
from tests.unittests.helpers import CiTestCase, mock
-import logging
LOG = logging.getLogger(__name__)
MODPATH = "cloudinit.config.cc_ssh."
-KEY_NAMES_NO_DSA = [name for name in cc_ssh.GENERATE_KEY_NAMES
- if name not in 'dsa']
+KEY_NAMES_NO_DSA = [
+ name for name in cc_ssh.GENERATE_KEY_NAMES if name not in "dsa"
+]
@mock.patch(MODPATH + "ssh_util.setup_user_keys")
@@ -20,39 +21,45 @@ class TestHandleSsh(CiTestCase):
def _publish_hostkey_test_setup(self):
self.test_hostkeys = {
- 'dsa': ('ssh-dss', 'AAAAB3NzaC1kc3MAAACB'),
- 'ecdsa': ('ecdsa-sha2-nistp256', 'AAAAE2VjZ'),
- 'ed25519': ('ssh-ed25519', 'AAAAC3NzaC1lZDI'),
- 'rsa': ('ssh-rsa', 'AAAAB3NzaC1yc2EAAA'),
+ "dsa": ("ssh-dss", "AAAAB3NzaC1kc3MAAACB"),
+ "ecdsa": ("ecdsa-sha2-nistp256", "AAAAE2VjZ"),
+ "ed25519": ("ssh-ed25519", "AAAAC3NzaC1lZDI"),
+ "rsa": ("ssh-rsa", "AAAAB3NzaC1yc2EAAA"),
}
self.test_hostkey_files = []
hostkey_tmpdir = self.tmp_dir()
for key_type in cc_ssh.GENERATE_KEY_NAMES:
key_data = self.test_hostkeys[key_type]
- filename = 'ssh_host_%s_key.pub' % key_type
+ filename = "ssh_host_%s_key.pub" % key_type
filepath = os.path.join(hostkey_tmpdir, filename)
self.test_hostkey_files.append(filepath)
- with open(filepath, 'w') as f:
- f.write(' '.join(key_data))
+ with open(filepath, "w") as f:
+ f.write(" ".join(key_data))
- cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, 'ssh_host_%s_key')
+ cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, "ssh_host_%s_key")
def test_apply_credentials_with_user(self, m_setup_keys):
"""Apply keys for the given user and root."""
keys = ["key1"]
user = "clouduser"
cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS)
- self.assertEqual([mock.call(set(keys), user),
- mock.call(set(keys), "root", options="")],
- m_setup_keys.call_args_list)
+ self.assertEqual(
+ [
+ mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=""),
+ ],
+ m_setup_keys.call_args_list,
+ )
def test_apply_credentials_with_no_user(self, m_setup_keys):
"""Apply keys for root only."""
keys = ["key1"]
user = None
cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS)
- self.assertEqual([mock.call(set(keys), "root", options="")],
- m_setup_keys.call_args_list)
+ self.assertEqual(
+ [mock.call(set(keys), "root", options="")],
+ m_setup_keys.call_args_list,
+ )
def test_apply_credentials_with_user_disable_root(self, m_setup_keys):
"""Apply keys for the given user and disable root ssh."""
@@ -62,9 +69,13 @@ class TestHandleSsh(CiTestCase):
cc_ssh.apply_credentials(keys, user, True, options)
options = options.replace("$USER", user)
options = options.replace("$DISABLE_USER", "root")
- self.assertEqual([mock.call(set(keys), user),
- mock.call(set(keys), "root", options=options)],
- m_setup_keys.call_args_list)
+ self.assertEqual(
+ [
+ mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=options),
+ ],
+ m_setup_keys.call_args_list,
+ )
def test_apply_credentials_with_no_user_disable_root(self, m_setup_keys):
"""Apply keys no user and disable root ssh."""
@@ -74,14 +85,15 @@ class TestHandleSsh(CiTestCase):
cc_ssh.apply_credentials(keys, user, True, options)
options = options.replace("$USER", "NONE")
options = options.replace("$DISABLE_USER", "root")
- self.assertEqual([mock.call(set(keys), "root", options=options)],
- m_setup_keys.call_args_list)
+ self.assertEqual(
+ [mock.call(set(keys), "root", options=options)],
+ m_setup_keys.call_args_list,
+ )
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
- def test_handle_no_cfg(self, m_path_exists, m_nug,
- m_glob, m_setup_keys):
+ def test_handle_no_cfg(self, m_path_exists, m_nug, m_glob, m_setup_keys):
"""Test handle with no config ignores generating existing keyfiles."""
cfg = {}
keys = ["key1"]
@@ -90,28 +102,33 @@ class TestHandleSsh(CiTestCase):
m_path_exists.return_value = True
m_nug.return_value = ([], {})
cc_ssh.PUBLISH_HOST_KEYS = False
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
cc_ssh.handle("name", cfg, cloud, LOG, None)
options = ssh_util.DISABLE_USER_OPTS.replace("$USER", "NONE")
options = options.replace("$DISABLE_USER", "root")
- m_glob.assert_called_once_with('/etc/ssh/ssh_host_*key*')
+ m_glob.assert_called_once_with("/etc/ssh/ssh_host_*key*")
self.assertIn(
- [mock.call('/etc/ssh/ssh_host_rsa_key'),
- mock.call('/etc/ssh/ssh_host_dsa_key'),
- mock.call('/etc/ssh/ssh_host_ecdsa_key'),
- mock.call('/etc/ssh/ssh_host_ed25519_key')],
- m_path_exists.call_args_list)
- self.assertEqual([mock.call(set(keys), "root", options=options)],
- m_setup_keys.call_args_list)
+ [
+ mock.call("/etc/ssh/ssh_host_rsa_key"),
+ mock.call("/etc/ssh/ssh_host_dsa_key"),
+ mock.call("/etc/ssh/ssh_host_ecdsa_key"),
+ mock.call("/etc/ssh/ssh_host_ed25519_key"),
+ ],
+ m_path_exists.call_args_list,
+ )
+ self.assertEqual(
+ [mock.call(set(keys), "root", options=options)],
+ m_setup_keys.call_args_list,
+ )
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
- def test_dont_allow_public_ssh_keys(self, m_path_exists, m_nug,
- m_glob, m_setup_keys):
+ def test_dont_allow_public_ssh_keys(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
"""Test allow_public_ssh_keys=False ignores ssh public keys from
- platform.
+ platform.
"""
cfg = {"allow_public_ssh_keys": False}
keys = ["key1"]
@@ -120,21 +137,25 @@ class TestHandleSsh(CiTestCase):
# Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
cc_ssh.handle("name", cfg, cloud, LOG, None)
options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
options = options.replace("$DISABLE_USER", "root")
- self.assertEqual([mock.call(set(), user),
- mock.call(set(), "root", options=options)],
- m_setup_keys.call_args_list)
+ self.assertEqual(
+ [
+ mock.call(set(), user),
+ mock.call(set(), "root", options=options),
+ ],
+ m_setup_keys.call_args_list,
+ )
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
- def test_handle_no_cfg_and_default_root(self, m_path_exists, m_nug,
- m_glob, m_setup_keys):
+ def test_handle_no_cfg_and_default_root(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
"""Test handle with no config and a default distro user."""
cfg = {}
keys = ["key1"]
@@ -143,21 +164,25 @@ class TestHandleSsh(CiTestCase):
# Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
cc_ssh.handle("name", cfg, cloud, LOG, None)
options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
options = options.replace("$DISABLE_USER", "root")
- self.assertEqual([mock.call(set(keys), user),
- mock.call(set(keys), "root", options=options)],
- m_setup_keys.call_args_list)
+ self.assertEqual(
+ [
+ mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=options),
+ ],
+ m_setup_keys.call_args_list,
+ )
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
- def test_handle_cfg_with_explicit_disable_root(self, m_path_exists, m_nug,
- m_glob, m_setup_keys):
+ def test_handle_cfg_with_explicit_disable_root(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
"""Test handle with explicit disable_root and a default distro user."""
# This test is identical to test_handle_no_cfg_and_default_root,
# except this uses an explicit cfg value
@@ -168,21 +193,25 @@ class TestHandleSsh(CiTestCase):
# Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
cc_ssh.handle("name", cfg, cloud, LOG, None)
options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
options = options.replace("$DISABLE_USER", "root")
- self.assertEqual([mock.call(set(keys), user),
- mock.call(set(keys), "root", options=options)],
- m_setup_keys.call_args_list)
+ self.assertEqual(
+ [
+ mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=options),
+ ],
+ m_setup_keys.call_args_list,
+ )
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
- def test_handle_cfg_without_disable_root(self, m_path_exists, m_nug,
- m_glob, m_setup_keys):
+ def test_handle_cfg_without_disable_root(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
"""Test handle with disable_root == False."""
# When disable_root == False, the ssh redirect for root is skipped
cfg = {"disable_root": False}
@@ -192,96 +221,111 @@ class TestHandleSsh(CiTestCase):
# Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
cloud.get_public_ssh_keys = mock.Mock(return_value=keys)
cc_ssh.handle("name", cfg, cloud, LOG, None)
- self.assertEqual([mock.call(set(keys), user),
- mock.call(set(keys), "root", options="")],
- m_setup_keys.call_args_list)
+ self.assertEqual(
+ [
+ mock.call(set(keys), user),
+ mock.call(set(keys), "root", options=""),
+ ],
+ m_setup_keys.call_args_list,
+ )
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
def test_handle_publish_hostkeys_default(
- self, m_path_exists, m_nug, m_glob, m_setup_keys):
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
"""Test handle with various configs for ssh_publish_hostkeys."""
self._publish_hostkey_test_setup()
cc_ssh.PUBLISH_HOST_KEYS = True
keys = ["key1"]
user = "clouduser"
# Return no matching keys for first glob, test keys for second.
- m_glob.side_effect = iter([
- [],
- self.test_hostkey_files,
- ])
+ m_glob.side_effect = iter(
+ [
+ [],
+ self.test_hostkey_files,
+ ]
+ )
# Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
cloud.datasource.publish_host_keys = mock.Mock()
cfg = {}
- expected_call = [self.test_hostkeys[key_type] for key_type
- in KEY_NAMES_NO_DSA]
+ expected_call = [
+ self.test_hostkeys[key_type] for key_type in KEY_NAMES_NO_DSA
+ ]
cc_ssh.handle("name", cfg, cloud, LOG, None)
- self.assertEqual([mock.call(expected_call)],
- cloud.datasource.publish_host_keys.call_args_list)
+ self.assertEqual(
+ [mock.call(expected_call)],
+ cloud.datasource.publish_host_keys.call_args_list,
+ )
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
def test_handle_publish_hostkeys_config_enable(
- self, m_path_exists, m_nug, m_glob, m_setup_keys):
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
"""Test handle with various configs for ssh_publish_hostkeys."""
self._publish_hostkey_test_setup()
cc_ssh.PUBLISH_HOST_KEYS = False
keys = ["key1"]
user = "clouduser"
# Return no matching keys for first glob, test keys for second.
- m_glob.side_effect = iter([
- [],
- self.test_hostkey_files,
- ])
+ m_glob.side_effect = iter(
+ [
+ [],
+ self.test_hostkey_files,
+ ]
+ )
# Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
cloud.datasource.publish_host_keys = mock.Mock()
- cfg = {'ssh_publish_hostkeys': {'enabled': True}}
- expected_call = [self.test_hostkeys[key_type] for key_type
- in KEY_NAMES_NO_DSA]
+ cfg = {"ssh_publish_hostkeys": {"enabled": True}}
+ expected_call = [
+ self.test_hostkeys[key_type] for key_type in KEY_NAMES_NO_DSA
+ ]
cc_ssh.handle("name", cfg, cloud, LOG, None)
- self.assertEqual([mock.call(expected_call)],
- cloud.datasource.publish_host_keys.call_args_list)
+ self.assertEqual(
+ [mock.call(expected_call)],
+ cloud.datasource.publish_host_keys.call_args_list,
+ )
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
def test_handle_publish_hostkeys_config_disable(
- self, m_path_exists, m_nug, m_glob, m_setup_keys):
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
"""Test handle with various configs for ssh_publish_hostkeys."""
self._publish_hostkey_test_setup()
cc_ssh.PUBLISH_HOST_KEYS = True
keys = ["key1"]
user = "clouduser"
# Return no matching keys for first glob, test keys for second.
- m_glob.side_effect = iter([
- [],
- self.test_hostkey_files,
- ])
+ m_glob.side_effect = iter(
+ [
+ [],
+ self.test_hostkey_files,
+ ]
+ )
# Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
cloud.datasource.publish_host_keys = mock.Mock()
- cfg = {'ssh_publish_hostkeys': {'enabled': False}}
+ cfg = {"ssh_publish_hostkeys": {"enabled": False}}
cc_ssh.handle("name", cfg, cloud, LOG, None)
self.assertFalse(cloud.datasource.publish_host_keys.call_args_list)
cloud.datasource.publish_host_keys.assert_not_called()
@@ -290,61 +334,75 @@ class TestHandleSsh(CiTestCase):
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
def test_handle_publish_hostkeys_config_blacklist(
- self, m_path_exists, m_nug, m_glob, m_setup_keys):
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
"""Test handle with various configs for ssh_publish_hostkeys."""
self._publish_hostkey_test_setup()
cc_ssh.PUBLISH_HOST_KEYS = True
keys = ["key1"]
user = "clouduser"
# Return no matching keys for first glob, test keys for second.
- m_glob.side_effect = iter([
- [],
- self.test_hostkey_files,
- ])
+ m_glob.side_effect = iter(
+ [
+ [],
+ self.test_hostkey_files,
+ ]
+ )
# Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
cloud.datasource.publish_host_keys = mock.Mock()
- cfg = {'ssh_publish_hostkeys': {'enabled': True,
- 'blacklist': ['dsa', 'rsa']}}
- expected_call = [self.test_hostkeys[key_type] for key_type
- in ['ecdsa', 'ed25519']]
+ cfg = {
+ "ssh_publish_hostkeys": {
+ "enabled": True,
+ "blacklist": ["dsa", "rsa"],
+ }
+ }
+ expected_call = [
+ self.test_hostkeys[key_type] for key_type in ["ecdsa", "ed25519"]
+ ]
cc_ssh.handle("name", cfg, cloud, LOG, None)
- self.assertEqual([mock.call(expected_call)],
- cloud.datasource.publish_host_keys.call_args_list)
+ self.assertEqual(
+ [mock.call(expected_call)],
+ cloud.datasource.publish_host_keys.call_args_list,
+ )
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
def test_handle_publish_hostkeys_empty_blacklist(
- self, m_path_exists, m_nug, m_glob, m_setup_keys):
+ self, m_path_exists, m_nug, m_glob, m_setup_keys
+ ):
"""Test handle with various configs for ssh_publish_hostkeys."""
self._publish_hostkey_test_setup()
cc_ssh.PUBLISH_HOST_KEYS = True
keys = ["key1"]
user = "clouduser"
# Return no matching keys for first glob, test keys for second.
- m_glob.side_effect = iter([
- [],
- self.test_hostkey_files,
- ])
+ m_glob.side_effect = iter(
+ [
+ [],
+ self.test_hostkey_files,
+ ]
+ )
# Mock os.path.exists to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ({user: {"default": user}}, {})
- cloud = self.tmp_cloud(
- distro='ubuntu', metadata={'public-keys': keys})
+ cloud = self.tmp_cloud(distro="ubuntu", metadata={"public-keys": keys})
cloud.datasource.publish_host_keys = mock.Mock()
- cfg = {'ssh_publish_hostkeys': {'enabled': True,
- 'blacklist': []}}
- expected_call = [self.test_hostkeys[key_type] for key_type
- in cc_ssh.GENERATE_KEY_NAMES]
+ cfg = {"ssh_publish_hostkeys": {"enabled": True, "blacklist": []}}
+ expected_call = [
+ self.test_hostkeys[key_type]
+ for key_type in cc_ssh.GENERATE_KEY_NAMES
+ ]
cc_ssh.handle("name", cfg, cloud, LOG, None)
- self.assertEqual([mock.call(expected_call)],
- cloud.datasource.publish_host_keys.call_args_list)
+ self.assertEqual(
+ [mock.call(expected_call)],
+ cloud.datasource.publish_host_keys.call_args_list,
+ )
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "util.write_file")
@@ -369,36 +427,40 @@ class TestHandleSsh(CiTestCase):
cfg["ssh_keys"][public_name] = public_value
cfg["ssh_keys"][cert_name] = cert_value
- expected_calls.extend([
- mock.call(
- '/etc/ssh/ssh_host_{}_key'.format(key_type),
- private_value,
- 384
- ),
- mock.call(
- '/etc/ssh/ssh_host_{}_key.pub'.format(key_type),
- public_value,
- 384
- ),
- mock.call(
- '/etc/ssh/ssh_host_{}_key-cert.pub'.format(key_type),
- cert_value,
- 384
- ),
- mock.call(
- '/etc/ssh/sshd_config',
- ('HostCertificate /etc/ssh/ssh_host_{}_key-cert.pub'
- '\n'.format(key_type)),
- preserve_mode=True
- )
- ])
+ expected_calls.extend(
+ [
+ mock.call(
+ "/etc/ssh/ssh_host_{}_key".format(key_type),
+ private_value,
+ 384,
+ ),
+ mock.call(
+ "/etc/ssh/ssh_host_{}_key.pub".format(key_type),
+ public_value,
+ 384,
+ ),
+ mock.call(
+ "/etc/ssh/ssh_host_{}_key-cert.pub".format(key_type),
+ cert_value,
+ 384,
+ ),
+ mock.call(
+ "/etc/ssh/sshd_config",
+ "HostCertificate /etc/ssh/ssh_host_{}_key-cert.pub"
+ "\n".format(key_type),
+ preserve_mode=True,
+ ),
+ ]
+ )
# Run the handler.
m_nug.return_value = ([], {})
- with mock.patch(MODPATH + 'ssh_util.parse_ssh_config',
- return_value=[]):
- cc_ssh.handle("name", cfg, self.tmp_cloud(distro='ubuntu'),
- LOG, None)
+ with mock.patch(
+ MODPATH + "ssh_util.parse_ssh_config", return_value=[]
+ ):
+ cc_ssh.handle(
+ "name", cfg, self.tmp_cloud(distro="ubuntu"), LOG, None
+ )
# Check that all expected output has been done.
for call_ in expected_calls:
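The publish-hostkeys tests above lean on one detail of unittest.mock worth noting: when side_effect is set to an iterable (or an explicit iterator, as with iter([...]) here), each call to the mock consumes the next element as its return value. That is how a single mocked glob.glob can report no existing keys on its first call and the generated .pub files on its second. A self-contained sketch of the mechanism (the filename is hypothetical):

    from unittest import mock

    m_glob = mock.Mock()
    # First call simulates "no keys found"; second returns test files.
    m_glob.side_effect = iter([[], ["ssh_host_rsa_key.pub"]])

    assert m_glob("/etc/ssh/ssh_host_*key*") == []
    assert m_glob("/etc/ssh/ssh_host_*key*") == ["ssh_host_rsa_key.pub"]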
"/etc/ssh/ssh_host_{}_key.pub".format(key_type), + public_value, + 384, + ), + mock.call( + "/etc/ssh/ssh_host_{}_key-cert.pub".format(key_type), + cert_value, + 384, + ), + mock.call( + "/etc/ssh/sshd_config", + "HostCertificate /etc/ssh/ssh_host_{}_key-cert.pub" + "\n".format(key_type), + preserve_mode=True, + ), + ] + ) # Run the handler. m_nug.return_value = ([], {}) - with mock.patch(MODPATH + 'ssh_util.parse_ssh_config', - return_value=[]): - cc_ssh.handle("name", cfg, self.tmp_cloud(distro='ubuntu'), - LOG, None) + with mock.patch( + MODPATH + "ssh_util.parse_ssh_config", return_value=[] + ): + cc_ssh.handle( + "name", cfg, self.tmp_cloud(distro="ubuntu"), LOG, None + ) # Check that all expected output has been done. for call_ in expected_calls: diff --git a/tests/unittests/config/test_cc_timezone.py b/tests/unittests/config/test_cc_timezone.py index fb6aab5f..f76397b7 100644 --- a/tests/unittests/config/test_cc_timezone.py +++ b/tests/unittests/config/test_cc_timezone.py @@ -4,19 +4,16 @@ # # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit.config import cc_timezone - -from cloudinit import util - - import logging import shutil import tempfile -from configobj import ConfigObj from io import BytesIO -from tests.unittests import helpers as t_help +from configobj import ConfigObj +from cloudinit import util +from cloudinit.config import cc_timezone +from tests.unittests import helpers as t_help from tests.unittests.util import get_cloud LOG = logging.getLogger(__name__) @@ -33,22 +30,24 @@ class TestTimezone(t_help.FilesystemMockingTestCase): def test_set_timezone_sles(self): cfg = { - 'timezone': 'Tatooine/Bestine', + "timezone": "Tatooine/Bestine", } - cc = get_cloud('sles') + cc = get_cloud("sles") # Create a dummy timezone file - dummy_contents = '0123456789abcdefgh' - util.write_file('/usr/share/zoneinfo/%s' % cfg['timezone'], - dummy_contents) + dummy_contents = "0123456789abcdefgh" + util.write_file( + "/usr/share/zoneinfo/%s" % cfg["timezone"], dummy_contents + ) - cc_timezone.handle('cc_timezone', cfg, cc, LOG, []) + cc_timezone.handle("cc_timezone", cfg, cc, LOG, []) - contents = util.load_file('/etc/sysconfig/clock', decode=False) + contents = util.load_file("/etc/sysconfig/clock", decode=False) n_cfg = ConfigObj(BytesIO(contents)) - self.assertEqual({'TIMEZONE': cfg['timezone']}, dict(n_cfg)) + self.assertEqual({"TIMEZONE": cfg["timezone"]}, dict(n_cfg)) - contents = util.load_file('/etc/localtime') + contents = util.load_file("/etc/localtime") self.assertEqual(dummy_contents, contents.strip()) + # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_ubuntu_advantage.py b/tests/unittests/config/test_cc_ubuntu_advantage.py index 8d0c9665..d7519a1b 100644 --- a/tests/unittests/config/test_cc_ubuntu_advantage.py +++ b/tests/unittests/config/test_cc_ubuntu_advantage.py @@ -1,15 +1,22 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+from cloudinit import subp from cloudinit.config.cc_ubuntu_advantage import ( - configure_ua, handle, maybe_install_ua_tools, schema) + configure_ua, + handle, + maybe_install_ua_tools, + schema, +) from cloudinit.config.schema import validate_cloudconfig_schema -from cloudinit import subp from tests.unittests.helpers import ( - CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema) - + CiTestCase, + SchemaTestCaseMixin, + mock, + skipUnlessJsonSchema, +) # Module path used in mocks -MPATH = 'cloudinit.config.cc_ubuntu_advantage' +MPATH = "cloudinit.config.cc_ubuntu_advantage" class FakeCloud(object): @@ -26,111 +33,131 @@ class TestConfigureUA(CiTestCase): super(TestConfigureUA, self).setUp() self.tmp = self.tmp_dir() - @mock.patch('%s.subp.subp' % MPATH) + @mock.patch("%s.subp.subp" % MPATH) def test_configure_ua_attach_error(self, m_subp): """Errors from ua attach command are raised.""" m_subp.side_effect = subp.ProcessExecutionError( - 'Invalid token SomeToken') + "Invalid token SomeToken" + ) with self.assertRaises(RuntimeError) as context_manager: - configure_ua(token='SomeToken') + configure_ua(token="SomeToken") self.assertEqual( - 'Failure attaching Ubuntu Advantage:\nUnexpected error while' - ' running command.\nCommand: -\nExit code: -\nReason: -\n' - 'Stdout: Invalid token SomeToken\nStderr: -', - str(context_manager.exception)) + "Failure attaching Ubuntu Advantage:\nUnexpected error while" + " running command.\nCommand: -\nExit code: -\nReason: -\n" + "Stdout: Invalid token SomeToken\nStderr: -", + str(context_manager.exception), + ) - @mock.patch('%s.subp.subp' % MPATH) + @mock.patch("%s.subp.subp" % MPATH) def test_configure_ua_attach_with_token(self, m_subp): """When token is provided, attach the machine to ua using the token.""" - configure_ua(token='SomeToken') - m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken']) + configure_ua(token="SomeToken") + m_subp.assert_called_once_with(["ua", "attach", "SomeToken"]) self.assertEqual( - 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', - self.logs.getvalue()) + "DEBUG: Attaching to Ubuntu Advantage. 
ua attach SomeToken\n", + self.logs.getvalue(), + ) - @mock.patch('%s.subp.subp' % MPATH) + @mock.patch("%s.subp.subp" % MPATH) def test_configure_ua_attach_on_service_error(self, m_subp): """all services should be enabled and then any failures raised""" def fake_subp(cmd, capture=None): - fail_cmds = [['ua', 'enable', svc] for svc in ['esm', 'cc']] + fail_cmds = [["ua", "enable", svc] for svc in ["esm", "cc"]] if cmd in fail_cmds and capture: svc = cmd[-1] raise subp.ProcessExecutionError( - 'Invalid {} credentials'.format(svc.upper())) + "Invalid {} credentials".format(svc.upper()) + ) m_subp.side_effect = fake_subp with self.assertRaises(RuntimeError) as context_manager: - configure_ua(token='SomeToken', enable=['esm', 'cc', 'fips']) + configure_ua(token="SomeToken", enable=["esm", "cc", "fips"]) self.assertEqual( m_subp.call_args_list, - [mock.call(['ua', 'attach', 'SomeToken']), - mock.call(['ua', 'enable', 'esm'], capture=True), - mock.call(['ua', 'enable', 'cc'], capture=True), - mock.call(['ua', 'enable', 'fips'], capture=True)]) + [ + mock.call(["ua", "attach", "SomeToken"]), + mock.call(["ua", "enable", "esm"], capture=True), + mock.call(["ua", "enable", "cc"], capture=True), + mock.call(["ua", "enable", "fips"], capture=True), + ], + ) self.assertIn( 'WARNING: Failure enabling "esm":\nUnexpected error' - ' while running command.\nCommand: -\nExit code: -\nReason: -\n' - 'Stdout: Invalid ESM credentials\nStderr: -\n', - self.logs.getvalue()) + " while running command.\nCommand: -\nExit code: -\nReason: -\n" + "Stdout: Invalid ESM credentials\nStderr: -\n", + self.logs.getvalue(), + ) self.assertIn( 'WARNING: Failure enabling "cc":\nUnexpected error' - ' while running command.\nCommand: -\nExit code: -\nReason: -\n' - 'Stdout: Invalid CC credentials\nStderr: -\n', - self.logs.getvalue()) + " while running command.\nCommand: -\nExit code: -\nReason: -\n" + "Stdout: Invalid CC credentials\nStderr: -\n", + self.logs.getvalue(), + ) self.assertEqual( 'Failure enabling Ubuntu Advantage service(s): "esm", "cc"', - str(context_manager.exception)) + str(context_manager.exception), + ) - @mock.patch('%s.subp.subp' % MPATH) + @mock.patch("%s.subp.subp" % MPATH) def test_configure_ua_attach_with_empty_services(self, m_subp): """When services is an empty list, do not auto-enable attach.""" - configure_ua(token='SomeToken', enable=[]) - m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken']) + configure_ua(token="SomeToken", enable=[]) + m_subp.assert_called_once_with(["ua", "attach", "SomeToken"]) self.assertEqual( - 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', - self.logs.getvalue()) + "DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n", + self.logs.getvalue(), + ) - @mock.patch('%s.subp.subp' % MPATH) + @mock.patch("%s.subp.subp" % MPATH) def test_configure_ua_attach_with_specific_services(self, m_subp): """When services a list, only enable specific services.""" - configure_ua(token='SomeToken', enable=['fips']) + configure_ua(token="SomeToken", enable=["fips"]) self.assertEqual( m_subp.call_args_list, - [mock.call(['ua', 'attach', 'SomeToken']), - mock.call(['ua', 'enable', 'fips'], capture=True)]) + [ + mock.call(["ua", "attach", "SomeToken"]), + mock.call(["ua", "enable", "fips"], capture=True), + ], + ) self.assertEqual( - 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', - self.logs.getvalue()) + "DEBUG: Attaching to Ubuntu Advantage. 
ua attach SomeToken\n", + self.logs.getvalue(), + ) - @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) - @mock.patch('%s.subp.subp' % MPATH) + @mock.patch("%s.maybe_install_ua_tools" % MPATH, mock.MagicMock()) + @mock.patch("%s.subp.subp" % MPATH) def test_configure_ua_attach_with_string_services(self, m_subp): """When services a string, treat as singleton list and warn""" - configure_ua(token='SomeToken', enable='fips') + configure_ua(token="SomeToken", enable="fips") self.assertEqual( m_subp.call_args_list, - [mock.call(['ua', 'attach', 'SomeToken']), - mock.call(['ua', 'enable', 'fips'], capture=True)]) + [ + mock.call(["ua", "attach", "SomeToken"]), + mock.call(["ua", "enable", "fips"], capture=True), + ], + ) self.assertEqual( - 'WARNING: ubuntu_advantage: enable should be a list, not a' - ' string; treating as a single enable\n' - 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', - self.logs.getvalue()) + "WARNING: ubuntu_advantage: enable should be a list, not a" + " string; treating as a single enable\n" + "DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n", + self.logs.getvalue(), + ) - @mock.patch('%s.subp.subp' % MPATH) + @mock.patch("%s.subp.subp" % MPATH) def test_configure_ua_attach_with_weird_services(self, m_subp): """When services not string or list, warn but still attach""" - configure_ua(token='SomeToken', enable={'deffo': 'wont work'}) + configure_ua(token="SomeToken", enable={"deffo": "wont work"}) self.assertEqual( - m_subp.call_args_list, - [mock.call(['ua', 'attach', 'SomeToken'])]) + m_subp.call_args_list, [mock.call(["ua", "attach", "SomeToken"])] + ) self.assertEqual( - 'WARNING: ubuntu_advantage: enable should be a list, not a' - ' dict; skipping enabling services\n' - 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', - self.logs.getvalue()) + "WARNING: ubuntu_advantage: enable should be a list, not a" + " dict; skipping enabling services\n" + "DEBUG: Attaching to Ubuntu Advantage. 
ua attach SomeToken\n", + self.logs.getvalue(), + ) @skipUnlessJsonSchema() @@ -139,49 +166,57 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin): with_logs = True schema = schema - @mock.patch('%s.maybe_install_ua_tools' % MPATH) - @mock.patch('%s.configure_ua' % MPATH) + @mock.patch("%s.maybe_install_ua_tools" % MPATH) + @mock.patch("%s.configure_ua" % MPATH) def test_schema_warns_on_ubuntu_advantage_not_dict(self, _cfg, _): """If ubuntu_advantage configuration is not a dict, emit a warning.""" - validate_cloudconfig_schema({'ubuntu_advantage': 'wrong type'}, schema) + validate_cloudconfig_schema({"ubuntu_advantage": "wrong type"}, schema) self.assertEqual( "WARNING: Invalid config:\nubuntu_advantage: 'wrong type' is not" " of type 'object'\n", - self.logs.getvalue()) + self.logs.getvalue(), + ) - @mock.patch('%s.maybe_install_ua_tools' % MPATH) - @mock.patch('%s.configure_ua' % MPATH) + @mock.patch("%s.maybe_install_ua_tools" % MPATH) + @mock.patch("%s.configure_ua" % MPATH) def test_schema_disallows_unknown_keys(self, _cfg, _): """Unknown keys in ubuntu_advantage configuration emit warnings.""" validate_cloudconfig_schema( - {'ubuntu_advantage': {'token': 'winner', 'invalid-key': ''}}, - schema) + {"ubuntu_advantage": {"token": "winner", "invalid-key": ""}}, + schema, + ) self.assertIn( - 'WARNING: Invalid config:\nubuntu_advantage: Additional properties' + "WARNING: Invalid config:\nubuntu_advantage: Additional properties" " are not allowed ('invalid-key' was unexpected)", - self.logs.getvalue()) + self.logs.getvalue(), + ) - @mock.patch('%s.maybe_install_ua_tools' % MPATH) - @mock.patch('%s.configure_ua' % MPATH) + @mock.patch("%s.maybe_install_ua_tools" % MPATH) + @mock.patch("%s.configure_ua" % MPATH) def test_warn_schema_requires_token(self, _cfg, _): """Warn if ubuntu_advantage configuration lacks token.""" validate_cloudconfig_schema( - {'ubuntu_advantage': {'enable': ['esm']}}, schema) + {"ubuntu_advantage": {"enable": ["esm"]}}, schema + ) self.assertEqual( "WARNING: Invalid config:\nubuntu_advantage:" - " 'token' is a required property\n", self.logs.getvalue()) + " 'token' is a required property\n", + self.logs.getvalue(), + ) - @mock.patch('%s.maybe_install_ua_tools' % MPATH) - @mock.patch('%s.configure_ua' % MPATH) + @mock.patch("%s.maybe_install_ua_tools" % MPATH) + @mock.patch("%s.configure_ua" % MPATH) def test_warn_schema_services_is_not_list_or_dict(self, _cfg, _): """Warn when ubuntu_advantage:enable config is not a list.""" validate_cloudconfig_schema( - {'ubuntu_advantage': {'enable': 'needslist'}}, schema) + {"ubuntu_advantage": {"enable": "needslist"}}, schema + ) self.assertEqual( "WARNING: Invalid config:\nubuntu_advantage: 'token' is a" " required property\nubuntu_advantage.enable: 'needslist'" " is not of type 'array'\n", - self.logs.getvalue()) + self.logs.getvalue(), + ) class TestHandle(CiTestCase): @@ -192,89 +227,93 @@ class TestHandle(CiTestCase): super(TestHandle, self).setUp() self.tmp = self.tmp_dir() - @mock.patch('%s.validate_cloudconfig_schema' % MPATH) + @mock.patch("%s.validate_cloudconfig_schema" % MPATH) def test_handle_no_config(self, m_schema): """When no ua-related configuration is provided, nothing happens.""" cfg = {} - handle('ua-test', cfg=cfg, cloud=None, log=self.logger, args=None) + handle("ua-test", cfg=cfg, cloud=None, log=self.logger, args=None) self.assertIn( "DEBUG: Skipping module named ua-test, no 'ubuntu_advantage'" - ' configuration found', - self.logs.getvalue()) + " configuration found", + self.logs.getvalue(), + ) 
m_schema.assert_not_called() - @mock.patch('%s.configure_ua' % MPATH) - @mock.patch('%s.maybe_install_ua_tools' % MPATH) + @mock.patch("%s.configure_ua" % MPATH) + @mock.patch("%s.maybe_install_ua_tools" % MPATH) def test_handle_tries_to_install_ubuntu_advantage_tools( - self, m_install, m_cfg): + self, m_install, m_cfg + ): """If ubuntu_advantage is provided, try installing ua-tools package.""" - cfg = {'ubuntu_advantage': {'token': 'valid'}} + cfg = {"ubuntu_advantage": {"token": "valid"}} mycloud = FakeCloud(None) - handle('nomatter', cfg=cfg, cloud=mycloud, log=self.logger, args=None) + handle("nomatter", cfg=cfg, cloud=mycloud, log=self.logger, args=None) m_install.assert_called_once_with(mycloud) - @mock.patch('%s.configure_ua' % MPATH) - @mock.patch('%s.maybe_install_ua_tools' % MPATH) + @mock.patch("%s.configure_ua" % MPATH) + @mock.patch("%s.maybe_install_ua_tools" % MPATH) def test_handle_passes_credentials_and_services_to_configure_ua( - self, m_install, m_configure_ua): + self, m_install, m_configure_ua + ): """All ubuntu_advantage config keys are passed to configure_ua.""" - cfg = {'ubuntu_advantage': {'token': 'token', 'enable': ['esm']}} - handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) - m_configure_ua.assert_called_once_with( - token='token', enable=['esm']) + cfg = {"ubuntu_advantage": {"token": "token", "enable": ["esm"]}} + handle("nomatter", cfg=cfg, cloud=None, log=self.logger, args=None) + m_configure_ua.assert_called_once_with(token="token", enable=["esm"]) - @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) - @mock.patch('%s.configure_ua' % MPATH) + @mock.patch("%s.maybe_install_ua_tools" % MPATH, mock.MagicMock()) + @mock.patch("%s.configure_ua" % MPATH) def test_handle_warns_on_deprecated_ubuntu_advantage_key_w_config( - self, m_configure_ua): + self, m_configure_ua + ): """Warning when ubuntu-advantage key is present with new config""" - cfg = {'ubuntu-advantage': {'token': 'token', 'enable': ['esm']}} - handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + cfg = {"ubuntu-advantage": {"token": "token", "enable": ["esm"]}} + handle("nomatter", cfg=cfg, cloud=None, log=self.logger, args=None) self.assertEqual( 'WARNING: Deprecated configuration key "ubuntu-advantage"' ' provided. Expected underscore delimited "ubuntu_advantage";' - ' will attempt to continue.', - self.logs.getvalue().splitlines()[0]) - m_configure_ua.assert_called_once_with( - token='token', enable=['esm']) + " will attempt to continue.", + self.logs.getvalue().splitlines()[0], + ) + m_configure_ua.assert_called_once_with(token="token", enable=["esm"]) def test_handle_error_on_deprecated_commands_key_dashed(self): """Error when commands is present in ubuntu-advantage key.""" - cfg = {'ubuntu-advantage': {'commands': 'nogo'}} + cfg = {"ubuntu-advantage": {"commands": "nogo"}} with self.assertRaises(RuntimeError) as context_manager: - handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + handle("nomatter", cfg=cfg, cloud=None, log=self.logger, args=None) self.assertEqual( 'Deprecated configuration "ubuntu-advantage: commands" provided.' 
' Expected "token"', - str(context_manager.exception)) + str(context_manager.exception), + ) def test_handle_error_on_deprecated_commands_key_underscored(self): """Error when commands is present in ubuntu_advantage key.""" - cfg = {'ubuntu_advantage': {'commands': 'nogo'}} + cfg = {"ubuntu_advantage": {"commands": "nogo"}} with self.assertRaises(RuntimeError) as context_manager: - handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + handle("nomatter", cfg=cfg, cloud=None, log=self.logger, args=None) self.assertEqual( 'Deprecated configuration "ubuntu-advantage: commands" provided.' ' Expected "token"', - str(context_manager.exception)) + str(context_manager.exception), + ) - @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) - @mock.patch('%s.configure_ua' % MPATH) - def test_handle_prefers_new_style_config( - self, m_configure_ua): + @mock.patch("%s.maybe_install_ua_tools" % MPATH, mock.MagicMock()) + @mock.patch("%s.configure_ua" % MPATH) + def test_handle_prefers_new_style_config(self, m_configure_ua): """ubuntu_advantage should be preferred over ubuntu-advantage""" cfg = { - 'ubuntu-advantage': {'token': 'nope', 'enable': ['wrong']}, - 'ubuntu_advantage': {'token': 'token', 'enable': ['esm']}, + "ubuntu-advantage": {"token": "nope", "enable": ["wrong"]}, + "ubuntu_advantage": {"token": "token", "enable": ["esm"]}, } - handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + handle("nomatter", cfg=cfg, cloud=None, log=self.logger, args=None) self.assertEqual( 'WARNING: Deprecated configuration key "ubuntu-advantage"' ' provided. Expected underscore delimited "ubuntu_advantage";' - ' will attempt to continue.', - self.logs.getvalue().splitlines()[0]) - m_configure_ua.assert_called_once_with( - token='token', enable=['esm']) + " will attempt to continue.", + self.logs.getvalue().splitlines()[0], + ) + m_configure_ua.assert_called_once_with(token="token", enable=["esm"]) class TestMaybeInstallUATools(CiTestCase): @@ -285,42 +324,46 @@ class TestMaybeInstallUATools(CiTestCase): super(TestMaybeInstallUATools, self).setUp() self.tmp = self.tmp_dir() - @mock.patch('%s.subp.which' % MPATH) + @mock.patch("%s.subp.which" % MPATH) def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which): """Do nothing if ubuntu-advantage-tools already exists.""" - m_which.return_value = '/usr/bin/ua' # already installed + m_which.return_value = "/usr/bin/ua" # already installed distro = mock.MagicMock() distro.update_package_sources.side_effect = RuntimeError( - 'Some apt error') + "Some apt error" + ) maybe_install_ua_tools(cloud=FakeCloud(distro)) # No RuntimeError - @mock.patch('%s.subp.which' % MPATH) + @mock.patch("%s.subp.which" % MPATH) def test_maybe_install_ua_tools_raises_update_errors(self, m_which): """maybe_install_ua_tools logs and raises apt update errors.""" m_which.return_value = None distro = mock.MagicMock() distro.update_package_sources.side_effect = RuntimeError( - 'Some apt error') + "Some apt error" + ) with self.assertRaises(RuntimeError) as context_manager: maybe_install_ua_tools(cloud=FakeCloud(distro)) - self.assertEqual('Some apt error', str(context_manager.exception)) - self.assertIn('Package update failed\nTraceback', self.logs.getvalue()) + self.assertEqual("Some apt error", str(context_manager.exception)) + self.assertIn("Package update failed\nTraceback", self.logs.getvalue()) - @mock.patch('%s.subp.which' % MPATH) + @mock.patch("%s.subp.which" % MPATH) def test_maybe_install_ua_raises_install_errors(self, m_which): 
"""maybe_install_ua_tools logs and raises package install errors.""" m_which.return_value = None distro = mock.MagicMock() distro.update_package_sources.return_value = None distro.install_packages.side_effect = RuntimeError( - 'Some install error') + "Some install error" + ) with self.assertRaises(RuntimeError) as context_manager: maybe_install_ua_tools(cloud=FakeCloud(distro)) - self.assertEqual('Some install error', str(context_manager.exception)) + self.assertEqual("Some install error", str(context_manager.exception)) self.assertIn( - 'Failed to install ubuntu-advantage-tools\n', self.logs.getvalue()) + "Failed to install ubuntu-advantage-tools\n", self.logs.getvalue() + ) - @mock.patch('%s.subp.which' % MPATH) + @mock.patch("%s.subp.which" % MPATH) def test_maybe_install_ua_tools_happy_path(self, m_which): """maybe_install_ua_tools installs ubuntu-advantage-tools.""" m_which.return_value = None @@ -328,6 +371,8 @@ class TestMaybeInstallUATools(CiTestCase): maybe_install_ua_tools(cloud=FakeCloud(distro)) distro.update_package_sources.assert_called_once_with() distro.install_packages.assert_called_once_with( - ['ubuntu-advantage-tools']) + ["ubuntu-advantage-tools"] + ) + # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_ubuntu_drivers.py b/tests/unittests/config/test_cc_ubuntu_drivers.py index d341fbfd..4987492d 100644 --- a/tests/unittests/config/test_cc_ubuntu_drivers.py +++ b/tests/unittests/config/test_cc_ubuntu_drivers.py @@ -3,17 +3,20 @@ import copy import os -from tests.unittests.helpers import CiTestCase, skipUnlessJsonSchema, mock -from cloudinit.config.schema import ( - SchemaValidationError, validate_cloudconfig_schema) from cloudinit.config import cc_ubuntu_drivers as drivers +from cloudinit.config.schema import ( + SchemaValidationError, + validate_cloudconfig_schema, +) from cloudinit.subp import ProcessExecutionError +from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema MPATH = "cloudinit.config.cc_ubuntu_drivers." 
M_TMP_PATH = MPATH + "temp_utils.mkdtemp" OLD_UBUNTU_DRIVERS_ERROR_STDERR = ( "ubuntu-drivers: error: argument <command>: invalid choice: 'install' " - "(choose from 'list', 'autoinstall', 'devices', 'debug')\n") + "(choose from 'list', 'autoinstall', 'devices', 'debug')\n" +) # The tests in this module call helper methods which are decorated with @@ -23,8 +26,8 @@ OLD_UBUNTU_DRIVERS_ERROR_STDERR = ( # disable it for the entire module: # pylint: disable=no-value-for-parameter -class AnyTempScriptAndDebconfFile(object): +class AnyTempScriptAndDebconfFile(object): def __init__(self, tmp_dir, debconf_file): self.tmp_dir = tmp_dir self.debconf_file = debconf_file @@ -33,60 +36,68 @@ class AnyTempScriptAndDebconfFile(object): if not len(cmd) == 2: return False script, debconf_file = cmd - if bool(script.startswith(self.tmp_dir) and script.endswith('.sh')): + if bool(script.startswith(self.tmp_dir) and script.endswith(".sh")): return debconf_file == self.debconf_file return False class TestUbuntuDrivers(CiTestCase): - cfg_accepted = {'drivers': {'nvidia': {'license-accepted': True}}} - install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia'] + cfg_accepted = {"drivers": {"nvidia": {"license-accepted": True}}} + install_gpgpu = ["ubuntu-drivers", "install", "--gpgpu", "nvidia"] with_logs = True @skipUnlessJsonSchema() def test_schema_requires_boolean_for_license_accepted(self): with self.assertRaisesRegex( - SchemaValidationError, ".*license-accepted.*TRUE.*boolean"): + SchemaValidationError, ".*license-accepted.*TRUE.*boolean" + ): validate_cloudconfig_schema( - {'drivers': {'nvidia': {'license-accepted': "TRUE"}}}, - schema=drivers.schema, strict=True) + {"drivers": {"nvidia": {"license-accepted": "TRUE"}}}, + schema=drivers.schema, + strict=True, + ) @mock.patch(M_TMP_PATH) - @mock.patch(MPATH + "subp.subp", return_value=('', '')) + @mock.patch(MPATH + "subp.subp", return_value=("", "")) @mock.patch(MPATH + "subp.which", return_value=False) - def _assert_happy_path_taken( - self, config, m_which, m_subp, m_tmp): + def _assert_happy_path_taken(self, config, m_which, m_subp, m_tmp): """Positive path test through handle. 
Package should be installed.""" tdir = self.tmp_dir() - debconf_file = os.path.join(tdir, 'nvidia.template') + debconf_file = os.path.join(tdir, "nvidia.template") m_tmp.return_value = tdir myCloud = mock.MagicMock() - drivers.handle('ubuntu_drivers', config, myCloud, None, None) - self.assertEqual([mock.call(['ubuntu-drivers-common'])], - myCloud.distro.install_packages.call_args_list) + drivers.handle("ubuntu_drivers", config, myCloud, None, None) + self.assertEqual( + [mock.call(["ubuntu-drivers-common"])], + myCloud.distro.install_packages.call_args_list, + ) self.assertEqual( - [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), - mock.call(self.install_gpgpu)], - m_subp.call_args_list) + [ + mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), + mock.call(self.install_gpgpu), + ], + m_subp.call_args_list, + ) def test_handle_does_package_install(self): self._assert_happy_path_taken(self.cfg_accepted) def test_trueish_strings_are_considered_approval(self): - for true_value in ['yes', 'true', 'on', '1']: + for true_value in ["yes", "true", "on", "1"]: new_config = copy.deepcopy(self.cfg_accepted) - new_config['drivers']['nvidia']['license-accepted'] = true_value + new_config["drivers"]["nvidia"]["license-accepted"] = true_value self._assert_happy_path_taken(new_config) @mock.patch(M_TMP_PATH) @mock.patch(MPATH + "subp.subp") @mock.patch(MPATH + "subp.which", return_value=False) def test_handle_raises_error_if_no_drivers_found( - self, m_which, m_subp, m_tmp): + self, m_which, m_subp, m_tmp + ): """If ubuntu-drivers doesn't install any drivers, raise an error.""" tdir = self.tmp_dir() - debconf_file = os.path.join(tdir, 'nvidia.template') + debconf_file = os.path.join(tdir, "nvidia.template") m_tmp.return_value = tdir myCloud = mock.MagicMock() @@ -94,84 +105,103 @@ class TestUbuntuDrivers(CiTestCase): if cmd[0].startswith(tdir): return raise ProcessExecutionError( - stdout='No drivers found for installation.\n', exit_code=1) + stdout="No drivers found for installation.\n", exit_code=1 + ) + m_subp.side_effect = fake_subp with self.assertRaises(Exception): drivers.handle( - 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None) - self.assertEqual([mock.call(['ubuntu-drivers-common'])], - myCloud.distro.install_packages.call_args_list) + "ubuntu_drivers", self.cfg_accepted, myCloud, None, None + ) self.assertEqual( - [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), - mock.call(self.install_gpgpu)], - m_subp.call_args_list) - self.assertIn('ubuntu-drivers found no drivers for installation', - self.logs.getvalue()) - - @mock.patch(MPATH + "subp.subp", return_value=('', '')) + [mock.call(["ubuntu-drivers-common"])], + myCloud.distro.install_packages.call_args_list, + ) + self.assertEqual( + [ + mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), + mock.call(self.install_gpgpu), + ], + m_subp.call_args_list, + ) + self.assertIn( + "ubuntu-drivers found no drivers for installation", + self.logs.getvalue(), + ) + + @mock.patch(MPATH + "subp.subp", return_value=("", "")) @mock.patch(MPATH + "subp.which", return_value=False) def _assert_inert_with_config(self, config, m_which, m_subp): """Helper to reduce repetition when testing negative cases""" myCloud = mock.MagicMock() - drivers.handle('ubuntu_drivers', config, myCloud, None, None) + drivers.handle("ubuntu_drivers", config, myCloud, None, None) self.assertEqual(0, myCloud.distro.install_packages.call_count) self.assertEqual(0, m_subp.call_count) def test_handle_inert_if_license_not_accepted(self): 
"""Ensure we don't do anything if the license is rejected.""" self._assert_inert_with_config( - {'drivers': {'nvidia': {'license-accepted': False}}}) + {"drivers": {"nvidia": {"license-accepted": False}}} + ) def test_handle_inert_if_garbage_in_license_field(self): """Ensure we don't do anything if unknown text is in license field.""" self._assert_inert_with_config( - {'drivers': {'nvidia': {'license-accepted': 'garbage'}}}) + {"drivers": {"nvidia": {"license-accepted": "garbage"}}} + ) def test_handle_inert_if_no_license_key(self): """Ensure we don't do anything if no license key.""" - self._assert_inert_with_config({'drivers': {'nvidia': {}}}) + self._assert_inert_with_config({"drivers": {"nvidia": {}}}) def test_handle_inert_if_no_nvidia_key(self): """Ensure we don't do anything if other license accepted.""" self._assert_inert_with_config( - {'drivers': {'acme': {'license-accepted': True}}}) + {"drivers": {"acme": {"license-accepted": True}}} + ) def test_handle_inert_if_string_given(self): """Ensure we don't do anything if string refusal given.""" - for false_value in ['no', 'false', 'off', '0']: + for false_value in ["no", "false", "off", "0"]: self._assert_inert_with_config( - {'drivers': {'nvidia': {'license-accepted': false_value}}}) + {"drivers": {"nvidia": {"license-accepted": false_value}}} + ) @mock.patch(MPATH + "install_drivers") def test_handle_no_drivers_does_nothing(self, m_install_drivers): """If no 'drivers' key in the config, nothing should be done.""" myCloud = mock.MagicMock() myLog = mock.MagicMock() - drivers.handle('ubuntu_drivers', {'foo': 'bzr'}, myCloud, myLog, None) - self.assertIn('Skipping module named', - myLog.debug.call_args_list[0][0][0]) + drivers.handle("ubuntu_drivers", {"foo": "bzr"}, myCloud, myLog, None) + self.assertIn( + "Skipping module named", myLog.debug.call_args_list[0][0][0] + ) self.assertEqual(0, m_install_drivers.call_count) @mock.patch(M_TMP_PATH) - @mock.patch(MPATH + "subp.subp", return_value=('', '')) + @mock.patch(MPATH + "subp.subp", return_value=("", "")) @mock.patch(MPATH + "subp.which", return_value=True) def test_install_drivers_no_install_if_present( - self, m_which, m_subp, m_tmp): + self, m_which, m_subp, m_tmp + ): """If 'ubuntu-drivers' is present, no package install should occur.""" tdir = self.tmp_dir() - debconf_file = os.path.join(tdir, 'nvidia.template') + debconf_file = os.path.join(tdir, "nvidia.template") m_tmp.return_value = tdir pkg_install = mock.MagicMock() - drivers.install_drivers(self.cfg_accepted['drivers'], - pkg_install_func=pkg_install) + drivers.install_drivers( + self.cfg_accepted["drivers"], pkg_install_func=pkg_install + ) self.assertEqual(0, pkg_install.call_count) - self.assertEqual([mock.call('ubuntu-drivers')], - m_which.call_args_list) + self.assertEqual([mock.call("ubuntu-drivers")], m_which.call_args_list) self.assertEqual( - [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), - mock.call(self.install_gpgpu)], - m_subp.call_args_list) + [ + mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), + mock.call(self.install_gpgpu), + ], + m_subp.call_args_list, + ) def test_install_drivers_rejects_invalid_config(self): """install_drivers should raise TypeError if not given a config dict""" @@ -184,10 +214,11 @@ class TestUbuntuDrivers(CiTestCase): @mock.patch(MPATH + "subp.subp") @mock.patch(MPATH + "subp.which", return_value=False) def test_install_drivers_handles_old_ubuntu_drivers_gracefully( - self, m_which, m_subp, m_tmp): + self, m_which, m_subp, m_tmp + ): """Older 
ubuntu-drivers versions should emit message and raise error""" tdir = self.tmp_dir() - debconf_file = os.path.join(tdir, 'nvidia.template') + debconf_file = os.path.join(tdir, "nvidia.template") m_tmp.return_value = tdir myCloud = mock.MagicMock() @@ -195,50 +226,68 @@ class TestUbuntuDrivers(CiTestCase): if cmd[0].startswith(tdir): return raise ProcessExecutionError( - stderr=OLD_UBUNTU_DRIVERS_ERROR_STDERR, exit_code=2) + stderr=OLD_UBUNTU_DRIVERS_ERROR_STDERR, exit_code=2 + ) + m_subp.side_effect = fake_subp with self.assertRaises(Exception): drivers.handle( - 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None) - self.assertEqual([mock.call(['ubuntu-drivers-common'])], - myCloud.distro.install_packages.call_args_list) + "ubuntu_drivers", self.cfg_accepted, myCloud, None, None + ) self.assertEqual( - [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), - mock.call(self.install_gpgpu)], - m_subp.call_args_list) - self.assertIn('WARNING: the available version of ubuntu-drivers is' - ' too old to perform requested driver installation', - self.logs.getvalue()) + [mock.call(["ubuntu-drivers-common"])], + myCloud.distro.install_packages.call_args_list, + ) + self.assertEqual( + [ + mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), + mock.call(self.install_gpgpu), + ], + m_subp.call_args_list, + ) + self.assertIn( + "WARNING: the available version of ubuntu-drivers is" + " too old to perform requested driver installation", + self.logs.getvalue(), + ) # Sub-class TestUbuntuDrivers to run the same test cases, but with a version class TestUbuntuDriversWithVersion(TestUbuntuDrivers): cfg_accepted = { - 'drivers': {'nvidia': {'license-accepted': True, 'version': '123'}}} - install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:123'] + "drivers": {"nvidia": {"license-accepted": True, "version": "123"}} + } + install_gpgpu = ["ubuntu-drivers", "install", "--gpgpu", "nvidia:123"] @mock.patch(M_TMP_PATH) - @mock.patch(MPATH + "subp.subp", return_value=('', '')) + @mock.patch(MPATH + "subp.subp", return_value=("", "")) @mock.patch(MPATH + "subp.which", return_value=False) def test_version_none_uses_latest(self, m_which, m_subp, m_tmp): tdir = self.tmp_dir() - debconf_file = os.path.join(tdir, 'nvidia.template') + debconf_file = os.path.join(tdir, "nvidia.template") m_tmp.return_value = tdir myCloud = mock.MagicMock() version_none_cfg = { - 'drivers': {'nvidia': {'license-accepted': True, 'version': None}}} - drivers.handle( - 'ubuntu_drivers', version_none_cfg, myCloud, None, None) + "drivers": {"nvidia": {"license-accepted": True, "version": None}} + } + drivers.handle("ubuntu_drivers", version_none_cfg, myCloud, None, None) self.assertEqual( - [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), - mock.call(['ubuntu-drivers', 'install', '--gpgpu', 'nvidia'])], - m_subp.call_args_list) + [ + mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), + mock.call(["ubuntu-drivers", "install", "--gpgpu", "nvidia"]), + ], + m_subp.call_args_list, + ) def test_specifying_a_version_doesnt_override_license_acceptance(self): - self._assert_inert_with_config({ - 'drivers': {'nvidia': {'license-accepted': False, - 'version': '123'}} - }) + self._assert_inert_with_config( + { + "drivers": { + "nvidia": {"license-accepted": False, "version": "123"} + } + } + ) + # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_update_etc_hosts.py b/tests/unittests/config/test_cc_update_etc_hosts.py index 35ad6413..2bbc16f4 100644 --- 
a/tests/unittests/config/test_cc_update_etc_hosts.py +++ b/tests/unittests/config/test_cc_update_etc_hosts.py @@ -1,18 +1,13 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit.config import cc_update_etc_hosts - -from cloudinit import cloud -from cloudinit import distros -from cloudinit import helpers -from cloudinit import util - -from tests.unittests import helpers as t_help - import logging import os import shutil +from cloudinit import cloud, distros, helpers, util +from cloudinit.config import cc_update_etc_hosts +from tests.unittests import helpers as t_help + LOG = logging.getLogger(__name__) @@ -28,46 +23,46 @@ class TestHostsFile(t_help.FilesystemMockingTestCase): def test_write_etc_hosts_suse_localhost(self): cfg = { - 'manage_etc_hosts': 'localhost', - 'hostname': 'cloud-init.test.us' + "manage_etc_hosts": "localhost", + "hostname": "cloud-init.test.us", } - os.makedirs('%s/etc/' % self.tmp) - hosts_content = '192.168.1.1 blah.blah.us blah\n' - fout = open('%s/etc/hosts' % self.tmp, 'w') + os.makedirs("%s/etc/" % self.tmp) + hosts_content = "192.168.1.1 blah.blah.us blah\n" + fout = open("%s/etc/hosts" % self.tmp, "w") fout.write(hosts_content) fout.close() - distro = self._fetch_distro('sles') - distro.hosts_fn = '%s/etc/hosts' % self.tmp + distro = self._fetch_distro("sles") + distro.hosts_fn = "%s/etc/hosts" % self.tmp paths = helpers.Paths({}) ds = None cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) - cc_update_etc_hosts.handle('test', cfg, cc, LOG, []) - contents = util.load_file('%s/etc/hosts' % self.tmp) - if '127.0.1.1\tcloud-init.test.us\tcloud-init' not in contents: - self.assertIsNone('No entry for 127.0.1.1 in etc/hosts') - if '192.168.1.1\tblah.blah.us\tblah' not in contents: - self.assertIsNone('Default etc/hosts content modified') + cc_update_etc_hosts.handle("test", cfg, cc, LOG, []) + contents = util.load_file("%s/etc/hosts" % self.tmp) + if "127.0.1.1\tcloud-init.test.us\tcloud-init" not in contents: + self.assertIsNone("No entry for 127.0.1.1 in etc/hosts") + if "192.168.1.1\tblah.blah.us\tblah" not in contents: + self.assertIsNone("Default etc/hosts content modified") @t_help.skipUnlessJinja() def test_write_etc_hosts_suse_template(self): cfg = { - 'manage_etc_hosts': 'template', - 'hostname': 'cloud-init.test.us' + "manage_etc_hosts": "template", + "hostname": "cloud-init.test.us", } shutil.copytree( - t_help.cloud_init_project_dir('templates'), - '%s/etc/cloud/templates' % self.tmp, + t_help.cloud_init_project_dir("templates"), + "%s/etc/cloud/templates" % self.tmp, ) - distro = self._fetch_distro('sles') + distro = self._fetch_distro("sles") paths = helpers.Paths({}) - paths.template_tpl = '%s' % self.tmp + '/etc/cloud/templates/%s.tmpl' + paths.template_tpl = "%s" % self.tmp + "/etc/cloud/templates/%s.tmpl" ds = None cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) - cc_update_etc_hosts.handle('test', cfg, cc, LOG, []) - contents = util.load_file('%s/etc/hosts' % self.tmp) - if '127.0.1.1 cloud-init.test.us cloud-init' not in contents: - self.assertIsNone('No entry for 127.0.1.1 in etc/hosts') - if '::1 cloud-init.test.us cloud-init' not in contents: - self.assertIsNone('No entry for 127.0.0.1 in etc/hosts') + cc_update_etc_hosts.handle("test", cfg, cc, LOG, []) + contents = util.load_file("%s/etc/hosts" % self.tmp) + if "127.0.1.1 cloud-init.test.us cloud-init" not in contents: + self.assertIsNone("No entry for 127.0.1.1 in etc/hosts") + if "::1 
cloud-init.test.us cloud-init" not in contents: + self.assertIsNone("No entry for 127.0.0.1 in etc/hosts") diff --git a/tests/unittests/config/test_cc_users_groups.py b/tests/unittests/config/test_cc_users_groups.py index 4ef844cb..0bd3c980 100644 --- a/tests/unittests/config/test_cc_users_groups.py +++ b/tests/unittests/config/test_cc_users_groups.py @@ -7,8 +7,8 @@ from tests.unittests.helpers import CiTestCase, mock MODPATH = "cloudinit.config.cc_users_groups" -@mock.patch('cloudinit.distros.ubuntu.Distro.create_group') -@mock.patch('cloudinit.distros.ubuntu.Distro.create_user') +@mock.patch("cloudinit.distros.ubuntu.Distro.create_group") +@mock.patch("cloudinit.distros.ubuntu.Distro.create_user") class TestHandleUsersGroups(CiTestCase): """Test cc_users_groups handling of config.""" @@ -18,58 +18,90 @@ class TestHandleUsersGroups(CiTestCase): """Test handle with no config will not create users or groups.""" cfg = {} # merged cloud-config # System config defines a default user for the distro. - sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, - 'groups': ['lxd', 'sudo'], - 'shell': '/bin/bash'}} + sys_cfg = { + "default_user": { + "name": "ubuntu", + "lock_passwd": True, + "groups": ["lxd", "sudo"], + "shell": "/bin/bash", + } + } metadata = {} cloud = self.tmp_cloud( - distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) - cc_users_groups.handle('modulename', cfg, cloud, None, None) + distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata + ) + cc_users_groups.handle("modulename", cfg, cloud, None, None) m_user.assert_not_called() m_group.assert_not_called() def test_handle_users_in_cfg_calls_create_users(self, m_user, m_group): """When users in config, create users with distro.create_user.""" - cfg = {'users': ['default', {'name': 'me2'}]} # merged cloud-config + cfg = {"users": ["default", {"name": "me2"}]} # merged cloud-config # System config defines a default user for the distro. - sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, - 'groups': ['lxd', 'sudo'], - 'shell': '/bin/bash'}} + sys_cfg = { + "default_user": { + "name": "ubuntu", + "lock_passwd": True, + "groups": ["lxd", "sudo"], + "shell": "/bin/bash", + } + } metadata = {} cloud = self.tmp_cloud( - distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) - cc_users_groups.handle('modulename', cfg, cloud, None, None) + distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata + ) + cc_users_groups.handle("modulename", cfg, cloud, None, None) self.assertCountEqual( m_user.call_args_list, - [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, - shell='/bin/bash'), - mock.call('me2', default=False)]) + [ + mock.call( + "ubuntu", + groups="lxd,sudo", + lock_passwd=True, + shell="/bin/bash", + ), + mock.call("me2", default=False), + ], + ) m_group.assert_not_called() - @mock.patch('cloudinit.distros.freebsd.Distro.create_group') - @mock.patch('cloudinit.distros.freebsd.Distro.create_user') + @mock.patch("cloudinit.distros.freebsd.Distro.create_group") + @mock.patch("cloudinit.distros.freebsd.Distro.create_user") def test_handle_users_in_cfg_calls_create_users_on_bsd( - self, - m_fbsd_user, - m_fbsd_group, - m_linux_user, - m_linux_group, + self, + m_fbsd_user, + m_fbsd_group, + m_linux_user, + m_linux_group, ): """When users in config, create users with freebsd.create_user.""" - cfg = {'users': ['default', {'name': 'me2'}]} # merged cloud-config + cfg = {"users": ["default", {"name": "me2"}]} # merged cloud-config # System config defines a default user for the distro. 
- sys_cfg = {'default_user': {'name': 'freebsd', 'lock_passwd': True, - 'groups': ['wheel'], - 'shell': '/bin/tcsh'}} + sys_cfg = { + "default_user": { + "name": "freebsd", + "lock_passwd": True, + "groups": ["wheel"], + "shell": "/bin/tcsh", + } + } metadata = {} cloud = self.tmp_cloud( - distro='freebsd', sys_cfg=sys_cfg, metadata=metadata) - cc_users_groups.handle('modulename', cfg, cloud, None, None) + distro="freebsd", sys_cfg=sys_cfg, metadata=metadata + ) + cc_users_groups.handle("modulename", cfg, cloud, None, None) self.assertCountEqual( m_fbsd_user.call_args_list, - [mock.call('freebsd', groups='wheel', lock_passwd=True, - shell='/bin/tcsh'), - mock.call('me2', default=False)]) + [ + mock.call( + "freebsd", + groups="wheel", + lock_passwd=True, + shell="/bin/tcsh", + ), + mock.call("me2", default=False), + ], + ) m_fbsd_group.assert_not_called() m_linux_group.assert_not_called() m_linux_user.assert_not_called() @@ -77,96 +109,160 @@ class TestHandleUsersGroups(CiTestCase): def test_users_with_ssh_redirect_user_passes_keys(self, m_user, m_group): """When ssh_redirect_user is True pass default user and cloud keys.""" cfg = { - 'users': ['default', {'name': 'me2', 'ssh_redirect_user': True}]} + "users": ["default", {"name": "me2", "ssh_redirect_user": True}] + } # System config defines a default user for the distro. - sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, - 'groups': ['lxd', 'sudo'], - 'shell': '/bin/bash'}} - metadata = {'public-keys': ['key1']} + sys_cfg = { + "default_user": { + "name": "ubuntu", + "lock_passwd": True, + "groups": ["lxd", "sudo"], + "shell": "/bin/bash", + } + } + metadata = {"public-keys": ["key1"]} cloud = self.tmp_cloud( - distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) - cc_users_groups.handle('modulename', cfg, cloud, None, None) + distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata + ) + cc_users_groups.handle("modulename", cfg, cloud, None, None) self.assertCountEqual( m_user.call_args_list, - [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, - shell='/bin/bash'), - mock.call('me2', cloud_public_ssh_keys=['key1'], default=False, - ssh_redirect_user='ubuntu')]) + [ + mock.call( + "ubuntu", + groups="lxd,sudo", + lock_passwd=True, + shell="/bin/bash", + ), + mock.call( + "me2", + cloud_public_ssh_keys=["key1"], + default=False, + ssh_redirect_user="ubuntu", + ), + ], + ) m_group.assert_not_called() def test_users_with_ssh_redirect_user_default_str(self, m_user, m_group): """When ssh_redirect_user is 'default' pass default username.""" cfg = { - 'users': ['default', {'name': 'me2', - 'ssh_redirect_user': 'default'}]} + "users": [ + "default", + {"name": "me2", "ssh_redirect_user": "default"}, + ] + } # System config defines a default user for the distro. 
- sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, - 'groups': ['lxd', 'sudo'], - 'shell': '/bin/bash'}} - metadata = {'public-keys': ['key1']} + sys_cfg = { + "default_user": { + "name": "ubuntu", + "lock_passwd": True, + "groups": ["lxd", "sudo"], + "shell": "/bin/bash", + } + } + metadata = {"public-keys": ["key1"]} cloud = self.tmp_cloud( - distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) - cc_users_groups.handle('modulename', cfg, cloud, None, None) + distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata + ) + cc_users_groups.handle("modulename", cfg, cloud, None, None) self.assertCountEqual( m_user.call_args_list, - [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, - shell='/bin/bash'), - mock.call('me2', cloud_public_ssh_keys=['key1'], default=False, - ssh_redirect_user='ubuntu')]) + [ + mock.call( + "ubuntu", + groups="lxd,sudo", + lock_passwd=True, + shell="/bin/bash", + ), + mock.call( + "me2", + cloud_public_ssh_keys=["key1"], + default=False, + ssh_redirect_user="ubuntu", + ), + ], + ) m_group.assert_not_called() def test_users_with_ssh_redirect_user_non_default(self, m_user, m_group): """Warn when ssh_redirect_user is not 'default'.""" cfg = { - 'users': ['default', {'name': 'me2', - 'ssh_redirect_user': 'snowflake'}]} + "users": [ + "default", + {"name": "me2", "ssh_redirect_user": "snowflake"}, + ] + } # System config defines a default user for the distro. - sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, - 'groups': ['lxd', 'sudo'], - 'shell': '/bin/bash'}} - metadata = {'public-keys': ['key1']} + sys_cfg = { + "default_user": { + "name": "ubuntu", + "lock_passwd": True, + "groups": ["lxd", "sudo"], + "shell": "/bin/bash", + } + } + metadata = {"public-keys": ["key1"]} cloud = self.tmp_cloud( - distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata + ) with self.assertRaises(ValueError) as context_manager: - cc_users_groups.handle('modulename', cfg, cloud, None, None) + cc_users_groups.handle("modulename", cfg, cloud, None, None) m_group.assert_not_called() self.assertEqual( - 'Not creating user me2. Invalid value of ssh_redirect_user:' - ' snowflake. Expected values: true, default or false.', - str(context_manager.exception)) + "Not creating user me2. Invalid value of ssh_redirect_user:" + " snowflake. Expected values: true, default or false.", + str(context_manager.exception), + ) def test_users_with_ssh_redirect_user_default_false(self, m_user, m_group): """When unspecified ssh_redirect_user is false and not set up.""" - cfg = {'users': ['default', {'name': 'me2'}]} + cfg = {"users": ["default", {"name": "me2"}]} # System config defines a default user for the distro. 
- sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, - 'groups': ['lxd', 'sudo'], - 'shell': '/bin/bash'}} - metadata = {'public-keys': ['key1']} + sys_cfg = { + "default_user": { + "name": "ubuntu", + "lock_passwd": True, + "groups": ["lxd", "sudo"], + "shell": "/bin/bash", + } + } + metadata = {"public-keys": ["key1"]} cloud = self.tmp_cloud( - distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) - cc_users_groups.handle('modulename', cfg, cloud, None, None) + distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata + ) + cc_users_groups.handle("modulename", cfg, cloud, None, None) self.assertCountEqual( m_user.call_args_list, - [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, - shell='/bin/bash'), - mock.call('me2', default=False)]) + [ + mock.call( + "ubuntu", + groups="lxd,sudo", + lock_passwd=True, + shell="/bin/bash", + ), + mock.call("me2", default=False), + ], + ) m_group.assert_not_called() def test_users_ssh_redirect_user_and_no_default(self, m_user, m_group): """Warn when ssh_redirect_user is True and no default user present.""" cfg = { - 'users': ['default', {'name': 'me2', 'ssh_redirect_user': True}]} + "users": ["default", {"name": "me2", "ssh_redirect_user": True}] + } # System config defines *no* default user for the distro. sys_cfg = {} metadata = {} # no public-keys defined cloud = self.tmp_cloud( - distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) - cc_users_groups.handle('modulename', cfg, cloud, None, None) - m_user.assert_called_once_with('me2', default=False) + distro="ubuntu", sys_cfg=sys_cfg, metadata=metadata + ) + cc_users_groups.handle("modulename", cfg, cloud, None, None) + m_user.assert_called_once_with("me2", default=False) m_group.assert_not_called() self.assertEqual( - 'WARNING: Ignoring ssh_redirect_user: True for me2. No' - ' default_user defined. Perhaps missing' - ' cloud configuration users: [default, ..].\n', - self.logs.getvalue()) + "WARNING: Ignoring ssh_redirect_user: True for me2. No" + " default_user defined. 
Perhaps missing" + " cloud configuration users: [default, ..].\n", + self.logs.getvalue(), + ) diff --git a/tests/unittests/config/test_cc_write_files.py b/tests/unittests/config/test_cc_write_files.py index 99248f74..7eea99d3 100644 --- a/tests/unittests/config/test_cc_write_files.py +++ b/tests/unittests/config/test_cc_write_files.py @@ -7,13 +7,15 @@ import io import shutil import tempfile -from cloudinit.config.cc_write_files import ( - handle, decode_perms, write_files) from cloudinit import log as logging from cloudinit import util - +from cloudinit.config.cc_write_files import decode_perms, handle, write_files from tests.unittests.helpers import ( - CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema) + CiTestCase, + FilesystemMockingTestCase, + mock, + skipUnlessJsonSchema, +) LOG = logging.getLogger(__name__) @@ -35,73 +37,89 @@ write_files: """ YAML_CONTENT_EXPECTED = { - '/usr/bin/hello': "#!/bin/sh\necho hello world\n", - '/wark': "foobar\n", - '/tmp/message': "hi mom line 1\nhi mom line 2\n", + "/usr/bin/hello": "#!/bin/sh\necho hello world\n", + "/wark": "foobar\n", + "/tmp/message": "hi mom line 1\nhi mom line 2\n", } VALID_SCHEMA = { - 'write_files': [ - {'append': False, 'content': 'a', 'encoding': 'gzip', 'owner': 'jeff', - 'path': '/some', 'permissions': '0777'} + "write_files": [ + { + "append": False, + "content": "a", + "encoding": "gzip", + "owner": "jeff", + "path": "/some", + "permissions": "0777", + } ] } INVALID_SCHEMA = { # Dropped required path key - 'write_files': [ - {'append': False, 'content': 'a', 'encoding': 'gzip', 'owner': 'jeff', - 'permissions': '0777'} + "write_files": [ + { + "append": False, + "content": "a", + "encoding": "gzip", + "owner": "jeff", + "permissions": "0777", + } ] } @skipUnlessJsonSchema() -@mock.patch('cloudinit.config.cc_write_files.write_files') +@mock.patch("cloudinit.config.cc_write_files.write_files") class TestWriteFilesSchema(CiTestCase): with_logs = True def test_schema_validation_warns_missing_path(self, m_write_files): """The only required file item property is 'path'.""" - cc = self.tmp_cloud('ubuntu') - valid_config = {'write_files': [{'path': '/some/path'}]} - handle('cc_write_file', valid_config, cc, LOG, []) - self.assertNotIn('Invalid config:', self.logs.getvalue()) - handle('cc_write_file', INVALID_SCHEMA, cc, LOG, []) - self.assertIn('Invalid config:', self.logs.getvalue()) + cc = self.tmp_cloud("ubuntu") + valid_config = {"write_files": [{"path": "/some/path"}]} + handle("cc_write_file", valid_config, cc, LOG, []) + self.assertNotIn("Invalid config:", self.logs.getvalue()) + handle("cc_write_file", INVALID_SCHEMA, cc, LOG, []) + self.assertIn("Invalid config:", self.logs.getvalue()) self.assertIn("'path' is a required property", self.logs.getvalue()) def test_schema_validation_warns_non_string_type_for_files( - self, m_write_files): + self, m_write_files + ): """Schema validation warns of non-string values for each file item.""" - cc = self.tmp_cloud('ubuntu') - for key in VALID_SCHEMA['write_files'][0].keys(): - if key == 'append': - key_type = 'boolean' + cc = self.tmp_cloud("ubuntu") + for key in VALID_SCHEMA["write_files"][0].keys(): + if key == "append": + key_type = "boolean" else: - key_type = 'string' + key_type = "string" invalid_config = copy.deepcopy(VALID_SCHEMA) - invalid_config['write_files'][0][key] = 1 - handle('cc_write_file', invalid_config, cc, LOG, []) + invalid_config["write_files"][0][key] = 1 + handle("cc_write_file", invalid_config, cc, LOG, []) self.assertIn( - 
mock.call('cc_write_file', invalid_config['write_files']), - m_write_files.call_args_list) + mock.call("cc_write_file", invalid_config["write_files"]), + m_write_files.call_args_list, + ) self.assertIn( - 'write_files.0.%s: 1 is not of type \'%s\'' % (key, key_type), - self.logs.getvalue()) - self.assertIn('Invalid config:', self.logs.getvalue()) + "write_files.0.%s: 1 is not of type '%s'" % (key, key_type), + self.logs.getvalue(), + ) + self.assertIn("Invalid config:", self.logs.getvalue()) def test_schema_validation_warns_on_additional_undefined_propertes( - self, m_write_files): + self, m_write_files + ): """Schema validation warns on additional undefined file properties.""" - cc = self.tmp_cloud('ubuntu') + cc = self.tmp_cloud("ubuntu") invalid_config = copy.deepcopy(VALID_SCHEMA) - invalid_config['write_files'][0]['bogus'] = 'value' - handle('cc_write_file', invalid_config, cc, LOG, []) + invalid_config["write_files"][0]["bogus"] = "value" + handle("cc_write_file", invalid_config, cc, LOG, []) self.assertIn( "Invalid config:\nwrite_files.0: Additional properties" " are not allowed ('bogus' was unexpected)", - self.logs.getvalue()) + self.logs.getvalue(), + ) class TestWriteFiles(FilesystemMockingTestCase): @@ -116,20 +134,20 @@ class TestWriteFiles(FilesystemMockingTestCase): @skipUnlessJsonSchema() def test_handler_schema_validation_warns_non_array_type(self): """Schema validation warns of non-array value.""" - invalid_config = {'write_files': 1} - cc = self.tmp_cloud('ubuntu') + invalid_config = {"write_files": 1} + cc = self.tmp_cloud("ubuntu") with self.assertRaises(TypeError): - handle('cc_write_file', invalid_config, cc, LOG, []) + handle("cc_write_file", invalid_config, cc, LOG, []) self.assertIn( - 'Invalid config:\nwrite_files: 1 is not of type \'array\'', - self.logs.getvalue()) + "Invalid config:\nwrite_files: 1 is not of type 'array'", + self.logs.getvalue(), + ) def test_simple(self): self.patchUtils(self.tmp) expected = "hello world\n" filename = "/tmp/my.file" - write_files( - "test_simple", [{"content": expected, "path": filename}]) + write_files("test_simple", [{"content": expected, "path": filename}]) self.assertEqual(util.load_file(filename), expected) def test_append(self): @@ -141,13 +159,14 @@ class TestWriteFiles(FilesystemMockingTestCase): util.write_file(filename, existing) write_files( "test_append", - [{"content": added, "path": filename, "append": "true"}]) + [{"content": added, "path": filename, "append": "true"}], + ) self.assertEqual(util.load_file(filename), expected) def test_yaml_binary(self): self.patchUtils(self.tmp) data = util.load_yaml(YAML_TEXT) - write_files("testname", data['write_files']) + write_files("testname", data["write_files"]) for path, content in YAML_CONTENT_EXPECTED.items(): self.assertEqual(util.load_file(path), content) @@ -158,13 +177,13 @@ class TestWriteFiles(FilesystemMockingTestCase): # for 'gz', 'gzip', 'gz+base64' ... 
data = b"foobzr" utf8_valid = b"foobzr" - utf8_invalid = b'ab\xaadef' + utf8_invalid = b"ab\xaadef" files = [] expected = [] - gz_aliases = ('gz', 'gzip') - gz_b64_aliases = ('gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64') - b64_aliases = ('base64', 'b64') + gz_aliases = ("gz", "gzip") + gz_b64_aliases = ("gz+base64", "gzip+base64", "gz+b64", "gzip+b64") + b64_aliases = ("base64", "b64") datum = (("utf8", utf8_valid), ("no-utf8", utf8_invalid)) for name, data in datum: @@ -173,11 +192,13 @@ class TestWriteFiles(FilesystemMockingTestCase): b64 = (base64.b64encode(data), b64_aliases) for content, aliases in (gz, gz_b64, b64): for enc in aliases: - cur = {'content': content, - 'path': '/tmp/file-%s-%s' % (name, enc), - 'encoding': enc} + cur = { + "content": content, + "path": "/tmp/file-%s-%s" % (name, enc), + "encoding": enc, + } files.append(cur) - expected.append((cur['path'], data)) + expected.append((cur["path"], data)) write_files("test_decoding", files) @@ -185,20 +206,17 @@ class TestWriteFiles(FilesystemMockingTestCase): self.assertEqual(util.load_file(path, decode=False), content) # make sure we actually wrote *some* files. - flen_expected = ( - len(gz_aliases + gz_b64_aliases + b64_aliases) * len(datum)) + flen_expected = len(gz_aliases + gz_b64_aliases + b64_aliases) * len( + datum + ) self.assertEqual(len(expected), flen_expected) def test_deferred(self): self.patchUtils(self.tmp) - file_path = '/tmp/deferred.file' - config = { - 'write_files': [ - {'path': file_path, 'defer': True} - ] - } - cc = self.tmp_cloud('ubuntu') - handle('cc_write_file', config, cc, LOG, []) + file_path = "/tmp/deferred.file" + config = {"write_files": [{"path": file_path, "defer": True}]} + cc = self.tmp_cloud("ubuntu") + handle("cc_write_file", config, cc, LOG, []) with self.assertRaises(FileNotFoundError): util.load_file(file_path) diff --git a/tests/unittests/config/test_cc_write_files_deferred.py b/tests/unittests/config/test_cc_write_files_deferred.py index d33d250a..3faac1bf 100644 --- a/tests/unittests/config/test_cc_write_files_deferred.py +++ b/tests/unittests/config/test_cc_write_files_deferred.py @@ -1,48 +1,54 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-import tempfile import shutil +import tempfile -from cloudinit.config.cc_write_files_deferred import (handle) -from .test_cc_write_files import (VALID_SCHEMA) from cloudinit import log as logging from cloudinit import util - +from cloudinit.config.cc_write_files_deferred import handle from tests.unittests.helpers import ( - CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema) + CiTestCase, + FilesystemMockingTestCase, + mock, + skipUnlessJsonSchema, +) + +from .test_cc_write_files import VALID_SCHEMA LOG = logging.getLogger(__name__) @skipUnlessJsonSchema() -@mock.patch('cloudinit.config.cc_write_files_deferred.write_files') +@mock.patch("cloudinit.config.cc_write_files_deferred.write_files") class TestWriteFilesDeferredSchema(CiTestCase): with_logs = True - def test_schema_validation_warns_invalid_value(self, - m_write_files_deferred): + def test_schema_validation_warns_invalid_value( + self, m_write_files_deferred + ): """If 'defer' is defined, it must be of type 'bool'.""" valid_config = { - 'write_files': [ - {**VALID_SCHEMA.get('write_files')[0], 'defer': True} + "write_files": [ + {**VALID_SCHEMA.get("write_files")[0], "defer": True} ] } invalid_config = { - 'write_files': [ - {**VALID_SCHEMA.get('write_files')[0], 'defer': str('no')} + "write_files": [ + {**VALID_SCHEMA.get("write_files")[0], "defer": str("no")} ] } - cc = self.tmp_cloud('ubuntu') - handle('cc_write_files_deferred', valid_config, cc, LOG, []) - self.assertNotIn('Invalid config:', self.logs.getvalue()) - handle('cc_write_files_deferred', invalid_config, cc, LOG, []) - self.assertIn('Invalid config:', self.logs.getvalue()) - self.assertIn("defer: 'no' is not of type 'boolean'", - self.logs.getvalue()) + cc = self.tmp_cloud("ubuntu") + handle("cc_write_files_deferred", valid_config, cc, LOG, []) + self.assertNotIn("Invalid config:", self.logs.getvalue()) + handle("cc_write_files_deferred", invalid_config, cc, LOG, []) + self.assertIn("Invalid config:", self.logs.getvalue()) + self.assertIn( + "defer: 'no' is not of type 'boolean'", self.logs.getvalue() + ) class TestWriteFilesDeferred(FilesystemMockingTestCase): @@ -58,20 +64,20 @@ class TestWriteFilesDeferred(FilesystemMockingTestCase): self.patchUtils(self.tmp) expected = "hello world\n" config = { - 'write_files': [ + "write_files": [ { - 'path': '/tmp/deferred.file', - 'defer': True, - 'content': expected + "path": "/tmp/deferred.file", + "defer": True, + "content": expected, }, - {'path': '/tmp/not_deferred.file'} + {"path": "/tmp/not_deferred.file"}, ] } - cc = self.tmp_cloud('ubuntu') - handle('cc_write_files_deferred', config, cc, LOG, []) - self.assertEqual(util.load_file('/tmp/deferred.file'), expected) + cc = self.tmp_cloud("ubuntu") + handle("cc_write_files_deferred", config, cc, LOG, []) + self.assertEqual(util.load_file("/tmp/deferred.file"), expected) with self.assertRaises(FileNotFoundError): - util.load_file('/tmp/not_deferred.file') + util.load_file("/tmp/not_deferred.file") # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_yum_add_repo.py b/tests/unittests/config/test_cc_yum_add_repo.py index 2f11b96a..550b0af2 100644 --- a/tests/unittests/config/test_cc_yum_add_repo.py +++ b/tests/unittests/config/test_cc_yum_add_repo.py @@ -20,92 +20,101 @@ class TestConfig(helpers.FilesystemMockingTestCase): def test_bad_config(self): cfg = { - 'yum_repos': { - 'epel-testing': { - 'name': 'Extra Packages for Enterprise Linux 5 - Testing', + "yum_repos": { + "epel-testing": { + "name": "Extra Packages for Enterprise Linux 5 - Testing", 
# Missing this should cause the repo not to be written # 'baseurl': 'http://blah.org/pub/epel/testing/5/$barch', - 'enabled': False, - 'gpgcheck': True, - 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL', - 'failovermethod': 'priority', + "enabled": False, + "gpgcheck": True, + "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL", + "failovermethod": "priority", }, }, } self.patchUtils(self.tmp) - cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, []) - self.assertRaises(IOError, util.load_file, - "/etc/yum.repos.d/epel_testing.repo") + cc_yum_add_repo.handle("yum_add_repo", cfg, None, LOG, []) + self.assertRaises( + IOError, util.load_file, "/etc/yum.repos.d/epel_testing.repo" + ) def test_write_config(self): cfg = { - 'yum_repos': { - 'epel-testing': { - 'name': 'Extra Packages for Enterprise Linux 5 - Testing', - 'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch', - 'enabled': False, - 'gpgcheck': True, - 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL', - 'failovermethod': 'priority', + "yum_repos": { + "epel-testing": { + "name": "Extra Packages for Enterprise Linux 5 - Testing", + "baseurl": "http://blah.org/pub/epel/testing/5/$basearch", + "enabled": False, + "gpgcheck": True, + "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL", + "failovermethod": "priority", }, }, } self.patchUtils(self.tmp) - cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, []) + cc_yum_add_repo.handle("yum_add_repo", cfg, None, LOG, []) contents = util.load_file("/etc/yum.repos.d/epel_testing.repo") parser = configparser.ConfigParser() parser.read_string(contents) expected = { - 'epel_testing': { - 'name': 'Extra Packages for Enterprise Linux 5 - Testing', - 'failovermethod': 'priority', - 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL', - 'enabled': '0', - 'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch', - 'gpgcheck': '1', + "epel_testing": { + "name": "Extra Packages for Enterprise Linux 5 - Testing", + "failovermethod": "priority", + "gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL", + "enabled": "0", + "baseurl": "http://blah.org/pub/epel/testing/5/$basearch", + "gpgcheck": "1", } } for section in expected: - self.assertTrue(parser.has_section(section), - "Contains section {0}".format(section)) + self.assertTrue( + parser.has_section(section), + "Contains section {0}".format(section), + ) for k, v in expected[section].items(): self.assertEqual(parser.get(section, k), v) def test_write_config_array(self): cfg = { - 'yum_repos': { - 'puppetlabs-products': { - 'name': 'Puppet Labs Products El 6 - $basearch', - 'baseurl': - 'http://yum.puppetlabs.com/el/6/products/$basearch', - 'gpgkey': [ - 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs', - 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet', + "yum_repos": { + "puppetlabs-products": { + "name": "Puppet Labs Products El 6 - $basearch", + "baseurl": ( + "http://yum.puppetlabs.com/el/6/products/$basearch" + ), + "gpgkey": [ + "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs", + "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet", ], - 'enabled': True, - 'gpgcheck': True, + "enabled": True, + "gpgcheck": True, } } } self.patchUtils(self.tmp) - cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, []) + cc_yum_add_repo.handle("yum_add_repo", cfg, None, LOG, []) contents = util.load_file("/etc/yum.repos.d/puppetlabs_products.repo") parser = configparser.ConfigParser() parser.read_string(contents) expected = { - 'puppetlabs_products': { - 'name': 'Puppet Labs Products El 6 - $basearch', - 'baseurl': 
'http://yum.puppetlabs.com/el/6/products/$basearch', - 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs\n' - 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet', - 'enabled': '1', - 'gpgcheck': '1', + "puppetlabs_products": { + "name": "Puppet Labs Products El 6 - $basearch", + "baseurl": "http://yum.puppetlabs.com/el/6/products/$basearch", + "gpgkey": ( + "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs\n" + "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet" + ), + "enabled": "1", + "gpgcheck": "1", } } for section in expected: - self.assertTrue(parser.has_section(section), - "Contains section {0}".format(section)) + self.assertTrue( + parser.has_section(section), + "Contains section {0}".format(section), + ) for k, v in expected[section].items(): self.assertEqual(parser.get(section, k), v) + # vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_zypper_add_repo.py b/tests/unittests/config/test_cc_zypper_add_repo.py index 4af04bee..4304fee1 100644 --- a/tests/unittests/config/test_cc_zypper_add_repo.py +++ b/tests/unittests/config/test_cc_zypper_add_repo.py @@ -17,31 +17,28 @@ class TestConfig(helpers.FilesystemMockingTestCase): def setUp(self): super(TestConfig, self).setUp() self.tmp = self.tmp_dir() - self.zypp_conf = 'etc/zypp/zypp.conf' + self.zypp_conf = "etc/zypp/zypp.conf" def test_bad_repo_config(self): """Config has no baseurl, no file should be written""" cfg = { - 'repos': [ - { - 'id': 'foo', - 'name': 'suse-test', - 'enabled': '1' - }, + "repos": [ + {"id": "foo", "name": "suse-test", "enabled": "1"}, ] } self.patchUtils(self.tmp) - cc_zypper_add_repo._write_repos(cfg['repos'], '/etc/zypp/repos.d') - self.assertRaises(IOError, util.load_file, - "/etc/zypp/repos.d/foo.repo") + cc_zypper_add_repo._write_repos(cfg["repos"], "/etc/zypp/repos.d") + self.assertRaises( + IOError, util.load_file, "/etc/zypp/repos.d/foo.repo" + ) def test_write_repos(self): """Verify valid repos get written""" cfg = self._get_base_config_repos() root_d = self.tmp_dir() - cc_zypper_add_repo._write_repos(cfg['zypper']['repos'], root_d) - repos = glob.glob('%s/*.repo' % root_d) - expected_repos = ['testing-foo.repo', 'testing-bar.repo'] + cc_zypper_add_repo._write_repos(cfg["zypper"]["repos"], root_d) + repos = glob.glob("%s/*.repo" % root_d) + expected_repos = ["testing-foo.repo", "testing-bar.repo"] if len(repos) != 2: assert 'Number of repos written is "%d" expected 2' % len(repos) for repo in repos: @@ -53,80 +50,77 @@ class TestConfig(helpers.FilesystemMockingTestCase): def test_write_repo(self): """Verify the content of a repo file""" cfg = { - 'repos': [ + "repos": [ { - 'baseurl': 'http://foo', - 'name': 'test-foo', - 'id': 'testing-foo' + "baseurl": "http://foo", + "name": "test-foo", + "id": "testing-foo", }, ] } root_d = self.tmp_dir() - cc_zypper_add_repo._write_repos(cfg['repos'], root_d) + cc_zypper_add_repo._write_repos(cfg["repos"], root_d) contents = util.load_file("%s/testing-foo.repo" % root_d) parser = configparser.ConfigParser() parser.read_string(contents) expected = { - 'testing-foo': { - 'name': 'test-foo', - 'baseurl': 'http://foo', - 'enabled': '1', - 'autorefresh': '1' + "testing-foo": { + "name": "test-foo", + "baseurl": "http://foo", + "enabled": "1", + "autorefresh": "1", } } for section in expected: - self.assertTrue(parser.has_section(section), - "Contains section {0}".format(section)) + self.assertTrue( + parser.has_section(section), + "Contains section {0}".format(section), + ) for k, v in expected[section].items(): self.assertEqual(parser.get(section, k), v) def 
test_config_write(self): """Write valid configuration data""" - cfg = { - 'config': { - 'download.deltarpm': 'False', - 'reposdir': 'foo' - } - } + cfg = {"config": {"download.deltarpm": "False", "reposdir": "foo"}} root_d = self.tmp_dir() - helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'}) + helpers.populate_dir(root_d, {self.zypp_conf: "# Zypp config\n"}) self.reRoot(root_d) - cc_zypper_add_repo._write_zypp_config(cfg['config']) + cc_zypper_add_repo._write_zypp_config(cfg["config"]) cfg_out = os.path.join(root_d, self.zypp_conf) contents = util.load_file(cfg_out) expected = [ - '# Zypp config', - '# Added via cloud.cfg', - 'download.deltarpm=False', - 'reposdir=foo' + "# Zypp config", + "# Added via cloud.cfg", + "download.deltarpm=False", + "reposdir=foo", ] - for item in contents.split('\n'): + for item in contents.split("\n"): if item not in expected: self.assertIsNone(item) - @mock.patch('cloudinit.log.logging') + @mock.patch("cloudinit.log.logging") def test_config_write_skip_configdir(self, mock_logging): """Write configuration but skip writing 'configdir' setting""" cfg = { - 'config': { - 'download.deltarpm': 'False', - 'reposdir': 'foo', - 'configdir': 'bar' + "config": { + "download.deltarpm": "False", + "reposdir": "foo", + "configdir": "bar", } } root_d = self.tmp_dir() - helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'}) + helpers.populate_dir(root_d, {self.zypp_conf: "# Zypp config\n"}) self.reRoot(root_d) - cc_zypper_add_repo._write_zypp_config(cfg['config']) + cc_zypper_add_repo._write_zypp_config(cfg["config"]) cfg_out = os.path.join(root_d, self.zypp_conf) contents = util.load_file(cfg_out) expected = [ - '# Zypp config', - '# Added via cloud.cfg', - 'download.deltarpm=False', - 'reposdir=foo' + "# Zypp config", + "# Added via cloud.cfg", + "download.deltarpm=False", + "reposdir=foo", ] - for item in contents.split('\n'): + for item in contents.split("\n"): if item not in expected: self.assertIsNone(item) # Not finding teh right path for mocking :( @@ -134,55 +128,53 @@ class TestConfig(helpers.FilesystemMockingTestCase): def test_empty_config_section_no_new_data(self): """When the config section is empty no new data should be written to - zypp.conf""" + zypp.conf""" cfg = self._get_base_config_repos() - cfg['zypper']['config'] = None + cfg["zypper"]["config"] = None root_d = self.tmp_dir() - helpers.populate_dir(root_d, {self.zypp_conf: '# No data'}) + helpers.populate_dir(root_d, {self.zypp_conf: "# No data"}) self.reRoot(root_d) - cc_zypper_add_repo._write_zypp_config(cfg.get('config', {})) + cc_zypper_add_repo._write_zypp_config(cfg.get("config", {})) cfg_out = os.path.join(root_d, self.zypp_conf) contents = util.load_file(cfg_out) - self.assertEqual(contents, '# No data') + self.assertEqual(contents, "# No data") def test_empty_config_value_no_new_data(self): """When the config section is not empty but there are no values - no new data should be written to zypp.conf""" + no new data should be written to zypp.conf""" cfg = self._get_base_config_repos() - cfg['zypper']['config'] = { - 'download.deltarpm': None - } + cfg["zypper"]["config"] = {"download.deltarpm": None} root_d = self.tmp_dir() - helpers.populate_dir(root_d, {self.zypp_conf: '# No data'}) + helpers.populate_dir(root_d, {self.zypp_conf: "# No data"}) self.reRoot(root_d) - cc_zypper_add_repo._write_zypp_config(cfg.get('config', {})) + cc_zypper_add_repo._write_zypp_config(cfg.get("config", {})) cfg_out = os.path.join(root_d, self.zypp_conf) contents = 
util.load_file(cfg_out) - self.assertEqual(contents, '# No data') + self.assertEqual(contents, "# No data") def test_handler_full_setup(self): """Test that the handler ends up calling the renderers""" cfg = self._get_base_config_repos() - cfg['zypper']['config'] = { - 'download.deltarpm': 'False', + cfg["zypper"]["config"] = { + "download.deltarpm": "False", } root_d = self.tmp_dir() - os.makedirs('%s/etc/zypp/repos.d' % root_d) - helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'}) + os.makedirs("%s/etc/zypp/repos.d" % root_d) + helpers.populate_dir(root_d, {self.zypp_conf: "# Zypp config\n"}) self.reRoot(root_d) - cc_zypper_add_repo.handle('zypper_add_repo', cfg, None, LOG, []) + cc_zypper_add_repo.handle("zypper_add_repo", cfg, None, LOG, []) cfg_out = os.path.join(root_d, self.zypp_conf) contents = util.load_file(cfg_out) expected = [ - '# Zypp config', - '# Added via cloud.cfg', - 'download.deltarpm=False', + "# Zypp config", + "# Added via cloud.cfg", + "download.deltarpm=False", ] - for item in contents.split('\n'): + for item in contents.split("\n"): if item not in expected: self.assertIsNone(item) - repos = glob.glob('%s/etc/zypp/repos.d/*.repo' % root_d) - expected_repos = ['testing-foo.repo', 'testing-bar.repo'] + repos = glob.glob("%s/etc/zypp/repos.d/*.repo" % root_d) + expected_repos = ["testing-foo.repo", "testing-bar.repo"] if len(repos) != 2: assert 'Number of repos written is "%d" expected 2' % len(repos) for repo in repos: @@ -192,39 +184,39 @@ class TestConfig(helpers.FilesystemMockingTestCase): def test_no_config_section_no_new_data(self): """When there is no config section no new data should be written to - zypp.conf""" + zypp.conf""" cfg = self._get_base_config_repos() root_d = self.tmp_dir() - helpers.populate_dir(root_d, {self.zypp_conf: '# No data'}) + helpers.populate_dir(root_d, {self.zypp_conf: "# No data"}) self.reRoot(root_d) - cc_zypper_add_repo._write_zypp_config(cfg.get('config', {})) + cc_zypper_add_repo._write_zypp_config(cfg.get("config", {})) cfg_out = os.path.join(root_d, self.zypp_conf) contents = util.load_file(cfg_out) - self.assertEqual(contents, '# No data') + self.assertEqual(contents, "# No data") def test_no_repo_data(self): """When there is no repo data nothing should happen""" root_d = self.tmp_dir() self.reRoot(root_d) cc_zypper_add_repo._write_repos(None, root_d) - content = glob.glob('%s/*' % root_d) + content = glob.glob("%s/*" % root_d) self.assertEqual(len(content), 0) def _get_base_config_repos(self): """Basic valid repo configuration""" cfg = { - 'zypper': { - 'repos': [ + "zypper": { + "repos": [ { - 'baseurl': 'http://foo', - 'name': 'test-foo', - 'id': 'testing-foo' + "baseurl": "http://foo", + "name": "test-foo", + "id": "testing-foo", }, { - 'baseurl': 'http://bar', - 'name': 'test-bar', - 'id': 'testing-bar' - } + "baseurl": "http://bar", + "name": "test-bar", + "id": "testing-bar", + }, ] } } diff --git a/tests/unittests/config/test_schema.py b/tests/unittests/config/test_schema.py index 40803cae..fb5b891d 100644 --- a/tests/unittests/config/test_schema.py +++ b/tests/unittests/config/test_schema.py @@ -2,35 +2,36 @@ import importlib -import sys import inspect +import itertools import logging +import sys from copy import copy -import itertools -import pytest from pathlib import Path from textwrap import dedent + +import pytest from yaml import safe_load from cloudinit.config.schema import ( CLOUD_CONFIG_HEADER, + MetaSchema, SchemaValidationError, annotated_cloudconfig_file, + get_jsonschema_validator, 
get_meta_doc, get_schema, - get_jsonschema_validator, + main, validate_cloudconfig_file, validate_cloudconfig_metaschema, validate_cloudconfig_schema, - main, - MetaSchema, ) from cloudinit.util import write_file from tests.unittests.helpers import ( CiTestCase, + cloud_init_project_dir, mock, skipUnlessJsonSchema, - cloud_init_project_dir, ) @@ -78,26 +79,25 @@ def get_module_variable(var_name) -> dict: class GetSchemaTest(CiTestCase): - def test_get_schema_coalesces_known_schema(self): """Every cloudconfig module with schema is listed in allOf keyword.""" schema = get_schema() self.assertCountEqual( [ - 'cc_apk_configure', - 'cc_apt_configure', - 'cc_bootcmd', - 'cc_locale', - 'cc_ntp', - 'cc_resizefs', - 'cc_runcmd', - 'cc_snap', - 'cc_ubuntu_advantage', - 'cc_ubuntu_drivers', - 'cc_write_files', - 'cc_zypper_add_repo', - 'cc_chef', - 'cc_install_hotplug', + "cc_apk_configure", + "cc_apt_configure", + "cc_bootcmd", + "cc_locale", + "cc_ntp", + "cc_resizefs", + "cc_runcmd", + "cc_snap", + "cc_ubuntu_advantage", + "cc_ubuntu_drivers", + "cc_write_files", + "cc_zypper_add_repo", + "cc_chef", + "cc_install_hotplug", ], [meta["id"] for meta in get_metas().values() if meta is not None], ) @@ -113,15 +113,18 @@ class SchemaValidationErrorTest(CiTestCase): def test_schema_validation_error_expects_schema_errors(self): """SchemaValidationError is initialized from schema_errors.""" - errors = (('key.path', 'unexpected key "junk"'), - ('key2.path', '"-123" is not a valid "hostname" format')) + errors = ( + ("key.path", 'unexpected key "junk"'), + ("key2.path", '"-123" is not a valid "hostname" format'), + ) exception = SchemaValidationError(schema_errors=errors) self.assertIsInstance(exception, Exception) self.assertEqual(exception.schema_errors, errors) self.assertEqual( 'Cloud config schema errors: key.path: unexpected key "junk", ' 'key2.path: "-123" is not a valid "hostname" format', - str(exception)) + str(exception), + ) self.assertTrue(isinstance(exception, ValueError)) @@ -133,18 +136,19 @@ class ValidateCloudConfigSchemaTest(CiTestCase): @skipUnlessJsonSchema() def test_validateconfig_schema_non_strict_emits_warnings(self): """When strict is False validate_cloudconfig_schema emits warnings.""" - schema = {'properties': {'p1': {'type': 'string'}}} - validate_cloudconfig_schema({'p1': -1}, schema, strict=False) + schema = {"properties": {"p1": {"type": "string"}}} + validate_cloudconfig_schema({"p1": -1}, schema, strict=False) self.assertIn( "Invalid config:\np1: -1 is not of type 'string'\n", - self.logs.getvalue()) + self.logs.getvalue(), + ) @skipUnlessJsonSchema() def test_validateconfig_schema_emits_warning_on_missing_jsonschema(self): """Warning from validate_cloudconfig_schema when missing jsonschema.""" - schema = {'properties': {'p1': {'type': 'string'}}} - with mock.patch.dict('sys.modules', **{'jsonschema': ImportError()}): - validate_cloudconfig_schema({'p1': -1}, schema, strict=True) + schema = {"properties": {"p1": {"type": "string"}}} + with mock.patch.dict("sys.modules", **{"jsonschema": ImportError()}): + validate_cloudconfig_schema({"p1": -1}, schema, strict=True) self.assertIn( "Ignoring schema validation. 
jsonschema is not present", self.logs.getvalue(), @@ -153,28 +157,28 @@ class ValidateCloudConfigSchemaTest(CiTestCase): @skipUnlessJsonSchema() def test_validateconfig_schema_strict_raises_errors(self): """When strict is True validate_cloudconfig_schema raises errors.""" - schema = {'properties': {'p1': {'type': 'string'}}} + schema = {"properties": {"p1": {"type": "string"}}} with self.assertRaises(SchemaValidationError) as context_mgr: - validate_cloudconfig_schema({'p1': -1}, schema, strict=True) + validate_cloudconfig_schema({"p1": -1}, schema, strict=True) self.assertEqual( "Cloud config schema errors: p1: -1 is not of type 'string'", - str(context_mgr.exception)) + str(context_mgr.exception), + ) @skipUnlessJsonSchema() def test_validateconfig_schema_honors_formats(self): """With strict True, validate_cloudconfig_schema errors on format.""" - schema = { - 'properties': {'p1': {'type': 'string', 'format': 'email'}}} + schema = {"properties": {"p1": {"type": "string", "format": "email"}}} with self.assertRaises(SchemaValidationError) as context_mgr: - validate_cloudconfig_schema({'p1': '-1'}, schema, strict=True) + validate_cloudconfig_schema({"p1": "-1"}, schema, strict=True) self.assertEqual( "Cloud config schema errors: p1: '-1' is not a 'email'", - str(context_mgr.exception)) + str(context_mgr.exception), + ) @skipUnlessJsonSchema() def test_validateconfig_schema_honors_formats_strict_metaschema(self): - """With strict True and strict_metascheam True, ensure errors on format - """ + """With strict and strict_metaschema True, ensure errors on format""" schema = {"properties": {"p1": {"type": "string", "format": "email"}}} with self.assertRaises(SchemaValidationError) as context_mgr: validate_cloudconfig_schema( @@ -229,15 +233,15 @@ class ValidateCloudConfigFileTest(CiTestCase): def setUp(self): super(ValidateCloudConfigFileTest, self).setUp() - self.config_file = self.tmp_path('cloudcfg.yaml') + self.config_file = self.tmp_path("cloudcfg.yaml") def test_validateconfig_file_error_on_absent_file(self): """On absent config_path, validate_cloudconfig_file errors.""" with self.assertRaises(RuntimeError) as context_mgr: - validate_cloudconfig_file('/not/here', {}) + validate_cloudconfig_file("/not/here", {}) self.assertEqual( - 'Configfile /not/here does not exist', - str(context_mgr.exception)) + "Configfile /not/here does not exist", str(context_mgr.exception) + ) def test_validateconfig_file_error_on_invalid_header(self): """On invalid header, validate_cloudconfig_file errors. @@ -245,48 +249,54 @@ class ValidateCloudConfigFileTest(CiTestCase): A SchemaValidationError is raised when the file doesn't begin with CLOUD_CONFIG_HEADER. """ - write_file(self.config_file, '#junk') + write_file(self.config_file, "#junk") with self.assertRaises(SchemaValidationError) as context_mgr: validate_cloudconfig_file(self.config_file, {}) self.assertEqual( - 'Cloud config schema errors: format-l1.c1: File {0} needs to begin' + "Cloud config schema errors: format-l1.c1: File {0} needs to begin" ' with "{1}"'.format( - self.config_file, CLOUD_CONFIG_HEADER.decode()), - str(context_mgr.exception)) + self.config_file, CLOUD_CONFIG_HEADER.decode() + ), + str(context_mgr.exception), + ) def test_validateconfig_file_error_on_non_yaml_scanner_error(self): """On non-yaml scan issues, validate_cloudconfig_file errors.""" # Generate a scanner error by providing text on a single line with # improper indent. 
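For orientation, the strict-validation behaviour these schema tests pin down reduces to collecting jsonschema errors as (path, message) pairs. A minimal standalone sketch using the public jsonschema API (the schema and config literals here are illustrative, not cloud-init internals):

import jsonschema

schema = {"properties": {"p1": {"type": "string", "format": "email"}}}
config = {"p1": -1}

validator = jsonschema.Draft4Validator(
    schema, format_checker=jsonschema.FormatChecker()
)
errors = [
    (".".join(str(p) for p in e.path), e.message)
    for e in validator.iter_errors(config)
]
# e.g. errors == [("p1", "-1 is not of type 'string'")]

Strict mode then amounts to raising when that list is non-empty, while non-strict mode logs the same pairs as warnings.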
- write_file(self.config_file, '#cloud-config\nasdf:\nasdf') + write_file(self.config_file, "#cloud-config\nasdf:\nasdf") with self.assertRaises(SchemaValidationError) as context_mgr: validate_cloudconfig_file(self.config_file, {}) self.assertIn( - 'schema errors: format-l3.c1: File {0} is not valid yaml.'.format( - self.config_file), - str(context_mgr.exception)) + "schema errors: format-l3.c1: File {0} is not valid yaml.".format( + self.config_file + ), + str(context_mgr.exception), + ) def test_validateconfig_file_error_on_non_yaml_parser_error(self): """On non-yaml parser issues, validate_cloudconfig_file errors.""" - write_file(self.config_file, '#cloud-config\n{}}') + write_file(self.config_file, "#cloud-config\n{}}") with self.assertRaises(SchemaValidationError) as context_mgr: validate_cloudconfig_file(self.config_file, {}) self.assertIn( - 'schema errors: format-l2.c3: File {0} is not valid yaml.'.format( - self.config_file), - str(context_mgr.exception)) + "schema errors: format-l2.c3: File {0} is not valid yaml.".format( + self.config_file + ), + str(context_mgr.exception), + ) @skipUnlessJsonSchema() def test_validateconfig_file_sctrictly_validates_schema(self): """validate_cloudconfig_file raises errors on invalid schema.""" - schema = { - 'properties': {'p1': {'type': 'string', 'format': 'string'}}} - write_file(self.config_file, '#cloud-config\np1: -1') + schema = {"properties": {"p1": {"type": "string", "format": "string"}}} + write_file(self.config_file, "#cloud-config\np1: -1") with self.assertRaises(SchemaValidationError) as context_mgr: validate_cloudconfig_file(self.config_file, schema) self.assertEqual( "Cloud config schema errors: p1: -1 is not of type 'string'", - str(context_mgr.exception)) + str(context_mgr.exception), + ) class GetSchemaDocTest(CiTestCase): @@ -321,13 +331,21 @@ class GetSchemaDocTest(CiTestCase): """get_meta_doc returns restructured text for a cloudinit schema.""" full_schema = copy(self.required_schema) full_schema.update( - {'properties': { - 'prop1': {'type': 'array', 'description': 'prop-description', - 'items': {'type': 'integer'}}}}) + { + "properties": { + "prop1": { + "type": "array", + "description": "prop-description", + "items": {"type": "integer"}, + } + } + } + ) doc = get_meta_doc(self.meta, full_schema) self.assertEqual( - dedent(""" + dedent( + """ name ---- **Summary:** title @@ -349,7 +367,8 @@ class GetSchemaDocTest(CiTestCase): [don't, expand, "this"] # --- Example2 --- ex2: true - """), + """ + ), doc, ) @@ -388,12 +407,23 @@ class GetSchemaDocTest(CiTestCase): """get_meta_doc properly indented examples as a list of strings.""" full_schema = copy(self.required_schema) full_schema.update( - {'examples': ['ex1:\n [don\'t, expand, "this"]', 'ex2: true'], - 'properties': { - 'prop1': {'type': 'array', 'description': 'prop-description', - 'items': {'type': 'integer'}}}}) + { + "examples": [ + 'ex1:\n [don\'t, expand, "this"]', + "ex2: true", + ], + "properties": { + "prop1": { + "type": "array", + "description": "prop-description", + "items": {"type": "integer"}, + } + }, + } + ) self.assertIn( - dedent(""" + dedent( + """ **Config schema**: **prop1:** (array of integer) prop-description @@ -403,7 +433,8 @@ class GetSchemaDocTest(CiTestCase): [don't, expand, "this"] # --- Example2 --- ex2: true - """), + """ + ), get_meta_doc(self.meta, full_schema), ) @@ -424,13 +455,15 @@ class GetSchemaDocTest(CiTestCase): - option3 The default value is - option1""") + option1""" + ), } } } self.assertIn( - dedent(""" + dedent( + """ **Config 
schema**: **p1:** (string) This item has the following options: @@ -440,7 +473,8 @@ class GetSchemaDocTest(CiTestCase): The default value is option1 - """), + """ + ), get_meta_doc(self.meta, schema), ) @@ -475,7 +509,7 @@ class GetSchemaDocTest(CiTestCase): "type": "string", }, "prop_array": { - "label": 'array_label', + "label": "array_label", "type": "array", "items": { "type": "object", @@ -490,7 +524,7 @@ class GetSchemaDocTest(CiTestCase): "type": "string", "label": "label2", } - } + }, } meta_doc = get_meta_doc(self.meta, schema) assert "**label1:** (string)" in meta_doc @@ -507,20 +541,23 @@ class AnnotatedCloudconfigFileTest(CiTestCase): def test_annotated_cloudconfig_file_no_schema_errors(self): """With no schema_errors, print the original content.""" - content = b'ntp:\n pools: [ntp1.pools.com]\n' + content = b"ntp:\n pools: [ntp1.pools.com]\n" self.assertEqual( - content, - annotated_cloudconfig_file({}, content, schema_errors=[])) + content, annotated_cloudconfig_file({}, content, schema_errors=[]) + ) def test_annotated_cloudconfig_file_schema_annotates_and_adds_footer(self): """With schema_errors, error lines are annotated and a footer added.""" - content = dedent("""\ + content = dedent( + """\ #cloud-config # comment ntp: pools: [-99, 75] - """).encode() - expected = dedent("""\ + """ + ).encode() + expected = dedent( + """\ #cloud-config # comment ntp: # E1 @@ -531,38 +568,48 @@ class AnnotatedCloudconfigFileTest(CiTestCase): # E2: -99 is not a string # E3: 75 is not a string - """) + """ + ) parsed_config = safe_load(content[13:]) schema_errors = [ - ('ntp', 'Some type error'), ('ntp.pools.0', '-99 is not a string'), - ('ntp.pools.1', '75 is not a string')] + ("ntp", "Some type error"), + ("ntp.pools.0", "-99 is not a string"), + ("ntp.pools.1", "75 is not a string"), + ] self.assertEqual( expected, - annotated_cloudconfig_file(parsed_config, content, schema_errors)) + annotated_cloudconfig_file(parsed_config, content, schema_errors), + ) def test_annotated_cloudconfig_file_annotates_separate_line_items(self): """Errors are annotated for lists with items on separate lines.""" - content = dedent("""\ + content = dedent( + """\ #cloud-config # comment ntp: pools: - -99 - 75 - """).encode() - expected = dedent("""\ + """ + ).encode() + expected = dedent( + """\ ntp: pools: - -99 # E1 - 75 # E2 - """) + """ + ) parsed_config = safe_load(content[13:]) schema_errors = [ - ('ntp.pools.0', '-99 is not a string'), - ('ntp.pools.1', '75 is not a string')] + ("ntp.pools.0", "-99 is not a string"), + ("ntp.pools.1", "75 is not a string"), + ] self.assertIn( expected, - annotated_cloudconfig_file(parsed_config, content, schema_errors)) + annotated_cloudconfig_file(parsed_config, content, schema_errors), + ) class TestMain: @@ -575,94 +622,94 @@ class TestMain: def test_main_exclusive_args(self, params, capsys): """Main exits non-zero and error on required exclusive args.""" params = list(itertools.chain(*[a.split() for a in params])) - with mock.patch('sys.argv', ['mycmd'] + params): + with mock.patch("sys.argv", ["mycmd"] + params): with pytest.raises(SystemExit) as context_manager: main() assert 1 == context_manager.value.code _out, err = capsys.readouterr() expected = ( - 'Error:\n' - 'Expected one of --config-file, --system or --docs arguments\n' + "Error:\n" + "Expected one of --config-file, --system or --docs arguments\n" ) assert expected == err def test_main_missing_args(self, capsys): """Main exits non-zero and reports an error on missing parameters.""" - with 
mock.patch('sys.argv', ['mycmd']): + with mock.patch("sys.argv", ["mycmd"]): with pytest.raises(SystemExit) as context_manager: main() assert 1 == context_manager.value.code _out, err = capsys.readouterr() expected = ( - 'Error:\n' - 'Expected one of --config-file, --system or --docs arguments\n' + "Error:\n" + "Expected one of --config-file, --system or --docs arguments\n" ) assert expected == err def test_main_absent_config_file(self, capsys): """Main exits non-zero when config file is absent.""" - myargs = ['mycmd', '--annotate', '--config-file', 'NOT_A_FILE'] - with mock.patch('sys.argv', myargs): + myargs = ["mycmd", "--annotate", "--config-file", "NOT_A_FILE"] + with mock.patch("sys.argv", myargs): with pytest.raises(SystemExit) as context_manager: main() assert 1 == context_manager.value.code _out, err = capsys.readouterr() - assert 'Error:\nConfigfile NOT_A_FILE does not exist\n' == err + assert "Error:\nConfigfile NOT_A_FILE does not exist\n" == err def test_main_prints_docs(self, capsys): """When --docs parameter is provided, main generates documentation.""" - myargs = ['mycmd', '--docs', 'all'] - with mock.patch('sys.argv', myargs): - assert 0 == main(), 'Expected 0 exit code' + myargs = ["mycmd", "--docs", "all"] + with mock.patch("sys.argv", myargs): + assert 0 == main(), "Expected 0 exit code" out, _err = capsys.readouterr() - assert '\nNTP\n---\n' in out - assert '\nRuncmd\n------\n' in out + assert "\nNTP\n---\n" in out + assert "\nRuncmd\n------\n" in out def test_main_validates_config_file(self, tmpdir, capsys): """When --config-file parameter is provided, main validates schema.""" - myyaml = tmpdir.join('my.yaml') - myargs = ['mycmd', '--config-file', myyaml.strpath] - myyaml.write(b'#cloud-config\nntp:') # shortest ntp schema - with mock.patch('sys.argv', myargs): - assert 0 == main(), 'Expected 0 exit code' + myyaml = tmpdir.join("my.yaml") + myargs = ["mycmd", "--config-file", myyaml.strpath] + myyaml.write(b"#cloud-config\nntp:") # shortest ntp schema + with mock.patch("sys.argv", myargs): + assert 0 == main(), "Expected 0 exit code" out, _err = capsys.readouterr() - assert 'Valid cloud-config: {0}\n'.format(myyaml) == out + assert "Valid cloud-config: {0}\n".format(myyaml) == out - @mock.patch('cloudinit.config.schema.read_cfg_paths') - @mock.patch('cloudinit.config.schema.os.getuid', return_value=0) + @mock.patch("cloudinit.config.schema.read_cfg_paths") + @mock.patch("cloudinit.config.schema.os.getuid", return_value=0) def test_main_validates_system_userdata( self, m_getuid, m_read_cfg_paths, capsys, paths ): """When --system is provided, main validates system userdata.""" m_read_cfg_paths.return_value = paths ud_file = paths.get_ipath_cur("userdata_raw") - write_file(ud_file, b'#cloud-config\nntp:') - myargs = ['mycmd', '--system'] - with mock.patch('sys.argv', myargs): - assert 0 == main(), 'Expected 0 exit code' + write_file(ud_file, b"#cloud-config\nntp:") + myargs = ["mycmd", "--system"] + with mock.patch("sys.argv", myargs): + assert 0 == main(), "Expected 0 exit code" out, _err = capsys.readouterr() - assert 'Valid cloud-config: system userdata\n' == out + assert "Valid cloud-config: system userdata\n" == out - @mock.patch('cloudinit.config.schema.os.getuid', return_value=1000) + @mock.patch("cloudinit.config.schema.os.getuid", return_value=1000) def test_main_system_userdata_requires_root(self, m_getuid, capsys, paths): """Non-root user can't use --system param""" - myargs = ['mycmd', '--system'] - with mock.patch('sys.argv', myargs): + myargs = ["mycmd", 
"--system"] + with mock.patch("sys.argv", myargs): with pytest.raises(SystemExit) as context_manager: main() assert 1 == context_manager.value.code _out, err = capsys.readouterr() expected = ( - 'Error:\nUnable to read system userdata as non-root user. ' - 'Try using sudo\n' + "Error:\nUnable to read system userdata as non-root user. " + "Try using sudo\n" ) assert expected == err def _get_meta_doc_examples(): - examples_dir = Path(cloud_init_project_dir('doc/examples')) + examples_dir = Path(cloud_init_project_dir("doc/examples")) assert examples_dir.is_dir() return ( @@ -712,7 +759,7 @@ class TestStrictMetaschema: } with pytest.raises( SchemaValidationError, - match=(r"Additional properties are not allowed.*") + match=r"Additional properties are not allowed.*", ): validate_cloudconfig_metaschema(validator, schema) diff --git a/tests/unittests/distros/__init__.py b/tests/unittests/distros/__init__.py index 5394aa56..e66b9446 100644 --- a/tests/unittests/distros/__init__.py +++ b/tests/unittests/distros/__init__.py @@ -1,9 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. import copy -from cloudinit import distros -from cloudinit import helpers -from cloudinit import settings +from cloudinit import distros, helpers, settings def _get_distro(dtype, system_info=None): @@ -14,8 +12,8 @@ def _get_distro(dtype, system_info=None): example: _get_distro("debian") """ if system_info is None: - system_info = copy.deepcopy(settings.CFG_BUILTIN['system_info']) - system_info['distro'] = dtype - paths = helpers.Paths(system_info['paths']) + system_info = copy.deepcopy(settings.CFG_BUILTIN["system_info"]) + system_info["distro"] = dtype + paths = helpers.Paths(system_info["paths"]) distro_cls = distros.fetch(dtype) return distro_cls(dtype, system_info, paths) diff --git a/tests/unittests/distros/test_arch.py b/tests/unittests/distros/test_arch.py index 590ba00e..5446295e 100644 --- a/tests/unittests/distros/test_arch.py +++ b/tests/unittests/distros/test_arch.py @@ -1,15 +1,13 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit.distros.arch import _render_network from cloudinit import util - -from tests.unittests.helpers import (CiTestCase, dir2dict) +from cloudinit.distros.arch import _render_network +from tests.unittests.helpers import CiTestCase, dir2dict from . import _get_distro class TestArch(CiTestCase): - def test_get_distro(self): distro = _get_distro("arch") hostname = "myhostname" @@ -23,23 +21,35 @@ class TestRenderNetwork(CiTestCase): """Just the most basic static config. 
note 'lo' should not be rendered as an interface.""" - entries = {'eth0': {'auto': True, - 'dns-nameservers': ['8.8.8.8'], - 'bootproto': 'static', - 'address': '10.0.0.2', - 'gateway': '10.0.0.1', - 'netmask': '255.255.255.0'}, - 'lo': {'auto': True}} + entries = { + "eth0": { + "auto": True, + "dns-nameservers": ["8.8.8.8"], + "bootproto": "static", + "address": "10.0.0.2", + "gateway": "10.0.0.1", + "netmask": "255.255.255.0", + }, + "lo": {"auto": True}, + } target = self.tmp_dir() devs = _render_network(entries, target=target) files = dir2dict(target, prefix=target) - self.assertEqual(['eth0'], devs) + self.assertEqual(["eth0"], devs) self.assertEqual( - {'/etc/netctl/eth0': '\n'.join([ - "Address=10.0.0.2/255.255.255.0", - "Connection=ethernet", - "DNS=('8.8.8.8')", - "Gateway=10.0.0.1", - "IP=static", - "Interface=eth0", ""]), - '/etc/resolv.conf': 'nameserver 8.8.8.8\n'}, files) + { + "/etc/netctl/eth0": "\n".join( + [ + "Address=10.0.0.2/255.255.255.0", + "Connection=ethernet", + "DNS=('8.8.8.8')", + "Gateway=10.0.0.1", + "IP=static", + "Interface=eth0", + "", + ] + ), + "/etc/resolv.conf": "nameserver 8.8.8.8\n", + }, + files, + ) diff --git a/tests/unittests/distros/test_bsd_utils.py b/tests/unittests/distros/test_bsd_utils.py index 55686dc9..d6f0aeed 100644 --- a/tests/unittests/distros/test_bsd_utils.py +++ b/tests/unittests/distros/test_bsd_utils.py @@ -1,8 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. import cloudinit.distros.bsd_utils as bsd_utils - -from tests.unittests.helpers import (CiTestCase, ExitStack, mock) +from tests.unittests.helpers import CiTestCase, ExitStack, mock RC_FILE = """ if something; then @@ -13,55 +12,55 @@ hostname={hostname} class TestBsdUtils(CiTestCase): - def setUp(self): super().setUp() patches = ExitStack() self.addCleanup(patches.close) self.load_file = patches.enter_context( - mock.patch.object(bsd_utils.util, 'load_file')) + mock.patch.object(bsd_utils.util, "load_file") + ) self.write_file = patches.enter_context( - mock.patch.object(bsd_utils.util, 'write_file')) + mock.patch.object(bsd_utils.util, "write_file") + ) def test_get_rc_config_value(self): - self.load_file.return_value = 'hostname=foo\n' - self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo') - self.load_file.assert_called_with('/etc/rc.conf') + self.load_file.return_value = "hostname=foo\n" + self.assertEqual(bsd_utils.get_rc_config_value("hostname"), "foo") + self.load_file.assert_called_with("/etc/rc.conf") - self.load_file.return_value = 'hostname=foo' - self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo') + self.load_file.return_value = "hostname=foo" + self.assertEqual(bsd_utils.get_rc_config_value("hostname"), "foo") self.load_file.return_value = 'hostname="foo"' - self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo') + self.assertEqual(bsd_utils.get_rc_config_value("hostname"), "foo") self.load_file.return_value = "hostname='foo'" - self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo') + self.assertEqual(bsd_utils.get_rc_config_value("hostname"), "foo") - self.load_file.return_value = 'hostname=\'foo"' - self.assertEqual(bsd_utils.get_rc_config_value('hostname'), "'foo\"") + self.load_file.return_value = "hostname='foo\"" + self.assertEqual(bsd_utils.get_rc_config_value("hostname"), "'foo\"") - self.load_file.return_value = '' - self.assertEqual(bsd_utils.get_rc_config_value('hostname'), None) + self.load_file.return_value = "" + 
self.assertEqual(bsd_utils.get_rc_config_value("hostname"), None) - self.load_file.return_value = RC_FILE.format(hostname='foo') - self.assertEqual(bsd_utils.get_rc_config_value('hostname'), "foo") + self.load_file.return_value = RC_FILE.format(hostname="foo") + self.assertEqual(bsd_utils.get_rc_config_value("hostname"), "foo") def test_set_rc_config_value_unchanged(self): # bsd_utils.set_rc_config_value('hostname', 'foo') # self.write_file.assert_called_with('/etc/rc.conf', 'hostname=foo\n') - self.load_file.return_value = RC_FILE.format(hostname='foo') + self.load_file.return_value = RC_FILE.format(hostname="foo") self.write_file.assert_not_called() def test_set_rc_config_value(self): - bsd_utils.set_rc_config_value('hostname', 'foo') - self.write_file.assert_called_with('/etc/rc.conf', 'hostname=foo\n') + bsd_utils.set_rc_config_value("hostname", "foo") + self.write_file.assert_called_with("/etc/rc.conf", "hostname=foo\n") - self.load_file.return_value = RC_FILE.format(hostname='foo') - bsd_utils.set_rc_config_value('hostname', 'bar') + self.load_file.return_value = RC_FILE.format(hostname="foo") + bsd_utils.set_rc_config_value("hostname", "bar") self.write_file.assert_called_with( - '/etc/rc.conf', - RC_FILE.format(hostname='bar') + "/etc/rc.conf", RC_FILE.format(hostname="bar") ) diff --git a/tests/unittests/distros/test_create_users.py b/tests/unittests/distros/test_create_users.py index 5baa8a4b..ddb039bd 100644 --- a/tests/unittests/distros/test_create_users.py +++ b/tests/unittests/distros/test_create_users.py @@ -2,9 +2,8 @@ import re -from cloudinit import distros -from cloudinit import ssh_util -from tests.unittests.helpers import (CiTestCase, mock) +from cloudinit import distros, ssh_util +from tests.unittests.helpers import CiTestCase, mock from tests.unittests.util import abstract_to_concrete @@ -17,220 +16,267 @@ class TestCreateUser(CiTestCase): def setUp(self): super(TestCreateUser, self).setUp() self.dist = abstract_to_concrete(distros.Distro)( - name='test', cfg=None, paths=None + name="test", cfg=None, paths=None ) def _useradd2call(self, args): # return a mock call for the useradd command in args # with expected 'logstring'. 
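The _useradd2call helper being reformatted here encodes a small but important rule: the logged form of the useradd command must never contain the plaintext password. A standalone sketch of that redaction logic (the function name is mine, for illustration):

def redact_password(args):
    # The logged copy of the command must not leak the secret: the value
    # following a '--password' flag is replaced with 'REDACTED', while the
    # argument list actually executed is left untouched.
    logcmd = list(args)
    for i, arg in enumerate(args):
        if arg == "--password" and i + 1 < len(logcmd):
            logcmd[i + 1] = "REDACTED"
    return logcmd

assert redact_password(
    ["useradd", "foouser", "--password", "passfoo", "-m"]
) == ["useradd", "foouser", "--password", "REDACTED", "-m"]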
- args = ['useradd'] + args + args = ["useradd"] + args logcmd = [a for a in args] for i in range(len(args)): - if args[i] in ('--password',): - logcmd[i + 1] = 'REDACTED' + if args[i] in ("--password",): + logcmd[i + 1] = "REDACTED" return mock.call(args, logstring=logcmd) def test_basic(self, m_subp, m_is_snappy): - user = 'foouser' + user = "foouser" self.dist.create_user(user) self.assertEqual( m_subp.call_args_list, - [self._useradd2call([user, '-m']), - mock.call(['passwd', '-l', user])]) + [ + self._useradd2call([user, "-m"]), + mock.call(["passwd", "-l", user]), + ], + ) def test_no_home(self, m_subp, m_is_snappy): - user = 'foouser' + user = "foouser" self.dist.create_user(user, no_create_home=True) self.assertEqual( m_subp.call_args_list, - [self._useradd2call([user, '-M']), - mock.call(['passwd', '-l', user])]) + [ + self._useradd2call([user, "-M"]), + mock.call(["passwd", "-l", user]), + ], + ) def test_system_user(self, m_subp, m_is_snappy): # system user should have no home and get --system - user = 'foouser' + user = "foouser" self.dist.create_user(user, system=True) self.assertEqual( m_subp.call_args_list, - [self._useradd2call([user, '--system', '-M']), - mock.call(['passwd', '-l', user])]) + [ + self._useradd2call([user, "--system", "-M"]), + mock.call(["passwd", "-l", user]), + ], + ) def test_explicit_no_home_false(self, m_subp, m_is_snappy): - user = 'foouser' + user = "foouser" self.dist.create_user(user, no_create_home=False) self.assertEqual( m_subp.call_args_list, - [self._useradd2call([user, '-m']), - mock.call(['passwd', '-l', user])]) + [ + self._useradd2call([user, "-m"]), + mock.call(["passwd", "-l", user]), + ], + ) def test_unlocked(self, m_subp, m_is_snappy): - user = 'foouser' + user = "foouser" self.dist.create_user(user, lock_passwd=False) self.assertEqual( - m_subp.call_args_list, - [self._useradd2call([user, '-m'])]) + m_subp.call_args_list, [self._useradd2call([user, "-m"])] + ) def test_set_password(self, m_subp, m_is_snappy): - user = 'foouser' - password = 'passfoo' + user = "foouser" + password = "passfoo" self.dist.create_user(user, passwd=password) self.assertEqual( m_subp.call_args_list, - [self._useradd2call([user, '--password', password, '-m']), - mock.call(['passwd', '-l', user])]) + [ + self._useradd2call([user, "--password", password, "-m"]), + mock.call(["passwd", "-l", user]), + ], + ) @mock.patch("cloudinit.distros.util.is_group") def test_group_added(self, m_is_group, m_subp, m_is_snappy): m_is_group.return_value = False - user = 'foouser' - self.dist.create_user(user, groups=['group1']) + user = "foouser" + self.dist.create_user(user, groups=["group1"]) expected = [ - mock.call(['groupadd', 'group1']), - self._useradd2call([user, '--groups', 'group1', '-m']), - mock.call(['passwd', '-l', user])] + mock.call(["groupadd", "group1"]), + self._useradd2call([user, "--groups", "group1", "-m"]), + mock.call(["passwd", "-l", user]), + ] self.assertEqual(m_subp.call_args_list, expected) @mock.patch("cloudinit.distros.util.is_group") def test_only_new_group_added(self, m_is_group, m_subp, m_is_snappy): - ex_groups = ['existing_group'] - groups = ['group1', ex_groups[0]] + ex_groups = ["existing_group"] + groups = ["group1", ex_groups[0]] m_is_group.side_effect = lambda m: m in ex_groups - user = 'foouser' + user = "foouser" self.dist.create_user(user, groups=groups) expected = [ - mock.call(['groupadd', 'group1']), - self._useradd2call([user, '--groups', ','.join(groups), '-m']), - mock.call(['passwd', '-l', user])] + mock.call(["groupadd", 
"group1"]), + self._useradd2call([user, "--groups", ",".join(groups), "-m"]), + mock.call(["passwd", "-l", user]), + ] self.assertEqual(m_subp.call_args_list, expected) @mock.patch("cloudinit.distros.util.is_group") def test_create_groups_with_whitespace_string( - self, m_is_group, m_subp, m_is_snappy): + self, m_is_group, m_subp, m_is_snappy + ): # groups supported as a comma delimeted string even with white space m_is_group.return_value = False - user = 'foouser' - self.dist.create_user(user, groups='group1, group2') + user = "foouser" + self.dist.create_user(user, groups="group1, group2") expected = [ - mock.call(['groupadd', 'group1']), - mock.call(['groupadd', 'group2']), - self._useradd2call([user, '--groups', 'group1,group2', '-m']), - mock.call(['passwd', '-l', user])] + mock.call(["groupadd", "group1"]), + mock.call(["groupadd", "group2"]), + self._useradd2call([user, "--groups", "group1,group2", "-m"]), + mock.call(["passwd", "-l", user]), + ] self.assertEqual(m_subp.call_args_list, expected) def test_explicit_sudo_false(self, m_subp, m_is_snappy): - user = 'foouser' + user = "foouser" self.dist.create_user(user, sudo=False) self.assertEqual( m_subp.call_args_list, - [self._useradd2call([user, '-m']), - mock.call(['passwd', '-l', user])]) + [ + self._useradd2call([user, "-m"]), + mock.call(["passwd", "-l", user]), + ], + ) - @mock.patch('cloudinit.ssh_util.setup_user_keys') + @mock.patch("cloudinit.ssh_util.setup_user_keys") def test_setup_ssh_authorized_keys_with_string( - self, m_setup_user_keys, m_subp, m_is_snappy): + self, m_setup_user_keys, m_subp, m_is_snappy + ): """ssh_authorized_keys allows string and calls setup_user_keys.""" - user = 'foouser' - self.dist.create_user(user, ssh_authorized_keys='mykey') + user = "foouser" + self.dist.create_user(user, ssh_authorized_keys="mykey") self.assertEqual( m_subp.call_args_list, - [self._useradd2call([user, '-m']), - mock.call(['passwd', '-l', user])]) - m_setup_user_keys.assert_called_once_with(set(['mykey']), user) + [ + self._useradd2call([user, "-m"]), + mock.call(["passwd", "-l", user]), + ], + ) + m_setup_user_keys.assert_called_once_with(set(["mykey"]), user) - @mock.patch('cloudinit.ssh_util.setup_user_keys') + @mock.patch("cloudinit.ssh_util.setup_user_keys") def test_setup_ssh_authorized_keys_with_list( - self, m_setup_user_keys, m_subp, m_is_snappy): + self, m_setup_user_keys, m_subp, m_is_snappy + ): """ssh_authorized_keys allows lists and calls setup_user_keys.""" - user = 'foouser' - self.dist.create_user(user, ssh_authorized_keys=['key1', 'key2']) + user = "foouser" + self.dist.create_user(user, ssh_authorized_keys=["key1", "key2"]) self.assertEqual( m_subp.call_args_list, - [self._useradd2call([user, '-m']), - mock.call(['passwd', '-l', user])]) - m_setup_user_keys.assert_called_once_with(set(['key1', 'key2']), user) + [ + self._useradd2call([user, "-m"]), + mock.call(["passwd", "-l", user]), + ], + ) + m_setup_user_keys.assert_called_once_with(set(["key1", "key2"]), user) - @mock.patch('cloudinit.ssh_util.setup_user_keys') + @mock.patch("cloudinit.ssh_util.setup_user_keys") def test_setup_ssh_authorized_keys_with_integer( - self, m_setup_user_keys, m_subp, m_is_snappy): + self, m_setup_user_keys, m_subp, m_is_snappy + ): """ssh_authorized_keys warns on non-iterable/string type.""" - user = 'foouser' + user = "foouser" self.dist.create_user(user, ssh_authorized_keys=-1) m_setup_user_keys.assert_called_once_with(set([]), user) match = re.match( - r'.*WARNING: Invalid type \'<(type|class) \'int\'>\' detected for' - 
' \'ssh_authorized_keys\'.*', + r".*WARNING: Invalid type \'<(type|class) \'int\'>\' detected for" + " 'ssh_authorized_keys'.*", self.logs.getvalue(), - re.DOTALL) + re.DOTALL, + ) self.assertIsNotNone( - match, 'Missing ssh_authorized_keys invalid type warning') + match, "Missing ssh_authorized_keys invalid type warning" + ) - @mock.patch('cloudinit.ssh_util.setup_user_keys') + @mock.patch("cloudinit.ssh_util.setup_user_keys") def test_create_user_with_ssh_redirect_user_no_cloud_keys( - self, m_setup_user_keys, m_subp, m_is_snappy): + self, m_setup_user_keys, m_subp, m_is_snappy + ): """Log a warning when trying to redirect a user no cloud ssh keys.""" - user = 'foouser' - self.dist.create_user(user, ssh_redirect_user='someuser') + user = "foouser" + self.dist.create_user(user, ssh_redirect_user="someuser") self.assertIn( - 'WARNING: Unable to disable SSH logins for foouser given ' - 'ssh_redirect_user: someuser. No cloud public-keys present.\n', - self.logs.getvalue()) + "WARNING: Unable to disable SSH logins for foouser given " + "ssh_redirect_user: someuser. No cloud public-keys present.\n", + self.logs.getvalue(), + ) m_setup_user_keys.assert_not_called() - @mock.patch('cloudinit.ssh_util.setup_user_keys') + @mock.patch("cloudinit.ssh_util.setup_user_keys") def test_create_user_with_ssh_redirect_user_with_cloud_keys( - self, m_setup_user_keys, m_subp, m_is_snappy): + self, m_setup_user_keys, m_subp, m_is_snappy + ): """Disable ssh when ssh_redirect_user and cloud ssh keys are set.""" - user = 'foouser' + user = "foouser" self.dist.create_user( - user, ssh_redirect_user='someuser', cloud_public_ssh_keys=['key1']) + user, ssh_redirect_user="someuser", cloud_public_ssh_keys=["key1"] + ) disable_prefix = ssh_util.DISABLE_USER_OPTS - disable_prefix = disable_prefix.replace('$USER', 'someuser') - disable_prefix = disable_prefix.replace('$DISABLE_USER', user) + disable_prefix = disable_prefix.replace("$USER", "someuser") + disable_prefix = disable_prefix.replace("$DISABLE_USER", user) m_setup_user_keys.assert_called_once_with( - set(['key1']), 'foouser', options=disable_prefix) + set(["key1"]), "foouser", options=disable_prefix + ) - @mock.patch('cloudinit.ssh_util.setup_user_keys') + @mock.patch("cloudinit.ssh_util.setup_user_keys") def test_create_user_with_ssh_redirect_user_does_not_disable_auth_keys( - self, m_setup_user_keys, m_subp, m_is_snappy): + self, m_setup_user_keys, m_subp, m_is_snappy + ): """Do not disable ssh_authorized_keys when ssh_redirect_user is set.""" - user = 'foouser' + user = "foouser" self.dist.create_user( - user, ssh_authorized_keys='auth1', ssh_redirect_user='someuser', - cloud_public_ssh_keys=['key1']) + user, + ssh_authorized_keys="auth1", + ssh_redirect_user="someuser", + cloud_public_ssh_keys=["key1"], + ) disable_prefix = ssh_util.DISABLE_USER_OPTS - disable_prefix = disable_prefix.replace('$USER', 'someuser') - disable_prefix = disable_prefix.replace('$DISABLE_USER', user) + disable_prefix = disable_prefix.replace("$USER", "someuser") + disable_prefix = disable_prefix.replace("$DISABLE_USER", user) self.assertEqual( m_setup_user_keys.call_args_list, - [mock.call(set(['auth1']), user), # not disabled - mock.call(set(['key1']), 'foouser', options=disable_prefix)]) + [ + mock.call(set(["auth1"]), user), # not disabled + mock.call(set(["key1"]), "foouser", options=disable_prefix), + ], + ) @mock.patch("cloudinit.distros.subp.which") - def test_lock_with_usermod_if_no_passwd(self, m_which, m_subp, - m_is_snappy): + def test_lock_with_usermod_if_no_passwd( + 
self, m_which, m_subp, m_is_snappy + ): """Lock uses usermod --lock if no 'passwd' cmd available.""" - m_which.side_effect = lambda m: m in ('usermod',) + m_which.side_effect = lambda m: m in ("usermod",) self.dist.lock_passwd("bob") self.assertEqual( - [mock.call(['usermod', '--lock', 'bob'])], - m_subp.call_args_list) + [mock.call(["usermod", "--lock", "bob"])], m_subp.call_args_list + ) @mock.patch("cloudinit.distros.subp.which") - def test_lock_with_passwd_if_available(self, m_which, m_subp, - m_is_snappy): + def test_lock_with_passwd_if_available(self, m_which, m_subp, m_is_snappy): """Lock with only passwd will use passwd.""" - m_which.side_effect = lambda m: m in ('passwd',) + m_which.side_effect = lambda m: m in ("passwd",) self.dist.lock_passwd("bob") self.assertEqual( - [mock.call(['passwd', '-l', 'bob'])], - m_subp.call_args_list) + [mock.call(["passwd", "-l", "bob"])], m_subp.call_args_list + ) @mock.patch("cloudinit.distros.subp.which") - def test_lock_raises_runtime_if_no_commands(self, m_which, m_subp, - m_is_snappy): + def test_lock_raises_runtime_if_no_commands( + self, m_which, m_subp, m_is_snappy + ): """Lock with no commands available raises RuntimeError.""" m_which.return_value = None with self.assertRaises(RuntimeError): self.dist.lock_passwd("bob") + # vi: ts=4 expandtab diff --git a/tests/unittests/distros/test_debian.py b/tests/unittests/distros/test_debian.py index 3d0db145..c7c5932e 100644 --- a/tests/unittests/distros/test_debian.py +++ b/tests/unittests/distros/test_debian.py @@ -4,92 +4,117 @@ from unittest import mock import pytest -from cloudinit import distros, util -from cloudinit.distros.debian import ( - APT_GET_COMMAND, - APT_GET_WRAPPER, -) +from cloudinit import distros, subp, util +from cloudinit.distros.debian import APT_GET_COMMAND, APT_GET_WRAPPER from tests.unittests.helpers import FilesystemMockingTestCase -from cloudinit import subp @mock.patch("cloudinit.distros.debian.subp.subp") class TestDebianApplyLocale(FilesystemMockingTestCase): - def setUp(self): super(TestDebianApplyLocale, self).setUp() self.new_root = self.tmp_dir() self.patchOS(self.new_root) self.patchUtils(self.new_root) - self.spath = self.tmp_path('etc/default/locale', self.new_root) + self.spath = self.tmp_path("etc/default/locale", self.new_root) cls = distros.fetch("debian") self.distro = cls("debian", {}, None) def test_no_rerun(self, m_subp): """If system has defined locale, no re-run is expected.""" m_subp.return_value = (None, None) - locale = 'en_US.UTF-8' - util.write_file(self.spath, 'LANG=%s\n' % locale, omode="w") + locale = "en_US.UTF-8" + util.write_file(self.spath, "LANG=%s\n" % locale, omode="w") self.distro.apply_locale(locale, out_fn=self.spath) m_subp.assert_not_called() def test_no_regen_on_c_utf8(self, m_subp): """If locale is set to C.UTF8, do not attempt to call locale-gen""" m_subp.return_value = (None, None) - locale = 'C.UTF-8' - util.write_file(self.spath, 'LANG=%s\n' % 'en_US.UTF-8', omode="w") + locale = "C.UTF-8" + util.write_file(self.spath, "LANG=%s\n" % "en_US.UTF-8", omode="w") self.distro.apply_locale(locale, out_fn=self.spath) self.assertEqual( - [['update-locale', '--locale-file=' + self.spath, - 'LANG=%s' % locale]], - [p[0][0] for p in m_subp.call_args_list]) + [ + [ + "update-locale", + "--locale-file=" + self.spath, + "LANG=%s" % locale, + ] + ], + [p[0][0] for p in m_subp.call_args_list], + ) def test_rerun_if_different(self, m_subp): """If system has different locale, locale-gen should be called.""" m_subp.return_value = (None, None) - 
locale = 'en_US.UTF-8' - util.write_file(self.spath, 'LANG=fr_FR.UTF-8', omode="w") + locale = "en_US.UTF-8" + util.write_file(self.spath, "LANG=fr_FR.UTF-8", omode="w") self.distro.apply_locale(locale, out_fn=self.spath) self.assertEqual( - [['locale-gen', locale], - ['update-locale', '--locale-file=' + self.spath, - 'LANG=%s' % locale]], - [p[0][0] for p in m_subp.call_args_list]) + [ + ["locale-gen", locale], + [ + "update-locale", + "--locale-file=" + self.spath, + "LANG=%s" % locale, + ], + ], + [p[0][0] for p in m_subp.call_args_list], + ) def test_rerun_if_no_file(self, m_subp): """If system has no locale file, locale-gen should be called.""" m_subp.return_value = (None, None) - locale = 'en_US.UTF-8' + locale = "en_US.UTF-8" self.distro.apply_locale(locale, out_fn=self.spath) self.assertEqual( - [['locale-gen', locale], - ['update-locale', '--locale-file=' + self.spath, - 'LANG=%s' % locale]], - [p[0][0] for p in m_subp.call_args_list]) + [ + ["locale-gen", locale], + [ + "update-locale", + "--locale-file=" + self.spath, + "LANG=%s" % locale, + ], + ], + [p[0][0] for p in m_subp.call_args_list], + ) def test_rerun_on_unset_system_locale(self, m_subp): """If system has unset locale, locale-gen should be called.""" m_subp.return_value = (None, None) - locale = 'en_US.UTF-8' - util.write_file(self.spath, 'LANG=', omode="w") + locale = "en_US.UTF-8" + util.write_file(self.spath, "LANG=", omode="w") self.distro.apply_locale(locale, out_fn=self.spath) self.assertEqual( - [['locale-gen', locale], - ['update-locale', '--locale-file=' + self.spath, - 'LANG=%s' % locale]], - [p[0][0] for p in m_subp.call_args_list]) + [ + ["locale-gen", locale], + [ + "update-locale", + "--locale-file=" + self.spath, + "LANG=%s" % locale, + ], + ], + [p[0][0] for p in m_subp.call_args_list], + ) def test_rerun_on_mismatched_keys(self, m_subp): """If key is LC_ALL and system has only LANG, rerun is expected.""" m_subp.return_value = (None, None) - locale = 'en_US.UTF-8' - util.write_file(self.spath, 'LANG=', omode="w") - self.distro.apply_locale(locale, out_fn=self.spath, keyname='LC_ALL') + locale = "en_US.UTF-8" + util.write_file(self.spath, "LANG=", omode="w") + self.distro.apply_locale(locale, out_fn=self.spath, keyname="LC_ALL") self.assertEqual( - [['locale-gen', locale], - ['update-locale', '--locale-file=' + self.spath, - 'LC_ALL=%s' % locale]], - [p[0][0] for p in m_subp.call_args_list]) + [ + ["locale-gen", locale], + [ + "update-locale", + "--locale-file=" + self.spath, + "LC_ALL=%s" % locale, + ], + ], + [p[0][0] for p in m_subp.call_args_list], + ) def test_falseish_locale_raises_valueerror(self, m_subp): """locale as None or "" is invalid and should raise ValueError.""" @@ -99,45 +124,53 @@ class TestDebianApplyLocale(FilesystemMockingTestCase): m_subp.assert_not_called() self.assertEqual( - 'Failed to provide locale value.', str(ctext_m.exception)) + "Failed to provide locale value.", str(ctext_m.exception) + ) with self.assertRaises(ValueError) as ctext_m: self.distro.apply_locale("") m_subp.assert_not_called() self.assertEqual( - 'Failed to provide locale value.', str(ctext_m.exception)) + "Failed to provide locale value.", str(ctext_m.exception) + ) -@mock.patch.dict('os.environ', {}, clear=True) +@mock.patch.dict("os.environ", {}, clear=True) @mock.patch("cloudinit.distros.debian.subp.which", return_value=True) @mock.patch("cloudinit.distros.debian.subp.subp") class TestPackageCommand: distro = distros.fetch("debian")("debian", {}, None) - 
@mock.patch("cloudinit.distros.debian.Distro._apt_lock_available", - return_value=True) + @mock.patch( + "cloudinit.distros.debian.Distro._apt_lock_available", + return_value=True, + ) def test_simple_command(self, m_apt_avail, m_subp, m_which): - self.distro.package_command('update') - apt_args = [APT_GET_WRAPPER['command']] + self.distro.package_command("update") + apt_args = [APT_GET_WRAPPER["command"]] apt_args.extend(APT_GET_COMMAND) - apt_args.append('update') + apt_args.append("update") expected_call = { - 'args': apt_args, - 'capture': False, - 'env': {'DEBIAN_FRONTEND': 'noninteractive'}, + "args": apt_args, + "capture": False, + "env": {"DEBIAN_FRONTEND": "noninteractive"}, } assert m_subp.call_args == mock.call(**expected_call) - @mock.patch("cloudinit.distros.debian.Distro._apt_lock_available", - side_effect=[False, False, True]) + @mock.patch( + "cloudinit.distros.debian.Distro._apt_lock_available", + side_effect=[False, False, True], + ) @mock.patch("cloudinit.distros.debian.time.sleep") def test_wait_for_lock(self, m_sleep, m_apt_avail, m_subp, m_which): self.distro._wait_for_apt_command("stub", {"args": "stub2"}) assert m_sleep.call_args_list == [mock.call(1), mock.call(1)] - assert m_subp.call_args_list == [mock.call(args='stub2')] + assert m_subp.call_args_list == [mock.call(args="stub2")] - @mock.patch("cloudinit.distros.debian.Distro._apt_lock_available", - return_value=False) + @mock.patch( + "cloudinit.distros.debian.Distro._apt_lock_available", + return_value=False, + ) @mock.patch("cloudinit.distros.debian.time.sleep") @mock.patch("cloudinit.distros.debian.time.time", side_effect=count()) def test_lock_wait_timeout( @@ -147,8 +180,10 @@ class TestPackageCommand: self.distro._wait_for_apt_command("stub", "stub2", timeout=5) assert m_subp.call_args_list == [] - @mock.patch("cloudinit.distros.debian.Distro._apt_lock_available", - side_effect=cycle([True, False])) + @mock.patch( + "cloudinit.distros.debian.Distro._apt_lock_available", + side_effect=cycle([True, False]), + ) @mock.patch("cloudinit.distros.debian.time.sleep") def test_lock_exception_wait(self, m_sleep, m_apt_avail, m_subp, m_which): exception = subp.ProcessExecutionError( @@ -158,8 +193,10 @@ class TestPackageCommand: ret = self.distro._wait_for_apt_command("stub", {"args": "stub2"}) assert ret == "return_thing" - @mock.patch("cloudinit.distros.debian.Distro._apt_lock_available", - side_effect=cycle([True, False])) + @mock.patch( + "cloudinit.distros.debian.Distro._apt_lock_available", + side_effect=cycle([True, False]), + ) @mock.patch("cloudinit.distros.debian.time.sleep") @mock.patch("cloudinit.distros.debian.time.time", side_effect=count()) def test_lock_exception_timeout( diff --git a/tests/unittests/distros/test_freebsd.py b/tests/unittests/distros/test_freebsd.py index 0279e86f..22be5098 100644 --- a/tests/unittests/distros/test_freebsd.py +++ b/tests/unittests/distros/test_freebsd.py @@ -1,45 +1,43 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-from cloudinit.util import (find_freebsd_part, get_path_dev_freebsd) -from tests.unittests.helpers import (CiTestCase, mock) - import os +from cloudinit.util import find_freebsd_part, get_path_dev_freebsd +from tests.unittests.helpers import CiTestCase, mock -class TestDeviceLookUp(CiTestCase): - @mock.patch('cloudinit.subp.subp') +class TestDeviceLookUp(CiTestCase): + @mock.patch("cloudinit.subp.subp") def test_find_freebsd_part_label(self, mock_subp): - glabel_out = ''' + glabel_out = """ gptid/fa52d426-c337-11e6-8911-00155d4c5e47 N/A da0p1 label/rootfs N/A da0p2 label/swap N/A da0p3 -''' +""" mock_subp.return_value = (glabel_out, "") res = find_freebsd_part("/dev/label/rootfs") self.assertEqual("da0p2", res) - @mock.patch('cloudinit.subp.subp') + @mock.patch("cloudinit.subp.subp") def test_find_freebsd_part_gpt(self, mock_subp): - glabel_out = ''' + glabel_out = """ gpt/bootfs N/A vtbd0p1 gptid/3f4cbe26-75da-11e8-a8f2-002590ec6166 N/A vtbd0p1 gpt/swapfs N/A vtbd0p2 gpt/rootfs N/A vtbd0p3 iso9660/cidata N/A vtbd2 -''' +""" mock_subp.return_value = (glabel_out, "") res = find_freebsd_part("/dev/gpt/rootfs") self.assertEqual("vtbd0p3", res) def test_get_path_dev_freebsd_label(self): - mnt_list = ''' + mnt_list = """ /dev/label/rootfs / ufs rw 1 1 devfs /dev devfs rw,multilabel 0 0 fdescfs /dev/fd fdescfs rw 0 0 /dev/da1s1 /mnt/resource ufs rw 2 2 -''' - with mock.patch.object(os.path, 'exists', - return_value=True): - res = get_path_dev_freebsd('/etc', mnt_list) +""" + with mock.patch.object(os.path, "exists", return_value=True): + res = get_path_dev_freebsd("/etc", mnt_list) self.assertIsNotNone(res) diff --git a/tests/unittests/distros/test_generic.py b/tests/unittests/distros/test_generic.py index e542c26f..93c5395c 100644 --- a/tests/unittests/distros/test_generic.py +++ b/tests/unittests/distros/test_generic.py @@ -1,35 +1,49 @@ # This file is part of cloud-init. See LICENSE file for license information. 
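The FreeBSD tests just above feed canned `glabel status` output to find_freebsd_part: label in the first column, backing device in the third. A rough self-contained sketch of that lookup (the real logic lives in cloudinit.util.find_freebsd_part; this stand-in only illustrates the parsing the tests pin down):

GLABEL_OUT = """
gpt/bootfs   N/A  vtbd0p1
gpt/swapfs   N/A  vtbd0p2
gpt/rootfs   N/A  vtbd0p3
"""


def find_part(label_path, glabel_out):
    # "/dev/gpt/rootfs" -> match the "gpt/rootfs" row, return its device column.
    wanted = label_path.replace("/dev/", "", 1)
    for line in glabel_out.splitlines():
        fields = line.split()
        if len(fields) == 3 and fields[0] == wanted:
            return fields[2]
    raise ValueError("label not found: %s" % label_path)


assert find_part("/dev/gpt/rootfs", GLABEL_OUT) == "vtbd0p3"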
-from cloudinit import distros -from cloudinit import util - -from tests.unittests import helpers - import os -import pytest import shutil import tempfile from unittest import mock +import pytest + +from cloudinit import distros, util +from tests.unittests import helpers + unknown_arch_info = { - 'arches': ['default'], - 'failsafe': {'primary': 'http://fs-primary-default', - 'security': 'http://fs-security-default'} + "arches": ["default"], + "failsafe": { + "primary": "http://fs-primary-default", + "security": "http://fs-security-default", + }, } package_mirrors = [ - {'arches': ['i386', 'amd64'], - 'failsafe': {'primary': 'http://fs-primary-intel', - 'security': 'http://fs-security-intel'}, - 'search': { - 'primary': ['http://%(ec2_region)s.ec2/', - 'http://%(availability_zone)s.clouds/'], - 'security': ['http://security-mirror1-intel', - 'http://security-mirror2-intel']}}, - {'arches': ['armhf', 'armel'], - 'failsafe': {'primary': 'http://fs-primary-arm', - 'security': 'http://fs-security-arm'}}, - unknown_arch_info + { + "arches": ["i386", "amd64"], + "failsafe": { + "primary": "http://fs-primary-intel", + "security": "http://fs-security-intel", + }, + "search": { + "primary": [ + "http://%(ec2_region)s.ec2/", + "http://%(availability_zone)s.clouds/", + ], + "security": [ + "http://security-mirror1-intel", + "http://security-mirror2-intel", + ], + }, + }, + { + "arches": ["armhf", "armel"], + "failsafe": { + "primary": "http://fs-primary-arm", + "security": "http://fs-security-arm", + }, + }, + unknown_arch_info, ] gpmi = distros._get_package_mirror_info @@ -37,7 +51,6 @@ gapmi = distros._get_arch_package_mirror_info class TestGenericDistro(helpers.FilesystemMockingTestCase): - def setUp(self): super(TestGenericDistro, self).setUp() # Make a temp directoy for tests to use. 
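The `search` entries in `package_mirrors` above are ordinary %-style templates keyed on data-source attributes such as ec2_region and availability_zone. A two-assertion illustration of the expansion the mirror tests below rely on (substitution values here are invented):

subst = {"ec2_region": "us-east-1", "availability_zone": "us-east-1a"}
patterns = [
    "http://%(ec2_region)s.ec2/",
    "http://%(availability_zone)s.clouds/",
]
expanded = [p % subst for p in patterns]
assert expanded == ["http://us-east-1.ec2/", "http://us-east-1a.clouds/"]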
@@ -48,7 +61,7 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase): cls = distros.fetch("ubuntu") d = cls("ubuntu", {}, None) os.makedirs(os.path.join(self.tmp, "etc")) - os.makedirs(os.path.join(self.tmp, "etc", 'sudoers.d')) + os.makedirs(os.path.join(self.tmp, "etc", "sudoers.d")) self.patchOS(self.tmp) self.patchUtils(self.tmp) d.write_sudo_rules("harlowja", rules) @@ -65,34 +78,34 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase): return found_amount def test_sudoers_ensure_rules(self): - rules = 'ALL=(ALL:ALL) ALL' - contents = self._write_load_sudoers('harlowja', rules) - expected = ['harlowja ALL=(ALL:ALL) ALL'] + rules = "ALL=(ALL:ALL) ALL" + contents = self._write_load_sudoers("harlowja", rules) + expected = ["harlowja ALL=(ALL:ALL) ALL"] self.assertEqual(len(expected), self._count_in(expected, contents)) not_expected = [ - 'harlowja A', - 'harlowja L', - 'harlowja L', + "harlowja A", + "harlowja L", + "harlowja L", ] self.assertEqual(0, self._count_in(not_expected, contents)) def test_sudoers_ensure_rules_list(self): rules = [ - 'ALL=(ALL:ALL) ALL', - 'B-ALL=(ALL:ALL) ALL', - 'C-ALL=(ALL:ALL) ALL', + "ALL=(ALL:ALL) ALL", + "B-ALL=(ALL:ALL) ALL", + "C-ALL=(ALL:ALL) ALL", ] - contents = self._write_load_sudoers('harlowja', rules) + contents = self._write_load_sudoers("harlowja", rules) expected = [ - 'harlowja ALL=(ALL:ALL) ALL', - 'harlowja B-ALL=(ALL:ALL) ALL', - 'harlowja C-ALL=(ALL:ALL) ALL', + "harlowja ALL=(ALL:ALL) ALL", + "harlowja B-ALL=(ALL:ALL) ALL", + "harlowja C-ALL=(ALL:ALL) ALL", ] self.assertEqual(len(expected), self._count_in(expected, contents)) not_expected = [ - 'harlowja A', - 'harlowja L', - 'harlowja L', + "harlowja A", + "harlowja L", + "harlowja L", ] self.assertEqual(0, self._count_in(not_expected, contents)) @@ -124,7 +137,7 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase): d = cls("ubuntu", {}, None) self.patchOS(self.tmp) self.patchUtils(self.tmp) - for char in ['#', '@']: + for char in ["#", "@"]: util.write_file("/etc/sudoers", "{}includedir /b".format(char)) d.ensure_sudo_dir("/b") contents = util.load_file("/etc/sudoers") @@ -146,7 +159,7 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase): d = cls("ubuntu", {}, None) self.patchOS(self.tmp) self.patchUtils(self.tmp) - os.makedirs('/run/systemd/system') + os.makedirs("/run/systemd/system") self.assertTrue(d.uses_systemd()) def test_systemd_not_in_use(self): @@ -161,18 +174,18 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase): d = cls("ubuntu", {}, None) self.patchOS(self.tmp) self.patchUtils(self.tmp) - os.makedirs('/run/systemd') - os.symlink('/', '/run/systemd/system') + os.makedirs("/run/systemd") + os.symlink("/", "/run/systemd/system") self.assertFalse(d.uses_systemd()) - @mock.patch('cloudinit.distros.debian.read_system_locale') + @mock.patch("cloudinit.distros.debian.read_system_locale") def test_get_locale_ubuntu(self, m_locale): """Test ubuntu distro returns locale set to C.UTF-8""" - m_locale.return_value = 'C.UTF-8' + m_locale.return_value = "C.UTF-8" cls = distros.fetch("ubuntu") d = cls("ubuntu", {}, None) locale = d.get_locale() - self.assertEqual('C.UTF-8', locale) + self.assertEqual("C.UTF-8", locale) def test_get_locale_rhel(self): """Test rhel distro returns NotImplementedError exception""" @@ -197,11 +210,11 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase): with mock.patch("cloudinit.subp.subp") as m_subp: d.expire_passwd("myuser") m_subp.assert_called_once_with( - ["pw", "usermod", "myuser", "-p", 
"01-Jan-1970"]) + ["pw", "usermod", "myuser", "-p", "01-Jan-1970"] + ) class TestGetPackageMirrors: - def return_first(self, mlist): if not mlist: return None @@ -219,77 +232,125 @@ class TestGetPackageMirrors: def return_last(self, mlist): if not mlist: return None - return(mlist[-1]) + return mlist[-1] @pytest.mark.parametrize( "allow_ec2_mirror, platform_type, mirrors", [ - (True, "ec2", [ - {'primary': 'http://us-east-1.ec2/', - 'security': 'http://security-mirror1-intel'}, - {'primary': 'http://us-east-1a.clouds/', - 'security': 'http://security-mirror2-intel'} - ]), - (True, "other", [ - {'primary': 'http://us-east-1.ec2/', - 'security': 'http://security-mirror1-intel'}, - {'primary': 'http://us-east-1a.clouds/', - 'security': 'http://security-mirror2-intel'} - ]), - (False, "ec2", [ - {'primary': 'http://us-east-1.ec2/', - 'security': 'http://security-mirror1-intel'}, - {'primary': 'http://us-east-1a.clouds/', - 'security': 'http://security-mirror2-intel'} - ]), - (False, "other", [ - {'primary': 'http://us-east-1a.clouds/', - 'security': 'http://security-mirror1-intel'}, - {'primary': 'http://fs-primary-intel', - 'security': 'http://security-mirror2-intel'} - ]) - ]) - def test_get_package_mirror_info_az_ec2(self, - allow_ec2_mirror, - platform_type, - mirrors): - flag_path = "cloudinit.distros." \ - "ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES" + ( + True, + "ec2", + [ + { + "primary": "http://us-east-1.ec2/", + "security": "http://security-mirror1-intel", + }, + { + "primary": "http://us-east-1a.clouds/", + "security": "http://security-mirror2-intel", + }, + ], + ), + ( + True, + "other", + [ + { + "primary": "http://us-east-1.ec2/", + "security": "http://security-mirror1-intel", + }, + { + "primary": "http://us-east-1a.clouds/", + "security": "http://security-mirror2-intel", + }, + ], + ), + ( + False, + "ec2", + [ + { + "primary": "http://us-east-1.ec2/", + "security": "http://security-mirror1-intel", + }, + { + "primary": "http://us-east-1a.clouds/", + "security": "http://security-mirror2-intel", + }, + ], + ), + ( + False, + "other", + [ + { + "primary": "http://us-east-1a.clouds/", + "security": "http://security-mirror1-intel", + }, + { + "primary": "http://fs-primary-intel", + "security": "http://security-mirror2-intel", + }, + ], + ), + ], + ) + def test_get_package_mirror_info_az_ec2( + self, allow_ec2_mirror, platform_type, mirrors + ): + flag_path = ( + "cloudinit.distros.ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES" + ) with mock.patch(flag_path, allow_ec2_mirror): arch_mirrors = gapmi(package_mirrors, arch="amd64") data_source_mock = mock.Mock( - availability_zone="us-east-1a", - platform_type=platform_type) - - results = gpmi(arch_mirrors, data_source=data_source_mock, - mirror_filter=self.return_first) - assert(results == mirrors[0]) - - results = gpmi(arch_mirrors, data_source=data_source_mock, - mirror_filter=self.return_second) - assert(results == mirrors[1]) - - results = gpmi(arch_mirrors, data_source=data_source_mock, - mirror_filter=self.return_none) - assert(results == package_mirrors[0]['failsafe']) + availability_zone="us-east-1a", platform_type=platform_type + ) + + results = gpmi( + arch_mirrors, + data_source=data_source_mock, + mirror_filter=self.return_first, + ) + assert results == mirrors[0] + + results = gpmi( + arch_mirrors, + data_source=data_source_mock, + mirror_filter=self.return_second, + ) + assert results == mirrors[1] + + results = gpmi( + arch_mirrors, + data_source=data_source_mock, + mirror_filter=self.return_none, + ) + assert results 
== package_mirrors[0]["failsafe"] def test_get_package_mirror_info_az_non_ec2(self): arch_mirrors = gapmi(package_mirrors, arch="amd64") data_source_mock = mock.Mock(availability_zone="nova.cloudvendor") - results = gpmi(arch_mirrors, data_source=data_source_mock, - mirror_filter=self.return_first) - assert(results == { - 'primary': 'http://nova.cloudvendor.clouds/', - 'security': 'http://security-mirror1-intel'} + results = gpmi( + arch_mirrors, + data_source=data_source_mock, + mirror_filter=self.return_first, ) - - results = gpmi(arch_mirrors, data_source=data_source_mock, - mirror_filter=self.return_last) - assert(results == { - 'primary': 'http://nova.cloudvendor.clouds/', - 'security': 'http://security-mirror2-intel'} + assert results == { + "primary": "http://nova.cloudvendor.clouds/", + "security": "http://security-mirror1-intel", + } + + results = gpmi( + arch_mirrors, + data_source=data_source_mock, + mirror_filter=self.return_last, ) + assert results == { + "primary": "http://nova.cloudvendor.clouds/", + "security": "http://security-mirror2-intel", + } def test_get_package_mirror_info_none(self): arch_mirrors = gapmi(package_mirrors, arch="amd64") @@ -298,18 +359,25 @@ class TestGetPackageMirrors: # because both search entries here replacement based on # availability-zone, the filter will be called with an empty list and # failsafe should be taken. - results = gpmi(arch_mirrors, data_source=data_source_mock, - mirror_filter=self.return_first) - assert(results == { - 'primary': 'http://fs-primary-intel', - 'security': 'http://security-mirror1-intel'} + results = gpmi( + arch_mirrors, + data_source=data_source_mock, + mirror_filter=self.return_first, ) - - results = gpmi(arch_mirrors, data_source=data_source_mock, - mirror_filter=self.return_last) - assert(results == { - 'primary': 'http://fs-primary-intel', - 'security': 'http://security-mirror2-intel'} + assert results == { + "primary": "http://fs-primary-intel", + "security": "http://security-mirror1-intel", + } + + results = gpmi( + arch_mirrors, + data_source=data_source_mock, + mirror_filter=self.return_last, ) + assert results == { + "primary": "http://fs-primary-intel", + "security": "http://security-mirror2-intel", + } + # vi: ts=4 expandtab diff --git a/tests/unittests/distros/test_gentoo.py b/tests/unittests/distros/test_gentoo.py index 4e4680b8..dadf5df5 100644 --- a/tests/unittests/distros/test_gentoo.py +++ b/tests/unittests/distros/test_gentoo.py @@ -1,13 +1,12 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit import util -from cloudinit import atomic_helper +from cloudinit import atomic_helper, util from tests.unittests.helpers import CiTestCase + from . 
import _get_distro class TestGentoo(CiTestCase): - def test_write_hostname(self): distro = _get_distro("gentoo") hostname = "myhostname" @@ -22,5 +21,7 @@ class TestGentoo(CiTestCase): hostfile = self.tmp_path("hostfile") atomic_helper.write_file(hostfile, contents, omode="w") distro._write_hostname(hostname, hostfile) - self.assertEqual('#This is the hostname\nhostname="myhostname"\n', - util.load_file(hostfile)) + self.assertEqual( + '#This is the hostname\nhostname="myhostname"\n', + util.load_file(hostfile), + ) diff --git a/tests/unittests/distros/test_hostname.py b/tests/unittests/distros/test_hostname.py index f6d4dbe5..2cbbb3e2 100644 --- a/tests/unittests/distros/test_hostname.py +++ b/tests/unittests/distros/test_hostname.py @@ -4,13 +4,12 @@ import unittest from cloudinit.distros.parsers import hostname - -BASE_HOSTNAME = ''' +BASE_HOSTNAME = """ # My super-duper-hostname blahblah -''' +""" BASE_HOSTNAME = BASE_HOSTNAME.strip() @@ -18,7 +17,7 @@ class TestHostnameHelper(unittest.TestCase): def test_parse_same(self): hn = hostname.HostnameConf(BASE_HOSTNAME) self.assertEqual(str(hn).strip(), BASE_HOSTNAME) - self.assertEqual(hn.hostname, 'blahblah') + self.assertEqual(hn.hostname, "blahblah") def test_no_adjust_hostname(self): hn = hostname.HostnameConf(BASE_HOSTNAME) @@ -29,14 +28,15 @@ class TestHostnameHelper(unittest.TestCase): def test_adjust_hostname(self): hn = hostname.HostnameConf(BASE_HOSTNAME) prev_name = hn.hostname - self.assertEqual(prev_name, 'blahblah') + self.assertEqual(prev_name, "blahblah") hn.set_hostname("bbbbd") - self.assertEqual(hn.hostname, 'bbbbd') - expected_out = ''' + self.assertEqual(hn.hostname, "bbbbd") + expected_out = """ # My super-duper-hostname bbbbd -''' +""" self.assertEqual(str(hn).strip(), expected_out.strip()) + # vi: ts=4 expandtab diff --git a/tests/unittests/distros/test_hosts.py b/tests/unittests/distros/test_hosts.py index 8aaa6e48..faffd912 100644 --- a/tests/unittests/distros/test_hosts.py +++ b/tests/unittests/distros/test_hosts.py @@ -4,42 +4,44 @@ import unittest from cloudinit.distros.parsers import hosts - -BASE_ETC = ''' +BASE_ETC = """ # Example 127.0.0.1 localhost 192.168.1.10 foo.mydomain.org foo 192.168.1.10 bar.mydomain.org bar 146.82.138.7 master.debian.org master 209.237.226.90 www.opensource.org -''' +""" BASE_ETC = BASE_ETC.strip() class TestHostsHelper(unittest.TestCase): def test_parse(self): eh = hosts.HostsConf(BASE_ETC) - self.assertEqual(eh.get_entry('127.0.0.1'), [['localhost']]) - self.assertEqual(eh.get_entry('192.168.1.10'), - [['foo.mydomain.org', 'foo'], - ['bar.mydomain.org', 'bar']]) + self.assertEqual(eh.get_entry("127.0.0.1"), [["localhost"]]) + self.assertEqual( + eh.get_entry("192.168.1.10"), + [["foo.mydomain.org", "foo"], ["bar.mydomain.org", "bar"]], + ) eh = str(eh) - self.assertTrue(eh.startswith('# Example')) + self.assertTrue(eh.startswith("# Example")) def test_add(self): eh = hosts.HostsConf(BASE_ETC) - eh.add_entry('127.0.0.0', 'blah') - self.assertEqual(eh.get_entry('127.0.0.0'), [['blah']]) - eh.add_entry('127.0.0.3', 'blah', 'blah2', 'blah3') - self.assertEqual(eh.get_entry('127.0.0.3'), - [['blah', 'blah2', 'blah3']]) + eh.add_entry("127.0.0.0", "blah") + self.assertEqual(eh.get_entry("127.0.0.0"), [["blah"]]) + eh.add_entry("127.0.0.3", "blah", "blah2", "blah3") + self.assertEqual( + eh.get_entry("127.0.0.3"), [["blah", "blah2", "blah3"]] + ) def test_del(self): eh = hosts.HostsConf(BASE_ETC) - eh.add_entry('127.0.0.0', 'blah') - self.assertEqual(eh.get_entry('127.0.0.0'), 
[['blah']]) + eh.add_entry("127.0.0.0", "blah") + self.assertEqual(eh.get_entry("127.0.0.0"), [["blah"]]) + + eh.del_entries("127.0.0.0") + self.assertEqual(eh.get_entry("127.0.0.0"), []) - eh.del_entries('127.0.0.0') - self.assertEqual(eh.get_entry('127.0.0.0'), []) # vi: ts=4 expandtab diff --git a/tests/unittests/distros/test_init.py b/tests/unittests/distros/test_init.py index fd64a322..8f3c8978 100644 --- a/tests/unittests/distros/test_init.py +++ b/tests/unittests/distros/test_init.py @@ -9,16 +9,17 @@ from unittest import mock import pytest -from cloudinit.distros import _get_package_mirror_info, LDH_ASCII_CHARS +from cloudinit.distros import LDH_ASCII_CHARS, _get_package_mirror_info # In newer versions of Python, these characters will be omitted instead # of substituted because of security concerns. # See https://bugs.python.org/issue43882 -SECURITY_URL_CHARS = '\n\r\t' +SECURITY_URL_CHARS = "\n\r\t" # Define a set of characters we would expect to be replaced INVALID_URL_CHARS = [ - chr(x) for x in range(127) + chr(x) + for x in range(127) if chr(x) not in LDH_ASCII_CHARS + SECURITY_URL_CHARS ] for separator in [":", ".", "/", "#", "?", "@", "[", "]"]: @@ -37,21 +38,41 @@ class TestGetPackageMirrorInfo: These tests are more focused on specific aspects of the unit under test. """ - @pytest.mark.parametrize('mirror_info,expected', [ - # Empty info gives empty return - ({}, {}), - # failsafe values used if present - ({'failsafe': {'primary': 'http://value', 'security': 'http://other'}}, - {'primary': 'http://value', 'security': 'http://other'}), - # search values used if present - ({'search': {'primary': ['http://value'], - 'security': ['http://other']}}, - {'primary': ['http://value'], 'security': ['http://other']}), - # failsafe values used if search value not present - ({'search': {'primary': ['http://value']}, - 'failsafe': {'security': 'http://other'}}, - {'primary': ['http://value'], 'security': 'http://other'}) - ]) + @pytest.mark.parametrize( + "mirror_info,expected", + [ + # Empty info gives empty return + ({}, {}), + # failsafe values used if present + ( + { + "failsafe": { + "primary": "http://value", + "security": "http://other", + } + }, + {"primary": "http://value", "security": "http://other"}, + ), + # search values used if present + ( + { + "search": { + "primary": ["http://value"], + "security": ["http://other"], + } + }, + {"primary": ["http://value"], "security": ["http://other"]}, + ), + # failsafe values used if search value not present + ( + { + "search": {"primary": ["http://value"]}, + "failsafe": {"security": "http://other"}, + }, + {"primary": ["http://value"], "security": "http://other"}, + ), + ], + ) def test_get_package_mirror_info_failsafe(self, mirror_info, expected): """ Test the interaction between search and failsafe inputs @@ -60,97 +81,163 @@ class TestGetPackageMirrorInfo: options; test_failsafe_used_if_all_search_results_filtered_out covers that.) 
""" - assert expected == _get_package_mirror_info(mirror_info, - mirror_filter=lambda x: x) + assert expected == _get_package_mirror_info( + mirror_info, mirror_filter=lambda x: x + ) def test_failsafe_used_if_all_search_results_filtered_out(self): """Test the failsafe option used if all search options eliminated.""" mirror_info = { - 'search': {'primary': ['http://value']}, - 'failsafe': {'primary': 'http://other'} + "search": {"primary": ["http://value"]}, + "failsafe": {"primary": "http://other"}, } - assert {'primary': 'http://other'} == _get_package_mirror_info( - mirror_info, mirror_filter=lambda x: False) + assert {"primary": "http://other"} == _get_package_mirror_info( + mirror_info, mirror_filter=lambda x: False + ) - @pytest.mark.parametrize('allow_ec2_mirror, platform_type', [ - (True, 'ec2') - ]) - @pytest.mark.parametrize('availability_zone,region,patterns,expected', ( - # Test ec2_region alone - ('fk-fake-1f', None, ['http://EC2-%(ec2_region)s/ubuntu'], - ['http://ec2-fk-fake-1/ubuntu']), - # Test availability_zone alone - ('fk-fake-1f', None, ['http://AZ-%(availability_zone)s/ubuntu'], - ['http://az-fk-fake-1f/ubuntu']), - # Test region alone - (None, 'fk-fake-1', ['http://RG-%(region)s/ubuntu'], - ['http://rg-fk-fake-1/ubuntu']), - # Test that ec2_region is not available for non-matching AZs - ('fake-fake-1f', None, - ['http://EC2-%(ec2_region)s/ubuntu', - 'http://AZ-%(availability_zone)s/ubuntu'], - ['http://az-fake-fake-1f/ubuntu']), - # Test that template order maintained - (None, 'fake-region', - ['http://RG-%(region)s-2/ubuntu', 'http://RG-%(region)s-1/ubuntu'], - ['http://rg-fake-region-2/ubuntu', 'http://rg-fake-region-1/ubuntu']), - # Test that non-ASCII hostnames are IDNA encoded; - # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q" - (None, 'ТεЅТ̣', ['http://www.IDNA-%(region)s.com/ubuntu'], - ['http://www.xn--idna--4kd53hh6aba3q.com/ubuntu']), - # Test that non-ASCII hostnames with a port are IDNA encoded; - # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q" - (None, 'ТεЅТ̣', ['http://www.IDNA-%(region)s.com:8080/ubuntu'], - ['http://www.xn--idna--4kd53hh6aba3q.com:8080/ubuntu']), - # Test that non-ASCII non-hostname parts of URLs are unchanged - (None, 'ТεЅТ̣', ['http://www.example.com/%(region)s/ubuntu'], - ['http://www.example.com/ТεЅТ̣/ubuntu']), - # Test that IPv4 addresses are unchanged - (None, 'fk-fake-1', ['http://192.168.1.1:8080/%(region)s/ubuntu'], - ['http://192.168.1.1:8080/fk-fake-1/ubuntu']), - # Test that IPv6 addresses are unchanged - (None, 'fk-fake-1', - ['http://[2001:67c:1360:8001::23]/%(region)s/ubuntu'], - ['http://[2001:67c:1360:8001::23]/fk-fake-1/ubuntu']), - # Test that unparseable URLs are filtered out of the mirror list - (None, 'inv[lid', - ['http://%(region)s.in.hostname/should/be/filtered', - 'http://but.not.in.the.path/%(region)s'], - ['http://but.not.in.the.path/inv[lid']), - (None, '-some-region-', - ['http://-lead-ing.%(region)s.trail-ing-.example.com/ubuntu'], - ['http://lead-ing.some-region.trail-ing.example.com/ubuntu']), - ) + tuple( - # Dynamically generate a test case for each non-LDH - # (Letters/Digits/Hyphen) ASCII character, testing that it is - # substituted with a hyphen - (None, 'fk{0}fake{0}1'.format(invalid_char), - ['http://%(region)s/ubuntu'], ['http://fk-fake-1/ubuntu']) - for invalid_char in INVALID_URL_CHARS - )) - def test_valid_substitution(self, - allow_ec2_mirror, - platform_type, - availability_zone, - region, - patterns, - expected): + @pytest.mark.parametrize( + "allow_ec2_mirror, 
platform_type", [(True, "ec2")] + ) + @pytest.mark.parametrize( + "availability_zone,region,patterns,expected", + ( + # Test ec2_region alone + ( + "fk-fake-1f", + None, + ["http://EC2-%(ec2_region)s/ubuntu"], + ["http://ec2-fk-fake-1/ubuntu"], + ), + # Test availability_zone alone + ( + "fk-fake-1f", + None, + ["http://AZ-%(availability_zone)s/ubuntu"], + ["http://az-fk-fake-1f/ubuntu"], + ), + # Test region alone + ( + None, + "fk-fake-1", + ["http://RG-%(region)s/ubuntu"], + ["http://rg-fk-fake-1/ubuntu"], + ), + # Test that ec2_region is not available for non-matching AZs + ( + "fake-fake-1f", + None, + [ + "http://EC2-%(ec2_region)s/ubuntu", + "http://AZ-%(availability_zone)s/ubuntu", + ], + ["http://az-fake-fake-1f/ubuntu"], + ), + # Test that template order maintained + ( + None, + "fake-region", + [ + "http://RG-%(region)s-2/ubuntu", + "http://RG-%(region)s-1/ubuntu", + ], + [ + "http://rg-fake-region-2/ubuntu", + "http://rg-fake-region-1/ubuntu", + ], + ), + # Test that non-ASCII hostnames are IDNA encoded; + # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q" + ( + None, + "ТεЅТ̣", + ["http://www.IDNA-%(region)s.com/ubuntu"], + ["http://www.xn--idna--4kd53hh6aba3q.com/ubuntu"], + ), + # Test that non-ASCII hostnames with a port are IDNA encoded; + # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q" + ( + None, + "ТεЅТ̣", + ["http://www.IDNA-%(region)s.com:8080/ubuntu"], + ["http://www.xn--idna--4kd53hh6aba3q.com:8080/ubuntu"], + ), + # Test that non-ASCII non-hostname parts of URLs are unchanged + ( + None, + "ТεЅТ̣", + ["http://www.example.com/%(region)s/ubuntu"], + ["http://www.example.com/ТεЅТ̣/ubuntu"], + ), + # Test that IPv4 addresses are unchanged + ( + None, + "fk-fake-1", + ["http://192.168.1.1:8080/%(region)s/ubuntu"], + ["http://192.168.1.1:8080/fk-fake-1/ubuntu"], + ), + # Test that IPv6 addresses are unchanged + ( + None, + "fk-fake-1", + ["http://[2001:67c:1360:8001::23]/%(region)s/ubuntu"], + ["http://[2001:67c:1360:8001::23]/fk-fake-1/ubuntu"], + ), + # Test that unparseable URLs are filtered out of the mirror list + ( + None, + "inv[lid", + [ + "http://%(region)s.in.hostname/should/be/filtered", + "http://but.not.in.the.path/%(region)s", + ], + ["http://but.not.in.the.path/inv[lid"], + ), + ( + None, + "-some-region-", + ["http://-lead-ing.%(region)s.trail-ing-.example.com/ubuntu"], + ["http://lead-ing.some-region.trail-ing.example.com/ubuntu"], + ), + ) + + tuple( + # Dynamically generate a test case for each non-LDH + # (Letters/Digits/Hyphen) ASCII character, testing that it is + # substituted with a hyphen + ( + None, + "fk{0}fake{0}1".format(invalid_char), + ["http://%(region)s/ubuntu"], + ["http://fk-fake-1/ubuntu"], + ) + for invalid_char in INVALID_URL_CHARS + ), + ) + def test_valid_substitution( + self, + allow_ec2_mirror, + platform_type, + availability_zone, + region, + patterns, + expected, + ): """Test substitution works as expected.""" - flag_path = "cloudinit.distros." 
\ - "ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES" + flag_path = ( + "cloudinit.distros.ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES" + ) m_data_source = mock.Mock( availability_zone=availability_zone, region=region, - platform_type=platform_type + platform_type=platform_type, ) - mirror_info = {'search': {'primary': patterns}} + mirror_info = {"search": {"primary": patterns}} with mock.patch(flag_path, allow_ec2_mirror): ret = _get_package_mirror_info( mirror_info, data_source=m_data_source, - mirror_filter=lambda x: x + mirror_filter=lambda x: x, ) print(allow_ec2_mirror) print(platform_type) @@ -158,4 +245,4 @@ class TestGetPackageMirrorInfo: print(region) print(patterns) print(expected) - assert {'primary': expected} == ret + assert {"primary": expected} == ret diff --git a/tests/unittests/distros/test_manage_service.py b/tests/unittests/distros/test_manage_service.py index 6f1bd0b1..9e64b35c 100644 --- a/tests/unittests/distros/test_manage_service.py +++ b/tests/unittests/distros/test_manage_service.py @@ -1,6 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. -from tests.unittests.helpers import (CiTestCase, mock) +from tests.unittests.helpers import CiTestCase, mock from tests.unittests.util import MockDistro @@ -12,27 +12,30 @@ class TestManageService(CiTestCase): super(TestManageService, self).setUp() self.dist = MockDistro() - @mock.patch.object(MockDistro, 'uses_systemd', return_value=False) + @mock.patch.object(MockDistro, "uses_systemd", return_value=False) @mock.patch("cloudinit.distros.subp.subp") def test_manage_service_systemctl_initcmd(self, m_subp, m_sysd): - self.dist.init_cmd = ['systemctl'] - self.dist.manage_service('start', 'myssh') - m_subp.assert_called_with(['systemctl', 'start', 'myssh'], - capture=True) + self.dist.init_cmd = ["systemctl"] + self.dist.manage_service("start", "myssh") + m_subp.assert_called_with( + ["systemctl", "start", "myssh"], capture=True + ) - @mock.patch.object(MockDistro, 'uses_systemd', return_value=False) + @mock.patch.object(MockDistro, "uses_systemd", return_value=False) @mock.patch("cloudinit.distros.subp.subp") def test_manage_service_service_initcmd(self, m_subp, m_sysd): - self.dist.init_cmd = ['service'] - self.dist.manage_service('start', 'myssh') - m_subp.assert_called_with(['service', 'myssh', 'start'], capture=True) + self.dist.init_cmd = ["service"] + self.dist.manage_service("start", "myssh") + m_subp.assert_called_with(["service", "myssh", "start"], capture=True) - @mock.patch.object(MockDistro, 'uses_systemd', return_value=True) + @mock.patch.object(MockDistro, "uses_systemd", return_value=True) @mock.patch("cloudinit.distros.subp.subp") def test_manage_service_systemctl(self, m_subp, m_sysd): - self.dist.init_cmd = ['ignore'] - self.dist.manage_service('start', 'myssh') - m_subp.assert_called_with(['systemctl', 'start', 'myssh'], - capture=True) + self.dist.init_cmd = ["ignore"] + self.dist.manage_service("start", "myssh") + m_subp.assert_called_with( + ["systemctl", "start", "myssh"], capture=True + ) + # vi: ts=4 sw=4 expandtab diff --git a/tests/unittests/distros/test_netbsd.py b/tests/unittests/distros/test_netbsd.py index 11a68d2a..0bc6dfbd 100644 --- a/tests/unittests/distros/test_netbsd.py +++ b/tests/unittests/distros/test_netbsd.py @@ -1,10 +1,11 @@ -import cloudinit.distros.netbsd +import unittest.mock as mock import pytest -import unittest.mock as mock + +import cloudinit.distros.netbsd -@pytest.mark.parametrize('with_pkgin', (True, False)) 
+@pytest.mark.parametrize("with_pkgin", (True, False)) @mock.patch("cloudinit.distros.netbsd.os") def test_init(m_os, with_pkgin): print(with_pkgin) @@ -12,6 +13,6 @@ def test_init(m_os, with_pkgin): cfg = {} distro = cloudinit.distros.netbsd.NetBSD("netbsd", cfg, None) - expectation = ['pkgin', '-y', 'full-upgrade'] if with_pkgin else None + expectation = ["pkgin", "-y", "full-upgrade"] if with_pkgin else None assert distro.pkg_cmd_upgrade_prefix == expectation - assert [mock.call('/usr/pkg/bin/pkgin')] == m_os.path.exists.call_args_list + assert [mock.call("/usr/pkg/bin/pkgin")] == m_os.path.exists.call_args_list diff --git a/tests/unittests/distros/test_netconfig.py b/tests/unittests/distros/test_netconfig.py index 90ac5578..a25be481 100644 --- a/tests/unittests/distros/test_netconfig.py +++ b/tests/unittests/distros/test_netconfig.py @@ -7,17 +7,11 @@ from io import StringIO from textwrap import dedent from unittest import mock -from cloudinit import distros +from cloudinit import distros, helpers, safeyaml, settings, subp, util from cloudinit.distros.parsers.sys_conf import SysConf -from cloudinit import helpers -from cloudinit import settings -from tests.unittests.helpers import ( - FilesystemMockingTestCase, dir2dict) -from cloudinit import subp -from cloudinit import util -from cloudinit import safeyaml - -BASE_NET_CFG = ''' +from tests.unittests.helpers import FilesystemMockingTestCase, dir2dict + +BASE_NET_CFG = """ auto lo iface lo inet loopback @@ -31,9 +25,9 @@ iface eth0 inet static auto eth1 iface eth1 inet dhcp -''' +""" -BASE_NET_CFG_FROM_V2 = ''' +BASE_NET_CFG_FROM_V2 = """ auto lo iface lo inet loopback @@ -44,9 +38,9 @@ iface eth0 inet static auto eth1 iface eth1 inet dhcp -''' +""" -BASE_NET_CFG_IPV6 = ''' +BASE_NET_CFG_IPV6 = """ auto lo iface lo inet loopback @@ -74,20 +68,31 @@ iface eth1 inet6 static address 2607:f0d0:1002:0011::3 netmask 64 gateway 2607:f0d0:1002:0011::1 -''' - -V1_NET_CFG = {'config': [{'name': 'eth0', +""" - 'subnets': [{'address': '192.168.1.5', - 'broadcast': '192.168.1.0', - 'gateway': '192.168.1.254', - 'netmask': '255.255.255.0', - 'type': 'static'}], - 'type': 'physical'}, - {'name': 'eth1', - 'subnets': [{'control': 'auto', 'type': 'dhcp4'}], - 'type': 'physical'}], - 'version': 1} +V1_NET_CFG = { + "config": [ + { + "name": "eth0", + "subnets": [ + { + "address": "192.168.1.5", + "broadcast": "192.168.1.0", + "gateway": "192.168.1.254", + "netmask": "255.255.255.0", + "type": "static", + } + ], + "type": "physical", + }, + { + "name": "eth1", + "subnets": [{"control": "auto", "type": "dhcp4"}], + "type": "physical", + }, + ], + "version": 1, +} V1_NET_CFG_WITH_DUPS = """\ # same value in interface specific dns and global dns @@ -144,19 +149,28 @@ auto eth1 iface eth1 inet dhcp """ -V1_NET_CFG_IPV6 = {'config': [{'name': 'eth0', - 'subnets': [{'address': - '2607:f0d0:1002:0011::2', - 'gateway': - '2607:f0d0:1002:0011::1', - 'netmask': '64', - 'type': 'static6'}], - 'type': 'physical'}, - {'name': 'eth1', - 'subnets': [{'control': 'auto', - 'type': 'dhcp4'}], - 'type': 'physical'}], - 'version': 1} +V1_NET_CFG_IPV6 = { + "config": [ + { + "name": "eth0", + "subnets": [ + { + "address": "2607:f0d0:1002:0011::2", + "gateway": "2607:f0d0:1002:0011::1", + "netmask": "64", + "type": "static6", + } + ], + "type": "physical", + }, + { + "name": "eth1", + "subnets": [{"control": "auto", "type": "dhcp4"}], + "type": "physical", + }, + ], + "version": 1, +} V1_TO_V2_NET_CFG_OUTPUT = """\ @@ -194,14 +208,11 @@ network: """ V2_NET_CFG = { - 
'ethernets': { - 'eth7': { - 'addresses': ['192.168.1.5/24'], - 'gateway4': '192.168.1.254'}, - 'eth9': { - 'dhcp4': True} + "ethernets": { + "eth7": {"addresses": ["192.168.1.5/24"], "gateway4": "192.168.1.254"}, + "eth9": {"dhcp4": True}, }, - 'version': 2 + "version": 2, } @@ -237,19 +248,18 @@ class WriteBuffer(object): class TestNetCfgDistroBase(FilesystemMockingTestCase): - def setUp(self): super(TestNetCfgDistroBase, self).setUp() - self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy') + self.add_patch("cloudinit.util.system_is_snappy", "m_snappy") def _get_distro(self, dname, renderers=None): cls = distros.fetch(dname) cfg = settings.CFG_BUILTIN - cfg['system_info']['distro'] = dname + cfg["system_info"]["distro"] = dname if renderers: - cfg['system_info']['network'] = {'renderers': renderers} + cfg["system_info"]["network"] = {"renderers": renderers} paths = helpers.Paths({}) - return cls(dname, cfg.get('system_info'), paths) + return cls(dname, cfg.get("system_info"), paths) def assertCfgEquals(self, blob1, blob2): b1 = dict(SysConf(blob1.strip().splitlines())) @@ -264,23 +274,23 @@ class TestNetCfgDistroBase(FilesystemMockingTestCase): class TestNetCfgDistroFreeBSD(TestNetCfgDistroBase): - def setUp(self): super(TestNetCfgDistroFreeBSD, self).setUp() - self.distro = self._get_distro('freebsd', renderers=['freebsd']) + self.distro = self._get_distro("freebsd", renderers=["freebsd"]) - def _apply_and_verify_freebsd(self, apply_fn, config, expected_cfgs=None, - bringup=False): + def _apply_and_verify_freebsd( + self, apply_fn, config, expected_cfgs=None, bringup=False + ): if not expected_cfgs: - raise ValueError('expected_cfg must not be None') + raise ValueError("expected_cfg must not be None") tmpd = None - with mock.patch('cloudinit.net.freebsd.available') as m_avail: + with mock.patch("cloudinit.net.freebsd.available") as m_avail: m_avail.return_value = True with self.reRooted(tmpd) as tmpd: - util.ensure_dir('/etc') - util.ensure_file('/etc/rc.conf') - util.ensure_file('/etc/resolv.conf') + util.ensure_dir("/etc") + util.ensure_file("/etc/rc.conf") + util.ensure_file("/etc/resolv.conf") apply_fn(config, bringup) results = dir2dict(tmpd) @@ -291,14 +301,14 @@ class TestNetCfgDistroFreeBSD(TestNetCfgDistroBase): print(results[cfgpath]) print("----------") self.assertEqual( - set(expected.split('\n')), - set(results[cfgpath].split('\n'))) + set(expected.split("\n")), set(results[cfgpath].split("\n")) + ) self.assertEqual(0o644, get_mode(cfgpath, tmpd)) - @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch("cloudinit.net.get_interfaces_by_mac") def test_apply_network_config_freebsd_standard(self, ifaces_mac): ifaces_mac.return_value = { - '00:15:5d:4c:73:00': 'eth0', + "00:15:5d:4c:73:00": "eth0", } rc_conf_expected = """\ defaultrouter=192.168.1.254 @@ -307,17 +317,19 @@ ifconfig_eth1=DHCP """ expected_cfgs = { - '/etc/rc.conf': rc_conf_expected, - '/etc/resolv.conf': '' + "/etc/rc.conf": rc_conf_expected, + "/etc/resolv.conf": "", } - self._apply_and_verify_freebsd(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy()) + self._apply_and_verify_freebsd( + self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy(), + ) - @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch("cloudinit.net.get_interfaces_by_mac") def test_apply_network_config_freebsd_ifrename(self, ifaces_mac): ifaces_mac.return_value = { - '00:15:5d:4c:73:00': 'vtnet0', + "00:15:5d:4c:73:00": "vtnet0", } rc_conf_expected = 
"""\ ifconfig_vtnet0_name=eth0 @@ -327,49 +339,51 @@ ifconfig_eth1=DHCP """ V1_NET_CFG_RENAME = copy.deepcopy(V1_NET_CFG) - V1_NET_CFG_RENAME['config'][0]['mac_address'] = '00:15:5d:4c:73:00' + V1_NET_CFG_RENAME["config"][0]["mac_address"] = "00:15:5d:4c:73:00" expected_cfgs = { - '/etc/rc.conf': rc_conf_expected, - '/etc/resolv.conf': '' + "/etc/rc.conf": rc_conf_expected, + "/etc/resolv.conf": "", } - self._apply_and_verify_freebsd(self.distro.apply_network_config, - V1_NET_CFG_RENAME, - expected_cfgs=expected_cfgs.copy()) + self._apply_and_verify_freebsd( + self.distro.apply_network_config, + V1_NET_CFG_RENAME, + expected_cfgs=expected_cfgs.copy(), + ) - @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch("cloudinit.net.get_interfaces_by_mac") def test_apply_network_config_freebsd_nameserver(self, ifaces_mac): ifaces_mac.return_value = { - '00:15:5d:4c:73:00': 'eth0', + "00:15:5d:4c:73:00": "eth0", } V1_NET_CFG_DNS = copy.deepcopy(V1_NET_CFG) - ns = ['1.2.3.4'] - V1_NET_CFG_DNS['config'][0]['subnets'][0]['dns_nameservers'] = ns - expected_cfgs = { - '/etc/resolv.conf': 'nameserver 1.2.3.4\n' - } - self._apply_and_verify_freebsd(self.distro.apply_network_config, - V1_NET_CFG_DNS, - expected_cfgs=expected_cfgs.copy()) + ns = ["1.2.3.4"] + V1_NET_CFG_DNS["config"][0]["subnets"][0]["dns_nameservers"] = ns + expected_cfgs = {"/etc/resolv.conf": "nameserver 1.2.3.4\n"} + self._apply_and_verify_freebsd( + self.distro.apply_network_config, + V1_NET_CFG_DNS, + expected_cfgs=expected_cfgs.copy(), + ) class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase): - def setUp(self): super(TestNetCfgDistroUbuntuEni, self).setUp() - self.distro = self._get_distro('ubuntu', renderers=['eni']) + self.distro = self._get_distro("ubuntu", renderers=["eni"]) def eni_path(self): - return '/etc/network/interfaces.d/50-cloud-init.cfg' + return "/etc/network/interfaces.d/50-cloud-init.cfg" - def _apply_and_verify_eni(self, apply_fn, config, expected_cfgs=None, - bringup=False): + def _apply_and_verify_eni( + self, apply_fn, config, expected_cfgs=None, bringup=False + ): if not expected_cfgs: - raise ValueError('expected_cfg must not be None') + raise ValueError("expected_cfg must not be None") tmpd = None - with mock.patch('cloudinit.net.eni.available') as m_avail: + with mock.patch("cloudinit.net.eni.available") as m_avail: m_avail.return_value = True with self.reRooted(tmpd) as tmpd: apply_fn(config, bringup) @@ -389,35 +403,39 @@ class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase): self.eni_path(): V1_NET_CFG_OUTPUT, } # ub_distro.apply_network_config(V1_NET_CFG, False) - self._apply_and_verify_eni(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy()) + self._apply_and_verify_eni( + self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy(), + ) def test_apply_network_config_ipv6_ub(self): - expected_cfgs = { - self.eni_path(): V1_NET_CFG_IPV6_OUTPUT - } - self._apply_and_verify_eni(self.distro.apply_network_config, - V1_NET_CFG_IPV6, - expected_cfgs=expected_cfgs.copy()) + expected_cfgs = {self.eni_path(): V1_NET_CFG_IPV6_OUTPUT} + self._apply_and_verify_eni( + self.distro.apply_network_config, + V1_NET_CFG_IPV6, + expected_cfgs=expected_cfgs.copy(), + ) class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): def setUp(self): super(TestNetCfgDistroUbuntuNetplan, self).setUp() - self.distro = self._get_distro('ubuntu', renderers=['netplan']) - self.devlist = ['eth0', 'lo'] + self.distro = self._get_distro("ubuntu", renderers=["netplan"]) + 
self.devlist = ["eth0", "lo"] - def _apply_and_verify_netplan(self, apply_fn, config, expected_cfgs=None, - bringup=False): + def _apply_and_verify_netplan( + self, apply_fn, config, expected_cfgs=None, bringup=False + ): if not expected_cfgs: - raise ValueError('expected_cfg must not be None') + raise ValueError("expected_cfg must not be None") tmpd = None - with mock.patch('cloudinit.net.netplan.available', - return_value=True): - with mock.patch("cloudinit.net.netplan.get_devicelist", - return_value=self.devlist): + with mock.patch("cloudinit.net.netplan.available", return_value=True): + with mock.patch( + "cloudinit.net.netplan.get_devicelist", + return_value=self.devlist, + ): with self.reRooted(tmpd) as tmpd: apply_fn(config, bringup) @@ -432,7 +450,7 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): self.assertEqual(0o644, get_mode(cfgpath, tmpd)) def netplan_path(self): - return '/etc/netplan/50-cloud-init.yaml' + return "/etc/netplan/50-cloud-init.yaml" def test_apply_network_config_v1_to_netplan_ub(self): expected_cfgs = { @@ -440,9 +458,11 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): } # ub_distro.apply_network_config(V1_NET_CFG, False) - self._apply_and_verify_netplan(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy()) + self._apply_and_verify_netplan( + self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy(), + ) def test_apply_network_config_v1_ipv6_to_netplan_ub(self): expected_cfgs = { @@ -450,39 +470,43 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): } # ub_distro.apply_network_config(V1_NET_CFG_IPV6, False) - self._apply_and_verify_netplan(self.distro.apply_network_config, - V1_NET_CFG_IPV6, - expected_cfgs=expected_cfgs.copy()) + self._apply_and_verify_netplan( + self.distro.apply_network_config, + V1_NET_CFG_IPV6, + expected_cfgs=expected_cfgs.copy(), + ) def test_apply_network_config_v2_passthrough_ub(self): expected_cfgs = { self.netplan_path(): V2_TO_V2_NET_CFG_OUTPUT, } # ub_distro.apply_network_config(V2_NET_CFG, False) - self._apply_and_verify_netplan(self.distro.apply_network_config, - V2_NET_CFG, - expected_cfgs=expected_cfgs.copy()) + self._apply_and_verify_netplan( + self.distro.apply_network_config, + V2_NET_CFG, + expected_cfgs=expected_cfgs.copy(), + ) class TestNetCfgDistroRedhat(TestNetCfgDistroBase): - def setUp(self): super(TestNetCfgDistroRedhat, self).setUp() - self.distro = self._get_distro('rhel', renderers=['sysconfig']) + self.distro = self._get_distro("rhel", renderers=["sysconfig"]) def ifcfg_path(self, ifname): - return '/etc/sysconfig/network-scripts/ifcfg-%s' % ifname + return "/etc/sysconfig/network-scripts/ifcfg-%s" % ifname def control_path(self): - return '/etc/sysconfig/network' + return "/etc/sysconfig/network" - def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, - bringup=False): + def _apply_and_verify( + self, apply_fn, config, expected_cfgs=None, bringup=False + ): if not expected_cfgs: - raise ValueError('expected_cfg must not be None') + raise ValueError("expected_cfg must not be None") tmpd = None - with mock.patch('cloudinit.net.sysconfig.available') as m_avail: + with mock.patch("cloudinit.net.sysconfig.available") as m_avail: m_avail.return_value = True with self.reRooted(tmpd) as tmpd: apply_fn(config, bringup) @@ -494,7 +518,8 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): def test_apply_network_config_rh(self): expected_cfgs = { - self.ifcfg_path('eth0'): dedent("""\ + self.ifcfg_path("eth0"): 
dedent( + """\ BOOTPROTO=none DEFROUTE=yes DEVICE=eth0 @@ -505,27 +530,35 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): ONBOOT=yes TYPE=Ethernet USERCTL=no - """), - self.ifcfg_path('eth1'): dedent("""\ + """ + ), + self.ifcfg_path("eth1"): dedent( + """\ BOOTPROTO=dhcp DEVICE=eth1 NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no - """), - self.control_path(): dedent("""\ + """ + ), + self.control_path(): dedent( + """\ NETWORKING=yes - """), + """ + ), } # rh_distro.apply_network_config(V1_NET_CFG, False) - self._apply_and_verify(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy()) + self._apply_and_verify( + self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy(), + ) def test_apply_network_config_ipv6_rh(self): expected_cfgs = { - self.ifcfg_path('eth0'): dedent("""\ + self.ifcfg_path("eth0"): dedent( + """\ BOOTPROTO=none DEFROUTE=yes DEVICE=eth0 @@ -538,39 +571,54 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): ONBOOT=yes TYPE=Ethernet USERCTL=no - """), - self.ifcfg_path('eth1'): dedent("""\ + """ + ), + self.ifcfg_path("eth1"): dedent( + """\ BOOTPROTO=dhcp DEVICE=eth1 NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no - """), - self.control_path(): dedent("""\ + """ + ), + self.control_path(): dedent( + """\ NETWORKING=yes NETWORKING_IPV6=yes IPV6_AUTOCONF=no - """), + """ + ), } # rh_distro.apply_network_config(V1_NET_CFG_IPV6, False) - self._apply_and_verify(self.distro.apply_network_config, - V1_NET_CFG_IPV6, - expected_cfgs=expected_cfgs.copy()) + self._apply_and_verify( + self.distro.apply_network_config, + V1_NET_CFG_IPV6, + expected_cfgs=expected_cfgs.copy(), + ) def test_vlan_render_unsupported(self): """Render officially unsupported vlan names.""" cfg = { - 'version': 2, - 'ethernets': { - 'eth0': {'addresses': ["192.10.1.2/24"], - 'match': {'macaddress': "00:16:3e:60:7c:df"}}}, - 'vlans': { - 'infra0': {'addresses': ["10.0.1.2/16"], - 'id': 1001, 'link': 'eth0'}}, + "version": 2, + "ethernets": { + "eth0": { + "addresses": ["192.10.1.2/24"], + "match": {"macaddress": "00:16:3e:60:7c:df"}, + } + }, + "vlans": { + "infra0": { + "addresses": ["10.0.1.2/16"], + "id": 1001, + "link": "eth0", + } + }, } expected_cfgs = { - self.ifcfg_path('eth0'): dedent("""\ + self.ifcfg_path("eth0"): dedent( + """\ BOOTPROTO=none DEVICE=eth0 HWADDR=00:16:3e:60:7c:df @@ -580,8 +628,10 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): ONBOOT=yes TYPE=Ethernet USERCTL=no - """), - self.ifcfg_path('infra0'): dedent("""\ + """ + ), + self.ifcfg_path("infra0"): dedent( + """\ BOOTPROTO=none DEVICE=infra0 IPADDR=10.0.1.2 @@ -591,26 +641,33 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): PHYSDEV=eth0 USERCTL=no VLAN=yes - """), - self.control_path(): dedent("""\ + """ + ), + self.control_path(): dedent( + """\ NETWORKING=yes - """), + """ + ), } self._apply_and_verify( - self.distro.apply_network_config, cfg, - expected_cfgs=expected_cfgs) + self.distro.apply_network_config, cfg, expected_cfgs=expected_cfgs + ) def test_vlan_render(self): cfg = { - 'version': 2, - 'ethernets': { - 'eth0': {'addresses': ["192.10.1.2/24"]}}, - 'vlans': { - 'eth0.1001': {'addresses': ["10.0.1.2/16"], - 'id': 1001, 'link': 'eth0'}}, + "version": 2, + "ethernets": {"eth0": {"addresses": ["192.10.1.2/24"]}}, + "vlans": { + "eth0.1001": { + "addresses": ["10.0.1.2/16"], + "id": 1001, + "link": "eth0", + } + }, } expected_cfgs = { - self.ifcfg_path('eth0'): dedent("""\ + self.ifcfg_path("eth0"): dedent( + """\ BOOTPROTO=none 
DEVICE=eth0 IPADDR=192.10.1.2 @@ -619,8 +676,10 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): ONBOOT=yes TYPE=Ethernet USERCTL=no - """), - self.ifcfg_path('eth0.1001'): dedent("""\ + """ + ), + self.ifcfg_path("eth0.1001"): dedent( + """\ BOOTPROTO=none DEVICE=eth0.1001 IPADDR=10.0.1.2 @@ -630,32 +689,35 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): PHYSDEV=eth0 USERCTL=no VLAN=yes - """), - self.control_path(): dedent("""\ + """ + ), + self.control_path(): dedent( + """\ NETWORKING=yes - """), + """ + ), } self._apply_and_verify( - self.distro.apply_network_config, cfg, - expected_cfgs=expected_cfgs) + self.distro.apply_network_config, cfg, expected_cfgs=expected_cfgs + ) class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): - def setUp(self): super(TestNetCfgDistroOpensuse, self).setUp() - self.distro = self._get_distro('opensuse', renderers=['sysconfig']) + self.distro = self._get_distro("opensuse", renderers=["sysconfig"]) def ifcfg_path(self, ifname): - return '/etc/sysconfig/network/ifcfg-%s' % ifname + return "/etc/sysconfig/network/ifcfg-%s" % ifname - def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, - bringup=False): + def _apply_and_verify( + self, apply_fn, config, expected_cfgs=None, bringup=False + ): if not expected_cfgs: - raise ValueError('expected_cfg must not be None') + raise ValueError("expected_cfg must not be None") tmpd = None - with mock.patch('cloudinit.net.sysconfig.available') as m_avail: + with mock.patch("cloudinit.net.sysconfig.available") as m_avail: m_avail.return_value = True with self.reRooted(tmpd) as tmpd: apply_fn(config, bringup) @@ -668,52 +730,71 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): def test_apply_network_config_opensuse(self): """Opensuse uses apply_network_config and renders sysconfig""" expected_cfgs = { - self.ifcfg_path('eth0'): dedent("""\ + self.ifcfg_path("eth0"): dedent( + """\ BOOTPROTO=static IPADDR=192.168.1.5 NETMASK=255.255.255.0 STARTMODE=auto - """), - self.ifcfg_path('eth1'): dedent("""\ + """ + ), + self.ifcfg_path("eth1"): dedent( + """\ BOOTPROTO=dhcp4 STARTMODE=auto - """), + """ + ), } - self._apply_and_verify(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy()) + self._apply_and_verify( + self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy(), + ) def test_apply_network_config_ipv6_opensuse(self): """Opensuse uses apply_network_config and renders sysconfig w/ipv6""" expected_cfgs = { - self.ifcfg_path('eth0'): dedent("""\ + self.ifcfg_path("eth0"): dedent( + """\ BOOTPROTO=static IPADDR6=2607:f0d0:1002:0011::2/64 STARTMODE=auto - """), - self.ifcfg_path('eth1'): dedent("""\ + """ + ), + self.ifcfg_path("eth1"): dedent( + """\ BOOTPROTO=dhcp4 STARTMODE=auto - """), + """ + ), } - self._apply_and_verify(self.distro.apply_network_config, - V1_NET_CFG_IPV6, - expected_cfgs=expected_cfgs.copy()) + self._apply_and_verify( + self.distro.apply_network_config, + V1_NET_CFG_IPV6, + expected_cfgs=expected_cfgs.copy(), + ) class TestNetCfgDistroArch(TestNetCfgDistroBase): def setUp(self): super(TestNetCfgDistroArch, self).setUp() - self.distro = self._get_distro('arch', renderers=['netplan']) - - def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, - bringup=False, with_netplan=False): + self.distro = self._get_distro("arch", renderers=["netplan"]) + + def _apply_and_verify( + self, + apply_fn, + config, + expected_cfgs=None, + bringup=False, + with_netplan=False, + ): if not expected_cfgs: - raise 
ValueError('expected_cfg must not be None') + raise ValueError("expected_cfg must not be None") tmpd = None - with mock.patch('cloudinit.net.netplan.available', - return_value=with_netplan): + with mock.patch( + "cloudinit.net.netplan.available", return_value=with_netplan + ): with self.reRooted(tmpd) as tmpd: apply_fn(config, bringup) @@ -728,10 +809,10 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase): self.assertEqual(0o644, get_mode(cfgpath, tmpd)) def netctl_path(self, iface): - return '/etc/netctl/%s' % iface + return "/etc/netctl/%s" % iface def netplan_path(self): - return '/etc/netplan/50-cloud-init.yaml' + return "/etc/netplan/50-cloud-init.yaml" def test_apply_network_config_v1_without_netplan(self): # Note that this is in fact an invalid netctl config: @@ -741,33 +822,40 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase): # still being used in absence of netplan, not the correctness of the # rendered netctl config. expected_cfgs = { - self.netctl_path('eth0'): dedent("""\ + self.netctl_path("eth0"): dedent( + """\ Address=192.168.1.5/255.255.255.0 Connection=ethernet DNS=() Gateway=192.168.1.254 IP=static Interface=eth0 - """), - self.netctl_path('eth1'): dedent("""\ + """ + ), + self.netctl_path("eth1"): dedent( + """\ Address=None/None Connection=ethernet DNS=() Gateway= IP=dhcp Interface=eth1 - """), + """ + ), } # ub_distro.apply_network_config(V1_NET_CFG, False) - self._apply_and_verify(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy(), - with_netplan=False) + self._apply_and_verify( + self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy(), + with_netplan=False, + ) def test_apply_network_config_v1_with_netplan(self): expected_cfgs = { - self.netplan_path(): dedent("""\ + self.netplan_path(): dedent( + """\ # generated by cloud-init network: version: 2 @@ -778,31 +866,32 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase): gateway4: 192.168.1.254 eth1: dhcp4: true - """), + """ + ), } with mock.patch( - 'cloudinit.net.netplan.get_devicelist', - return_value=[] + "cloudinit.net.netplan.get_devicelist", return_value=[] ): - self._apply_and_verify(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy(), - with_netplan=True) + self._apply_and_verify( + self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy(), + with_netplan=True, + ) class TestNetCfgDistroPhoton(TestNetCfgDistroBase): - def setUp(self): super(TestNetCfgDistroPhoton, self).setUp() - self.distro = self._get_distro('photon', renderers=['networkd']) + self.distro = self._get_distro("photon", renderers=["networkd"]) def create_conf_dict(self, contents): content_dict = {} for line in contents: if line: line = line.strip() - if line and re.search(r'^\[(.+)\]$', line): + if line and re.search(r"^\[(.+)\]$", line): content_dict[line] = [] key = line elif line: @@ -815,13 +904,14 @@ class TestNetCfgDistroPhoton(TestNetCfgDistroBase): for k, v in actual.items(): self.assertEqual(sorted(expected[k]), sorted(v)) - def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, - bringup=False): + def _apply_and_verify( + self, apply_fn, config, expected_cfgs=None, bringup=False + ): if not expected_cfgs: - raise ValueError('expected_cfg must not be None') + raise ValueError("expected_cfg must not be None") tmpd = None - with mock.patch('cloudinit.net.networkd.available') as m_avail: + with mock.patch("cloudinit.net.networkd.available") as m_avail: m_avail.return_value = True with 
self.reRooted(tmpd) as tmpd: apply_fn(config, bringup) @@ -833,10 +923,11 @@ class TestNetCfgDistroPhoton(TestNetCfgDistroBase): self.assertEqual(0o644, get_mode(cfgpath, tmpd)) def nwk_file_path(self, ifname): - return '/etc/systemd/network/10-cloud-init-%s.network' % ifname + return "/etc/systemd/network/10-cloud-init-%s.network" % ifname def net_cfg_1(self, ifname): - ret = """\ + ret = ( + """\ [Match] Name=%s [Network] @@ -844,48 +935,53 @@ class TestNetCfgDistroPhoton(TestNetCfgDistroBase): [Address] Address=192.168.1.5/24 [Route] - Gateway=192.168.1.254""" % ifname + Gateway=192.168.1.254""" + % ifname + ) return ret def net_cfg_2(self, ifname): - ret = """\ + ret = ( + """\ [Match] Name=%s [Network] - DHCP=ipv4""" % ifname + DHCP=ipv4""" + % ifname + ) return ret def test_photon_network_config_v1(self): - tmp = self.net_cfg_1('eth0').splitlines() + tmp = self.net_cfg_1("eth0").splitlines() expected_eth0 = self.create_conf_dict(tmp) - tmp = self.net_cfg_2('eth1').splitlines() + tmp = self.net_cfg_2("eth1").splitlines() expected_eth1 = self.create_conf_dict(tmp) expected_cfgs = { - self.nwk_file_path('eth0'): expected_eth0, - self.nwk_file_path('eth1'): expected_eth1, + self.nwk_file_path("eth0"): expected_eth0, + self.nwk_file_path("eth1"): expected_eth1, } - self._apply_and_verify(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs.copy()) + self._apply_and_verify( + self.distro.apply_network_config, V1_NET_CFG, expected_cfgs.copy() + ) def test_photon_network_config_v2(self): - tmp = self.net_cfg_1('eth7').splitlines() + tmp = self.net_cfg_1("eth7").splitlines() expected_eth7 = self.create_conf_dict(tmp) - tmp = self.net_cfg_2('eth9').splitlines() + tmp = self.net_cfg_2("eth9").splitlines() expected_eth9 = self.create_conf_dict(tmp) expected_cfgs = { - self.nwk_file_path('eth7'): expected_eth7, - self.nwk_file_path('eth9'): expected_eth9, + self.nwk_file_path("eth7"): expected_eth7, + self.nwk_file_path("eth9"): expected_eth9, } - self._apply_and_verify(self.distro.apply_network_config, - V2_NET_CFG, - expected_cfgs.copy()) + self._apply_and_verify( + self.distro.apply_network_config, V2_NET_CFG, expected_cfgs.copy() + ) def test_photon_network_config_v1_with_duplicates(self): expected = """\ @@ -902,15 +998,16 @@ class TestNetCfgDistroPhoton(TestNetCfgDistroBase): expected = self.create_conf_dict(expected.splitlines()) expected_cfgs = { - self.nwk_file_path('eth0'): expected, + self.nwk_file_path("eth0"): expected, } - self._apply_and_verify(self.distro.apply_network_config, - net_cfg, - expected_cfgs.copy()) + self._apply_and_verify( + self.distro.apply_network_config, net_cfg, expected_cfgs.copy() + ) def get_mode(path, target=None): return os.stat(subp.target_path(target, path)).st_mode & 0o777 + # vi: ts=4 expandtab diff --git a/tests/unittests/distros/test_networking.py b/tests/unittests/distros/test_networking.py index ec508f4d..635f6901 100644 --- a/tests/unittests/distros/test_networking.py +++ b/tests/unittests/distros/test_networking.py @@ -1,3 +1,6 @@ +# See https://docs.pytest.org/en/stable/example +# /parametrize.html#parametrizing-conditional-raising +from contextlib import ExitStack as does_not_raise from unittest import mock import pytest @@ -9,10 +12,6 @@ from cloudinit.distros.networking import ( Networking, ) -# See https://docs.pytest.org/en/stable/example -# /parametrize.html#parametrizing-conditional-raising -from contextlib import ExitStack as does_not_raise - @pytest.yield_fixture def generic_networking_cls(): @@ -35,7 +34,8 @@ def 
generic_networking_cls(): error = AssertionError("Unexpectedly used /sys in generic networking code") with mock.patch( - "cloudinit.net.get_sys_class_path", side_effect=error, + "cloudinit.net.get_sys_class_path", + side_effect=error, ): yield TestNetworking @@ -91,8 +91,10 @@ class TestLinuxNetworkingTrySetLinkUp: m_is_up.return_value = True is_success = LinuxNetworking().try_set_link_up(devname) - assert (mock.call(['ip', 'link', 'set', devname, 'up']) == - m_subp.call_args_list[-1]) + assert ( + mock.call(["ip", "link", "set", devname, "up"]) + == m_subp.call_args_list[-1] + ) assert is_success def test_calls_subp_return_false(self, m_subp, m_is_up): @@ -100,8 +102,10 @@ class TestLinuxNetworkingTrySetLinkUp: m_is_up.return_value = False is_success = LinuxNetworking().try_set_link_up(devname) - assert (mock.call(['ip', 'link', 'set', devname, 'up']) == - m_subp.call_args_list[-1]) + assert ( + mock.call(["ip", "link", "set", devname, "up"]) + == m_subp.call_args_list[-1] + ) assert not is_success @@ -153,7 +157,9 @@ class TestNetworkingWaitForPhysDevs: return netcfg def test_skips_settle_if_all_present( - self, generic_networking_cls, wait_for_physdevs_netcfg, + self, + generic_networking_cls, + wait_for_physdevs_netcfg, ): networking = generic_networking_cls() with mock.patch.object( @@ -169,7 +175,9 @@ class TestNetworkingWaitForPhysDevs: assert 0 == m_settle.call_count def test_calls_udev_settle_on_missing( - self, generic_networking_cls, wait_for_physdevs_netcfg, + self, + generic_networking_cls, + wait_for_physdevs_netcfg, ): networking = generic_networking_cls() with mock.patch.object( diff --git a/tests/unittests/distros/test_opensuse.py b/tests/unittests/distros/test_opensuse.py index 4ff26102..4a4b266f 100644 --- a/tests/unittests/distros/test_opensuse.py +++ b/tests/unittests/distros/test_opensuse.py @@ -6,7 +6,6 @@ from . import _get_distro class TestopenSUSE(CiTestCase): - def test_get_distro(self): distro = _get_distro("opensuse") - self.assertEqual(distro.osfamily, 'suse') + self.assertEqual(distro.osfamily, "suse") diff --git a/tests/unittests/distros/test_photon.py b/tests/unittests/distros/test_photon.py index 3858f723..fed30c2b 100644 --- a/tests/unittests/distros/test_photon.py +++ b/tests/unittests/distros/test_photon.py @@ -1,34 +1,34 @@ # This file is part of cloud-init. See LICENSE file for license information. -from . import _get_distro from cloudinit import util -from tests.unittests.helpers import mock -from tests.unittests.helpers import CiTestCase +from tests.unittests.helpers import CiTestCase, mock + +from . 
import _get_distro SYSTEM_INFO = { - 'paths': { - 'cloud_dir': '/var/lib/cloud/', - 'templates_dir': '/etc/cloud/templates/', + "paths": { + "cloud_dir": "/var/lib/cloud/", + "templates_dir": "/etc/cloud/templates/", }, - 'network': {'renderers': 'networkd'}, + "network": {"renderers": "networkd"}, } class TestPhoton(CiTestCase): with_logs = True - distro = _get_distro('photon', SYSTEM_INFO) - expected_log_line = 'Rely on PhotonOS default network config' + distro = _get_distro("photon", SYSTEM_INFO) + expected_log_line = "Rely on PhotonOS default network config" def test_network_renderer(self): - self.assertEqual(self.distro._cfg['network']['renderers'], 'networkd') + self.assertEqual(self.distro._cfg["network"]["renderers"], "networkd") def test_get_distro(self): - self.assertEqual(self.distro.osfamily, 'photon') + self.assertEqual(self.distro.osfamily, "photon") @mock.patch("cloudinit.distros.photon.subp.subp") def test_write_hostname(self, m_subp): - hostname = 'myhostname' - hostfile = self.tmp_path('previous-hostname') + hostname = "myhostname" + hostfile = self.tmp_path("previous-hostname") self.distro._write_hostname(hostname, hostfile) self.assertEqual(hostname, util.load_file(hostfile)) @@ -36,7 +36,7 @@ class TestPhoton(CiTestCase): self.assertEqual(ret, hostname) m_subp.return_value = (None, None) - hostfile += 'hostfile' + hostfile += "hostfile" self.distro._write_hostname(hostname, hostfile) m_subp.return_value = (hostname, None) @@ -44,25 +44,25 @@ class TestPhoton(CiTestCase): self.assertEqual(ret, hostname) self.logs.truncate(0) - m_subp.return_value = (None, 'bla') + m_subp.return_value = (None, "bla") self.distro._write_hostname(hostname, None) - self.assertIn('Error while setting hostname', self.logs.getvalue()) + self.assertIn("Error while setting hostname", self.logs.getvalue()) - @mock.patch('cloudinit.net.generate_fallback_config') + @mock.patch("cloudinit.net.generate_fallback_config") def test_fallback_netcfg(self, m_fallback_cfg): - key = 'disable_fallback_netcfg' + key = "disable_fallback_netcfg" # Don't use fallback if no setting given self.logs.truncate(0) - assert(self.distro.generate_fallback_config() is None) + assert self.distro.generate_fallback_config() is None self.assertIn(self.expected_log_line, self.logs.getvalue()) self.logs.truncate(0) self.distro._cfg[key] = True - assert(self.distro.generate_fallback_config() is None) + assert self.distro.generate_fallback_config() is None self.assertIn(self.expected_log_line, self.logs.getvalue()) self.logs.truncate(0) self.distro._cfg[key] = False - assert(self.distro.generate_fallback_config() is not None) + assert self.distro.generate_fallback_config() is not None self.assertNotIn(self.expected_log_line, self.logs.getvalue()) diff --git a/tests/unittests/distros/test_resolv.py b/tests/unittests/distros/test_resolv.py index e7971627..65e78101 100644 --- a/tests/unittests/distros/test_resolv.py +++ b/tests/unittests/distros/test_resolv.py @@ -1,18 +1,16 @@ # This file is part of cloud-init. See LICENSE file for license information. 
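# A minimal usage sketch of the resolv.conf parser exercised by the
# test_resolv.py hunk below; it assumes only the behavior these assertions
# encode (duplicate entries are ignored, search domains are capped at six,
# and an add_search_domain past the cap raises ValueError). Illustrative
# only, not part of the commit:
from cloudinit.distros.parsers import resolv_conf

sample = "search a.example b.example\nnameserver 10.0.0.1"
rp = resolv_conf.ResolvConf(sample)
rp.add_nameserver("10.0.0.2")      # listed once even if added again
rp.add_search_domain("c.example")  # ValueError once six domains exist
rp.local_domain = "home"           # rendered as a "domain home" line
print(str(rp))                     # serializes the updated resolv.conf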
-from cloudinit.distros.parsers import resolv_conf - -from tests.unittests.helpers import TestCase - import re +from cloudinit.distros.parsers import resolv_conf +from tests.unittests.helpers import TestCase -BASE_RESOLVE = ''' +BASE_RESOLVE = """ ; generated by /sbin/dhclient-script search blah.yahoo.com yahoo.com nameserver 10.15.44.14 nameserver 10.15.30.92 -''' +""" BASE_RESOLVE = BASE_RESOLVE.strip() @@ -27,39 +25,40 @@ class TestResolvHelper(TestCase): self.assertIsNone(rp.local_domain) rp.local_domain = "bob" - self.assertEqual('bob', rp.local_domain) - self.assertIn('domain bob', str(rp)) + self.assertEqual("bob", rp.local_domain) + self.assertIn("domain bob", str(rp)) def test_nameservers(self): rp = resolv_conf.ResolvConf(BASE_RESOLVE) - self.assertIn('10.15.44.14', rp.nameservers) - self.assertIn('10.15.30.92', rp.nameservers) - rp.add_nameserver('10.2') - self.assertIn('10.2', rp.nameservers) - self.assertIn('nameserver 10.2', str(rp)) - self.assertNotIn('10.3', rp.nameservers) + self.assertIn("10.15.44.14", rp.nameservers) + self.assertIn("10.15.30.92", rp.nameservers) + rp.add_nameserver("10.2") + self.assertIn("10.2", rp.nameservers) + self.assertIn("nameserver 10.2", str(rp)) + self.assertNotIn("10.3", rp.nameservers) self.assertEqual(len(rp.nameservers), 3) - rp.add_nameserver('10.2') - rp.add_nameserver('10.3') - self.assertNotIn('10.3', rp.nameservers) + rp.add_nameserver("10.2") + rp.add_nameserver("10.3") + self.assertNotIn("10.3", rp.nameservers) def test_search_domains(self): rp = resolv_conf.ResolvConf(BASE_RESOLVE) - self.assertIn('yahoo.com', rp.search_domains) - self.assertIn('blah.yahoo.com', rp.search_domains) - rp.add_search_domain('bbb.y.com') - self.assertIn('bbb.y.com', rp.search_domains) - self.assertTrue(re.search(r'search(.*)bbb.y.com(.*)', str(rp))) - self.assertIn('bbb.y.com', rp.search_domains) - rp.add_search_domain('bbb.y.com') + self.assertIn("yahoo.com", rp.search_domains) + self.assertIn("blah.yahoo.com", rp.search_domains) + rp.add_search_domain("bbb.y.com") + self.assertIn("bbb.y.com", rp.search_domains) + self.assertTrue(re.search(r"search(.*)bbb.y.com(.*)", str(rp))) + self.assertIn("bbb.y.com", rp.search_domains) + rp.add_search_domain("bbb.y.com") self.assertEqual(len(rp.search_domains), 3) - rp.add_search_domain('bbb2.y.com') + rp.add_search_domain("bbb2.y.com") self.assertEqual(len(rp.search_domains), 4) - rp.add_search_domain('bbb3.y.com') + rp.add_search_domain("bbb3.y.com") self.assertEqual(len(rp.search_domains), 5) - rp.add_search_domain('bbb4.y.com') + rp.add_search_domain("bbb4.y.com") self.assertEqual(len(rp.search_domains), 6) - self.assertRaises(ValueError, rp.add_search_domain, 'bbb5.y.com') + self.assertRaises(ValueError, rp.add_search_domain, "bbb5.y.com") self.assertEqual(len(rp.search_domains), 6) + # vi: ts=4 expandtab diff --git a/tests/unittests/distros/test_sles.py b/tests/unittests/distros/test_sles.py index 04514a19..66b8b13d 100644 --- a/tests/unittests/distros/test_sles.py +++ b/tests/unittests/distros/test_sles.py @@ -6,7 +6,6 @@ from . 
import _get_distro class TestSLES(CiTestCase): - def test_get_distro(self): distro = _get_distro("sles") - self.assertEqual(distro.osfamily, 'suse') + self.assertEqual(distro.osfamily, "suse") diff --git a/tests/unittests/distros/test_sysconfig.py b/tests/unittests/distros/test_sysconfig.py index 4368496d..d0979e17 100644 --- a/tests/unittests/distros/test_sysconfig.py +++ b/tests/unittests/distros/test_sysconfig.py @@ -3,22 +3,23 @@ import re from cloudinit.distros.parsers.sys_conf import SysConf - from tests.unittests.helpers import TestCase - # Lots of good examples @ # http://content.hccfl.edu/pollock/AUnix1/SysconfigFilesDesc.txt + class TestSysConfHelper(TestCase): # This function was added in 2.7, make it work for 2.6 def assertRegMatches(self, text, regexp): regexp = re.compile(regexp) - self.assertTrue(regexp.search(text), - msg="%s must match %s!" % (text, regexp.pattern)) + self.assertTrue( + regexp.search(text), + msg="%s must match %s!" % (text, regexp.pattern), + ) def test_parse_no_change(self): - contents = '''# A comment + contents = """# A comment USESMBAUTH=no KEYTABLE=/usr/lib/kbd/keytables/us.map SHORTDATE=$(date +%y:%m:%d:%H:%M) @@ -28,59 +29,64 @@ NETMASK0=255.255.255.0 LIST=$LOGROOT/incremental-list IPV6TO4_ROUTING='eth0-:0004::1/64 eth1-:0005::1/64' ETHTOOL_OPTS="-K ${DEVICE} tso on; -G ${DEVICE} rx 256 tx 256" -USEMD5=no''' +USEMD5=no""" conf = SysConf(contents.splitlines()) - self.assertEqual(conf['HOSTNAME'], 'blahblah') - self.assertEqual(conf['SHORTDATE'], '$(date +%y:%m:%d:%H:%M)') + self.assertEqual(conf["HOSTNAME"], "blahblah") + self.assertEqual(conf["SHORTDATE"], "$(date +%y:%m:%d:%H:%M)") # Should be unquoted - self.assertEqual(conf['ETHTOOL_OPTS'], ('-K ${DEVICE} tso on; ' - '-G ${DEVICE} rx 256 tx 256')) + self.assertEqual( + conf["ETHTOOL_OPTS"], + "-K ${DEVICE} tso on; -G ${DEVICE} rx 256 tx 256", + ) self.assertEqual(contents, str(conf)) def test_parse_shell_vars(self): - contents = 'USESMBAUTH=$XYZ' + contents = "USESMBAUTH=$XYZ" conf = SysConf(contents.splitlines()) self.assertEqual(contents, str(conf)) - conf = SysConf('') - conf['B'] = '${ZZ}d apples' + conf = SysConf("") + conf["B"] = "${ZZ}d apples" # Should be quoted self.assertEqual('B="${ZZ}d apples"', str(conf)) - conf = SysConf('') - conf['B'] = '$? d apples' + conf = SysConf("") + conf["B"] = "$? d apples" self.assertEqual('B="$? 
d apples"', str(conf)) contents = 'IPMI_WATCHDOG_OPTIONS="timeout=60"' conf = SysConf(contents.splitlines()) - self.assertEqual('IPMI_WATCHDOG_OPTIONS=timeout=60', str(conf)) + self.assertEqual("IPMI_WATCHDOG_OPTIONS=timeout=60", str(conf)) def test_parse_adjust(self): contents = 'IPV6TO4_ROUTING="eth0-:0004::1/64 eth1-:0005::1/64"' conf = SysConf(contents.splitlines()) # Should be unquoted - self.assertEqual('eth0-:0004::1/64 eth1-:0005::1/64', - conf['IPV6TO4_ROUTING']) - conf['IPV6TO4_ROUTING'] = "blah \tblah" + self.assertEqual( + "eth0-:0004::1/64 eth1-:0005::1/64", conf["IPV6TO4_ROUTING"] + ) + conf["IPV6TO4_ROUTING"] = "blah \tblah" contents2 = str(conf).strip() # Should be requoted due to whitespace - self.assertRegMatches(contents2, - r'IPV6TO4_ROUTING=[\']blah\s+blah[\']') + self.assertRegMatches( + contents2, r"IPV6TO4_ROUTING=[\']blah\s+blah[\']" + ) def test_parse_no_adjust_shell(self): - conf = SysConf(''.splitlines()) - conf['B'] = ' $(time)' + conf = SysConf("".splitlines()) + conf["B"] = " $(time)" contents = str(conf) - self.assertEqual('B= $(time)', contents) + self.assertEqual("B= $(time)", contents) def test_parse_empty(self): - contents = '' + contents = "" conf = SysConf(contents.splitlines()) - self.assertEqual('', str(conf).strip()) + self.assertEqual("", str(conf).strip()) def test_parse_add_new(self): - contents = 'BLAH=b' + contents = "BLAH=b" conf = SysConf(contents.splitlines()) - conf['Z'] = 'd' + conf["Z"] = "d" contents = str(conf) self.assertIn("Z=d", contents) self.assertIn("BLAH=b", contents) + # vi: ts=4 expandtab diff --git a/tests/unittests/distros/test_user_data_normalize.py b/tests/unittests/distros/test_user_data_normalize.py index bd8f2adb..67ea024b 100644 --- a/tests/unittests/distros/test_user_data_normalize.py +++ b/tests/unittests/distros/test_user_data_normalize.py @@ -2,251 +2,233 @@ from unittest import mock -from cloudinit import distros +from cloudinit import distros, helpers, settings from cloudinit.distros import ug_util -from cloudinit import helpers -from cloudinit import settings - from tests.unittests.helpers import TestCase - bcfg = { - 'name': 'bob', - 'plain_text_passwd': 'ubuntu', - 'home': "/home/ubuntu", - 'shell': "/bin/bash", - 'lock_passwd': True, - 'gecos': "Ubuntu", - 'groups': ["foo"] + "name": "bob", + "plain_text_passwd": "ubuntu", + "home": "/home/ubuntu", + "shell": "/bin/bash", + "lock_passwd": True, + "gecos": "Ubuntu", + "groups": ["foo"], } class TestUGNormalize(TestCase): - def setUp(self): super(TestUGNormalize, self).setUp() - self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy') + self.add_patch("cloudinit.util.system_is_snappy", "m_snappy") def _make_distro(self, dtype, def_user=None): cfg = dict(settings.CFG_BUILTIN) - cfg['system_info']['distro'] = dtype - paths = helpers.Paths(cfg['system_info']['paths']) + cfg["system_info"]["distro"] = dtype + paths = helpers.Paths(cfg["system_info"]["paths"]) distro_cls = distros.fetch(dtype) if def_user: - cfg['system_info']['default_user'] = def_user.copy() - distro = distro_cls(dtype, cfg['system_info'], paths) + cfg["system_info"]["default_user"] = def_user.copy() + distro = distro_cls(dtype, cfg["system_info"], paths) return distro def _norm(self, cfg, distro): return ug_util.normalize_users_groups(cfg, distro) def test_group_dict(self): - distro = self._make_distro('ubuntu') - g = {'groups': - [{'ubuntu': ['foo', 'bar'], - 'bob': 'users'}, - 'cloud-users', - {'bob': 'users2'}]} + distro = self._make_distro("ubuntu") + g = { + "groups": [ + {"ubuntu": 
["foo", "bar"], "bob": "users"}, + "cloud-users", + {"bob": "users2"}, + ] + } (_users, groups) = self._norm(g, distro) - self.assertIn('ubuntu', groups) - ub_members = groups['ubuntu'] - self.assertEqual(sorted(['foo', 'bar']), sorted(ub_members)) - self.assertIn('bob', groups) - b_members = groups['bob'] - self.assertEqual(sorted(['users', 'users2']), - sorted(b_members)) + self.assertIn("ubuntu", groups) + ub_members = groups["ubuntu"] + self.assertEqual(sorted(["foo", "bar"]), sorted(ub_members)) + self.assertIn("bob", groups) + b_members = groups["bob"] + self.assertEqual(sorted(["users", "users2"]), sorted(b_members)) def test_basic_groups(self): - distro = self._make_distro('ubuntu') + distro = self._make_distro("ubuntu") ug_cfg = { - 'groups': ['bob'], + "groups": ["bob"], } (users, groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', groups) + self.assertIn("bob", groups) self.assertEqual({}, users) def test_csv_groups(self): - distro = self._make_distro('ubuntu') + distro = self._make_distro("ubuntu") ug_cfg = { - 'groups': 'bob,joe,steve', + "groups": "bob,joe,steve", } (users, groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', groups) - self.assertIn('joe', groups) - self.assertIn('steve', groups) + self.assertIn("bob", groups) + self.assertIn("joe", groups) + self.assertIn("steve", groups) self.assertEqual({}, users) def test_more_groups(self): - distro = self._make_distro('ubuntu') - ug_cfg = { - 'groups': ['bob', 'joe', 'steve'] - } + distro = self._make_distro("ubuntu") + ug_cfg = {"groups": ["bob", "joe", "steve"]} (users, groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', groups) - self.assertIn('joe', groups) - self.assertIn('steve', groups) + self.assertIn("bob", groups) + self.assertIn("joe", groups) + self.assertIn("steve", groups) self.assertEqual({}, users) def test_member_groups(self): - distro = self._make_distro('ubuntu') + distro = self._make_distro("ubuntu") ug_cfg = { - 'groups': { - 'bob': ['s'], - 'joe': [], - 'steve': [], + "groups": { + "bob": ["s"], + "joe": [], + "steve": [], } } (users, groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', groups) - self.assertEqual(['s'], groups['bob']) - self.assertEqual([], groups['joe']) - self.assertIn('joe', groups) - self.assertIn('steve', groups) + self.assertIn("bob", groups) + self.assertEqual(["s"], groups["bob"]) + self.assertEqual([], groups["joe"]) + self.assertIn("joe", groups) + self.assertIn("steve", groups) self.assertEqual({}, users) def test_users_simple_dict(self): - distro = self._make_distro('ubuntu', bcfg) + distro = self._make_distro("ubuntu", bcfg) ug_cfg = { - 'users': { - 'default': True, + "users": { + "default": True, } } (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) + self.assertIn("bob", users) ug_cfg = { - 'users': { - 'default': 'yes', + "users": { + "default": "yes", } } (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) + self.assertIn("bob", users) ug_cfg = { - 'users': { - 'default': '1', + "users": { + "default": "1", } } (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) + self.assertIn("bob", users) def test_users_simple_dict_no(self): - distro = self._make_distro('ubuntu', bcfg) + distro = self._make_distro("ubuntu", bcfg) ug_cfg = { - 'users': { - 'default': False, + "users": { + "default": False, } } (users, _groups) = self._norm(ug_cfg, distro) self.assertEqual({}, users) ug_cfg = { - 'users': { - 'default': 'no', + "users": { + "default": "no", } } (users, _groups) 
= self._norm(ug_cfg, distro) self.assertEqual({}, users) def test_users_simple_csv(self): - distro = self._make_distro('ubuntu') + distro = self._make_distro("ubuntu") ug_cfg = { - 'users': 'joe,bob', + "users": "joe,bob", } (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('joe', users) - self.assertIn('bob', users) - self.assertEqual({'default': False}, users['joe']) - self.assertEqual({'default': False}, users['bob']) + self.assertIn("joe", users) + self.assertIn("bob", users) + self.assertEqual({"default": False}, users["joe"]) + self.assertEqual({"default": False}, users["bob"]) def test_users_simple(self): - distro = self._make_distro('ubuntu') + distro = self._make_distro("ubuntu") ug_cfg = { - 'users': [ - 'joe', - 'bob' - ], + "users": ["joe", "bob"], } (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('joe', users) - self.assertIn('bob', users) - self.assertEqual({'default': False}, users['joe']) - self.assertEqual({'default': False}, users['bob']) + self.assertIn("joe", users) + self.assertIn("bob", users) + self.assertEqual({"default": False}, users["joe"]) + self.assertEqual({"default": False}, users["bob"]) def test_users_old_user(self): - distro = self._make_distro('ubuntu', bcfg) - ug_cfg = { - 'user': 'zetta', - 'users': 'default' - } + distro = self._make_distro("ubuntu", bcfg) + ug_cfg = {"user": "zetta", "users": "default"} (users, _groups) = self._norm(ug_cfg, distro) - self.assertNotIn('bob', users) # Bob is not the default now, zetta is - self.assertIn('zetta', users) - self.assertTrue(users['zetta']['default']) - self.assertNotIn('default', users) - ug_cfg = { - 'user': 'zetta', - 'users': 'default, joe' - } + self.assertNotIn("bob", users) # Bob is not the default now, zetta is + self.assertIn("zetta", users) + self.assertTrue(users["zetta"]["default"]) + self.assertNotIn("default", users) + ug_cfg = {"user": "zetta", "users": "default, joe"} (users, _groups) = self._norm(ug_cfg, distro) - self.assertNotIn('bob', users) # Bob is not the default now, zetta is - self.assertIn('joe', users) - self.assertIn('zetta', users) - self.assertTrue(users['zetta']['default']) - self.assertNotIn('default', users) - ug_cfg = { - 'user': 'zetta', - 'users': ['bob', 'joe'] - } + self.assertNotIn("bob", users) # Bob is not the default now, zetta is + self.assertIn("joe", users) + self.assertIn("zetta", users) + self.assertTrue(users["zetta"]["default"]) + self.assertNotIn("default", users) + ug_cfg = {"user": "zetta", "users": ["bob", "joe"]} (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) - self.assertIn('joe', users) - self.assertIn('zetta', users) - self.assertTrue(users['zetta']['default']) + self.assertIn("bob", users) + self.assertIn("joe", users) + self.assertIn("zetta", users) + self.assertTrue(users["zetta"]["default"]) ug_cfg = { - 'user': 'zetta', - 'users': { - 'bob': True, - 'joe': True, - } + "user": "zetta", + "users": { + "bob": True, + "joe": True, + }, } (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) - self.assertIn('joe', users) - self.assertIn('zetta', users) - self.assertTrue(users['zetta']['default']) + self.assertIn("bob", users) + self.assertIn("joe", users) + self.assertIn("zetta", users) + self.assertTrue(users["zetta"]["default"]) ug_cfg = { - 'user': 'zetta', + "user": "zetta", } (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('zetta', users) + self.assertIn("zetta", users) ug_cfg = {} (users, groups) = self._norm(ug_cfg, distro) self.assertEqual({}, users) 
self.assertEqual({}, groups) def test_users_dict_default_additional(self): - distro = self._make_distro('ubuntu', bcfg) + distro = self._make_distro("ubuntu", bcfg) ug_cfg = { - 'users': [ - {'name': 'default', 'blah': True} - ], + "users": [{"name": "default", "blah": True}], } (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) - self.assertEqual(",".join(distro.get_default_user()['groups']), - users['bob']['groups']) - self.assertEqual(True, users['bob']['blah']) - self.assertEqual(True, users['bob']['default']) + self.assertIn("bob", users) + self.assertEqual( + ",".join(distro.get_default_user()["groups"]), + users["bob"]["groups"], + ) + self.assertEqual(True, users["bob"]["blah"]) + self.assertEqual(True, users["bob"]["default"]) def test_users_dict_extract(self): - distro = self._make_distro('ubuntu', bcfg) + distro = self._make_distro("ubuntu", bcfg) ug_cfg = { - 'users': [ - 'default', + "users": [ + "default", ], } (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) + self.assertIn("bob", users) (name, config) = ug_util.extract_default(users) - self.assertEqual(name, 'bob') + self.assertEqual(name, "bob") expected_config = {} def_config = None try: @@ -258,115 +240,126 @@ class TestUGNormalize(TestCase): expected_config.update(def_config) # Ignore these for now - expected_config.pop('name', None) - expected_config.pop('groups', None) - config.pop('groups', None) + expected_config.pop("name", None) + expected_config.pop("groups", None) + config.pop("groups", None) self.assertEqual(config, expected_config) def test_users_dict_default(self): - distro = self._make_distro('ubuntu', bcfg) + distro = self._make_distro("ubuntu", bcfg) ug_cfg = { - 'users': [ - 'default', + "users": [ + "default", ], } (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) - self.assertEqual(",".join(distro.get_default_user()['groups']), - users['bob']['groups']) - self.assertEqual(True, users['bob']['default']) + self.assertIn("bob", users) + self.assertEqual( + ",".join(distro.get_default_user()["groups"]), + users["bob"]["groups"], + ) + self.assertEqual(True, users["bob"]["default"]) def test_users_dict_trans(self): - distro = self._make_distro('ubuntu') + distro = self._make_distro("ubuntu") ug_cfg = { - 'users': [ - {'name': 'joe', - 'tr-me': True}, - {'name': 'bob'}, + "users": [ + {"name": "joe", "tr-me": True}, + {"name": "bob"}, ], } (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('joe', users) - self.assertIn('bob', users) - self.assertEqual({'tr_me': True, 'default': False}, users['joe']) - self.assertEqual({'default': False}, users['bob']) + self.assertIn("joe", users) + self.assertIn("bob", users) + self.assertEqual({"tr_me": True, "default": False}, users["joe"]) + self.assertEqual({"default": False}, users["bob"]) def test_users_dict(self): - distro = self._make_distro('ubuntu') + distro = self._make_distro("ubuntu") ug_cfg = { - 'users': [ - {'name': 'joe'}, - {'name': 'bob'}, + "users": [ + {"name": "joe"}, + {"name": "bob"}, ], } (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('joe', users) - self.assertIn('bob', users) - self.assertEqual({'default': False}, users['joe']) - self.assertEqual({'default': False}, users['bob']) + self.assertIn("joe", users) + self.assertIn("bob", users) + self.assertEqual({"default": False}, users["joe"]) + self.assertEqual({"default": False}, users["bob"]) - @mock.patch('cloudinit.subp.subp') + @mock.patch("cloudinit.subp.subp") def test_create_snap_user(self, 
mock_subp): - mock_subp.side_effect = [('{"username": "joe", "ssh-key-count": 1}\n', - '')] - distro = self._make_distro('ubuntu') + mock_subp.side_effect = [ + ('{"username": "joe", "ssh-key-count": 1}\n', "") + ] + distro = self._make_distro("ubuntu") ug_cfg = { - 'users': [ - {'name': 'joe', 'snapuser': 'joe@joe.com'}, + "users": [ + {"name": "joe", "snapuser": "joe@joe.com"}, ], } (users, _groups) = self._norm(ug_cfg, distro) for (user, config) in users.items(): - print('user=%s config=%s' % (user, config)) + print("user=%s config=%s" % (user, config)) username = distro.create_user(user, **config) - snapcmd = ['snap', 'create-user', '--sudoer', '--json', 'joe@joe.com'] + snapcmd = ["snap", "create-user", "--sudoer", "--json", "joe@joe.com"] mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd) - self.assertEqual(username, 'joe') + self.assertEqual(username, "joe") - @mock.patch('cloudinit.subp.subp') + @mock.patch("cloudinit.subp.subp") def test_create_snap_user_known(self, mock_subp): - mock_subp.side_effect = [('{"username": "joe", "ssh-key-count": 1}\n', - '')] - distro = self._make_distro('ubuntu') + mock_subp.side_effect = [ + ('{"username": "joe", "ssh-key-count": 1}\n', "") + ] + distro = self._make_distro("ubuntu") ug_cfg = { - 'users': [ - {'name': 'joe', 'snapuser': 'joe@joe.com', 'known': True}, + "users": [ + {"name": "joe", "snapuser": "joe@joe.com", "known": True}, ], } (users, _groups) = self._norm(ug_cfg, distro) for (user, config) in users.items(): - print('user=%s config=%s' % (user, config)) + print("user=%s config=%s" % (user, config)) username = distro.create_user(user, **config) - snapcmd = ['snap', 'create-user', '--sudoer', '--json', '--known', - 'joe@joe.com'] + snapcmd = [ + "snap", + "create-user", + "--sudoer", + "--json", + "--known", + "joe@joe.com", + ] mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd) - self.assertEqual(username, 'joe') - - @mock.patch('cloudinit.util.system_is_snappy') - @mock.patch('cloudinit.util.is_group') - @mock.patch('cloudinit.subp.subp') - def test_add_user_on_snappy_system(self, mock_subp, mock_isgrp, - mock_snappy): + self.assertEqual(username, "joe") + + @mock.patch("cloudinit.util.system_is_snappy") + @mock.patch("cloudinit.util.is_group") + @mock.patch("cloudinit.subp.subp") + def test_add_user_on_snappy_system( + self, mock_subp, mock_isgrp, mock_snappy + ): mock_isgrp.return_value = False mock_subp.return_value = True mock_snappy.return_value = True - distro = self._make_distro('ubuntu') + distro = self._make_distro("ubuntu") ug_cfg = { - 'users': [ - {'name': 'joe', 'groups': 'users', 'create_groups': True}, + "users": [ + {"name": "joe", "groups": "users", "create_groups": True}, ], } (users, _groups) = self._norm(ug_cfg, distro) for (user, config) in users.items(): - print('user=%s config=%s' % (user, config)) + print("user=%s config=%s" % (user, config)) distro.add_user(user, **config) - groupcmd = ['groupadd', 'users', '--extrausers'] - addcmd = ['useradd', 'joe', '--extrausers', '--groups', 'users', '-m'] + groupcmd = ["groupadd", "users", "--extrausers"] + addcmd = ["useradd", "joe", "--extrausers", "--groups", "users", "-m"] mock_subp.assert_any_call(groupcmd) mock_subp.assert_any_call(addcmd, logstring=addcmd) + # vi: ts=4 expandtab diff --git a/tests/unittests/filters/test_launch_index.py b/tests/unittests/filters/test_launch_index.py index 0b1a7067..679bdfc3 100644 --- a/tests/unittests/filters/test_launch_index.py +++ b/tests/unittests/filters/test_launch_index.py @@ -3,11 
+3,10 @@ import copy from itertools import filterfalse -from tests.unittests import helpers - -from cloudinit.filters import launch_index from cloudinit import user_data as ud from cloudinit import util +from cloudinit.filters import launch_index +from tests.unittests import helpers def count_messages(root): @@ -20,7 +19,6 @@ def count_messages(root): class TestLaunchFilter(helpers.ResourceUsingTestCase): - def assertCounts(self, message, expected_counts): orig_message = copy.deepcopy(message) for (index, count) in expected_counts.items(): @@ -54,7 +52,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase): return True def testMultiEmailIndex(self): - test_data = helpers.readResource('filter_cloud_multipart_2.email') + test_data = helpers.readResource("filter_cloud_multipart_2.email") ud_proc = ud.UserDataProcessor(self.getCloudPaths()) message = ud_proc.process(test_data) self.assertTrue(count_messages(message) > 0) @@ -69,7 +67,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase): self.assertCounts(message, expected_counts) def testHeaderEmailIndex(self): - test_data = helpers.readResource('filter_cloud_multipart_header.email') + test_data = helpers.readResource("filter_cloud_multipart_header.email") ud_proc = ud.UserDataProcessor(self.getCloudPaths()) message = ud_proc.process(test_data) self.assertTrue(count_messages(message) > 0) @@ -78,13 +76,13 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase): expected_counts = { 5: 1, -1: 0, - 'c': 1, + "c": 1, None: 1, } self.assertCounts(message, expected_counts) def testConfigEmailIndex(self): - test_data = helpers.readResource('filter_cloud_multipart_1.email') + test_data = helpers.readResource("filter_cloud_multipart_1.email") ud_proc = ud.UserDataProcessor(self.getCloudPaths()) message = ud_proc.process(test_data) self.assertTrue(count_messages(message) > 0) @@ -98,7 +96,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase): self.assertCounts(message, expected_counts) def testNoneIndex(self): - test_data = helpers.readResource('filter_cloud_multipart.yaml') + test_data = helpers.readResource("filter_cloud_multipart.yaml") ud_proc = ud.UserDataProcessor(self.getCloudPaths()) message = ud_proc.process(test_data) start_count = count_messages(message) @@ -107,7 +105,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase): self.assertTrue(self.equivalentMessage(message, filtered_message)) def testIndexes(self): - test_data = helpers.readResource('filter_cloud_multipart.yaml') + test_data = helpers.readResource("filter_cloud_multipart.yaml") ud_proc = ud.UserDataProcessor(self.getCloudPaths()) message = ud_proc.process(test_data) start_count = count_messages(message) @@ -126,10 +124,11 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase): # None should just give all back None: start_count, # Non ints should be ignored - 'c': start_count, + "c": start_count, # Strings should be converted - '1': 2, + "1": 2, } self.assertCounts(message, expected_counts) + # vi: ts=4 expandtab diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index e9afbd36..67fed8c9 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -1,7 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. 
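# The FilesystemMockingTestCase helpers in the hunk below patch functions
# such as util.write_file so their leading path arguments are rebased under
# a temporary root (retarget_many_wrapper / rebase_path). A reduced sketch
# of that idea with hypothetical stand-alone names, ignoring the am == -1
# "rebase every argument" case the real helper also handles:
import functools
import os

def rebase(path, new_root):
    # map an absolute path like /etc/hosts to <new_root>/etc/hosts
    return os.path.join(new_root, path.lstrip("/"))

def retarget(new_root, n_path_args, func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        args = list(args)
        for i in range(min(n_path_args, len(args))):
            if isinstance(args[i], str):
                args[i] = rebase(args[i], new_root)
        return func(*args, **kwargs)
    return wrapper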
import functools -import httpretty import io import logging import os @@ -12,21 +11,23 @@ import sys import tempfile import time import unittest -from pathlib import Path from contextlib import ExitStack, contextmanager +from pathlib import Path from unittest import mock from unittest.util import strclass +import httpretty + import cloudinit -from cloudinit.config.schema import ( - SchemaValidationError, validate_cloudconfig_schema) -from cloudinit import cloud -from cloudinit import distros +from cloudinit import cloud, distros from cloudinit import helpers as ch +from cloudinit import subp, util +from cloudinit.config.schema import ( + SchemaValidationError, + validate_cloudconfig_schema, +) from cloudinit.sources import DataSourceNone from cloudinit.templater import JINJA_AVAILABLE -from cloudinit import subp -from cloudinit import util _real_subp = subp.subp @@ -66,11 +67,11 @@ def retarget_many_wrapper(new_base, am, old_func): if isinstance(path, str): n_args[i] = rebase_path(path, new_base) return old_func(*n_args, **kwds) + return wrapper class TestCase(unittest.TestCase): - def reset_global_state(self): """Reset any global state to its original settings. @@ -93,13 +94,13 @@ class TestCase(unittest.TestCase): self.reset_global_state() def shortDescription(self): - return strclass(self.__class__) + '.' + self._testMethodName + return strclass(self.__class__) + "." + self._testMethodName def add_patch(self, target, attr, *args, **kwargs): """Patches specified target object and sets it as attr on test instance also schedules cleanup""" - if 'autospec' not in kwargs: - kwargs['autospec'] = True + if "autospec" not in kwargs: + kwargs["autospec"] = True m = mock.patch(target, *args, **kwargs) p = m.start() self.addCleanup(m.stop) @@ -108,7 +109,7 @@ class TestCase(unittest.TestCase): class CiTestCase(TestCase): """This is the preferred test case base class unless user - needs other test case classes below.""" + needs other test case classes below.""" # Subclass overrides for specific test behavior # Whether or not a unit test needs logfile setup @@ -131,7 +132,7 @@ class CiTestCase(TestCase): # Create a log handler so unit tests can search expected logs. self.logger = logging.getLogger() self.logs = io.StringIO() - formatter = logging.Formatter('%(levelname)s: %(message)s') + formatter = logging.Formatter("%(levelname)s: %(message)s") handler = logging.StreamHandler(self.logs) handler.setFormatter(formatter) self.old_handlers = self.logger.handlers @@ -142,12 +143,13 @@ class CiTestCase(TestCase): subp.subp = self._fake_subp def _fake_subp(self, *args, **kwargs): - if 'args' in kwargs: - cmd = kwargs['args'] + if "args" in kwargs: + cmd = kwargs["args"] else: if not args: raise TypeError( - "subp() missing 1 required positional argument: 'args'") + "subp() missing 1 required positional argument: 'args'" + ) cmd = args[0] if not isinstance(cmd, str): @@ -158,16 +160,19 @@ class CiTestCase(TestCase): if isinstance(self.allowed_subp, bool): pass_through = self.allowed_subp else: - pass_through = ( - (cmd in self.allowed_subp) or - (self.SUBP_SHELL_TRUE in self.allowed_subp and - kwargs.get('shell'))) + pass_through = (cmd in self.allowed_subp) or ( + self.SUBP_SHELL_TRUE in self.allowed_subp + and kwargs.get("shell") + ) if pass_through: return _real_subp(*args, **kwargs) raise Exception( - "called subp. set self.allowed_subp=True to allow\n subp(%s)" % - ', '.join([str(repr(a)) for a in args] + - ["%s=%s" % (k, repr(v)) for k, v in kwargs.items()])) + "called subp. 
set self.allowed_subp=True to allow\n subp(%s)" + % ", ".join( + [str(repr(a)) for a in args] + + ["%s=%s" % (k, repr(v)) for k, v in kwargs.items()] + ) + ) def tearDown(self): if self.with_logs: @@ -180,12 +185,12 @@ class CiTestCase(TestCase): def tmp_dir(self, dir=None, cleanup=True): # return a full path to a temporary directory that will be cleaned up. if dir is None: - tmpd = tempfile.mkdtemp( - prefix="ci-%s." % self.__class__.__name__) + tmpd = tempfile.mkdtemp(prefix="ci-%s." % self.__class__.__name__) else: tmpd = tempfile.mkdtemp(dir=dir) self.addCleanup( - functools.partial(shutil.rmtree, tmpd, ignore_errors=True)) + functools.partial(shutil.rmtree, tmpd, ignore_errors=True) + ) return tmpd def tmp_path(self, path, dir=None): @@ -208,7 +213,7 @@ class CiTestCase(TestCase): if not sys_cfg: sys_cfg = {} tmp_paths = {} - for var in ['templates_dir', 'run_dir', 'cloud_dir']: + for var in ["templates_dir", "run_dir", "cloud_dir"]: tmp_paths[var] = self.tmp_path(var, dir=self.new_root) util.ensure_dir(tmp_paths[var]) self.paths = ch.Paths(tmp_paths) @@ -221,13 +226,13 @@ class CiTestCase(TestCase): @classmethod def random_string(cls, length=8): - """ return a random lowercase string with default length of 8""" - return ''.join( - random.choice(string.ascii_lowercase) for _ in range(length)) + """return a random lowercase string with default length of 8""" + return "".join( + random.choice(string.ascii_lowercase) for _ in range(length) + ) class ResourceUsingTestCase(CiTestCase): - def setUp(self): super(ResourceUsingTestCase, self).setUp() self.resource_path = None @@ -235,14 +240,13 @@ class ResourceUsingTestCase(CiTestCase): def getCloudPaths(self, ds=None): tmpdir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, tmpdir) - cp = ch.Paths({'cloud_dir': tmpdir, - 'templates_dir': resourceLocation()}, - ds=ds) + cp = ch.Paths( + {"cloud_dir": tmpdir, "templates_dir": resourceLocation()}, ds=ds + ) return cp class FilesystemMockingTestCase(ResourceUsingTestCase): - def setUp(self): super(FilesystemMockingTestCase, self).setUp() self.patched_funcs = ExitStack() @@ -253,10 +257,10 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): def replicateTestRoot(self, example_root, target_root): real_root = resourceLocation() - real_root = os.path.join(real_root, 'roots', example_root) + real_root = os.path.join(real_root, "roots", example_root) for (dir_path, _dirnames, filenames) in os.walk(real_root): real_path = dir_path - make_path = rebase_path(real_path[len(real_root):], target_root) + make_path = rebase_path(real_path[len(real_root) :], target_root) util.ensure_dir(make_path) for f in filenames: real_path = util.abs_join(real_path, f) @@ -265,72 +269,89 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): def patchUtils(self, new_root): patch_funcs = { - util: [('write_file', 1), - ('append_file', 1), - ('load_file', 1), - ('ensure_dir', 1), - ('chmod', 1), - ('delete_dir_contents', 1), - ('del_file', 1), - ('sym_link', -1), - ('copy', -1)], + util: [ + ("write_file", 1), + ("append_file", 1), + ("load_file", 1), + ("ensure_dir", 1), + ("chmod", 1), + ("delete_dir_contents", 1), + ("del_file", 1), + ("sym_link", -1), + ("copy", -1), + ], } for (mod, funcs) in patch_funcs.items(): for (f, am) in funcs: func = getattr(mod, f) trap_func = retarget_many_wrapper(new_root, am, func) self.patched_funcs.enter_context( - mock.patch.object(mod, f, trap_func)) + mock.patch.object(mod, f, trap_func) + ) # Handle subprocess calls - func = getattr(subp, 'subp') + func = getattr(subp, 
"subp") def nsubp(*_args, **_kwargs): - return ('', '') + return ("", "") self.patched_funcs.enter_context( - mock.patch.object(subp, 'subp', nsubp)) + mock.patch.object(subp, "subp", nsubp) + ) def null_func(*_args, **_kwargs): return None - for f in ['chownbyid', 'chownbyname']: + for f in ["chownbyid", "chownbyname"]: self.patched_funcs.enter_context( - mock.patch.object(util, f, null_func)) + mock.patch.object(util, f, null_func) + ) def patchOS(self, new_root): patch_funcs = { - os.path: [('isfile', 1), ('exists', 1), - ('islink', 1), ('isdir', 1), ('lexists', 1)], - os: [('listdir', 1), ('mkdir', 1), - ('lstat', 1), ('symlink', 2), - ('stat', 1)] + os.path: [ + ("isfile", 1), + ("exists", 1), + ("islink", 1), + ("isdir", 1), + ("lexists", 1), + ], + os: [ + ("listdir", 1), + ("mkdir", 1), + ("lstat", 1), + ("symlink", 2), + ("stat", 1), + ], } - if hasattr(os, 'scandir'): + if hasattr(os, "scandir"): # py27 does not have scandir - patch_funcs[os].append(('scandir', 1)) + patch_funcs[os].append(("scandir", 1)) for (mod, funcs) in patch_funcs.items(): for f, nargs in funcs: func = getattr(mod, f) trap_func = retarget_many_wrapper(new_root, nargs, func) self.patched_funcs.enter_context( - mock.patch.object(mod, f, trap_func)) + mock.patch.object(mod, f, trap_func) + ) def patchOpen(self, new_root): trap_func = retarget_many_wrapper(new_root, 1, open) self.patched_funcs.enter_context( - mock.patch('builtins.open', trap_func) + mock.patch("builtins.open", trap_func) ) def patchStdoutAndStderr(self, stdout=None, stderr=None): if stdout is not None: self.patched_funcs.enter_context( - mock.patch.object(sys, 'stdout', stdout)) + mock.patch.object(sys, "stdout", stdout) + ) if stderr is not None: self.patched_funcs.enter_context( - mock.patch.object(sys, 'stderr', stderr)) + mock.patch.object(sys, "stderr", stderr) + ) def reRoot(self, root=None): if root is None: @@ -355,33 +376,32 @@ class HttprettyTestCase(CiTestCase): # And make sure reset and enable/disable are done. def setUp(self): - self.restore_proxy = os.environ.get('http_proxy') + self.restore_proxy = os.environ.get("http_proxy") if self.restore_proxy is not None: - del os.environ['http_proxy'] + del os.environ["http_proxy"] super(HttprettyTestCase, self).setUp() httpretty.HTTPretty.allow_net_connect = False httpretty.reset() httpretty.enable() # Stop the logging from HttpPretty so our logs don't get mixed # up with its logs - logging.getLogger('httpretty.core').setLevel(logging.CRITICAL) + logging.getLogger("httpretty.core").setLevel(logging.CRITICAL) def tearDown(self): httpretty.disable() httpretty.reset() if self.restore_proxy: - os.environ['http_proxy'] = self.restore_proxy + os.environ["http_proxy"] = self.restore_proxy super(HttprettyTestCase, self).tearDown() class SchemaTestCaseMixin(unittest.TestCase): - def assertSchemaValid(self, cfg, msg="Valid Schema failed validation."): """Assert the config is valid per self.schema. 
If there is only one top level key in the schema properties, then the cfg will be put under that key.""" - props = list(self.schema.get('properties')) + props = list(self.schema.get("properties")) # put cfg under top level key if there is only one in the schema if len(props) == 1: cfg = {props[0]: cfg} @@ -402,7 +422,7 @@ def populate_dir(path, files): if isinstance(content, bytes): fp.write(content) else: - fp.write(content.encode('utf-8')) + fp.write(content.encode("utf-8")) fp.close() ret.append(p) @@ -425,7 +445,7 @@ def dir2dict(startdir, prefix=None): for root, _dirs, files in os.walk(startdir): for fname in files: fpath = os.path.join(root, fname) - key = fpath[len(prefix):] + key = fpath[len(prefix) :] flist[key] = util.load_file(fpath) return flist @@ -443,16 +463,16 @@ def wrap_and_call(prefix, mocks, func, *args, **kwargs): return_value: return from 'func' """ - delim = '.' + delim = "." if prefix is None: - prefix = '' + prefix = "" prefix = prefix.rstrip(delim) unwraps = [] for fname, kw in mocks.items(): if prefix: fname = delim.join((prefix, fname)) if not isinstance(kw, dict): - kw = {'return_value': kw} + kw = {"return_value": kw} p = mock.patch(fname, **kw) p.start() unwraps.append(p) @@ -464,19 +484,20 @@ def wrap_and_call(prefix, mocks, func, *args, **kwargs): def resourceLocation(subname=None): - path = cloud_init_project_dir('tests/data') + path = cloud_init_project_dir("tests/data") if not subname: return path return os.path.join(path, subname) -def readResource(name, mode='r'): +def readResource(name, mode="r"): with open(resourceLocation(name), mode) as fh: return fh.read() try: import jsonschema + assert jsonschema # avoid pyflakes error F401: import unused _missing_jsonschema_dep = False except ImportError: @@ -485,7 +506,8 @@ except ImportError: def skipUnlessJsonSchema(): return skipIf( - _missing_jsonschema_dep, "No python-jsonschema dependency present.") + _missing_jsonschema_dep, "No python-jsonschema dependency present." + ) def skipUnlessJinja(): @@ -497,13 +519,17 @@ def skipIfJinja(): # older versions of mock do not have the useful 'assert_not_called' -if not hasattr(mock.Mock, 'assert_not_called'): +if not hasattr(mock.Mock, "assert_not_called"): + def __mock_assert_not_called(mmock): if mmock.call_count != 0: - msg = ("[citest] Expected '%s' to not have been called. " - "Called %s times." % - (mmock._mock_name or 'mock', mmock.call_count)) + msg = ( + "[citest] Expected '%s' to not have been called. " + "Called %s times." + % (mmock._mock_name or "mock", mmock.call_count) + ) raise AssertionError(msg) + mock.Mock.assert_not_called = __mock_assert_not_called @@ -524,4 +550,5 @@ def cloud_init_project_dir(sub_path: str) -> str: """ return str(get_top_level_dir() / sub_path) + # vi: ts=4 expandtab diff --git a/tests/unittests/net/test_dhcp.py b/tests/unittests/net/test_dhcp.py index d3da3981..876873d5 100644 --- a/tests/unittests/net/test_dhcp.py +++ b/tests/unittests/net/test_dhcp.py @@ -1,44 +1,54 @@ # This file is part of cloud-init. See LICENSE file for license information. 
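# The parse_static_routes tests below decode RFC 3442 classless static
# routes. Worked example, using the tests' own sample: in
# "32,169,254,169,254,130,56,248,255" the first octet is the prefix length
# (32), followed by ceil(32/8) = 4 significant destination octets
# (169.254.169.254) and 4 gateway octets (130.56.248.255), giving
# ("169.254.169.254/32", "130.56.248.255"). A minimal decoder sketch for
# the dotted-quad form (illustrative; not the module's implementation):
def decode_rfc3442(tokens):
    vals = [int(t) for t in tokens.rstrip(";").split(",")]
    routes = []
    while vals:
        plen = vals.pop(0)
        n = (plen + 7) // 8              # significant destination octets
        dest = vals[:n] + [0] * (4 - n)  # zero-pad to a full address
        gateway = vals[n:n + 4]
        vals = vals[n + 4:]
        routes.append(
            ("%d.%d.%d.%d/%d" % (*dest, plen),
             "%d.%d.%d.%d" % tuple(gateway))
        )
    return routes

# decode_rfc3442("0,130,56,240,1") == [("0.0.0.0/0", "130.56.240.1")]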
-import httpretty import os import signal from textwrap import dedent +import httpretty + import cloudinit.net as net from cloudinit.net.dhcp import ( - InvalidDHCPLeaseFileError, maybe_perform_dhcp_discovery, - parse_dhcp_lease_file, dhcp_discovery, networkd_load_leases, - parse_static_routes) + InvalidDHCPLeaseFileError, + dhcp_discovery, + maybe_perform_dhcp_discovery, + networkd_load_leases, + parse_dhcp_lease_file, + parse_static_routes, +) from cloudinit.util import ensure_file, write_file from tests.unittests.helpers import ( - CiTestCase, HttprettyTestCase, mock, populate_dir, wrap_and_call) + CiTestCase, + HttprettyTestCase, + mock, + populate_dir, + wrap_and_call, +) class TestParseDHCPLeasesFile(CiTestCase): - def test_parse_empty_lease_file_errors(self): """parse_dhcp_lease_file errors when file content is empty.""" - empty_file = self.tmp_path('leases') + empty_file = self.tmp_path("leases") ensure_file(empty_file) with self.assertRaises(InvalidDHCPLeaseFileError) as context_manager: parse_dhcp_lease_file(empty_file) error = context_manager.exception - self.assertIn('Cannot parse empty dhcp lease file', str(error)) + self.assertIn("Cannot parse empty dhcp lease file", str(error)) def test_parse_malformed_lease_file_content_errors(self): """parse_dhcp_lease_file errors when file content isn't dhcp leases.""" - non_lease_file = self.tmp_path('leases') - write_file(non_lease_file, 'hi mom.') + non_lease_file = self.tmp_path("leases") + write_file(non_lease_file, "hi mom.") with self.assertRaises(InvalidDHCPLeaseFileError) as context_manager: parse_dhcp_lease_file(non_lease_file) error = context_manager.exception - self.assertIn('Cannot parse dhcp lease file', str(error)) + self.assertIn("Cannot parse dhcp lease file", str(error)) def test_parse_multiple_leases(self): """parse_dhcp_lease_file returns a list of all leases within.""" - lease_file = self.tmp_path('leases') - content = dedent(""" + lease_file = self.tmp_path("leases") + content = dedent( + """ lease { interface "wlp3s0"; fixed-address 192.168.2.74; @@ -55,26 +65,36 @@ class TestParseDHCPLeasesFile(CiTestCase): option subnet-mask 255.255.255.0; option routers 192.168.2.1; } - """) + """ + ) expected = [ - {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', - 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', - 'renew': '4 2017/07/27 18:02:30', - 'expire': '5 2017/07/28 07:08:15', - 'filename': 'http://192.168.2.50/boot.php?mac=${netX}'}, - {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', - 'filename': 'http://192.168.2.50/boot.php?mac=${netX}', - 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}] + { + "interface": "wlp3s0", + "fixed-address": "192.168.2.74", + "subnet-mask": "255.255.255.0", + "routers": "192.168.2.1", + "renew": "4 2017/07/27 18:02:30", + "expire": "5 2017/07/28 07:08:15", + "filename": "http://192.168.2.50/boot.php?mac=${netX}", + }, + { + "interface": "wlp3s0", + "fixed-address": "192.168.2.74", + "filename": "http://192.168.2.50/boot.php?mac=${netX}", + "subnet-mask": "255.255.255.0", + "routers": "192.168.2.1", + }, + ] write_file(lease_file, content) self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file)) class TestDHCPRFC3442(CiTestCase): - def test_parse_lease_finds_rfc3442_classless_static_routes(self): """parse_dhcp_lease_file returns rfc3442-classless-static-routes.""" - lease_file = self.tmp_path('leases') - content = dedent(""" + lease_file = self.tmp_path("leases") + content = dedent( + """ lease { interface "wlp3s0"; fixed-address 192.168.2.74; @@ 
-84,13 +104,19 @@ class TestDHCPRFC3442(CiTestCase): renew 4 2017/07/27 18:02:30; expire 5 2017/07/28 07:08:15; } - """) + """ + ) expected = [ - {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', - 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', - 'rfc3442-classless-static-routes': '0,130,56,240,1', - 'renew': '4 2017/07/27 18:02:30', - 'expire': '5 2017/07/28 07:08:15'}] + { + "interface": "wlp3s0", + "fixed-address": "192.168.2.74", + "subnet-mask": "255.255.255.0", + "routers": "192.168.2.1", + "rfc3442-classless-static-routes": "0,130,56,240,1", + "renew": "4 2017/07/27 18:02:30", + "expire": "5 2017/07/28 07:08:15", + } + ] write_file(lease_file, content) self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file)) @@ -99,8 +125,9 @@ class TestDHCPRFC3442(CiTestCase): parse_dhcp_lease_file returns classless-static-routes for Centos lease format. """ - lease_file = self.tmp_path('leases') - content = dedent(""" + lease_file = self.tmp_path("leases") + content = dedent( + """ lease { interface "wlp3s0"; fixed-address 192.168.2.74; @@ -110,61 +137,79 @@ class TestDHCPRFC3442(CiTestCase): renew 4 2017/07/27 18:02:30; expire 5 2017/07/28 07:08:15; } - """) + """ + ) expected = [ - {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', - 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', - 'classless-static-routes': '0 130.56.240.1', - 'renew': '4 2017/07/27 18:02:30', - 'expire': '5 2017/07/28 07:08:15'}] + { + "interface": "wlp3s0", + "fixed-address": "192.168.2.74", + "subnet-mask": "255.255.255.0", + "routers": "192.168.2.1", + "classless-static-routes": "0 130.56.240.1", + "renew": "4 2017/07/27 18:02:30", + "expire": "5 2017/07/28 07:08:15", + } + ] write_file(lease_file, content) self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file)) - @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') - @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network") + @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") def test_obtain_lease_parses_static_routes(self, m_maybe, m_ipv4): """EphemeralDHPCv4 parses rfc3442 routes for EphemeralIPv4Network""" lease = [ - {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', - 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', - 'rfc3442-classless-static-routes': '0,130,56,240,1', - 'renew': '4 2017/07/27 18:02:30', - 'expire': '5 2017/07/28 07:08:15'}] + { + "interface": "wlp3s0", + "fixed-address": "192.168.2.74", + "subnet-mask": "255.255.255.0", + "routers": "192.168.2.1", + "rfc3442-classless-static-routes": "0,130,56,240,1", + "renew": "4 2017/07/27 18:02:30", + "expire": "5 2017/07/28 07:08:15", + } + ] m_maybe.return_value = lease eph = net.dhcp.EphemeralDHCPv4() eph.obtain_lease() expected_kwargs = { - 'interface': 'wlp3s0', - 'ip': '192.168.2.74', - 'prefix_or_mask': '255.255.255.0', - 'broadcast': '192.168.2.255', - 'static_routes': [('0.0.0.0/0', '130.56.240.1')], - 'router': '192.168.2.1'} + "interface": "wlp3s0", + "ip": "192.168.2.74", + "prefix_or_mask": "255.255.255.0", + "broadcast": "192.168.2.255", + "static_routes": [("0.0.0.0/0", "130.56.240.1")], + "router": "192.168.2.1", + } m_ipv4.assert_called_with(**expected_kwargs) - @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') - @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network") + @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") def 
test_obtain_centos_lease_parses_static_routes(self, m_maybe, m_ipv4): """ EphemeralDHPCv4 parses rfc3442 routes for EphemeralIPv4Network for Centos Lease format """ lease = [ - {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', - 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', - 'classless-static-routes': '0 130.56.240.1', - 'renew': '4 2017/07/27 18:02:30', - 'expire': '5 2017/07/28 07:08:15'}] + { + "interface": "wlp3s0", + "fixed-address": "192.168.2.74", + "subnet-mask": "255.255.255.0", + "routers": "192.168.2.1", + "classless-static-routes": "0 130.56.240.1", + "renew": "4 2017/07/27 18:02:30", + "expire": "5 2017/07/28 07:08:15", + } + ] m_maybe.return_value = lease eph = net.dhcp.EphemeralDHCPv4() eph.obtain_lease() expected_kwargs = { - 'interface': 'wlp3s0', - 'ip': '192.168.2.74', - 'prefix_or_mask': '255.255.255.0', - 'broadcast': '192.168.2.255', - 'static_routes': [('0.0.0.0/0', '130.56.240.1')], - 'router': '192.168.2.1'} + "interface": "wlp3s0", + "ip": "192.168.2.74", + "prefix_or_mask": "255.255.255.0", + "broadcast": "192.168.2.255", + "static_routes": [("0.0.0.0/0", "130.56.240.1")], + "router": "192.168.2.1", + } m_ipv4.assert_called_with(**expected_kwargs) @@ -185,34 +230,45 @@ class TestDHCPParseStaticRoutes(CiTestCase): def test_parse_static_routes_single_ip(self): rfc3442 = "32,169,254,169,254,130,56,248,255" - self.assertEqual([('169.254.169.254/32', '130.56.248.255')], - parse_static_routes(rfc3442)) + self.assertEqual( + [("169.254.169.254/32", "130.56.248.255")], + parse_static_routes(rfc3442), + ) def test_parse_static_routes_single_ip_handles_trailing_semicolon(self): rfc3442 = "32,169,254,169,254,130,56,248,255;" - self.assertEqual([('169.254.169.254/32', '130.56.248.255')], - parse_static_routes(rfc3442)) + self.assertEqual( + [("169.254.169.254/32", "130.56.248.255")], + parse_static_routes(rfc3442), + ) def test_parse_static_routes_default_route(self): rfc3442 = "0,130,56,240,1" - self.assertEqual([('0.0.0.0/0', '130.56.240.1')], - parse_static_routes(rfc3442)) + self.assertEqual( + [("0.0.0.0/0", "130.56.240.1")], parse_static_routes(rfc3442) + ) def test_unspecified_gateway(self): rfc3442 = "32,169,254,169,254,0,0,0,0" - self.assertEqual([('169.254.169.254/32', '0.0.0.0')], - parse_static_routes(rfc3442)) + self.assertEqual( + [("169.254.169.254/32", "0.0.0.0")], parse_static_routes(rfc3442) + ) def test_parse_static_routes_class_c_b_a(self): class_c = "24,192,168,74,192,168,0,4" class_b = "16,172,16,172,16,0,4" class_a = "8,10,10,0,0,4" rfc3442 = ",".join([class_c, class_b, class_a]) - self.assertEqual(sorted([ - ("192.168.74.0/24", "192.168.0.4"), - ("172.16.0.0/16", "172.16.0.4"), - ("10.0.0.0/8", "10.0.0.4") - ]), sorted(parse_static_routes(rfc3442))) + self.assertEqual( + sorted( + [ + ("192.168.74.0/24", "192.168.0.4"), + ("172.16.0.0/16", "172.16.0.4"), + ("10.0.0.0/8", "10.0.0.4"), + ] + ), + sorted(parse_static_routes(rfc3442)), + ) def test_parse_static_routes_logs_error_truncated(self): bad_rfc3442 = { @@ -233,261 +289,341 @@ class TestDHCPParseStaticRoutes(CiTestCase): class_b = "16,172,16,172,16,0,4" class_a_error = "8,10,10,0,0" rfc3442 = ",".join([class_c, class_b, class_a_error]) - self.assertEqual(sorted([ - ("192.168.74.0/24", "192.168.0.4"), - ("172.16.0.0/16", "172.16.0.4"), - ]), sorted(parse_static_routes(rfc3442))) + self.assertEqual( + sorted( + [ + ("192.168.74.0/24", "192.168.0.4"), + ("172.16.0.0/16", "172.16.0.4"), + ] + ), + sorted(parse_static_routes(rfc3442)), + ) logs = self.logs.getvalue() 
self.assertIn(rfc3442, logs.splitlines()[0]) def test_redhat_format(self): redhat_format = "24.191.168.128 192.168.128.1,0 192.168.128.1" - self.assertEqual(sorted([ - ("191.168.128.0/24", "192.168.128.1"), - ("0.0.0.0/0", "192.168.128.1") - ]), sorted(parse_static_routes(redhat_format))) + self.assertEqual( + sorted( + [ + ("191.168.128.0/24", "192.168.128.1"), + ("0.0.0.0/0", "192.168.128.1"), + ] + ), + sorted(parse_static_routes(redhat_format)), + ) def test_redhat_format_with_a_space_too_much_after_comma(self): redhat_format = "24.191.168.128 192.168.128.1, 0 192.168.128.1" - self.assertEqual(sorted([ - ("191.168.128.0/24", "192.168.128.1"), - ("0.0.0.0/0", "192.168.128.1") - ]), sorted(parse_static_routes(redhat_format))) + self.assertEqual( + sorted( + [ + ("191.168.128.0/24", "192.168.128.1"), + ("0.0.0.0/0", "192.168.128.1"), + ] + ), + sorted(parse_static_routes(redhat_format)), + ) class TestDHCPDiscoveryClean(CiTestCase): with_logs = True - @mock.patch('cloudinit.net.dhcp.find_fallback_nic') + @mock.patch("cloudinit.net.dhcp.find_fallback_nic") def test_no_fallback_nic_found(self, m_fallback_nic): """Log and do nothing when nic is absent and no fallback is found.""" m_fallback_nic.return_value = None # No fallback nic found self.assertEqual([], maybe_perform_dhcp_discovery()) self.assertIn( - 'Skip dhcp_discovery: Unable to find fallback nic.', - self.logs.getvalue()) + "Skip dhcp_discovery: Unable to find fallback nic.", + self.logs.getvalue(), + ) def test_provided_nic_does_not_exist(self): """When the provided nic doesn't exist, log a message and no-op.""" - self.assertEqual([], maybe_perform_dhcp_discovery('idontexist')) + self.assertEqual([], maybe_perform_dhcp_discovery("idontexist")) self.assertIn( - 'Skip dhcp_discovery: nic idontexist not found in get_devicelist.', - self.logs.getvalue()) + "Skip dhcp_discovery: nic idontexist not found in get_devicelist.", + self.logs.getvalue(), + ) - @mock.patch('cloudinit.net.dhcp.subp.which') - @mock.patch('cloudinit.net.dhcp.find_fallback_nic') + @mock.patch("cloudinit.net.dhcp.subp.which") + @mock.patch("cloudinit.net.dhcp.find_fallback_nic") def test_absent_dhclient_command(self, m_fallback, m_which): """When dhclient doesn't exist in the OS, log the issue and no-op.""" - m_fallback.return_value = 'eth9' + m_fallback.return_value = "eth9" m_which.return_value = None # dhclient isn't found self.assertEqual([], maybe_perform_dhcp_discovery()) self.assertIn( - 'Skip dhclient configuration: No dhclient command found.', - self.logs.getvalue()) - - @mock.patch('cloudinit.temp_utils.os.getuid') - @mock.patch('cloudinit.net.dhcp.dhcp_discovery') - @mock.patch('cloudinit.net.dhcp.subp.which') - @mock.patch('cloudinit.net.dhcp.find_fallback_nic') + "Skip dhclient configuration: No dhclient command found.", + self.logs.getvalue(), + ) + + @mock.patch("cloudinit.temp_utils.os.getuid") + @mock.patch("cloudinit.net.dhcp.dhcp_discovery") + @mock.patch("cloudinit.net.dhcp.subp.which") + @mock.patch("cloudinit.net.dhcp.find_fallback_nic") def test_dhclient_run_with_tmpdir(self, m_fback, m_which, m_dhcp, m_uid): """maybe_perform_dhcp_discovery passes tmpdir to dhcp_discovery.""" m_uid.return_value = 0 # Fake root user for tmpdir - m_fback.return_value = 'eth9' - m_which.return_value = '/sbin/dhclient' - m_dhcp.return_value = {'address': '192.168.2.2'} + m_fback.return_value = "eth9" + m_which.return_value = "/sbin/dhclient" + m_dhcp.return_value = {"address": "192.168.2.2"} retval = wrap_and_call( - 'cloudinit.temp_utils', - {'_TMPDIR': {'new': 
None}, - 'os.getuid': 0}, - maybe_perform_dhcp_discovery) - self.assertEqual({'address': '192.168.2.2'}, retval) + "cloudinit.temp_utils", + {"_TMPDIR": {"new": None}, "os.getuid": 0}, + maybe_perform_dhcp_discovery, + ) + self.assertEqual({"address": "192.168.2.2"}, retval) self.assertEqual( - 1, m_dhcp.call_count, 'dhcp_discovery not called once') + 1, m_dhcp.call_count, "dhcp_discovery not called once" + ) call = m_dhcp.call_args_list[0] - self.assertEqual('/sbin/dhclient', call[0][0]) - self.assertEqual('eth9', call[0][1]) - self.assertIn('/var/tmp/cloud-init/cloud-init-dhcp-', call[0][2]) - - @mock.patch('time.sleep', mock.MagicMock()) - @mock.patch('cloudinit.net.dhcp.os.kill') - @mock.patch('cloudinit.net.dhcp.subp.subp') - def test_dhcp_discovery_run_in_sandbox_warns_invalid_pid(self, m_subp, - m_kill): + self.assertEqual("/sbin/dhclient", call[0][0]) + self.assertEqual("eth9", call[0][1]) + self.assertIn("/var/tmp/cloud-init/cloud-init-dhcp-", call[0][2]) + + @mock.patch("time.sleep", mock.MagicMock()) + @mock.patch("cloudinit.net.dhcp.os.kill") + @mock.patch("cloudinit.net.dhcp.subp.subp") + def test_dhcp_discovery_run_in_sandbox_warns_invalid_pid( + self, m_subp, m_kill + ): """dhcp_discovery logs a warning when pidfile contains invalid content. Lease processing still occurs and no proc kill is attempted. """ - m_subp.return_value = ('', '') + m_subp.return_value = ("", "") tmpdir = self.tmp_dir() - dhclient_script = os.path.join(tmpdir, 'dhclient.orig') - script_content = '#!/bin/bash\necho fake-dhclient' + dhclient_script = os.path.join(tmpdir, "dhclient.orig") + script_content = "#!/bin/bash\necho fake-dhclient" write_file(dhclient_script, script_content, mode=0o755) - write_file(self.tmp_path('dhclient.pid', tmpdir), '') # Empty pid '' - lease_content = dedent(""" + write_file(self.tmp_path("dhclient.pid", tmpdir), "") # Empty pid '' + lease_content = dedent( + """ lease { interface "eth9"; fixed-address 192.168.2.74; option subnet-mask 255.255.255.0; option routers 192.168.2.1; } - """) - write_file(self.tmp_path('dhcp.leases', tmpdir), lease_content) + """ + ) + write_file(self.tmp_path("dhcp.leases", tmpdir), lease_content) self.assertCountEqual( - [{'interface': 'eth9', 'fixed-address': '192.168.2.74', - 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}], - dhcp_discovery(dhclient_script, 'eth9', tmpdir)) + [ + { + "interface": "eth9", + "fixed-address": "192.168.2.74", + "subnet-mask": "255.255.255.0", + "routers": "192.168.2.1", + } + ], + dhcp_discovery(dhclient_script, "eth9", tmpdir), + ) self.assertIn( "dhclient(pid=, parentpid=unknown) failed " "to daemonize after 10.0 seconds", - self.logs.getvalue()) + self.logs.getvalue(), + ) m_kill.assert_not_called() - @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') - @mock.patch('cloudinit.net.dhcp.os.kill') - @mock.patch('cloudinit.net.dhcp.util.wait_for_files') - @mock.patch('cloudinit.net.dhcp.subp.subp') - def test_dhcp_discovery_run_in_sandbox_waits_on_lease_and_pid(self, - m_subp, - m_wait, - m_kill, - m_getppid): + @mock.patch("cloudinit.net.dhcp.util.get_proc_ppid") + @mock.patch("cloudinit.net.dhcp.os.kill") + @mock.patch("cloudinit.net.dhcp.util.wait_for_files") + @mock.patch("cloudinit.net.dhcp.subp.subp") + def test_dhcp_discovery_run_in_sandbox_waits_on_lease_and_pid( + self, m_subp, m_wait, m_kill, m_getppid + ): """dhcp_discovery waits for the presence of pidfile and dhcp.leases.""" - m_subp.return_value = ('', '') + m_subp.return_value = ("", "") tmpdir = self.tmp_dir() - dhclient_script = 
os.path.join(tmpdir, 'dhclient.orig') - script_content = '#!/bin/bash\necho fake-dhclient' + dhclient_script = os.path.join(tmpdir, "dhclient.orig") + script_content = "#!/bin/bash\necho fake-dhclient" write_file(dhclient_script, script_content, mode=0o755) # Don't create pid or leases file - pidfile = self.tmp_path('dhclient.pid', tmpdir) - leasefile = self.tmp_path('dhcp.leases', tmpdir) + pidfile = self.tmp_path("dhclient.pid", tmpdir) + leasefile = self.tmp_path("dhcp.leases", tmpdir) m_wait.return_value = [pidfile] # Return the missing pidfile wait for m_getppid.return_value = 1 # Indicate that dhclient has daemonized - self.assertEqual([], dhcp_discovery(dhclient_script, 'eth9', tmpdir)) + self.assertEqual([], dhcp_discovery(dhclient_script, "eth9", tmpdir)) self.assertEqual( mock.call([pidfile, leasefile], maxwait=5, naplen=0.01), - m_wait.call_args_list[0]) + m_wait.call_args_list[0], + ) self.assertIn( - 'WARNING: dhclient did not produce expected files: dhclient.pid', - self.logs.getvalue()) + "WARNING: dhclient did not produce expected files: dhclient.pid", + self.logs.getvalue(), + ) m_kill.assert_not_called() - @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') - @mock.patch('cloudinit.net.dhcp.os.kill') - @mock.patch('cloudinit.net.dhcp.subp.subp') + @mock.patch("cloudinit.net.dhcp.util.get_proc_ppid") + @mock.patch("cloudinit.net.dhcp.os.kill") + @mock.patch("cloudinit.net.dhcp.subp.subp") def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill, m_getppid): """dhcp_discovery brings up the interface and runs dhclient. It also returns the parsed dhcp.leases file generated in the sandbox. """ - m_subp.return_value = ('', '') + m_subp.return_value = ("", "") tmpdir = self.tmp_dir() - dhclient_script = os.path.join(tmpdir, 'dhclient.orig') - script_content = '#!/bin/bash\necho fake-dhclient' + dhclient_script = os.path.join(tmpdir, "dhclient.orig") + script_content = "#!/bin/bash\necho fake-dhclient" write_file(dhclient_script, script_content, mode=0o755) - lease_content = dedent(""" + lease_content = dedent( + """ lease { interface "eth9"; fixed-address 192.168.2.74; option subnet-mask 255.255.255.0; option routers 192.168.2.1; } - """) - lease_file = os.path.join(tmpdir, 'dhcp.leases') + """ + ) + lease_file = os.path.join(tmpdir, "dhcp.leases") write_file(lease_file, lease_content) - pid_file = os.path.join(tmpdir, 'dhclient.pid') + pid_file = os.path.join(tmpdir, "dhclient.pid") my_pid = 1 write_file(pid_file, "%d\n" % my_pid) m_getppid.return_value = 1 # Indicate that dhclient has daemonized self.assertCountEqual( - [{'interface': 'eth9', 'fixed-address': '192.168.2.74', - 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}], - dhcp_discovery(dhclient_script, 'eth9', tmpdir)) + [ + { + "interface": "eth9", + "fixed-address": "192.168.2.74", + "subnet-mask": "255.255.255.0", + "routers": "192.168.2.1", + } + ], + dhcp_discovery(dhclient_script, "eth9", tmpdir), + ) # dhclient script got copied - with open(os.path.join(tmpdir, 'dhclient')) as stream: + with open(os.path.join(tmpdir, "dhclient")) as stream: self.assertEqual(script_content, stream.read()) # Interface was brought up before dhclient called from sandbox - m_subp.assert_has_calls([ - mock.call( - ['ip', 'link', 'set', 'dev', 'eth9', 'up'], capture=True), - mock.call( - [os.path.join(tmpdir, 'dhclient'), '-1', '-v', '-lf', - lease_file, '-pf', os.path.join(tmpdir, 'dhclient.pid'), - 'eth9', '-sf', '/bin/true'], capture=True)]) + m_subp.assert_has_calls( + [ + mock.call( + ["ip", "link", "set", "dev", 
"eth9", "up"], capture=True + ), + mock.call( + [ + os.path.join(tmpdir, "dhclient"), + "-1", + "-v", + "-lf", + lease_file, + "-pf", + os.path.join(tmpdir, "dhclient.pid"), + "eth9", + "-sf", + "/bin/true", + ], + capture=True, + ), + ] + ) m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)]) - @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') - @mock.patch('cloudinit.net.dhcp.os.kill') - @mock.patch('cloudinit.net.dhcp.subp.subp') + @mock.patch("cloudinit.net.dhcp.util.get_proc_ppid") + @mock.patch("cloudinit.net.dhcp.os.kill") + @mock.patch("cloudinit.net.dhcp.subp.subp") def test_dhcp_discovery_outside_sandbox(self, m_subp, m_kill, m_getppid): """dhcp_discovery brings up the interface and runs dhclient. It also returns the parsed dhcp.leases file generated in the sandbox. """ - m_subp.return_value = ('', '') + m_subp.return_value = ("", "") tmpdir = self.tmp_dir() - dhclient_script = os.path.join(tmpdir, 'dhclient.orig') - script_content = '#!/bin/bash\necho fake-dhclient' + dhclient_script = os.path.join(tmpdir, "dhclient.orig") + script_content = "#!/bin/bash\necho fake-dhclient" write_file(dhclient_script, script_content, mode=0o755) - lease_content = dedent(""" + lease_content = dedent( + """ lease { interface "eth9"; fixed-address 192.168.2.74; option subnet-mask 255.255.255.0; option routers 192.168.2.1; } - """) - lease_file = os.path.join(tmpdir, 'dhcp.leases') + """ + ) + lease_file = os.path.join(tmpdir, "dhcp.leases") write_file(lease_file, lease_content) - pid_file = os.path.join(tmpdir, 'dhclient.pid') + pid_file = os.path.join(tmpdir, "dhclient.pid") my_pid = 1 write_file(pid_file, "%d\n" % my_pid) m_getppid.return_value = 1 # Indicate that dhclient has daemonized - with mock.patch('os.access', return_value=False): + with mock.patch("os.access", return_value=False): self.assertCountEqual( - [{'interface': 'eth9', 'fixed-address': '192.168.2.74', - 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}], - dhcp_discovery(dhclient_script, 'eth9', tmpdir)) + [ + { + "interface": "eth9", + "fixed-address": "192.168.2.74", + "subnet-mask": "255.255.255.0", + "routers": "192.168.2.1", + } + ], + dhcp_discovery(dhclient_script, "eth9", tmpdir), + ) # dhclient script got copied - with open(os.path.join(tmpdir, 'dhclient.orig')) as stream: + with open(os.path.join(tmpdir, "dhclient.orig")) as stream: self.assertEqual(script_content, stream.read()) # Interface was brought up before dhclient called from sandbox - m_subp.assert_has_calls([ - mock.call( - ['ip', 'link', 'set', 'dev', 'eth9', 'up'], capture=True), - mock.call( - [os.path.join(tmpdir, 'dhclient.orig'), '-1', '-v', '-lf', - lease_file, '-pf', os.path.join(tmpdir, 'dhclient.pid'), - 'eth9', '-sf', '/bin/true'], capture=True)]) + m_subp.assert_has_calls( + [ + mock.call( + ["ip", "link", "set", "dev", "eth9", "up"], capture=True + ), + mock.call( + [ + os.path.join(tmpdir, "dhclient.orig"), + "-1", + "-v", + "-lf", + lease_file, + "-pf", + os.path.join(tmpdir, "dhclient.pid"), + "eth9", + "-sf", + "/bin/true", + ], + capture=True, + ), + ] + ) m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)]) - @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') - @mock.patch('cloudinit.net.dhcp.os.kill') - @mock.patch('cloudinit.net.dhcp.subp.subp') + @mock.patch("cloudinit.net.dhcp.util.get_proc_ppid") + @mock.patch("cloudinit.net.dhcp.os.kill") + @mock.patch("cloudinit.net.dhcp.subp.subp") def test_dhcp_output_error_stream(self, m_subp, m_kill, m_getppid): - """"dhcp_log_func is called with the output and error 
streams of + """ "dhcp_log_func is called with the output and error streams of dhclient when the callable is passed.""" - dhclient_err = 'FAKE DHCLIENT ERROR' - dhclient_out = 'FAKE DHCLIENT OUT' + dhclient_err = "FAKE DHCLIENT ERROR" + dhclient_out = "FAKE DHCLIENT OUT" m_subp.return_value = (dhclient_out, dhclient_err) tmpdir = self.tmp_dir() - dhclient_script = os.path.join(tmpdir, 'dhclient.orig') - script_content = '#!/bin/bash\necho fake-dhclient' + dhclient_script = os.path.join(tmpdir, "dhclient.orig") + script_content = "#!/bin/bash\necho fake-dhclient" write_file(dhclient_script, script_content, mode=0o755) - lease_content = dedent(""" + lease_content = dedent( + """ lease { interface "eth9"; fixed-address 192.168.2.74; option subnet-mask 255.255.255.0; option routers 192.168.2.1; } - """) - lease_file = os.path.join(tmpdir, 'dhcp.leases') + """ + ) + lease_file = os.path.join(tmpdir, "dhcp.leases") write_file(lease_file, lease_content) - pid_file = os.path.join(tmpdir, 'dhclient.pid') + pid_file = os.path.join(tmpdir, "dhclient.pid") my_pid = 1 write_file(pid_file, "%d\n" % my_pid) m_getppid.return_value = 1 # Indicate that dhclient has daemonized @@ -497,12 +633,14 @@ class TestDHCPDiscoveryClean(CiTestCase): self.assertEqual(err, dhclient_err) dhcp_discovery( - dhclient_script, 'eth9', tmpdir, dhcp_log_func=dhcp_log_func) + dhclient_script, "eth9", tmpdir, dhcp_log_func=dhcp_log_func + ) class TestSystemdParseLeases(CiTestCase): - lxd_lease = dedent("""\ + lxd_lease = dedent( + """\ # This is private data. Do not parse. ADDRESS=10.75.205.242 NETMASK=255.255.255.0 @@ -517,25 +655,27 @@ class TestSystemdParseLeases(CiTestCase): DOMAINNAME=lxd HOSTNAME=a1 CLIENTID=ffe617693400020000ab110c65a6a0866931c2 - """) + """ + ) lxd_parsed = { - 'ADDRESS': '10.75.205.242', - 'NETMASK': '255.255.255.0', - 'ROUTER': '10.75.205.1', - 'SERVER_ADDRESS': '10.75.205.1', - 'NEXT_SERVER': '10.75.205.1', - 'BROADCAST': '10.75.205.255', - 'T1': '1580', - 'T2': '2930', - 'LIFETIME': '3600', - 'DNS': '10.75.205.1', - 'DOMAINNAME': 'lxd', - 'HOSTNAME': 'a1', - 'CLIENTID': 'ffe617693400020000ab110c65a6a0866931c2', + "ADDRESS": "10.75.205.242", + "NETMASK": "255.255.255.0", + "ROUTER": "10.75.205.1", + "SERVER_ADDRESS": "10.75.205.1", + "NEXT_SERVER": "10.75.205.1", + "BROADCAST": "10.75.205.255", + "T1": "1580", + "T2": "2930", + "LIFETIME": "3600", + "DNS": "10.75.205.1", + "DOMAINNAME": "lxd", + "HOSTNAME": "a1", + "CLIENTID": "ffe617693400020000ab110c65a6a0866931c2", } - azure_lease = dedent("""\ + azure_lease = dedent( + """\ # This is private data. Do not parse. 
ADDRESS=10.132.0.5 NETMASK=255.255.255.255 @@ -554,26 +694,28 @@ class TestSystemdParseLeases(CiTestCase): ROUTES=10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1 CLIENTID=ff405663a200020000ab11332859494d7a8b4c OPTION_245=624c3620 - """) + """ + ) azure_parsed = { - 'ADDRESS': '10.132.0.5', - 'NETMASK': '255.255.255.255', - 'ROUTER': '10.132.0.1', - 'SERVER_ADDRESS': '169.254.169.254', - 'NEXT_SERVER': '10.132.0.1', - 'MTU': '1460', - 'T1': '43200', - 'T2': '75600', - 'LIFETIME': '86400', - 'DNS': '169.254.169.254', - 'NTP': '169.254.169.254', - 'DOMAINNAME': 'c.ubuntu-foundations.internal', - 'DOMAIN_SEARCH_LIST': 'c.ubuntu-foundations.internal google.internal', - 'HOSTNAME': 'tribaal-test-171002-1349.c.ubuntu-foundations.internal', - 'ROUTES': '10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1', - 'CLIENTID': 'ff405663a200020000ab11332859494d7a8b4c', - 'OPTION_245': '624c3620'} + "ADDRESS": "10.132.0.5", + "NETMASK": "255.255.255.255", + "ROUTER": "10.132.0.1", + "SERVER_ADDRESS": "169.254.169.254", + "NEXT_SERVER": "10.132.0.1", + "MTU": "1460", + "T1": "43200", + "T2": "75600", + "LIFETIME": "86400", + "DNS": "169.254.169.254", + "NTP": "169.254.169.254", + "DOMAINNAME": "c.ubuntu-foundations.internal", + "DOMAIN_SEARCH_LIST": "c.ubuntu-foundations.internal google.internal", + "HOSTNAME": "tribaal-test-171002-1349.c.ubuntu-foundations.internal", + "ROUTES": "10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1", + "CLIENTID": "ff405663a200020000ab11332859494d7a8b4c", + "OPTION_245": "624c3620", + } def setUp(self): super(TestSystemdParseLeases, self).setUp() @@ -585,63 +727,71 @@ class TestSystemdParseLeases(CiTestCase): def test_no_leases_dir_returns_empty_dict(self): """A non-existing leases dir should return empty dict.""" - enodir = os.path.join(self.lease_d, 'does-not-exist') + enodir = os.path.join(self.lease_d, "does-not-exist") self.assertEqual({}, networkd_load_leases(enodir)) def test_single_leases_file(self): """A leases dir with one leases file.""" - populate_dir(self.lease_d, {'2': self.lxd_lease}) + populate_dir(self.lease_d, {"2": self.lxd_lease}) self.assertEqual( - {'2': self.lxd_parsed}, networkd_load_leases(self.lease_d)) + {"2": self.lxd_parsed}, networkd_load_leases(self.lease_d) + ) def test_single_azure_leases_file(self): """On Azure, option 245 should be present, verify it specifically.""" - populate_dir(self.lease_d, {'1': self.azure_lease}) + populate_dir(self.lease_d, {"1": self.azure_lease}) self.assertEqual( - {'1': self.azure_parsed}, networkd_load_leases(self.lease_d)) + {"1": self.azure_parsed}, networkd_load_leases(self.lease_d) + ) def test_multiple_files(self): """Multiple leases files on azure with one found return that value.""" self.maxDiff = None - populate_dir(self.lease_d, {'1': self.azure_lease, - '9': self.lxd_lease}) - self.assertEqual({'1': self.azure_parsed, '9': self.lxd_parsed}, - networkd_load_leases(self.lease_d)) + populate_dir( + self.lease_d, {"1": self.azure_lease, "9": self.lxd_lease} + ) + self.assertEqual( + {"1": self.azure_parsed, "9": self.lxd_parsed}, + networkd_load_leases(self.lease_d), + ) class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase): - - @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") def test_ephemeral_dhcp_no_network_if_url_connectivity(self, m_dhcp): """No EphemeralDhcp4 network setup when connectivity_url succeeds.""" - url = 'http://example.org/index.html' + url = "http://example.org/index.html" httpretty.register_uri(httpretty.GET, url) with 
net.dhcp.EphemeralDHCPv4( - connectivity_url_data={'url': url}, + connectivity_url_data={"url": url}, ) as lease: self.assertIsNone(lease) # Ensure that no teardown happens: m_dhcp.assert_not_called() - @mock.patch('cloudinit.net.dhcp.subp.subp') - @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch("cloudinit.net.dhcp.subp.subp") + @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") def test_ephemeral_dhcp_setup_network_if_url_connectivity( - self, m_dhcp, m_subp): + self, m_dhcp, m_subp + ): """No EphemeralDhcp4 network setup when connectivity_url succeeds.""" - url = 'http://example.org/index.html' + url = "http://example.org/index.html" fake_lease = { - 'interface': 'eth9', 'fixed-address': '192.168.2.2', - 'subnet-mask': '255.255.0.0'} + "interface": "eth9", + "fixed-address": "192.168.2.2", + "subnet-mask": "255.255.0.0", + } m_dhcp.return_value = [fake_lease] - m_subp.return_value = ('', '') + m_subp.return_value = ("", "") httpretty.register_uri(httpretty.GET, url, body={}, status=404) with net.dhcp.EphemeralDHCPv4( - connectivity_url_data={'url': url}, + connectivity_url_data={"url": url}, ) as lease: self.assertEqual(fake_lease, lease) # Ensure that dhcp discovery occurs m_dhcp.called_once_with() + # vi: ts=4 expandtab diff --git a/tests/unittests/net/test_init.py b/tests/unittests/net/test_init.py index 82854ab3..b245da94 100644 --- a/tests/unittests/net/test_init.py +++ b/tests/unittests/net/test_init.py @@ -13,24 +13,23 @@ import requests import cloudinit.net as net from cloudinit import safeyaml as yaml -from tests.unittests.helpers import CiTestCase, HttprettyTestCase from cloudinit.subp import ProcessExecutionError from cloudinit.util import ensure_file, write_file +from tests.unittests.helpers import CiTestCase, HttprettyTestCase class TestSysDevPath(CiTestCase): - def test_sys_dev_path(self): """sys_dev_path returns a path under SYS_CLASS_NET for a device.""" - dev = 'something' - path = 'attribute' - expected = net.SYS_CLASS_NET + dev + '/' + path + dev = "something" + path = "attribute" + expected = net.SYS_CLASS_NET + dev + "/" + path self.assertEqual(expected, net.sys_dev_path(dev, path)) def test_sys_dev_path_without_path(self): """When path param isn't provided it defaults to empty string.""" - dev = 'something' - expected = net.SYS_CLASS_NET + dev + '/' + dev = "something" + expected = net.SYS_CLASS_NET + dev + "/" self.assertEqual(expected, net.sys_dev_path(dev)) @@ -39,25 +38,25 @@ class TestReadSysNet(CiTestCase): def setUp(self): super(TestReadSysNet, self).setUp() - sys_mock = mock.patch('cloudinit.net.get_sys_class_path') + sys_mock = mock.patch("cloudinit.net.get_sys_class_path") self.m_sys_path = sys_mock.start() - self.sysdir = self.tmp_dir() + '/' + self.sysdir = self.tmp_dir() + "/" self.m_sys_path.return_value = self.sysdir self.addCleanup(sys_mock.stop) def test_read_sys_net_strips_contents_of_sys_path(self): """read_sys_net strips whitespace from the contents of a sys file.""" - content = 'some stuff with trailing whitespace\t\r\n' - write_file(os.path.join(self.sysdir, 'dev', 'attr'), content) - self.assertEqual(content.strip(), net.read_sys_net('dev', 'attr')) + content = "some stuff with trailing whitespace\t\r\n" + write_file(os.path.join(self.sysdir, "dev", "attr"), content) + self.assertEqual(content.strip(), net.read_sys_net("dev", "attr")) def test_read_sys_net_reraises_oserror(self): """read_sys_net raises OSError/IOError when file doesn't exist.""" # Non-specific Exception because versions of python 
raise OSError vs IOError. with self.assertRaises(Exception) as context_manager: # noqa: H202 - net.read_sys_net('dev', 'attr') + net.read_sys_net("dev", "attr") error = context_manager.exception - self.assertIn('No such file or directory', str(error)) + self.assertIn("No such file or directory", str(error)) def test_read_sys_net_handles_error_with_on_enoent(self): """read_sys_net handles OSError/IOError with on_enoent if provided.""" @@ -66,43 +65,44 @@ class TestReadSysNet(CiTestCase): def on_enoent(e): handled_errors.append(e) - net.read_sys_net('dev', 'attr', on_enoent=on_enoent) + net.read_sys_net("dev", "attr", on_enoent=on_enoent) error = handled_errors[0] self.assertIsInstance(error, Exception) - self.assertIn('No such file or directory', str(error)) + self.assertIn("No such file or directory", str(error)) def test_read_sys_net_translates_content(self): """read_sys_net translates content when translate dict is provided.""" content = "you're welcome\n" - write_file(os.path.join(self.sysdir, 'dev', 'attr'), content) - translate = {"you're welcome": 'de nada'} + write_file(os.path.join(self.sysdir, "dev", "attr"), content) + translate = {"you're welcome": "de nada"} self.assertEqual( - 'de nada', - net.read_sys_net('dev', 'attr', translate=translate)) + "de nada", net.read_sys_net("dev", "attr", translate=translate) + ) def test_read_sys_net_errors_on_translation_failures(self): """read_sys_net raises a KeyError and logs details on failure.""" content = "you're welcome\n" - write_file(os.path.join(self.sysdir, 'dev', 'attr'), content) + write_file(os.path.join(self.sysdir, "dev", "attr"), content) with self.assertRaises(KeyError) as context_manager: - net.read_sys_net('dev', 'attr', translate={}) + net.read_sys_net("dev", "attr", translate={}) error = context_manager.exception self.assertEqual('"you\'re welcome"', str(error)) self.assertIn( "Found unexpected (not translatable) value 'you're welcome' in " "'{0}dev/attr".format(self.sysdir), - self.logs.getvalue()) + self.logs.getvalue(), + ) def test_read_sys_net_handles_handles_with_onkeyerror(self): """read_sys_net handles translation errors calling on_keyerror.""" content = "you're welcome\n" - write_file(os.path.join(self.sysdir, 'dev', 'attr'), content) + write_file(os.path.join(self.sysdir, "dev", "attr"), content) handled_errors = [] def on_keyerror(e): handled_errors.append(e) - net.read_sys_net('dev', 'attr', translate={}, on_keyerror=on_keyerror) + net.read_sys_net("dev", "attr", translate={}, on_keyerror=on_keyerror) error = handled_errors[0] self.assertIsInstance(error, KeyError) self.assertEqual('"you\'re welcome"', str(error)) @@ -110,274 +110,308 @@ class TestReadSysNet(CiTestCase): def test_read_sys_net_safe_false_on_translate_failure(self): """read_sys_net_safe returns False on translation failures.""" content = "you're welcome\n" - write_file(os.path.join(self.sysdir, 'dev', 'attr'), content) - self.assertFalse(net.read_sys_net_safe('dev', 'attr', translate={})) + write_file(os.path.join(self.sysdir, "dev", "attr"), content) + self.assertFalse(net.read_sys_net_safe("dev", "attr", translate={})) def test_read_sys_net_safe_returns_false_on_noent_failure(self): """read_sys_net_safe returns False on file not found failures.""" - self.assertFalse(net.read_sys_net_safe('dev', 'attr')) + self.assertFalse(net.read_sys_net_safe("dev", "attr")) def test_read_sys_net_int_returns_none_on_error(self): """read_sys_net_int returns None on failures.""" - self.assertFalse(net.read_sys_net_int('dev', 'attr')) + 
self.assertFalse(net.read_sys_net_int("dev", "attr")) def test_read_sys_net_int_returns_none_on_valueerror(self): """read_sys_net_int returns None when content is not an int.""" - write_file(os.path.join(self.sysdir, 'dev', 'attr'), 'NOTINT\n') - self.assertFalse(net.read_sys_net_int('dev', 'attr')) + write_file(os.path.join(self.sysdir, "dev", "attr"), "NOTINT\n") + self.assertFalse(net.read_sys_net_int("dev", "attr")) def test_read_sys_net_int_returns_integer_from_content(self): """read_sys_net_int returns the integer parsed from content.""" - write_file(os.path.join(self.sysdir, 'dev', 'attr'), '1\n') - self.assertEqual(1, net.read_sys_net_int('dev', 'attr')) + write_file(os.path.join(self.sysdir, "dev", "attr"), "1\n") + self.assertEqual(1, net.read_sys_net_int("dev", "attr")) def test_is_up_true(self): """is_up is True if sys/net/devname/operstate is 'up' or 'unknown'.""" - for state in ['up', 'unknown']: - write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state) - self.assertTrue(net.is_up('eth0')) + for state in ["up", "unknown"]: + write_file(os.path.join(self.sysdir, "eth0", "operstate"), state) + self.assertTrue(net.is_up("eth0")) def test_is_up_false(self): """is_up is False if sys/net/devname/operstate is 'down' or invalid.""" - for state in ['down', 'incomprehensible']: - write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state) - self.assertFalse(net.is_up('eth0')) + for state in ["down", "incomprehensible"]: + write_file(os.path.join(self.sysdir, "eth0", "operstate"), state) + self.assertFalse(net.is_up("eth0")) def test_is_bridge(self): """is_bridge is True when /sys/net/devname/bridge exists.""" - self.assertFalse(net.is_bridge('eth0')) - ensure_file(os.path.join(self.sysdir, 'eth0', 'bridge')) - self.assertTrue(net.is_bridge('eth0')) + self.assertFalse(net.is_bridge("eth0")) + ensure_file(os.path.join(self.sysdir, "eth0", "bridge")) + self.assertTrue(net.is_bridge("eth0")) def test_is_bond(self): """is_bond is True when /sys/net/devname/bonding exists.""" - self.assertFalse(net.is_bond('eth0')) - ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding')) - self.assertTrue(net.is_bond('eth0')) + self.assertFalse(net.is_bond("eth0")) + ensure_file(os.path.join(self.sysdir, "eth0", "bonding")) + self.assertTrue(net.is_bond("eth0")) def test_get_master(self): """get_master returns the path when /sys/net/devname/master exists.""" - self.assertIsNone(net.get_master('enP1s1')) - master_path = os.path.join(self.sysdir, 'enP1s1', 'master') + self.assertIsNone(net.get_master("enP1s1")) + master_path = os.path.join(self.sysdir, "enP1s1", "master") ensure_file(master_path) - self.assertEqual(master_path, net.get_master('enP1s1')) + self.assertEqual(master_path, net.get_master("enP1s1")) def test_master_is_bridge_or_bond(self): - bridge_mac = 'aa:bb:cc:aa:bb:cc' - bond_mac = 'cc:bb:aa:cc:bb:aa' + bridge_mac = "aa:bb:cc:aa:bb:cc" + bond_mac = "cc:bb:aa:cc:bb:aa" # No master => False - write_file(os.path.join(self.sysdir, 'eth1', 'address'), bridge_mac) - write_file(os.path.join(self.sysdir, 'eth2', 'address'), bond_mac) + write_file(os.path.join(self.sysdir, "eth1", "address"), bridge_mac) + write_file(os.path.join(self.sysdir, "eth2", "address"), bond_mac) - self.assertFalse(net.master_is_bridge_or_bond('eth1')) - self.assertFalse(net.master_is_bridge_or_bond('eth2')) + self.assertFalse(net.master_is_bridge_or_bond("eth1")) + self.assertFalse(net.master_is_bridge_or_bond("eth2")) # masters without bridge/bonding => False - write_file(os.path.join(self.sysdir, 'br0', 'address'), 
bridge_mac) - write_file(os.path.join(self.sysdir, 'bond0', 'address'), bond_mac) + write_file(os.path.join(self.sysdir, "br0", "address"), bridge_mac) + write_file(os.path.join(self.sysdir, "bond0", "address"), bond_mac) - os.symlink('../br0', os.path.join(self.sysdir, 'eth1', 'master')) - os.symlink('../bond0', os.path.join(self.sysdir, 'eth2', 'master')) + os.symlink("../br0", os.path.join(self.sysdir, "eth1", "master")) + os.symlink("../bond0", os.path.join(self.sysdir, "eth2", "master")) - self.assertFalse(net.master_is_bridge_or_bond('eth1')) - self.assertFalse(net.master_is_bridge_or_bond('eth2')) + self.assertFalse(net.master_is_bridge_or_bond("eth1")) + self.assertFalse(net.master_is_bridge_or_bond("eth2")) # masters with bridge/bonding => True - write_file(os.path.join(self.sysdir, 'br0', 'bridge'), '') - write_file(os.path.join(self.sysdir, 'bond0', 'bonding'), '') + write_file(os.path.join(self.sysdir, "br0", "bridge"), "") + write_file(os.path.join(self.sysdir, "bond0", "bonding"), "") - self.assertTrue(net.master_is_bridge_or_bond('eth1')) - self.assertTrue(net.master_is_bridge_or_bond('eth2')) + self.assertTrue(net.master_is_bridge_or_bond("eth1")) + self.assertTrue(net.master_is_bridge_or_bond("eth2")) def test_master_is_openvswitch(self): - ovs_mac = 'bb:cc:aa:bb:cc:aa' + ovs_mac = "bb:cc:aa:bb:cc:aa" # No master => False - write_file(os.path.join(self.sysdir, 'eth1', 'address'), ovs_mac) + write_file(os.path.join(self.sysdir, "eth1", "address"), ovs_mac) - self.assertFalse(net.master_is_bridge_or_bond('eth1')) + self.assertFalse(net.master_is_bridge_or_bond("eth1")) # masters without ovs-system => False - write_file(os.path.join(self.sysdir, 'ovs-system', 'address'), ovs_mac) + write_file(os.path.join(self.sysdir, "ovs-system", "address"), ovs_mac) - os.symlink('../ovs-system', os.path.join(self.sysdir, 'eth1', - 'master')) + os.symlink( + "../ovs-system", os.path.join(self.sysdir, "eth1", "master") + ) - self.assertFalse(net.master_is_openvswitch('eth1')) + self.assertFalse(net.master_is_openvswitch("eth1")) # masters with ovs-system => True - os.symlink('../ovs-system', os.path.join(self.sysdir, 'eth1', - 'upper_ovs-system')) + os.symlink( + "../ovs-system", + os.path.join(self.sysdir, "eth1", "upper_ovs-system"), + ) - self.assertTrue(net.master_is_openvswitch('eth1')) + self.assertTrue(net.master_is_openvswitch("eth1")) def test_is_vlan(self): """is_vlan is True when /sys/net/devname/uevent has DEVTYPE=vlan.""" - ensure_file(os.path.join(self.sysdir, 'eth0', 'uevent')) - self.assertFalse(net.is_vlan('eth0')) - content = 'junk\nDEVTYPE=vlan\njunk\n' - write_file(os.path.join(self.sysdir, 'eth0', 'uevent'), content) - self.assertTrue(net.is_vlan('eth0')) + ensure_file(os.path.join(self.sysdir, "eth0", "uevent")) + self.assertFalse(net.is_vlan("eth0")) + content = "junk\nDEVTYPE=vlan\njunk\n" + write_file(os.path.join(self.sysdir, "eth0", "uevent"), content) + self.assertTrue(net.is_vlan("eth0")) class TestGenerateFallbackConfig(CiTestCase): - def setUp(self): super(TestGenerateFallbackConfig, self).setUp() - sys_mock = mock.patch('cloudinit.net.get_sys_class_path') + sys_mock = mock.patch("cloudinit.net.get_sys_class_path") self.m_sys_path = sys_mock.start() - self.sysdir = self.tmp_dir() + '/' + self.sysdir = self.tmp_dir() + "/" self.m_sys_path.return_value = self.sysdir self.addCleanup(sys_mock.stop) - self.add_patch('cloudinit.net.util.is_container', 'm_is_container', - return_value=False) - self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle') - 
self.add_patch('cloudinit.net.is_netfailover', 'm_netfail', - return_value=False) - self.add_patch('cloudinit.net.is_netfail_master', 'm_netfail_master', - return_value=False) + self.add_patch( + "cloudinit.net.util.is_container", + "m_is_container", + return_value=False, + ) + self.add_patch("cloudinit.net.util.udevadm_settle", "m_settle") + self.add_patch( + "cloudinit.net.is_netfailover", "m_netfail", return_value=False + ) + self.add_patch( + "cloudinit.net.is_netfail_master", + "m_netfail_master", + return_value=False, + ) def test_generate_fallback_finds_connected_eth_with_mac(self): """generate_fallback_config finds any connected device with a mac.""" - write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1') - write_file(os.path.join(self.sysdir, 'eth1', 'carrier'), '1') - mac = 'aa:bb:cc:aa:bb:cc' - write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac) + write_file(os.path.join(self.sysdir, "eth0", "carrier"), "1") + write_file(os.path.join(self.sysdir, "eth1", "carrier"), "1") + mac = "aa:bb:cc:aa:bb:cc" + write_file(os.path.join(self.sysdir, "eth1", "address"), mac) expected = { - 'ethernets': {'eth1': {'match': {'macaddress': mac}, - 'dhcp4': True, 'set-name': 'eth1'}}, - 'version': 2} + "ethernets": { + "eth1": { + "match": {"macaddress": mac}, + "dhcp4": True, + "set-name": "eth1", + } + }, + "version": 2, + } self.assertEqual(expected, net.generate_fallback_config()) def test_generate_fallback_finds_dormant_eth_with_mac(self): """generate_fallback_config finds any dormant device with a mac.""" - write_file(os.path.join(self.sysdir, 'eth0', 'dormant'), '1') - mac = 'aa:bb:cc:aa:bb:cc' - write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac) + write_file(os.path.join(self.sysdir, "eth0", "dormant"), "1") + mac = "aa:bb:cc:aa:bb:cc" + write_file(os.path.join(self.sysdir, "eth0", "address"), mac) expected = { - 'ethernets': {'eth0': {'match': {'macaddress': mac}, 'dhcp4': True, - 'set-name': 'eth0'}}, - 'version': 2} + "ethernets": { + "eth0": { + "match": {"macaddress": mac}, + "dhcp4": True, + "set-name": "eth0", + } + }, + "version": 2, + } self.assertEqual(expected, net.generate_fallback_config()) def test_generate_fallback_finds_eth_by_operstate(self): """generate_fallback_config finds any dormant device with a mac.""" - mac = 'aa:bb:cc:aa:bb:cc' - write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac) + mac = "aa:bb:cc:aa:bb:cc" + write_file(os.path.join(self.sysdir, "eth0", "address"), mac) expected = { - 'ethernets': { - 'eth0': {'dhcp4': True, 'match': {'macaddress': mac}, - 'set-name': 'eth0'}}, - 'version': 2} - valid_operstates = ['dormant', 'down', 'lowerlayerdown', 'unknown'] + "ethernets": { + "eth0": { + "dhcp4": True, + "match": {"macaddress": mac}, + "set-name": "eth0", + } + }, + "version": 2, + } + valid_operstates = ["dormant", "down", "lowerlayerdown", "unknown"] for state in valid_operstates: - write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state) + write_file(os.path.join(self.sysdir, "eth0", "operstate"), state) self.assertEqual(expected, net.generate_fallback_config()) - write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), 'noworky') + write_file(os.path.join(self.sysdir, "eth0", "operstate"), "noworky") self.assertIsNone(net.generate_fallback_config()) def test_generate_fallback_config_skips_veth(self): """generate_fallback_config will skip any veth interfaces.""" # A connected veth which gets ignored - write_file(os.path.join(self.sysdir, 'veth0', 'carrier'), '1') + write_file(os.path.join(self.sysdir, 
"veth0", "carrier"), "1") self.assertIsNone(net.generate_fallback_config()) def test_generate_fallback_config_skips_bridges(self): """generate_fallback_config will skip any bridges interfaces.""" # A connected veth which gets ignored - write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1') - mac = 'aa:bb:cc:aa:bb:cc' - write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac) - ensure_file(os.path.join(self.sysdir, 'eth0', 'bridge')) + write_file(os.path.join(self.sysdir, "eth0", "carrier"), "1") + mac = "aa:bb:cc:aa:bb:cc" + write_file(os.path.join(self.sysdir, "eth0", "address"), mac) + ensure_file(os.path.join(self.sysdir, "eth0", "bridge")) self.assertIsNone(net.generate_fallback_config()) def test_generate_fallback_config_skips_bonds(self): """generate_fallback_config will skip any bonded interfaces.""" # A connected veth which gets ignored - write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1') - mac = 'aa:bb:cc:aa:bb:cc' - write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac) - ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding')) + write_file(os.path.join(self.sysdir, "eth0", "carrier"), "1") + mac = "aa:bb:cc:aa:bb:cc" + write_file(os.path.join(self.sysdir, "eth0", "address"), mac) + ensure_file(os.path.join(self.sysdir, "eth0", "bonding")) self.assertIsNone(net.generate_fallback_config()) def test_generate_fallback_config_skips_netfail_devs(self): """gen_fallback_config ignores netfail primary,sby no mac on master.""" - mac = 'aa:bb:cc:aa:bb:cc' # netfailover devs share the same mac - for iface in ['ens3', 'ens3sby', 'enP0s1f3']: - write_file(os.path.join(self.sysdir, iface, 'carrier'), '1') - write_file( - os.path.join(self.sysdir, iface, 'addr_assign_type'), '0') + mac = "aa:bb:cc:aa:bb:cc" # netfailover devs share the same mac + for iface in ["ens3", "ens3sby", "enP0s1f3"]: + write_file(os.path.join(self.sysdir, iface, "carrier"), "1") write_file( - os.path.join(self.sysdir, iface, 'address'), mac) + os.path.join(self.sysdir, iface, "addr_assign_type"), "0" + ) + write_file(os.path.join(self.sysdir, iface, "address"), mac) def is_netfail(iface, _driver=None): # ens3 is the master - if iface == 'ens3': + if iface == "ens3": return False return True + self.m_netfail.side_effect = is_netfail def is_netfail_master(iface, _driver=None): # ens3 is the master - if iface == 'ens3': + if iface == "ens3": return True return False + self.m_netfail_master.side_effect = is_netfail_master expected = { - 'ethernets': { - 'ens3': {'dhcp4': True, 'match': {'name': 'ens3'}, - 'set-name': 'ens3'}}, - 'version': 2} + "ethernets": { + "ens3": { + "dhcp4": True, + "match": {"name": "ens3"}, + "set-name": "ens3", + } + }, + "version": 2, + } result = net.generate_fallback_config() self.assertEqual(expected, result) class TestNetFindFallBackNic(CiTestCase): - def setUp(self): super(TestNetFindFallBackNic, self).setUp() - sys_mock = mock.patch('cloudinit.net.get_sys_class_path') + sys_mock = mock.patch("cloudinit.net.get_sys_class_path") self.m_sys_path = sys_mock.start() - self.sysdir = self.tmp_dir() + '/' + self.sysdir = self.tmp_dir() + "/" self.m_sys_path.return_value = self.sysdir self.addCleanup(sys_mock.stop) - self.add_patch('cloudinit.net.util.is_container', 'm_is_container', - return_value=False) - self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle') + self.add_patch( + "cloudinit.net.util.is_container", + "m_is_container", + return_value=False, + ) + self.add_patch("cloudinit.net.util.udevadm_settle", "m_settle") def 
test_generate_fallback_finds_first_connected_eth_with_mac(self): """find_fallback_nic finds any connected device with a mac.""" - write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1') - write_file(os.path.join(self.sysdir, 'eth1', 'carrier'), '1') - mac = 'aa:bb:cc:aa:bb:cc' - write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac) - self.assertEqual('eth1', net.find_fallback_nic()) + write_file(os.path.join(self.sysdir, "eth0", "carrier"), "1") + write_file(os.path.join(self.sysdir, "eth1", "carrier"), "1") + mac = "aa:bb:cc:aa:bb:cc" + write_file(os.path.join(self.sysdir, "eth1", "address"), mac) + self.assertEqual("eth1", net.find_fallback_nic()) class TestGetDeviceList(CiTestCase): - def setUp(self): super(TestGetDeviceList, self).setUp() - sys_mock = mock.patch('cloudinit.net.get_sys_class_path') + sys_mock = mock.patch("cloudinit.net.get_sys_class_path") self.m_sys_path = sys_mock.start() - self.sysdir = self.tmp_dir() + '/' + self.sysdir = self.tmp_dir() + "/" self.m_sys_path.return_value = self.sysdir self.addCleanup(sys_mock.stop) def test_get_devicelist_raise_oserror(self): """get_devicelist raise any non-ENOENT OSerror.""" - error = OSError('Can not do it') + error = OSError("Can not do it") error.errno = errno.EPERM # Set non-ENOENT self.m_sys_path.side_effect = error with self.assertRaises(OSError) as context_manager: net.get_devicelist() exception = context_manager.exception - self.assertEqual('Can not do it', str(exception)) + self.assertEqual("Can not do it", str(exception)) def test_get_devicelist_empty_without_sys_net(self): """get_devicelist returns empty list when missing SYS_CLASS_NET.""" - self.m_sys_path.return_value = 'idontexist' + self.m_sys_path.return_value = "idontexist" self.assertEqual([], net.get_devicelist()) def test_get_devicelist_empty_with_no_devices_in_sys_net(self): @@ -386,9 +420,9 @@ class TestGetDeviceList(CiTestCase): def test_get_devicelist_lists_any_subdirectories_in_sys_net(self): """get_devicelist returns a directory listing for SYS_CLASS_NET.""" - write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), 'up') - write_file(os.path.join(self.sysdir, 'eth1', 'operstate'), 'up') - self.assertCountEqual(['eth0', 'eth1'], net.get_devicelist()) + write_file(os.path.join(self.sysdir, "eth0", "operstate"), "up") + write_file(os.path.join(self.sysdir, "eth1", "operstate"), "up") + self.assertCountEqual(["eth0", "eth1"], net.get_devicelist()) @mock.patch( @@ -396,239 +430,288 @@ class TestGetDeviceList(CiTestCase): mock.Mock(return_value=False), ) class TestGetInterfaceMAC(CiTestCase): - def setUp(self): super(TestGetInterfaceMAC, self).setUp() - sys_mock = mock.patch('cloudinit.net.get_sys_class_path') + sys_mock = mock.patch("cloudinit.net.get_sys_class_path") self.m_sys_path = sys_mock.start() - self.sysdir = self.tmp_dir() + '/' + self.sysdir = self.tmp_dir() + "/" self.m_sys_path.return_value = self.sysdir self.addCleanup(sys_mock.stop) def test_get_interface_mac_false_with_no_mac(self): """get_device_list returns False when no mac is reported.""" - ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding')) - mac_path = os.path.join(self.sysdir, 'eth0', 'address') + ensure_file(os.path.join(self.sysdir, "eth0", "bonding")) + mac_path = os.path.join(self.sysdir, "eth0", "address") self.assertFalse(os.path.exists(mac_path)) - self.assertFalse(net.get_interface_mac('eth0')) + self.assertFalse(net.get_interface_mac("eth0")) def test_get_interface_mac(self): """get_interfaces returns the mac from SYS_CLASS_NET/dev/address.""" - mac = 
'aa:bb:cc:aa:bb:cc' - write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac) - self.assertEqual(mac, net.get_interface_mac('eth1')) + mac = "aa:bb:cc:aa:bb:cc" + write_file(os.path.join(self.sysdir, "eth1", "address"), mac) + self.assertEqual(mac, net.get_interface_mac("eth1")) def test_get_interface_mac_grabs_bonding_address(self): """get_interfaces returns the source device mac for bonded devices.""" - source_dev_mac = 'aa:bb:cc:aa:bb:cc' - bonded_mac = 'dd:ee:ff:dd:ee:ff' - write_file(os.path.join(self.sysdir, 'eth1', 'address'), bonded_mac) + source_dev_mac = "aa:bb:cc:aa:bb:cc" + bonded_mac = "dd:ee:ff:dd:ee:ff" + write_file(os.path.join(self.sysdir, "eth1", "address"), bonded_mac) write_file( - os.path.join(self.sysdir, 'eth1', 'bonding_slave', 'perm_hwaddr'), - source_dev_mac) - self.assertEqual(source_dev_mac, net.get_interface_mac('eth1')) + os.path.join(self.sysdir, "eth1", "bonding_slave", "perm_hwaddr"), + source_dev_mac, + ) + self.assertEqual(source_dev_mac, net.get_interface_mac("eth1")) def test_get_interfaces_empty_list_without_sys_net(self): """get_interfaces returns an empty list when missing SYS_CLASS_NET.""" - self.m_sys_path.return_value = 'idontexist' + self.m_sys_path.return_value = "idontexist" self.assertEqual([], net.get_interfaces()) def test_get_interfaces_by_mac_skips_empty_mac(self): """Ignore 00:00:00:00:00:00 addresses from get_interfaces_by_mac.""" - empty_mac = '00:00:00:00:00:00' - mac = 'aa:bb:cc:aa:bb:cc' - write_file(os.path.join(self.sysdir, 'eth1', 'address'), empty_mac) - write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '0') - write_file(os.path.join(self.sysdir, 'eth2', 'addr_assign_type'), '0') - write_file(os.path.join(self.sysdir, 'eth2', 'address'), mac) - expected = [('eth2', 'aa:bb:cc:aa:bb:cc', None, None)] + empty_mac = "00:00:00:00:00:00" + mac = "aa:bb:cc:aa:bb:cc" + write_file(os.path.join(self.sysdir, "eth1", "address"), empty_mac) + write_file(os.path.join(self.sysdir, "eth1", "addr_assign_type"), "0") + write_file(os.path.join(self.sysdir, "eth2", "addr_assign_type"), "0") + write_file(os.path.join(self.sysdir, "eth2", "address"), mac) + expected = [("eth2", "aa:bb:cc:aa:bb:cc", None, None)] self.assertEqual(expected, net.get_interfaces()) def test_get_interfaces_by_mac_skips_missing_mac(self): """Ignore interfaces without an address from get_interfaces_by_mac.""" - write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '0') - address_path = os.path.join(self.sysdir, 'eth1', 'address') + write_file(os.path.join(self.sysdir, "eth1", "addr_assign_type"), "0") + address_path = os.path.join(self.sysdir, "eth1", "address") self.assertFalse(os.path.exists(address_path)) - mac = 'aa:bb:cc:aa:bb:cc' - write_file(os.path.join(self.sysdir, 'eth2', 'addr_assign_type'), '0') - write_file(os.path.join(self.sysdir, 'eth2', 'address'), mac) - expected = [('eth2', 'aa:bb:cc:aa:bb:cc', None, None)] + mac = "aa:bb:cc:aa:bb:cc" + write_file(os.path.join(self.sysdir, "eth2", "addr_assign_type"), "0") + write_file(os.path.join(self.sysdir, "eth2", "address"), mac) + expected = [("eth2", "aa:bb:cc:aa:bb:cc", None, None)] self.assertEqual(expected, net.get_interfaces()) def test_get_interfaces_by_mac_skips_master_devs(self): """Ignore interfaces with a master device which would have dup mac.""" - mac1 = mac2 = 'aa:bb:cc:aa:bb:cc' - write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '0') - write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac1) - write_file(os.path.join(self.sysdir, 'eth1', 'master'), 
"blah") - write_file(os.path.join(self.sysdir, 'eth2', 'addr_assign_type'), '0') - write_file(os.path.join(self.sysdir, 'eth2', 'address'), mac2) - expected = [('eth2', mac2, None, None)] + mac1 = mac2 = "aa:bb:cc:aa:bb:cc" + write_file(os.path.join(self.sysdir, "eth1", "addr_assign_type"), "0") + write_file(os.path.join(self.sysdir, "eth1", "address"), mac1) + write_file(os.path.join(self.sysdir, "eth1", "master"), "blah") + write_file(os.path.join(self.sysdir, "eth2", "addr_assign_type"), "0") + write_file(os.path.join(self.sysdir, "eth2", "address"), mac2) + expected = [("eth2", mac2, None, None)] self.assertEqual(expected, net.get_interfaces()) - @mock.patch('cloudinit.net.is_netfailover') + @mock.patch("cloudinit.net.is_netfailover") def test_get_interfaces_by_mac_skips_netfailvoer(self, m_netfail): """Ignore interfaces if netfailover primary or standby.""" - mac = 'aa:bb:cc:aa:bb:cc' # netfailover devs share the same mac - for iface in ['ens3', 'ens3sby', 'enP0s1f3']: + mac = "aa:bb:cc:aa:bb:cc" # netfailover devs share the same mac + for iface in ["ens3", "ens3sby", "enP0s1f3"]: write_file( - os.path.join(self.sysdir, iface, 'addr_assign_type'), '0') - write_file( - os.path.join(self.sysdir, iface, 'address'), mac) + os.path.join(self.sysdir, iface, "addr_assign_type"), "0" + ) + write_file(os.path.join(self.sysdir, iface, "address"), mac) def is_netfail(iface, _driver=None): # ens3 is the master - if iface == 'ens3': + if iface == "ens3": return False else: return True + m_netfail.side_effect = is_netfail - expected = [('ens3', mac, None, None)] + expected = [("ens3", mac, None, None)] self.assertEqual(expected, net.get_interfaces()) def test_get_interfaces_does_not_skip_phys_members_of_bridges_and_bonds( - self + self, ): - bridge_mac = 'aa:bb:cc:aa:bb:cc' - bond_mac = 'cc:bb:aa:cc:bb:aa' - ovs_mac = 'bb:cc:aa:bb:cc:aa' + bridge_mac = "aa:bb:cc:aa:bb:cc" + bond_mac = "cc:bb:aa:cc:bb:aa" + ovs_mac = "bb:cc:aa:bb:cc:aa" - write_file(os.path.join(self.sysdir, 'br0', 'address'), bridge_mac) - write_file(os.path.join(self.sysdir, 'br0', 'bridge'), '') + write_file(os.path.join(self.sysdir, "br0", "address"), bridge_mac) + write_file(os.path.join(self.sysdir, "br0", "bridge"), "") - write_file(os.path.join(self.sysdir, 'bond0', 'address'), bond_mac) - write_file(os.path.join(self.sysdir, 'bond0', 'bonding'), '') + write_file(os.path.join(self.sysdir, "bond0", "address"), bond_mac) + write_file(os.path.join(self.sysdir, "bond0", "bonding"), "") - write_file(os.path.join(self.sysdir, 'ovs-system', 'address'), - ovs_mac) + write_file(os.path.join(self.sysdir, "ovs-system", "address"), ovs_mac) - write_file(os.path.join(self.sysdir, 'eth1', 'address'), bridge_mac) - os.symlink('../br0', os.path.join(self.sysdir, 'eth1', 'master')) + write_file(os.path.join(self.sysdir, "eth1", "address"), bridge_mac) + os.symlink("../br0", os.path.join(self.sysdir, "eth1", "master")) - write_file(os.path.join(self.sysdir, 'eth2', 'address'), bond_mac) - os.symlink('../bond0', os.path.join(self.sysdir, 'eth2', 'master')) + write_file(os.path.join(self.sysdir, "eth2", "address"), bond_mac) + os.symlink("../bond0", os.path.join(self.sysdir, "eth2", "master")) - write_file(os.path.join(self.sysdir, 'eth3', 'address'), ovs_mac) - os.symlink('../ovs-system', os.path.join(self.sysdir, 'eth3', - 'master')) - os.symlink('../ovs-system', os.path.join(self.sysdir, 'eth3', - 'upper_ovs-system')) + write_file(os.path.join(self.sysdir, "eth3", "address"), ovs_mac) + os.symlink( + "../ovs-system", 
os.path.join(self.sysdir, "eth3", "master") + ) + os.symlink( + "../ovs-system", + os.path.join(self.sysdir, "eth3", "upper_ovs-system"), + ) interface_names = [interface[0] for interface in net.get_interfaces()] - self.assertEqual(['eth1', 'eth2', 'eth3', 'ovs-system'], - sorted(interface_names)) + self.assertEqual( + ["eth1", "eth2", "eth3", "ovs-system"], sorted(interface_names) + ) class TestInterfaceHasOwnMAC(CiTestCase): - def setUp(self): super(TestInterfaceHasOwnMAC, self).setUp() - sys_mock = mock.patch('cloudinit.net.get_sys_class_path') + sys_mock = mock.patch("cloudinit.net.get_sys_class_path") self.m_sys_path = sys_mock.start() - self.sysdir = self.tmp_dir() + '/' + self.sysdir = self.tmp_dir() + "/" self.m_sys_path.return_value = self.sysdir self.addCleanup(sys_mock.stop) def test_interface_has_own_mac_false_when_stolen(self): """Return False from interface_has_own_mac when address is stolen.""" - write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '2') - self.assertFalse(net.interface_has_own_mac('eth1')) + write_file(os.path.join(self.sysdir, "eth1", "addr_assign_type"), "2") + self.assertFalse(net.interface_has_own_mac("eth1")) def test_interface_has_own_mac_true_when_not_stolen(self): """Return False from interface_has_own_mac when mac isn't stolen.""" - valid_assign_types = ['0', '1', '3'] - assign_path = os.path.join(self.sysdir, 'eth1', 'addr_assign_type') + valid_assign_types = ["0", "1", "3"] + assign_path = os.path.join(self.sysdir, "eth1", "addr_assign_type") for _type in valid_assign_types: write_file(assign_path, _type) - self.assertTrue(net.interface_has_own_mac('eth1')) + self.assertTrue(net.interface_has_own_mac("eth1")) def test_interface_has_own_mac_strict_errors_on_absent_assign_type(self): """When addr_assign_type is absent, interface_has_own_mac errors.""" with self.assertRaises(ValueError): - net.interface_has_own_mac('eth1', strict=True) + net.interface_has_own_mac("eth1", strict=True) -@mock.patch('cloudinit.net.subp.subp') +@mock.patch("cloudinit.net.subp.subp") class TestEphemeralIPV4Network(CiTestCase): with_logs = True def setUp(self): super(TestEphemeralIPV4Network, self).setUp() - sys_mock = mock.patch('cloudinit.net.get_sys_class_path') + sys_mock = mock.patch("cloudinit.net.get_sys_class_path") self.m_sys_path = sys_mock.start() - self.sysdir = self.tmp_dir() + '/' + self.sysdir = self.tmp_dir() + "/" self.m_sys_path.return_value = self.sysdir self.addCleanup(sys_mock.stop) def test_ephemeral_ipv4_network_errors_on_missing_params(self, m_subp): """No required params for EphemeralIPv4Network can be None.""" required_params = { - 'interface': 'eth0', 'ip': '192.168.2.2', - 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255'} + "interface": "eth0", + "ip": "192.168.2.2", + "prefix_or_mask": "255.255.255.0", + "broadcast": "192.168.2.255", + } for key in required_params.keys(): params = copy.deepcopy(required_params) params[key] = None with self.assertRaises(ValueError) as context_manager: net.EphemeralIPv4Network(**params) error = context_manager.exception - self.assertIn('Cannot init network on', str(error)) + self.assertIn("Cannot init network on", str(error)) self.assertEqual(0, m_subp.call_count) def test_ephemeral_ipv4_network_errors_invalid_mask_prefix(self, m_subp): """Raise an error when prefix_or_mask is not a netmask or prefix.""" params = { - 'interface': 'eth0', 'ip': '192.168.2.2', - 'broadcast': '192.168.2.255'} - invalid_masks = ('invalid', 'invalid.', '123.123.123') + "interface": "eth0", + "ip": 
"192.168.2.2", + "broadcast": "192.168.2.255", + } + invalid_masks = ("invalid", "invalid.", "123.123.123") for error_val in invalid_masks: - params['prefix_or_mask'] = error_val + params["prefix_or_mask"] = error_val with self.assertRaises(ValueError) as context_manager: with net.EphemeralIPv4Network(**params): pass error = context_manager.exception - self.assertIn('Cannot setup network: netmask', str(error)) + self.assertIn("Cannot setup network: netmask", str(error)) self.assertEqual(0, m_subp.call_count) def test_ephemeral_ipv4_network_performs_teardown(self, m_subp): """EphemeralIPv4Network performs teardown on the device if setup.""" expected_setup_calls = [ mock.call( - ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24', - 'broadcast', '192.168.2.255', 'dev', 'eth0'], - capture=True, update_env={'LANG': 'C'}), + [ + "ip", + "-family", + "inet", + "addr", + "add", + "192.168.2.2/24", + "broadcast", + "192.168.2.255", + "dev", + "eth0", + ], + capture=True, + update_env={"LANG": "C"}, + ), mock.call( - ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'], - capture=True)] + ["ip", "-family", "inet", "link", "set", "dev", "eth0", "up"], + capture=True, + ), + ] expected_teardown_calls = [ mock.call( - ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', - 'down'], capture=True), + [ + "ip", + "-family", + "inet", + "link", + "set", + "dev", + "eth0", + "down", + ], + capture=True, + ), mock.call( - ['ip', '-family', 'inet', 'addr', 'del', '192.168.2.2/24', - 'dev', 'eth0'], capture=True)] + [ + "ip", + "-family", + "inet", + "addr", + "del", + "192.168.2.2/24", + "dev", + "eth0", + ], + capture=True, + ), + ] params = { - 'interface': 'eth0', 'ip': '192.168.2.2', - 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255'} + "interface": "eth0", + "ip": "192.168.2.2", + "prefix_or_mask": "255.255.255.0", + "broadcast": "192.168.2.255", + } with net.EphemeralIPv4Network(**params): self.assertEqual(expected_setup_calls, m_subp.call_args_list) m_subp.assert_has_calls(expected_teardown_calls) - @mock.patch('cloudinit.net.readurl') + @mock.patch("cloudinit.net.readurl") def test_ephemeral_ipv4_no_network_if_url_connectivity( - self, m_readurl, m_subp): + self, m_readurl, m_subp + ): """No network setup is performed if we can successfully connect to connectivity_url.""" params = { - 'interface': 'eth0', 'ip': '192.168.2.2', - 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255', - 'connectivity_url_data': {'url': 'http://example.org/index.html'} + "interface": "eth0", + "ip": "192.168.2.2", + "prefix_or_mask": "255.255.255.0", + "broadcast": "192.168.2.255", + "connectivity_url_data": {"url": "http://example.org/index.html"}, } with net.EphemeralIPv4Network(**params): self.assertEqual( - [mock.call(url='http://example.org/index.html', timeout=5)], - m_readurl.call_args_list + [mock.call(url="http://example.org/index.html", timeout=5)], + m_readurl.call_args_list, ) # Ensure that no teardown happens: m_subp.assert_has_calls([]) @@ -639,67 +722,173 @@ class TestEphemeralIPV4Network(CiTestCase): It performs no cleanup as the interface was already setup. 
""" params = { - 'interface': 'eth0', 'ip': '192.168.2.2', - 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255'} + "interface": "eth0", + "ip": "192.168.2.2", + "prefix_or_mask": "255.255.255.0", + "broadcast": "192.168.2.255", + } m_subp.side_effect = ProcessExecutionError( - '', 'RTNETLINK answers: File exists', 2) + "", "RTNETLINK answers: File exists", 2 + ) expected_calls = [ mock.call( - ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24', - 'broadcast', '192.168.2.255', 'dev', 'eth0'], - capture=True, update_env={'LANG': 'C'})] + [ + "ip", + "-family", + "inet", + "addr", + "add", + "192.168.2.2/24", + "broadcast", + "192.168.2.255", + "dev", + "eth0", + ], + capture=True, + update_env={"LANG": "C"}, + ) + ] with net.EphemeralIPv4Network(**params): pass self.assertEqual(expected_calls, m_subp.call_args_list) self.assertIn( - 'Skip ephemeral network setup, eth0 already has address', - self.logs.getvalue()) + "Skip ephemeral network setup, eth0 already has address", + self.logs.getvalue(), + ) def test_ephemeral_ipv4_network_with_prefix(self, m_subp): """EphemeralIPv4Network takes a valid prefix to setup the network.""" params = { - 'interface': 'eth0', 'ip': '192.168.2.2', - 'prefix_or_mask': '24', 'broadcast': '192.168.2.255'} - for prefix_val in ['24', 16]: # prefix can be int or string - params['prefix_or_mask'] = prefix_val + "interface": "eth0", + "ip": "192.168.2.2", + "prefix_or_mask": "24", + "broadcast": "192.168.2.255", + } + for prefix_val in ["24", 16]: # prefix can be int or string + params["prefix_or_mask"] = prefix_val with net.EphemeralIPv4Network(**params): pass - m_subp.assert_has_calls([mock.call( - ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24', - 'broadcast', '192.168.2.255', 'dev', 'eth0'], - capture=True, update_env={'LANG': 'C'})]) - m_subp.assert_has_calls([mock.call( - ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/16', - 'broadcast', '192.168.2.255', 'dev', 'eth0'], - capture=True, update_env={'LANG': 'C'})]) + m_subp.assert_has_calls( + [ + mock.call( + [ + "ip", + "-family", + "inet", + "addr", + "add", + "192.168.2.2/24", + "broadcast", + "192.168.2.255", + "dev", + "eth0", + ], + capture=True, + update_env={"LANG": "C"}, + ) + ] + ) + m_subp.assert_has_calls( + [ + mock.call( + [ + "ip", + "-family", + "inet", + "addr", + "add", + "192.168.2.2/16", + "broadcast", + "192.168.2.255", + "dev", + "eth0", + ], + capture=True, + update_env={"LANG": "C"}, + ) + ] + ) def test_ephemeral_ipv4_network_with_new_default_route(self, m_subp): """Add the route when router is set and no default route exists.""" params = { - 'interface': 'eth0', 'ip': '192.168.2.2', - 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255', - 'router': '192.168.2.1'} - m_subp.return_value = '', '' # Empty response from ip route gw check + "interface": "eth0", + "ip": "192.168.2.2", + "prefix_or_mask": "255.255.255.0", + "broadcast": "192.168.2.255", + "router": "192.168.2.1", + } + m_subp.return_value = "", "" # Empty response from ip route gw check expected_setup_calls = [ mock.call( - ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24', - 'broadcast', '192.168.2.255', 'dev', 'eth0'], - capture=True, update_env={'LANG': 'C'}), + [ + "ip", + "-family", + "inet", + "addr", + "add", + "192.168.2.2/24", + "broadcast", + "192.168.2.255", + "dev", + "eth0", + ], + capture=True, + update_env={"LANG": "C"}, + ), mock.call( - ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'], - capture=True), + ["ip", "-family", "inet", "link", 
"set", "dev", "eth0", "up"], + capture=True, + ), + mock.call(["ip", "route", "show", "0.0.0.0/0"], capture=True), mock.call( - ['ip', 'route', 'show', '0.0.0.0/0'], capture=True), - mock.call(['ip', '-4', 'route', 'add', '192.168.2.1', - 'dev', 'eth0', 'src', '192.168.2.2'], capture=True), + [ + "ip", + "-4", + "route", + "add", + "192.168.2.1", + "dev", + "eth0", + "src", + "192.168.2.2", + ], + capture=True, + ), mock.call( - ['ip', '-4', 'route', 'add', 'default', 'via', - '192.168.2.1', 'dev', 'eth0'], capture=True)] + [ + "ip", + "-4", + "route", + "add", + "default", + "via", + "192.168.2.1", + "dev", + "eth0", + ], + capture=True, + ), + ] expected_teardown_calls = [ - mock.call(['ip', '-4', 'route', 'del', 'default', 'dev', 'eth0'], - capture=True), - mock.call(['ip', '-4', 'route', 'del', '192.168.2.1', - 'dev', 'eth0', 'src', '192.168.2.2'], capture=True), + mock.call( + ["ip", "-4", "route", "del", "default", "dev", "eth0"], + capture=True, + ), + mock.call( + [ + "ip", + "-4", + "route", + "del", + "192.168.2.1", + "dev", + "eth0", + "src", + "192.168.2.2", + ], + capture=True, + ), ] with net.EphemeralIPv4Network(**params): @@ -708,45 +897,138 @@ class TestEphemeralIPV4Network(CiTestCase): def test_ephemeral_ipv4_network_with_rfc3442_static_routes(self, m_subp): params = { - 'interface': 'eth0', 'ip': '192.168.2.2', - 'prefix_or_mask': '255.255.255.255', 'broadcast': '192.168.2.255', - 'static_routes': [('192.168.2.1/32', '0.0.0.0'), - ('169.254.169.254/32', '192.168.2.1'), - ('0.0.0.0/0', '192.168.2.1')], - 'router': '192.168.2.1'} + "interface": "eth0", + "ip": "192.168.2.2", + "prefix_or_mask": "255.255.255.255", + "broadcast": "192.168.2.255", + "static_routes": [ + ("192.168.2.1/32", "0.0.0.0"), + ("169.254.169.254/32", "192.168.2.1"), + ("0.0.0.0/0", "192.168.2.1"), + ], + "router": "192.168.2.1", + } expected_setup_calls = [ mock.call( - ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/32', - 'broadcast', '192.168.2.255', 'dev', 'eth0'], - capture=True, update_env={'LANG': 'C'}), + [ + "ip", + "-family", + "inet", + "addr", + "add", + "192.168.2.2/32", + "broadcast", + "192.168.2.255", + "dev", + "eth0", + ], + capture=True, + update_env={"LANG": "C"}, + ), mock.call( - ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'], - capture=True), + ["ip", "-family", "inet", "link", "set", "dev", "eth0", "up"], + capture=True, + ), mock.call( - ['ip', '-4', 'route', 'append', '192.168.2.1/32', - 'dev', 'eth0'], capture=True), + [ + "ip", + "-4", + "route", + "append", + "192.168.2.1/32", + "dev", + "eth0", + ], + capture=True, + ), mock.call( - ['ip', '-4', 'route', 'append', '169.254.169.254/32', - 'via', '192.168.2.1', 'dev', 'eth0'], capture=True), + [ + "ip", + "-4", + "route", + "append", + "169.254.169.254/32", + "via", + "192.168.2.1", + "dev", + "eth0", + ], + capture=True, + ), mock.call( - ['ip', '-4', 'route', 'append', '0.0.0.0/0', - 'via', '192.168.2.1', 'dev', 'eth0'], capture=True)] + [ + "ip", + "-4", + "route", + "append", + "0.0.0.0/0", + "via", + "192.168.2.1", + "dev", + "eth0", + ], + capture=True, + ), + ] expected_teardown_calls = [ mock.call( - ['ip', '-4', 'route', 'del', '0.0.0.0/0', - 'via', '192.168.2.1', 'dev', 'eth0'], capture=True), + [ + "ip", + "-4", + "route", + "del", + "0.0.0.0/0", + "via", + "192.168.2.1", + "dev", + "eth0", + ], + capture=True, + ), mock.call( - ['ip', '-4', 'route', 'del', '169.254.169.254/32', - 'via', '192.168.2.1', 'dev', 'eth0'], capture=True), + [ + "ip", + "-4", + "route", + "del", + 
"169.254.169.254/32", + "via", + "192.168.2.1", + "dev", + "eth0", + ], + capture=True, + ), mock.call( - ['ip', '-4', 'route', 'del', '192.168.2.1/32', - 'dev', 'eth0'], capture=True), + ["ip", "-4", "route", "del", "192.168.2.1/32", "dev", "eth0"], + capture=True, + ), mock.call( - ['ip', '-family', 'inet', 'link', 'set', 'dev', - 'eth0', 'down'], capture=True), + [ + "ip", + "-family", + "inet", + "link", + "set", + "dev", + "eth0", + "down", + ], + capture=True, + ), mock.call( - ['ip', '-family', 'inet', 'addr', 'del', - '192.168.2.2/32', 'dev', 'eth0'], capture=True) + [ + "ip", + "-family", + "inet", + "addr", + "del", + "192.168.2.2/32", + "dev", + "eth0", + ], + capture=True, + ), ] with net.EphemeralIPv4Network(**params): self.assertEqual(expected_setup_calls, m_subp.call_args_list) @@ -754,7 +1036,8 @@ class TestEphemeralIPV4Network(CiTestCase): class TestApplyNetworkCfgNames(CiTestCase): - V1_CONFIG = textwrap.dedent("""\ + V1_CONFIG = textwrap.dedent( + """\ version: 1 config: - type: physical @@ -765,8 +1048,10 @@ class TestApplyNetworkCfgNames(CiTestCase): address: 10.0.2.15 netmask: 255.255.255.0 gateway: 10.0.2.2 - """) - V2_CONFIG = textwrap.dedent("""\ + """ + ) + V2_CONFIG = textwrap.dedent( + """\ version: 2 ethernets: interface0: @@ -776,9 +1061,11 @@ class TestApplyNetworkCfgNames(CiTestCase): - 10.0.2.15/24 gateway4: 10.0.2.2 set-name: interface0 - """) + """ + ) - V2_CONFIG_NO_SETNAME = textwrap.dedent("""\ + V2_CONFIG_NO_SETNAME = textwrap.dedent( + """\ version: 2 ethernets: interface0: @@ -787,9 +1074,11 @@ class TestApplyNetworkCfgNames(CiTestCase): addresses: - 10.0.2.15/24 gateway4: 10.0.2.2 - """) + """ + ) - V2_CONFIG_NO_MAC = textwrap.dedent("""\ + V2_CONFIG_NO_MAC = textwrap.dedent( + """\ version: 2 ethernets: interface0: @@ -799,40 +1088,43 @@ class TestApplyNetworkCfgNames(CiTestCase): - 10.0.2.15/24 gateway4: 10.0.2.2 set-name: interface0 - """) + """ + ) - @mock.patch('cloudinit.net.device_devid') - @mock.patch('cloudinit.net.device_driver') - @mock.patch('cloudinit.net._rename_interfaces') - def test_apply_v1_renames(self, m_rename_interfaces, m_device_driver, - m_device_devid): - m_device_driver.return_value = 'virtio_net' - m_device_devid.return_value = '0x15d8' + @mock.patch("cloudinit.net.device_devid") + @mock.patch("cloudinit.net.device_driver") + @mock.patch("cloudinit.net._rename_interfaces") + def test_apply_v1_renames( + self, m_rename_interfaces, m_device_driver, m_device_devid + ): + m_device_driver.return_value = "virtio_net" + m_device_devid.return_value = "0x15d8" net.apply_network_config_names(yaml.load(self.V1_CONFIG)) - call = ['52:54:00:12:34:00', 'interface0', 'virtio_net', '0x15d8'] + call = ["52:54:00:12:34:00", "interface0", "virtio_net", "0x15d8"] m_rename_interfaces.assert_called_with([call]) - @mock.patch('cloudinit.net.device_devid') - @mock.patch('cloudinit.net.device_driver') - @mock.patch('cloudinit.net._rename_interfaces') - def test_apply_v2_renames(self, m_rename_interfaces, m_device_driver, - m_device_devid): - m_device_driver.return_value = 'virtio_net' - m_device_devid.return_value = '0x15d8' + @mock.patch("cloudinit.net.device_devid") + @mock.patch("cloudinit.net.device_driver") + @mock.patch("cloudinit.net._rename_interfaces") + def test_apply_v2_renames( + self, m_rename_interfaces, m_device_driver, m_device_devid + ): + m_device_driver.return_value = "virtio_net" + m_device_devid.return_value = "0x15d8" net.apply_network_config_names(yaml.load(self.V2_CONFIG)) - call = ['52:54:00:12:34:00', 'interface0', 
'virtio_net', '0x15d8'] + call = ["52:54:00:12:34:00", "interface0", "virtio_net", "0x15d8"] m_rename_interfaces.assert_called_with([call]) - @mock.patch('cloudinit.net._rename_interfaces') + @mock.patch("cloudinit.net._rename_interfaces") def test_apply_v2_renames_skips_without_setname(self, m_rename_interfaces): net.apply_network_config_names(yaml.load(self.V2_CONFIG_NO_SETNAME)) m_rename_interfaces.assert_called_with([]) - @mock.patch('cloudinit.net._rename_interfaces') + @mock.patch("cloudinit.net._rename_interfaces") def test_apply_v2_renames_skips_without_mac(self, m_rename_interfaces): net.apply_network_config_names(yaml.load(self.V2_CONFIG_NO_MAC)) m_rename_interfaces.assert_called_with([]) @@ -843,184 +1135,191 @@ class TestApplyNetworkCfgNames(CiTestCase): class TestHasURLConnectivity(HttprettyTestCase): - def setUp(self): super(TestHasURLConnectivity, self).setUp() - self.url = 'http://fake/' - self.kwargs = {'allow_redirects': True, 'timeout': 5.0} + self.url = "http://fake/" + self.kwargs = {"allow_redirects": True, "timeout": 5.0} - @mock.patch('cloudinit.net.readurl') + @mock.patch("cloudinit.net.readurl") def test_url_timeout_on_connectivity_check(self, m_readurl): """A timeout of 5 seconds is provided when reading a url.""" self.assertTrue( - net.has_url_connectivity({'url': self.url}), - 'Expected True on url connect') + net.has_url_connectivity({"url": self.url}), + "Expected True on url connect", + ) def test_true_on_url_connectivity_success(self): httpretty.register_uri(httpretty.GET, self.url) self.assertTrue( - net.has_url_connectivity({'url': self.url}), - 'Expected True on url connect') + net.has_url_connectivity({"url": self.url}), + "Expected True on url connect", + ) - @mock.patch('requests.Session.request') + @mock.patch("requests.Session.request") def test_true_on_url_connectivity_timeout(self, m_request): """A timeout raised accessing the url will return False.""" - m_request.side_effect = requests.Timeout('Fake Connection Timeout') + m_request.side_effect = requests.Timeout("Fake Connection Timeout") self.assertFalse( - net.has_url_connectivity({'url': self.url}), - 'Expected False on url timeout') + net.has_url_connectivity({"url": self.url}), + "Expected False on url timeout", + ) def test_true_on_url_connectivity_failure(self): httpretty.register_uri(httpretty.GET, self.url, body={}, status=404) self.assertFalse( - net.has_url_connectivity({'url': self.url}), - 'Expected False on url fail') + net.has_url_connectivity({"url": self.url}), + "Expected False on url fail", + ) def _mk_v1_phys(mac, name, driver, device_id): - v1_cfg = {'type': 'physical', 'name': name, 'mac_address': mac} + v1_cfg = {"type": "physical", "name": name, "mac_address": mac} params = {} if driver: - params.update({'driver': driver}) + params.update({"driver": driver}) if device_id: - params.update({'device_id': device_id}) + params.update({"device_id": device_id}) if params: - v1_cfg.update({'params': params}) + v1_cfg.update({"params": params}) return v1_cfg def _mk_v2_phys(mac, name, driver=None, device_id=None): - v2_cfg = {'set-name': name, 'match': {'macaddress': mac}} + v2_cfg = {"set-name": name, "match": {"macaddress": mac}} if driver: - v2_cfg['match'].update({'driver': driver}) + v2_cfg["match"].update({"driver": driver}) if device_id: - v2_cfg['match'].update({'device_id': device_id}) + v2_cfg["match"].update({"device_id": device_id}) return v2_cfg class TestExtractPhysdevs(CiTestCase): - def setUp(self): super(TestExtractPhysdevs, self).setUp() - 
self.add_patch('cloudinit.net.device_driver', 'm_driver') - self.add_patch('cloudinit.net.device_devid', 'm_devid') + self.add_patch("cloudinit.net.device_driver", "m_driver") + self.add_patch("cloudinit.net.device_devid", "m_devid") def test_extract_physdevs_looks_up_driver_v1(self): - driver = 'virtio' + driver = "virtio" self.m_driver.return_value = driver physdevs = [ - ['aa:bb:cc:dd:ee:ff', 'eth0', None, '0x1000'], + ["aa:bb:cc:dd:ee:ff", "eth0", None, "0x1000"], ] netcfg = { - 'version': 1, - 'config': [_mk_v1_phys(*args) for args in physdevs], + "version": 1, + "config": [_mk_v1_phys(*args) for args in physdevs], } # insert the driver value for verification physdevs[0][2] = driver - self.assertEqual(sorted(physdevs), - sorted(net.extract_physdevs(netcfg))) - self.m_driver.assert_called_with('eth0') + self.assertEqual( + sorted(physdevs), sorted(net.extract_physdevs(netcfg)) + ) + self.m_driver.assert_called_with("eth0") def test_extract_physdevs_looks_up_driver_v2(self): - driver = 'virtio' + driver = "virtio" self.m_driver.return_value = driver physdevs = [ - ['aa:bb:cc:dd:ee:ff', 'eth0', None, '0x1000'], + ["aa:bb:cc:dd:ee:ff", "eth0", None, "0x1000"], ] netcfg = { - 'version': 2, - 'ethernets': {args[1]: _mk_v2_phys(*args) for args in physdevs}, + "version": 2, + "ethernets": {args[1]: _mk_v2_phys(*args) for args in physdevs}, } # insert the driver value for verification physdevs[0][2] = driver - self.assertEqual(sorted(physdevs), - sorted(net.extract_physdevs(netcfg))) - self.m_driver.assert_called_with('eth0') + self.assertEqual( + sorted(physdevs), sorted(net.extract_physdevs(netcfg)) + ) + self.m_driver.assert_called_with("eth0") def test_extract_physdevs_looks_up_devid_v1(self): - devid = '0x1000' + devid = "0x1000" self.m_devid.return_value = devid physdevs = [ - ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', None], + ["aa:bb:cc:dd:ee:ff", "eth0", "virtio", None], ] netcfg = { - 'version': 1, - 'config': [_mk_v1_phys(*args) for args in physdevs], + "version": 1, + "config": [_mk_v1_phys(*args) for args in physdevs], } # insert the driver value for verification physdevs[0][3] = devid - self.assertEqual(sorted(physdevs), - sorted(net.extract_physdevs(netcfg))) - self.m_devid.assert_called_with('eth0') + self.assertEqual( + sorted(physdevs), sorted(net.extract_physdevs(netcfg)) + ) + self.m_devid.assert_called_with("eth0") def test_extract_physdevs_looks_up_devid_v2(self): - devid = '0x1000' + devid = "0x1000" self.m_devid.return_value = devid physdevs = [ - ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', None], + ["aa:bb:cc:dd:ee:ff", "eth0", "virtio", None], ] netcfg = { - 'version': 2, - 'ethernets': {args[1]: _mk_v2_phys(*args) for args in physdevs}, + "version": 2, + "ethernets": {args[1]: _mk_v2_phys(*args) for args in physdevs}, } # insert the driver value for verification physdevs[0][3] = devid - self.assertEqual(sorted(physdevs), - sorted(net.extract_physdevs(netcfg))) - self.m_devid.assert_called_with('eth0') + self.assertEqual( + sorted(physdevs), sorted(net.extract_physdevs(netcfg)) + ) + self.m_devid.assert_called_with("eth0") def test_get_v1_type_physical(self): physdevs = [ - ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'], - ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'], - ['09:87:65:43:21:10', 'ens0p1', 'mlx4_core', '0:0:1000'], + ["aa:bb:cc:dd:ee:ff", "eth0", "virtio", "0x1000"], + ["00:11:22:33:44:55", "ens3", "e1000", "0x1643"], + ["09:87:65:43:21:10", "ens0p1", "mlx4_core", "0:0:1000"], ] netcfg = { - 'version': 1, - 'config': [_mk_v1_phys(*args) for args in 
physdevs], + "version": 1, + "config": [_mk_v1_phys(*args) for args in physdevs], } - self.assertEqual(sorted(physdevs), - sorted(net.extract_physdevs(netcfg))) + self.assertEqual( + sorted(physdevs), sorted(net.extract_physdevs(netcfg)) + ) def test_get_v2_type_physical(self): physdevs = [ - ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'], - ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'], - ['09:87:65:43:21:10', 'ens0p1', 'mlx4_core', '0:0:1000'], + ["aa:bb:cc:dd:ee:ff", "eth0", "virtio", "0x1000"], + ["00:11:22:33:44:55", "ens3", "e1000", "0x1643"], + ["09:87:65:43:21:10", "ens0p1", "mlx4_core", "0:0:1000"], ] netcfg = { - 'version': 2, - 'ethernets': {args[1]: _mk_v2_phys(*args) for args in physdevs}, + "version": 2, + "ethernets": {args[1]: _mk_v2_phys(*args) for args in physdevs}, } - self.assertEqual(sorted(physdevs), - sorted(net.extract_physdevs(netcfg))) + self.assertEqual( + sorted(physdevs), sorted(net.extract_physdevs(netcfg)) + ) def test_get_v2_type_physical_skips_if_no_set_name(self): netcfg = { - 'version': 2, - 'ethernets': { - 'ens3': { - 'match': {'macaddress': '00:11:22:33:44:55'}, + "version": 2, + "ethernets": { + "ens3": { + "match": {"macaddress": "00:11:22:33:44:55"}, } - } + }, } self.assertEqual([], net.extract_physdevs(netcfg)) def test_runtime_error_on_unknown_netcfg_version(self): with self.assertRaises(RuntimeError): - net.extract_physdevs({'version': 3, 'awesome_config': []}) + net.extract_physdevs({"version": 3, "awesome_config": []}) class TestNetFailOver(CiTestCase): - def setUp(self): super(TestNetFailOver, self).setUp() - self.add_patch('cloudinit.net.util', 'm_util') - self.add_patch('cloudinit.net.read_sys_net', 'm_read_sys_net') - self.add_patch('cloudinit.net.device_driver', 'm_device_driver') + self.add_patch("cloudinit.net.util", "m_util") + self.add_patch("cloudinit.net.read_sys_net", "m_read_sys_net") + self.add_patch("cloudinit.net.device_driver", "m_device_driver") def test_get_dev_features(self): devname = self.random_string() @@ -1029,192 +1328,210 @@ class TestNetFailOver(CiTestCase): self.assertEqual(features, net.get_dev_features(devname)) self.assertEqual(1, self.m_read_sys_net.call_count) - self.assertEqual(mock.call(devname, 'device/features'), - self.m_read_sys_net.call_args_list[0]) + self.assertEqual( + mock.call(devname, "device/features"), + self.m_read_sys_net.call_args_list[0], + ) def test_get_dev_features_none_returns_empty_string(self): devname = self.random_string() - self.m_read_sys_net.side_effect = Exception('error') - self.assertEqual('', net.get_dev_features(devname)) + self.m_read_sys_net.side_effect = Exception("error") + self.assertEqual("", net.get_dev_features(devname)) self.assertEqual(1, self.m_read_sys_net.call_count) - self.assertEqual(mock.call(devname, 'device/features'), - self.m_read_sys_net.call_args_list[0]) + self.assertEqual( + mock.call(devname, "device/features"), + self.m_read_sys_net.call_args_list[0], + ) - @mock.patch('cloudinit.net.get_dev_features') + @mock.patch("cloudinit.net.get_dev_features") def test_has_netfail_standby_feature(self, m_dev_features): devname = self.random_string() - standby_features = ('0' * 62) + '1' + '0' + standby_features = ("0" * 62) + "1" + "0" m_dev_features.return_value = standby_features self.assertTrue(net.has_netfail_standby_feature(devname)) - @mock.patch('cloudinit.net.get_dev_features') + @mock.patch("cloudinit.net.get_dev_features") def test_has_netfail_standby_feature_short_is_false(self, m_dev_features): devname = self.random_string() standby_features = 
self.random_string() m_dev_features.return_value = standby_features self.assertFalse(net.has_netfail_standby_feature(devname)) - @mock.patch('cloudinit.net.get_dev_features') - def test_has_netfail_standby_feature_not_present_is_false(self, - m_dev_features): + @mock.patch("cloudinit.net.get_dev_features") + def test_has_netfail_standby_feature_not_present_is_false( + self, m_dev_features + ): devname = self.random_string() - standby_features = '0' * 64 + standby_features = "0" * 64 m_dev_features.return_value = standby_features self.assertFalse(net.has_netfail_standby_feature(devname)) - @mock.patch('cloudinit.net.get_dev_features') - def test_has_netfail_standby_feature_no_features_is_false(self, - m_dev_features): + @mock.patch("cloudinit.net.get_dev_features") + def test_has_netfail_standby_feature_no_features_is_false( + self, m_dev_features + ): devname = self.random_string() standby_features = None m_dev_features.return_value = standby_features self.assertFalse(net.has_netfail_standby_feature(devname)) - @mock.patch('cloudinit.net.has_netfail_standby_feature') - @mock.patch('cloudinit.net.os.path.exists') + @mock.patch("cloudinit.net.has_netfail_standby_feature") + @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_master(self, m_exists, m_standby): devname = self.random_string() - driver = 'virtio_net' + driver = "virtio_net" m_exists.return_value = False # no master sysfs attr m_standby.return_value = True # has standby feature flag self.assertTrue(net.is_netfail_master(devname, driver)) - @mock.patch('cloudinit.net.sys_dev_path') + @mock.patch("cloudinit.net.sys_dev_path") def test_is_netfail_master_checks_master_attr(self, m_sysdev): devname = self.random_string() - driver = 'virtio_net' + driver = "virtio_net" m_sysdev.return_value = self.random_string() self.assertFalse(net.is_netfail_master(devname, driver)) self.assertEqual(1, m_sysdev.call_count) - self.assertEqual(mock.call(devname, path='master'), - m_sysdev.call_args_list[0]) + self.assertEqual( + mock.call(devname, path="master"), m_sysdev.call_args_list[0] + ) - @mock.patch('cloudinit.net.has_netfail_standby_feature') - @mock.patch('cloudinit.net.os.path.exists') + @mock.patch("cloudinit.net.has_netfail_standby_feature") + @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_master_wrong_driver(self, m_exists, m_standby): devname = self.random_string() driver = self.random_string() self.assertFalse(net.is_netfail_master(devname, driver)) - @mock.patch('cloudinit.net.has_netfail_standby_feature') - @mock.patch('cloudinit.net.os.path.exists') + @mock.patch("cloudinit.net.has_netfail_standby_feature") + @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_master_has_master_attr(self, m_exists, m_standby): devname = self.random_string() - driver = 'virtio_net' + driver = "virtio_net" m_exists.return_value = True # has master sysfs attr self.assertFalse(net.is_netfail_master(devname, driver)) - @mock.patch('cloudinit.net.has_netfail_standby_feature') - @mock.patch('cloudinit.net.os.path.exists') + @mock.patch("cloudinit.net.has_netfail_standby_feature") + @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_master_no_standby_feat(self, m_exists, m_standby): devname = self.random_string() - driver = 'virtio_net' + driver = "virtio_net" m_exists.return_value = False # no master sysfs attr m_standby.return_value = False # no standby feature flag self.assertFalse(net.is_netfail_master(devname, driver)) - @mock.patch('cloudinit.net.has_netfail_standby_feature') - 
@mock.patch('cloudinit.net.os.path.exists') - @mock.patch('cloudinit.net.sys_dev_path') + @mock.patch("cloudinit.net.has_netfail_standby_feature") + @mock.patch("cloudinit.net.os.path.exists") + @mock.patch("cloudinit.net.sys_dev_path") def test_is_netfail_primary(self, m_sysdev, m_exists, m_standby): devname = self.random_string() driver = self.random_string() # device not virtio_net master_devname = self.random_string() - m_sysdev.return_value = "%s/%s" % (self.random_string(), - master_devname) + m_sysdev.return_value = "%s/%s" % ( + self.random_string(), + master_devname, + ) m_exists.return_value = True # has master sysfs attr - self.m_device_driver.return_value = 'virtio_net' # master virtio_net + self.m_device_driver.return_value = "virtio_net" # master virtio_net m_standby.return_value = True # has standby feature flag self.assertTrue(net.is_netfail_primary(devname, driver)) self.assertEqual(1, self.m_device_driver.call_count) - self.assertEqual(mock.call(master_devname), - self.m_device_driver.call_args_list[0]) + self.assertEqual( + mock.call(master_devname), self.m_device_driver.call_args_list[0] + ) self.assertEqual(1, m_standby.call_count) - self.assertEqual(mock.call(master_devname), - m_standby.call_args_list[0]) - - @mock.patch('cloudinit.net.has_netfail_standby_feature') - @mock.patch('cloudinit.net.os.path.exists') - @mock.patch('cloudinit.net.sys_dev_path') - def test_is_netfail_primary_wrong_driver(self, m_sysdev, m_exists, - m_standby): + self.assertEqual( + mock.call(master_devname), m_standby.call_args_list[0] + ) + + @mock.patch("cloudinit.net.has_netfail_standby_feature") + @mock.patch("cloudinit.net.os.path.exists") + @mock.patch("cloudinit.net.sys_dev_path") + def test_is_netfail_primary_wrong_driver( + self, m_sysdev, m_exists, m_standby + ): devname = self.random_string() - driver = 'virtio_net' + driver = "virtio_net" self.assertFalse(net.is_netfail_primary(devname, driver)) - @mock.patch('cloudinit.net.has_netfail_standby_feature') - @mock.patch('cloudinit.net.os.path.exists') - @mock.patch('cloudinit.net.sys_dev_path') + @mock.patch("cloudinit.net.has_netfail_standby_feature") + @mock.patch("cloudinit.net.os.path.exists") + @mock.patch("cloudinit.net.sys_dev_path") def test_is_netfail_primary_no_master(self, m_sysdev, m_exists, m_standby): devname = self.random_string() driver = self.random_string() # device not virtio_net m_exists.return_value = False # no master sysfs attr self.assertFalse(net.is_netfail_primary(devname, driver)) - @mock.patch('cloudinit.net.has_netfail_standby_feature') - @mock.patch('cloudinit.net.os.path.exists') - @mock.patch('cloudinit.net.sys_dev_path') - def test_is_netfail_primary_bad_master(self, m_sysdev, m_exists, - m_standby): + @mock.patch("cloudinit.net.has_netfail_standby_feature") + @mock.patch("cloudinit.net.os.path.exists") + @mock.patch("cloudinit.net.sys_dev_path") + def test_is_netfail_primary_bad_master( + self, m_sysdev, m_exists, m_standby + ): devname = self.random_string() driver = self.random_string() # device not virtio_net master_devname = self.random_string() - m_sysdev.return_value = "%s/%s" % (self.random_string(), - master_devname) + m_sysdev.return_value = "%s/%s" % ( + self.random_string(), + master_devname, + ) m_exists.return_value = True # has master sysfs attr - self.m_device_driver.return_value = 'XXXX' # master not virtio_net + self.m_device_driver.return_value = "XXXX" # master not virtio_net self.assertFalse(net.is_netfail_primary(devname, driver)) - 
@mock.patch('cloudinit.net.has_netfail_standby_feature') - @mock.patch('cloudinit.net.os.path.exists') - @mock.patch('cloudinit.net.sys_dev_path') - def test_is_netfail_primary_no_standby(self, m_sysdev, m_exists, - m_standby): + @mock.patch("cloudinit.net.has_netfail_standby_feature") + @mock.patch("cloudinit.net.os.path.exists") + @mock.patch("cloudinit.net.sys_dev_path") + def test_is_netfail_primary_no_standby( + self, m_sysdev, m_exists, m_standby + ): devname = self.random_string() driver = self.random_string() # device not virtio_net master_devname = self.random_string() - m_sysdev.return_value = "%s/%s" % (self.random_string(), - master_devname) + m_sysdev.return_value = "%s/%s" % ( + self.random_string(), + master_devname, + ) m_exists.return_value = True # has master sysfs attr - self.m_device_driver.return_value = 'virtio_net' # master virtio_net + self.m_device_driver.return_value = "virtio_net" # master virtio_net m_standby.return_value = False # master has no standby feature flag self.assertFalse(net.is_netfail_primary(devname, driver)) - @mock.patch('cloudinit.net.has_netfail_standby_feature') - @mock.patch('cloudinit.net.os.path.exists') + @mock.patch("cloudinit.net.has_netfail_standby_feature") + @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_standby(self, m_exists, m_standby): devname = self.random_string() - driver = 'virtio_net' + driver = "virtio_net" m_exists.return_value = True # has master sysfs attr m_standby.return_value = True # has standby feature flag self.assertTrue(net.is_netfail_standby(devname, driver)) - @mock.patch('cloudinit.net.has_netfail_standby_feature') - @mock.patch('cloudinit.net.os.path.exists') + @mock.patch("cloudinit.net.has_netfail_standby_feature") + @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_standby_wrong_driver(self, m_exists, m_standby): devname = self.random_string() driver = self.random_string() self.assertFalse(net.is_netfail_standby(devname, driver)) - @mock.patch('cloudinit.net.has_netfail_standby_feature') - @mock.patch('cloudinit.net.os.path.exists') + @mock.patch("cloudinit.net.has_netfail_standby_feature") + @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_standby_no_master(self, m_exists, m_standby): devname = self.random_string() - driver = 'virtio_net' + driver = "virtio_net" m_exists.return_value = False # has master sysfs attr self.assertFalse(net.is_netfail_standby(devname, driver)) - @mock.patch('cloudinit.net.has_netfail_standby_feature') - @mock.patch('cloudinit.net.os.path.exists') + @mock.patch("cloudinit.net.has_netfail_standby_feature") + @mock.patch("cloudinit.net.os.path.exists") def test_is_netfail_standby_no_standby_feature(self, m_exists, m_standby): devname = self.random_string() - driver = 'virtio_net' + driver = "virtio_net" m_exists.return_value = True # has master sysfs attr m_standby.return_value = False # has standby feature flag self.assertFalse(net.is_netfail_standby(devname, driver)) - @mock.patch('cloudinit.net.is_netfail_standby') - @mock.patch('cloudinit.net.is_netfail_primary') + @mock.patch("cloudinit.net.is_netfail_standby") + @mock.patch("cloudinit.net.is_netfail_primary") def test_is_netfailover_primary(self, m_primary, m_standby): devname = self.random_string() driver = self.random_string() @@ -1222,8 +1539,8 @@ class TestNetFailOver(CiTestCase): m_standby.return_value = False self.assertTrue(net.is_netfailover(devname, driver)) - @mock.patch('cloudinit.net.is_netfail_standby') - @mock.patch('cloudinit.net.is_netfail_primary') + 
@mock.patch("cloudinit.net.is_netfail_standby") + @mock.patch("cloudinit.net.is_netfail_primary") def test_is_netfailover_standby(self, m_primary, m_standby): devname = self.random_string() driver = self.random_string() @@ -1231,8 +1548,8 @@ class TestNetFailOver(CiTestCase): m_standby.return_value = True self.assertTrue(net.is_netfailover(devname, driver)) - @mock.patch('cloudinit.net.is_netfail_standby') - @mock.patch('cloudinit.net.is_netfail_primary') + @mock.patch("cloudinit.net.is_netfail_standby") + @mock.patch("cloudinit.net.is_netfail_primary") def test_is_netfailover_returns_false(self, m_primary, m_standby): devname = self.random_string() driver = self.random_string() @@ -1274,6 +1591,7 @@ class TestGetOVSInternalInterfaces: Uses the ``clear_lru_cache`` local autouse fixture to allow us to test despite the ``lru_cache`` decorator on the unit under test. """ + @pytest.fixture(autouse=True) def clear_lru_cache(self): net.get_ovs_internal_interfaces.cache_clear() @@ -1364,14 +1682,19 @@ class TestIsIpAddress: the ipaddress module correctly. """ - @pytest.mark.parametrize('ip_address_side_effect,expected_return', ( - (ValueError, False), - (lambda _: ipaddress.IPv4Address('192.168.0.1'), True), - (lambda _: ipaddress.IPv6Address('2001:db8::'), True), - )) + @pytest.mark.parametrize( + "ip_address_side_effect,expected_return", + ( + (ValueError, False), + (lambda _: ipaddress.IPv4Address("192.168.0.1"), True), + (lambda _: ipaddress.IPv6Address("2001:db8::"), True), + ), + ) def test_is_ip_address(self, ip_address_side_effect, expected_return): - with mock.patch('cloudinit.net.ipaddress.ip_address', - side_effect=ip_address_side_effect) as m_ip_address: + with mock.patch( + "cloudinit.net.ipaddress.ip_address", + side_effect=ip_address_side_effect, + ) as m_ip_address: ret = net.is_ip_address(mock.sentinel.ip_address_in) assert expected_return == ret expected_call = mock.call(mock.sentinel.ip_address_in) @@ -1386,13 +1709,20 @@ class TestIsIpv4Address: the ipaddress module correctly. 
""" - @pytest.mark.parametrize('ipv4address_mock,expected_return', ( - (mock.Mock(side_effect=ValueError), False), - (mock.Mock(return_value=ipaddress.IPv4Address('192.168.0.1')), True), - )) + @pytest.mark.parametrize( + "ipv4address_mock,expected_return", + ( + (mock.Mock(side_effect=ValueError), False), + ( + mock.Mock(return_value=ipaddress.IPv4Address("192.168.0.1")), + True, + ), + ), + ) def test_is_ip_address(self, ipv4address_mock, expected_return): - with mock.patch('cloudinit.net.ipaddress.IPv4Address', - ipv4address_mock) as m_ipv4address: + with mock.patch( + "cloudinit.net.ipaddress.IPv4Address", ipv4address_mock + ) as m_ipv4address: ret = net.is_ipv4_address(mock.sentinel.ip_address_in) assert expected_return == ret expected_call = mock.call(mock.sentinel.ip_address_in) diff --git a/tests/unittests/net/test_network_state.py b/tests/unittests/net/test_network_state.py index fdcd5296..88da9f94 100644 --- a/tests/unittests/net/test_network_state.py +++ b/tests/unittests/net/test_network_state.py @@ -8,7 +8,7 @@ from cloudinit import safeyaml from cloudinit.net import network_state from tests.unittests.helpers import CiTestCase -netstate_path = 'cloudinit.net.network_state' +netstate_path = "cloudinit.net.network_state" _V1_CONFIG_NAMESERVERS = """\ @@ -36,8 +36,8 @@ network: mac_address: '66:77:88:99:00:11' """ -V1_CONFIG_NAMESERVERS_VALID = _V1_CONFIG_NAMESERVERS.format(iface='eth1') -V1_CONFIG_NAMESERVERS_INVALID = _V1_CONFIG_NAMESERVERS.format(iface='eth90') +V1_CONFIG_NAMESERVERS_VALID = _V1_CONFIG_NAMESERVERS.format(iface="eth1") +V1_CONFIG_NAMESERVERS_INVALID = _V1_CONFIG_NAMESERVERS.format(iface="eth90") V2_CONFIG_NAMESERVERS = """\ network: @@ -60,11 +60,10 @@ network: class TestNetworkStateParseConfig(CiTestCase): - def setUp(self): super(TestNetworkStateParseConfig, self).setUp() - nsi_path = netstate_path + '.NetworkStateInterpreter' - self.add_patch(nsi_path, 'm_nsi') + nsi_path = netstate_path + ".NetworkStateInterpreter" + self.add_patch(nsi_path, "m_nsi") def test_missing_version_returns_none(self): ncfg = {} @@ -72,93 +71,96 @@ class TestNetworkStateParseConfig(CiTestCase): network_state.parse_net_config_data(ncfg) def test_unknown_versions_returns_none(self): - ncfg = {'version': 13.2} + ncfg = {"version": 13.2} with self.assertRaises(RuntimeError): network_state.parse_net_config_data(ncfg) def test_version_2_passes_self_as_config(self): - ncfg = {'version': 2, 'otherconfig': {}, 'somemore': [1, 2, 3]} + ncfg = {"version": 2, "otherconfig": {}, "somemore": [1, 2, 3]} network_state.parse_net_config_data(ncfg) - self.assertEqual([mock.call(version=2, config=ncfg)], - self.m_nsi.call_args_list) + self.assertEqual( + [mock.call(version=2, config=ncfg)], self.m_nsi.call_args_list + ) def test_valid_config_gets_network_state(self): - ncfg = {'version': 2, 'otherconfig': {}, 'somemore': [1, 2, 3]} + ncfg = {"version": 2, "otherconfig": {}, "somemore": [1, 2, 3]} result = network_state.parse_net_config_data(ncfg) self.assertNotEqual(None, result) def test_empty_v1_config_gets_network_state(self): - ncfg = {'version': 1, 'config': []} + ncfg = {"version": 1, "config": []} result = network_state.parse_net_config_data(ncfg) self.assertNotEqual(None, result) def test_empty_v2_config_gets_network_state(self): - ncfg = {'version': 2} + ncfg = {"version": 2} result = network_state.parse_net_config_data(ncfg) self.assertNotEqual(None, result) class TestNetworkStateParseConfigV2(CiTestCase): - def test_version_2_ignores_renderer_key(self): - ncfg = {'version': 2, 'renderer': 
'networkd', 'ethernets': {}} - nsi = network_state.NetworkStateInterpreter(version=ncfg['version'], - config=ncfg) + ncfg = {"version": 2, "renderer": "networkd", "ethernets": {}} + nsi = network_state.NetworkStateInterpreter( + version=ncfg["version"], config=ncfg + ) nsi.parse_config(skip_broken=False) - self.assertEqual(ncfg, nsi.as_dict()['config']) + self.assertEqual(ncfg, nsi.as_dict()["config"]) class TestNetworkStateParseNameservers: def _parse_network_state_from_config(self, config): yaml = safeyaml.load(config) - return network_state.parse_net_config_data(yaml['network']) + return network_state.parse_net_config_data(yaml["network"]) def test_v1_nameservers_valid(self): config = self._parse_network_state_from_config( - V1_CONFIG_NAMESERVERS_VALID) + V1_CONFIG_NAMESERVERS_VALID + ) # If an interface was specified, DNS shouldn't be in the global list - assert ['192.168.1.0', '4.4.4.4'] == sorted( - config.dns_nameservers) - assert ['eggs.local'] == config.dns_searchdomains + assert ["192.168.1.0", "4.4.4.4"] == sorted(config.dns_nameservers) + assert ["eggs.local"] == config.dns_searchdomains # If an interface was specified, DNS should be part of the interface for iface in config.iter_interfaces(): - if iface['name'] == 'eth1': - assert iface['dns']['addresses'] == ['192.168.1.1', '8.8.8.8'] - assert iface['dns']['search'] == ['spam.local'] + if iface["name"] == "eth1": + assert iface["dns"]["addresses"] == ["192.168.1.1", "8.8.8.8"] + assert iface["dns"]["search"] == ["spam.local"] else: - assert 'dns' not in iface + assert "dns" not in iface def test_v1_nameservers_invalid(self): with pytest.raises(ValueError): self._parse_network_state_from_config( - V1_CONFIG_NAMESERVERS_INVALID) + V1_CONFIG_NAMESERVERS_INVALID + ) def test_v2_nameservers(self): config = self._parse_network_state_from_config(V2_CONFIG_NAMESERVERS) # Ensure DNS defined on interface exists on interface for iface in config.iter_interfaces(): - if iface['name'] == 'eth0': - assert iface['dns'] == { - 'nameservers': ['8.8.8.8'], - 'search': ['spam.local', 'eggs.local'], + if iface["name"] == "eth0": + assert iface["dns"] == { + "nameservers": ["8.8.8.8"], + "search": ["spam.local", "eggs.local"], } else: - assert iface['dns'] == { - 'nameservers': ['4.4.4.4'], - 'search': ['foo.local', 'bar.local'] + assert iface["dns"] == { + "nameservers": ["4.4.4.4"], + "search": ["foo.local", "bar.local"], } # Ensure DNS defined on interface also exists globally (since there # is no global DNS definitions in v2) - assert ['4.4.4.4', '8.8.8.8'] == sorted(config.dns_nameservers) + assert ["4.4.4.4", "8.8.8.8"] == sorted(config.dns_nameservers) assert [ - 'bar.local', - 'eggs.local', - 'foo.local', - 'spam.local', + "bar.local", + "eggs.local", + "foo.local", + "spam.local", ] == sorted(config.dns_searchdomains) + # vi: ts=4 expandtab diff --git a/tests/unittests/net/test_networkd.py b/tests/unittests/net/test_networkd.py index 8dc90b48..ec1d04e9 100644 --- a/tests/unittests/net/test_networkd.py +++ b/tests/unittests/net/test_networkd.py @@ -1,7 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. 
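
The one-line hunk below is a pure isort change: names inside a single from-import are sorted alphabetically, and network_state precedes networkd because "_" sorts before "d". Changes like this can be reproduced mechanically with the two tools' public Python APIs; a minimal sketch (the black profile and 79-column line length are assumptions about the project's configuration, not taken from this page):

    import black
    import isort

    src = "from cloudinit.net import networkd, network_state\n"

    # isort sorts the imported names; black then normalizes the layout.
    sorted_src = isort.code(src, profile="black", line_length=79)
    print(black.format_str(sorted_src, mode=black.Mode(line_length=79)))
    # -> from cloudinit.net import network_state, networkd

Running both tools over the tree is what produces the purely cosmetic hunks throughout this diff: quote normalization, one-argument-per-line call sites, and reordered imports, with no behavioral change to the tests.
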
from cloudinit import safeyaml -from cloudinit.net import networkd, network_state +from cloudinit.net import network_state, networkd V2_CONFIG_SET_NAME = """\ network: diff --git a/tests/unittests/runs/test_merge_run.py b/tests/unittests/runs/test_merge_run.py index 29439c8a..1b1b5595 100644 --- a/tests/unittests/runs/test_merge_run.py +++ b/tests/unittests/runs/test_merge_run.py @@ -4,12 +4,9 @@ import os import shutil import tempfile -from tests.unittests import helpers - +from cloudinit import safeyaml, stages, util from cloudinit.settings import PER_INSTANCE -from cloudinit import safeyaml -from cloudinit import stages -from cloudinit import util +from tests.unittests import helpers class TestMergeRun(helpers.FilesystemMockingTestCase): @@ -20,17 +17,18 @@ class TestMergeRun(helpers.FilesystemMockingTestCase): def test_none_ds(self): new_root = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, new_root) - self.replicateTestRoot('simple_ubuntu', new_root) + self.replicateTestRoot("simple_ubuntu", new_root) cfg = { - 'datasource_list': ['None'], - 'cloud_init_modules': ['write-files'], - 'system_info': {'paths': {'run_dir': new_root}} + "datasource_list": ["None"], + "cloud_init_modules": ["write-files"], + "system_info": {"paths": {"run_dir": new_root}}, } - ud = helpers.readResource('user_data.1.txt') + ud = helpers.readResource("user_data.1.txt") cloud_cfg = safeyaml.dumps(cfg) - util.ensure_dir(os.path.join(new_root, 'etc', 'cloud')) - util.write_file(os.path.join(new_root, 'etc', - 'cloud', 'cloud.cfg'), cloud_cfg) + util.ensure_dir(os.path.join(new_root, "etc", "cloud")) + util.write_file( + os.path.join(new_root, "etc", "cloud", "cloud.cfg"), cloud_cfg + ) self._patchIn(new_root) # Now start verifying whats created @@ -41,20 +39,23 @@ class TestMergeRun(helpers.FilesystemMockingTestCase): initer.datasource.userdata_raw = ud initer.instancify() initer.update() - initer.cloudify().run('consume_data', - initer.consume_data, - args=[PER_INSTANCE], - freq=PER_INSTANCE) - mirrors = initer.distro.get_option('package_mirrors') + initer.cloudify().run( + "consume_data", + initer.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE, + ) + mirrors = initer.distro.get_option("package_mirrors") self.assertEqual(1, len(mirrors)) mirror = mirrors[0] - self.assertEqual(mirror['arches'], ['i386', 'amd64', 'blah']) + self.assertEqual(mirror["arches"], ["i386", "amd64", "blah"]) mods = stages.Modules(initer) - (which_ran, failures) = mods.run_section('cloud_init_modules') + (which_ran, failures) = mods.run_section("cloud_init_modules") self.assertTrue(len(failures) == 0) - self.assertTrue(os.path.exists('/etc/blah.ini')) - self.assertIn('write-files', which_ran) - contents = util.load_file('/etc/blah.ini') - self.assertEqual(contents, 'blah') + self.assertTrue(os.path.exists("/etc/blah.ini")) + self.assertIn("write-files", which_ran) + contents = util.load_file("/etc/blah.ini") + self.assertEqual(contents, "blah") + # vi: ts=4 expandtab diff --git a/tests/unittests/runs/test_simple_run.py b/tests/unittests/runs/test_simple_run.py index aa78dda3..38cf9494 100644 --- a/tests/unittests/runs/test_simple_run.py +++ b/tests/unittests/runs/test_simple_run.py @@ -3,12 +3,9 @@ import copy import os - +from cloudinit import safeyaml, stages, util from cloudinit.settings import PER_INSTANCE -from cloudinit import safeyaml -from cloudinit import stages from tests.unittests import helpers -from cloudinit import util class TestSimpleRun(helpers.FilesystemMockingTestCase): @@ -18,27 +15,28 @@ class 
TestSimpleRun(helpers.FilesystemMockingTestCase): def setUp(self): super(TestSimpleRun, self).setUp() self.new_root = self.tmp_dir() - self.replicateTestRoot('simple_ubuntu', self.new_root) + self.replicateTestRoot("simple_ubuntu", self.new_root) # Seed cloud.cfg file for our tests self.cfg = { - 'datasource_list': ['None'], - 'runcmd': ['ls /etc'], # test ALL_DISTROS - 'spacewalk': {}, # test non-ubuntu distros module definition - 'system_info': {'paths': {'run_dir': self.new_root}}, - 'write_files': [ + "datasource_list": ["None"], + "runcmd": ["ls /etc"], # test ALL_DISTROS + "spacewalk": {}, # test non-ubuntu distros module definition + "system_info": {"paths": {"run_dir": self.new_root}}, + "write_files": [ { - 'path': '/etc/blah.ini', - 'content': 'blah', - 'permissions': 0o755, + "path": "/etc/blah.ini", + "content": "blah", + "permissions": 0o755, }, ], - 'cloud_init_modules': ['write-files', 'spacewalk', 'runcmd'], + "cloud_init_modules": ["write-files", "spacewalk", "runcmd"], } cloud_cfg = safeyaml.dumps(self.cfg) - util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) - util.write_file(os.path.join(self.new_root, 'etc', - 'cloud', 'cloud.cfg'), cloud_cfg) + util.ensure_dir(os.path.join(self.new_root, "etc", "cloud")) + util.write_file( + os.path.join(self.new_root, "etc", "cloud", "cloud.cfg"), cloud_cfg + ) self.patchOS(self.new_root) self.patchUtils(self.new_root) @@ -49,12 +47,12 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): initer.read_cfg() initer.initialize() self.assertTrue(os.path.exists("/var/lib/cloud")) - for d in ['scripts', 'seed', 'instances', 'handlers', 'sem', 'data']: + for d in ["scripts", "seed", "instances", "handlers", "sem", "data"]: self.assertTrue(os.path.isdir(os.path.join("/var/lib/cloud", d))) initer.fetch() iid = initer.instancify() - self.assertEqual(iid, 'iid-datasource-none') + self.assertEqual(iid, "iid-datasource-none") initer.update() self.assertTrue(os.path.islink("var/lib/cloud/instance")) @@ -66,20 +64,25 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): initer.fetch() initer.instancify() initer.update() - initer.cloudify().run('consume_data', initer.consume_data, - args=[PER_INSTANCE], freq=PER_INSTANCE) + initer.cloudify().run( + "consume_data", + initer.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE, + ) mods = stages.Modules(initer) - (which_ran, failures) = mods.run_section('cloud_init_modules') + (which_ran, failures) = mods.run_section("cloud_init_modules") self.assertTrue(len(failures) == 0) - self.assertTrue(os.path.exists('/etc/blah.ini')) - self.assertIn('write-files', which_ran) - contents = util.load_file('/etc/blah.ini') - self.assertEqual(contents, 'blah') + self.assertTrue(os.path.exists("/etc/blah.ini")) + self.assertIn("write-files", which_ran) + contents = util.load_file("/etc/blah.ini") + self.assertEqual(contents, "blah") self.assertNotIn( "Skipping modules ['write-files'] because they are not verified on" " distro 'ubuntu'", - self.logs.getvalue()) + self.logs.getvalue(), + ) def test_none_ds_skips_modules_which_define_unmatched_distros(self): """Skip modules which define distros which don't match the current.""" @@ -89,17 +92,22 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): initer.fetch() initer.instancify() initer.update() - initer.cloudify().run('consume_data', initer.consume_data, - args=[PER_INSTANCE], freq=PER_INSTANCE) + initer.cloudify().run( + "consume_data", + initer.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE, + ) mods = stages.Modules(initer) - 
(which_ran, failures) = mods.run_section('cloud_init_modules') + (which_ran, failures) = mods.run_section("cloud_init_modules") self.assertTrue(len(failures) == 0) self.assertIn( "Skipping modules 'spacewalk' because they are not verified on" " distro 'ubuntu'", - self.logs.getvalue()) - self.assertNotIn('spacewalk', which_ran) + self.logs.getvalue(), + ) + self.assertNotIn("spacewalk", which_ran) def test_none_ds_runs_modules_which_distros_all(self): """Skip modules which define distros attribute as supporting 'all'. @@ -113,28 +121,34 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): initer.fetch() initer.instancify() initer.update() - initer.cloudify().run('consume_data', initer.consume_data, - args=[PER_INSTANCE], freq=PER_INSTANCE) + initer.cloudify().run( + "consume_data", + initer.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE, + ) mods = stages.Modules(initer) - (which_ran, failures) = mods.run_section('cloud_init_modules') + (which_ran, failures) = mods.run_section("cloud_init_modules") self.assertTrue(len(failures) == 0) - self.assertIn('runcmd', which_ran) + self.assertIn("runcmd", which_ran) self.assertNotIn( "Skipping modules 'runcmd' because they are not verified on" " distro 'ubuntu'", - self.logs.getvalue()) + self.logs.getvalue(), + ) def test_none_ds_forces_run_via_unverified_modules(self): """run_section forced skipped modules by using unverified_modules.""" # re-write cloud.cfg with unverified_modules override cfg = copy.deepcopy(self.cfg) - cfg['unverified_modules'] = ['spacewalk'] # Would have skipped + cfg["unverified_modules"] = ["spacewalk"] # Would have skipped cloud_cfg = safeyaml.dumps(cfg) - util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) - util.write_file(os.path.join(self.new_root, 'etc', - 'cloud', 'cloud.cfg'), cloud_cfg) + util.ensure_dir(os.path.join(self.new_root, "etc", "cloud")) + util.write_file( + os.path.join(self.new_root, "etc", "cloud", "cloud.cfg"), cloud_cfg + ) initer = stages.Init() initer.read_cfg() @@ -142,16 +156,20 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): initer.fetch() initer.instancify() initer.update() - initer.cloudify().run('consume_data', initer.consume_data, - args=[PER_INSTANCE], freq=PER_INSTANCE) + initer.cloudify().run( + "consume_data", + initer.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE, + ) mods = stages.Modules(initer) - (which_ran, failures) = mods.run_section('cloud_init_modules') + (which_ran, failures) = mods.run_section("cloud_init_modules") self.assertTrue(len(failures) == 0) - self.assertIn('spacewalk', which_ran) + self.assertIn("spacewalk", which_ran) self.assertIn( - "running unverified_modules: 'spacewalk'", - self.logs.getvalue()) + "running unverified_modules: 'spacewalk'", self.logs.getvalue() + ) def test_none_ds_run_with_no_config_modules(self): """run_section will report no modules run when none are configured.""" @@ -159,11 +177,12 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): # re-write cloud.cfg with unverified_modules override cfg = copy.deepcopy(self.cfg) # Represent empty configuration in /etc/cloud/cloud.cfg - cfg['cloud_init_modules'] = None + cfg["cloud_init_modules"] = None cloud_cfg = safeyaml.dumps(cfg) - util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) - util.write_file(os.path.join(self.new_root, 'etc', - 'cloud', 'cloud.cfg'), cloud_cfg) + util.ensure_dir(os.path.join(self.new_root, "etc", "cloud")) + util.write_file( + os.path.join(self.new_root, "etc", "cloud", "cloud.cfg"), cloud_cfg + ) initer = 
stages.Init() initer.read_cfg() @@ -171,12 +190,17 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): initer.fetch() initer.instancify() initer.update() - initer.cloudify().run('consume_data', initer.consume_data, - args=[PER_INSTANCE], freq=PER_INSTANCE) + initer.cloudify().run( + "consume_data", + initer.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE, + ) mods = stages.Modules(initer) - (which_ran, failures) = mods.run_section('cloud_init_modules') + (which_ran, failures) = mods.run_section("cloud_init_modules") self.assertTrue(len(failures) == 0) self.assertEqual([], which_ran) + # vi: ts=4 expandtab diff --git a/tests/unittests/sources/helpers/test_netlink.py b/tests/unittests/sources/helpers/test_netlink.py index 478ce375..5eabf104 100644 --- a/tests/unittests/sources/helpers/test_netlink.py +++ b/tests/unittests/sources/helpers/test_netlink.py @@ -2,48 +2,64 @@ # # This file is part of cloud-init. See LICENSE file for license information. -from tests.unittests.helpers import CiTestCase, mock +import codecs import socket import struct -import codecs + from cloudinit.sources.helpers.netlink import ( - NetlinkCreateSocketError, create_bound_netlink_socket, read_netlink_socket, - read_rta_oper_state, unpack_rta_attr, wait_for_media_disconnect_connect, - wait_for_nic_attach_event, wait_for_nic_detach_event, - OPER_DOWN, OPER_UP, OPER_DORMANT, OPER_LOWERLAYERDOWN, OPER_NOTPRESENT, - OPER_TESTING, OPER_UNKNOWN, RTATTR_START_OFFSET, RTM_NEWLINK, RTM_DELLINK, - RTM_SETLINK, RTM_GETLINK, MAX_SIZE) + MAX_SIZE, + OPER_DORMANT, + OPER_DOWN, + OPER_LOWERLAYERDOWN, + OPER_NOTPRESENT, + OPER_TESTING, + OPER_UNKNOWN, + OPER_UP, + RTATTR_START_OFFSET, + RTM_DELLINK, + RTM_GETLINK, + RTM_NEWLINK, + RTM_SETLINK, + NetlinkCreateSocketError, + create_bound_netlink_socket, + read_netlink_socket, + read_rta_oper_state, + unpack_rta_attr, + wait_for_media_disconnect_connect, + wait_for_nic_attach_event, + wait_for_nic_detach_event, +) +from tests.unittests.helpers import CiTestCase, mock def int_to_bytes(i): - '''convert integer to binary: eg: 1 to \x01''' - hex_value = '{0:x}'.format(i) - hex_value = '0' * (len(hex_value) % 2) + hex_value - return codecs.decode(hex_value, 'hex_codec') + """convert integer to binary: eg: 1 to \x01""" + hex_value = "{0:x}".format(i) + hex_value = "0" * (len(hex_value) % 2) + hex_value + return codecs.decode(hex_value, "hex_codec") class TestCreateBoundNetlinkSocket(CiTestCase): - - @mock.patch('cloudinit.sources.helpers.netlink.socket.socket') + @mock.patch("cloudinit.sources.helpers.netlink.socket.socket") def test_socket_error_on_create(self, m_socket): - '''create_bound_netlink_socket catches socket creation exception''' + """create_bound_netlink_socket catches socket creation exception""" """NetlinkCreateSocketError is raised when socket creation errors.""" m_socket.side_effect = socket.error("Fake socket failure") with self.assertRaises(NetlinkCreateSocketError) as ctx_mgr: create_bound_netlink_socket() self.assertEqual( - 'Exception during netlink socket create: Fake socket failure', - str(ctx_mgr.exception)) + "Exception during netlink socket create: Fake socket failure", + str(ctx_mgr.exception), + ) class TestReadNetlinkSocket(CiTestCase): - - @mock.patch('cloudinit.sources.helpers.netlink.socket.socket') - @mock.patch('cloudinit.sources.helpers.netlink.select.select') + @mock.patch("cloudinit.sources.helpers.netlink.socket.socket") + @mock.patch("cloudinit.sources.helpers.netlink.select.select") def test_read_netlink_socket(self, m_select, 
m_socket): - '''read_netlink_socket able to receive data''' - data = 'netlinktest' + """read_netlink_socket able to receive data""" + data = "netlinktest" m_select.return_value = [m_socket], None, None m_socket.recv.return_value = data recv_data = read_netlink_socket(m_socket, 2) @@ -52,10 +68,10 @@ class TestReadNetlinkSocket(CiTestCase): self.assertIsNotNone(recv_data) self.assertEqual(recv_data, data) - @mock.patch('cloudinit.sources.helpers.netlink.socket.socket') - @mock.patch('cloudinit.sources.helpers.netlink.select.select') + @mock.patch("cloudinit.sources.helpers.netlink.socket.socket") + @mock.patch("cloudinit.sources.helpers.netlink.select.select") def test_netlink_read_timeout(self, m_select, m_socket): - '''read_netlink_socket should timeout if nothing to read''' + """read_netlink_socket should timeout if nothing to read""" m_select.return_value = [], None, None data = read_netlink_socket(m_socket, 1) m_select.assert_called_with([m_socket], [], [], 1) @@ -63,35 +79,43 @@ class TestReadNetlinkSocket(CiTestCase): self.assertIsNone(data) def test_read_invalid_socket(self): - '''read_netlink_socket raises assert error if socket is invalid''' + """read_netlink_socket raises assert error if socket is invalid""" socket = None with self.assertRaises(AssertionError) as context: read_netlink_socket(socket, 1) - self.assertTrue('netlink socket is none' in str(context.exception)) + self.assertTrue("netlink socket is none" in str(context.exception)) class TestParseNetlinkMessage(CiTestCase): - def test_read_rta_oper_state(self): - '''read_rta_oper_state could parse netlink message and extract data''' + """read_rta_oper_state could parse netlink message and extract data""" ifname = "eth0" bytes = ifname.encode("utf-8") buf = bytearray(48) - struct.pack_into("HH4sHHc", buf, RTATTR_START_OFFSET, 8, 3, bytes, 5, - 16, int_to_bytes(OPER_DOWN)) + struct.pack_into( + "HH4sHHc", + buf, + RTATTR_START_OFFSET, + 8, + 3, + bytes, + 5, + 16, + int_to_bytes(OPER_DOWN), + ) interface_state = read_rta_oper_state(buf) self.assertEqual(interface_state.ifname, ifname) self.assertEqual(interface_state.operstate, OPER_DOWN) def test_read_none_data(self): - '''read_rta_oper_state raises assert error if data is none''' + """read_rta_oper_state raises assert error if data is none""" data = None with self.assertRaises(AssertionError) as context: read_rta_oper_state(data) - self.assertEqual('data is none', str(context.exception)) + self.assertEqual("data is none", str(context.exception)) def test_read_invalid_rta_operstate_none(self): - '''read_rta_oper_state returns none if operstate is none''' + """read_rta_oper_state returns none if operstate is none""" ifname = "eth0" buf = bytearray(40) bytes = ifname.encode("utf-8") @@ -100,65 +124,84 @@ class TestParseNetlinkMessage(CiTestCase): self.assertIsNone(interface_state) def test_read_invalid_rta_ifname_none(self): - '''read_rta_oper_state returns none if ifname is none''' + """read_rta_oper_state returns none if ifname is none""" buf = bytearray(40) - struct.pack_into("HHc", buf, RTATTR_START_OFFSET, 5, 16, - int_to_bytes(OPER_DOWN)) + struct.pack_into( + "HHc", buf, RTATTR_START_OFFSET, 5, 16, int_to_bytes(OPER_DOWN) + ) interface_state = read_rta_oper_state(buf) self.assertIsNone(interface_state) def test_read_invalid_data_len(self): - '''raise assert error if data size is smaller than required size''' + """raise assert error if data size is smaller than required size""" buf = bytearray(32) with self.assertRaises(AssertionError) as context: 
read_rta_oper_state(buf) - self.assertTrue('length of data is smaller than RTATTR_START_OFFSET' in - str(context.exception)) + self.assertTrue( + "length of data is smaller than RTATTR_START_OFFSET" + in str(context.exception) + ) def test_unpack_rta_attr_none_data(self): - '''unpack_rta_attr raises assert error if data is none''' + """unpack_rta_attr raises assert error if data is none""" data = None with self.assertRaises(AssertionError) as context: unpack_rta_attr(data, RTATTR_START_OFFSET) - self.assertTrue('data is none' in str(context.exception)) + self.assertTrue("data is none" in str(context.exception)) def test_unpack_rta_attr_invalid_offset(self): - '''unpack_rta_attr raises assert error if offset is invalid''' + """unpack_rta_attr raises assert error if offset is invalid""" data = bytearray(48) with self.assertRaises(AssertionError) as context: unpack_rta_attr(data, "offset") - self.assertTrue('offset is not integer' in str(context.exception)) + self.assertTrue("offset is not integer" in str(context.exception)) with self.assertRaises(AssertionError) as context: unpack_rta_attr(data, 31) - self.assertTrue('rta offset is less than expected length' in - str(context.exception)) + self.assertTrue( + "rta offset is less than expected length" in str(context.exception) + ) -@mock.patch('cloudinit.sources.helpers.netlink.socket.socket') -@mock.patch('cloudinit.sources.helpers.netlink.read_netlink_socket') +@mock.patch("cloudinit.sources.helpers.netlink.socket.socket") +@mock.patch("cloudinit.sources.helpers.netlink.read_netlink_socket") class TestNicAttachDetach(CiTestCase): with_logs = True def _media_switch_data(self, ifname, msg_type, operstate): - '''construct netlink data with specified fields''' + """construct netlink data with specified fields""" if ifname and operstate is not None: data = bytearray(48) bytes = ifname.encode("utf-8") - struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3, - bytes, 5, 16, int_to_bytes(operstate)) + struct.pack_into( + "HH4sHHc", + data, + RTATTR_START_OFFSET, + 8, + 3, + bytes, + 5, + 16, + int_to_bytes(operstate), + ) elif ifname: data = bytearray(40) bytes = ifname.encode("utf-8") struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes) elif operstate: data = bytearray(40) - struct.pack_into("HHc", data, RTATTR_START_OFFSET, 5, 16, - int_to_bytes(operstate)) + struct.pack_into( + "HHc", + data, + RTATTR_START_OFFSET, + 5, + 16, + int_to_bytes(operstate), + ) struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0) return data def test_nic_attached_oper_down(self, m_read_netlink_socket, m_socket): - '''Test for a new nic attached''' + """Test for a new nic attached""" ifname = "eth0" data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) m_read_netlink_socket.side_effect = [data_op_down] @@ -167,7 +210,7 @@ class TestNicAttachDetach(CiTestCase): self.assertEqual(ifname, ifread) def test_nic_attached_oper_up(self, m_read_netlink_socket, m_socket): - '''Test for a new nic attached''' + """Test for a new nic attached""" ifname = "eth0" data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) m_read_netlink_socket.side_effect = [data_op_up] @@ -176,7 +219,7 @@ class TestNicAttachDetach(CiTestCase): self.assertEqual(ifname, ifread) def test_nic_attach_ignore_existing(self, m_read_netlink_socket, m_socket): - '''Test that we read only the interfaces we are interested in.''' + """Test that we read only the interfaces we are interested in.""" data_eth0 = self._media_switch_data("eth0", RTM_NEWLINK, 
OPER_DOWN) data_eth1 = self._media_switch_data("eth1", RTM_NEWLINK, OPER_DOWN) m_read_netlink_socket.side_effect = [data_eth0, data_eth1] @@ -185,7 +228,7 @@ class TestNicAttachDetach(CiTestCase): self.assertEqual("eth1", ifread) def test_nic_attach_read_first(self, m_read_netlink_socket, m_socket): - '''Test that we read only the interfaces we are interested in.''' + """Test that we read only the interfaces we are interested in.""" data_eth0 = self._media_switch_data("eth0", RTM_NEWLINK, OPER_DOWN) data_eth1 = self._media_switch_data("eth1", RTM_NEWLINK, OPER_DOWN) m_read_netlink_socket.side_effect = [data_eth0, data_eth1] @@ -194,7 +237,7 @@ class TestNicAttachDetach(CiTestCase): self.assertEqual("eth0", ifread) def test_nic_detached(self, m_read_netlink_socket, m_socket): - '''Test for an existing nic detached''' + """Test for an existing nic detached""" ifname = "eth0" data_op_down = self._media_switch_data(ifname, RTM_DELLINK, OPER_DOWN) m_read_netlink_socket.side_effect = [data_op_down] @@ -203,32 +246,46 @@ class TestNicAttachDetach(CiTestCase): self.assertEqual(ifname, ifread) -@mock.patch('cloudinit.sources.helpers.netlink.socket.socket') -@mock.patch('cloudinit.sources.helpers.netlink.read_netlink_socket') +@mock.patch("cloudinit.sources.helpers.netlink.socket.socket") +@mock.patch("cloudinit.sources.helpers.netlink.read_netlink_socket") class TestWaitForMediaDisconnectConnect(CiTestCase): with_logs = True def _media_switch_data(self, ifname, msg_type, operstate): - '''construct netlink data with specified fields''' + """construct netlink data with specified fields""" if ifname and operstate is not None: data = bytearray(48) bytes = ifname.encode("utf-8") - struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3, - bytes, 5, 16, int_to_bytes(operstate)) + struct.pack_into( + "HH4sHHc", + data, + RTATTR_START_OFFSET, + 8, + 3, + bytes, + 5, + 16, + int_to_bytes(operstate), + ) elif ifname: data = bytearray(40) bytes = ifname.encode("utf-8") struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes) elif operstate: data = bytearray(40) - struct.pack_into("HHc", data, RTATTR_START_OFFSET, 5, 16, - int_to_bytes(operstate)) + struct.pack_into( + "HHc", + data, + RTATTR_START_OFFSET, + 5, + 16, + int_to_bytes(operstate), + ) struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0) return data - def test_media_down_up_scenario(self, m_read_netlink_socket, - m_socket): - '''Test for media down up sequence for required interface name''' + def test_media_down_up_scenario(self, m_read_netlink_socket, m_socket): + """Test for media down up sequence for required interface name""" ifname = "eth0" # construct data for Oper State down data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) @@ -238,15 +295,16 @@ class TestWaitForMediaDisconnectConnect(CiTestCase): wait_for_media_disconnect_connect(m_socket, ifname) self.assertEqual(m_read_netlink_socket.call_count, 2) - def test_wait_for_media_switch_diff_interface(self, m_read_netlink_socket, - m_socket): - '''wait_for_media_disconnect_connect ignores unexpected interfaces. + def test_wait_for_media_switch_diff_interface( + self, m_read_netlink_socket, m_socket + ): + """wait_for_media_disconnect_connect ignores unexpected interfaces. The first two messages are for other interfaces and last two are for expected interface. 
So the function exit only after receiving last 2 messages and therefore the call count for m_read_netlink_socket has to be 4 - ''' + """ other_ifname = "eth1" expected_ifname = "eth0" data_op_down_eth1 = self._media_switch_data( @@ -259,51 +317,50 @@ class TestWaitForMediaDisconnectConnect(CiTestCase): expected_ifname, RTM_NEWLINK, OPER_DOWN ) data_op_up_eth0 = self._media_switch_data( - expected_ifname, RTM_NEWLINK, OPER_UP) + expected_ifname, RTM_NEWLINK, OPER_UP + ) m_read_netlink_socket.side_effect = [ data_op_down_eth1, data_op_up_eth1, data_op_down_eth0, - data_op_up_eth0 + data_op_up_eth0, ] wait_for_media_disconnect_connect(m_socket, expected_ifname) - self.assertIn('Ignored netlink event on interface %s' % other_ifname, - self.logs.getvalue()) + self.assertIn( + "Ignored netlink event on interface %s" % other_ifname, + self.logs.getvalue(), + ) self.assertEqual(m_read_netlink_socket.call_count, 4) def test_invalid_msgtype_getlink(self, m_read_netlink_socket, m_socket): - '''wait_for_media_disconnect_connect ignores GETLINK events. + """wait_for_media_disconnect_connect ignores GETLINK events. The first two messages are for oper down and up for RTM_GETLINK type which netlink module will ignore. The last 2 messages are RTM_NEWLINK with oper state down and up messages. Therefore the call count for m_read_netlink_socket has to be 4 ignoring first 2 messages of RTM_GETLINK - ''' + """ ifname = "eth0" data_getlink_down = self._media_switch_data( ifname, RTM_GETLINK, OPER_DOWN ) - data_getlink_up = self._media_switch_data( - ifname, RTM_GETLINK, OPER_UP - ) + data_getlink_up = self._media_switch_data(ifname, RTM_GETLINK, OPER_UP) data_newlink_down = self._media_switch_data( ifname, RTM_NEWLINK, OPER_DOWN ) - data_newlink_up = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_UP - ) + data_newlink_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) m_read_netlink_socket.side_effect = [ data_getlink_down, data_getlink_up, data_newlink_down, - data_newlink_up + data_newlink_up, ] wait_for_media_disconnect_connect(m_socket, ifname) self.assertEqual(m_read_netlink_socket.call_count, 4) def test_invalid_msgtype_setlink(self, m_read_netlink_socket, m_socket): - '''wait_for_media_disconnect_connect ignores SETLINK events. + """wait_for_media_disconnect_connect ignores SETLINK events. The first two messages are for oper down and up for RTM_GETLINK type which it will ignore. 3rd and 4th messages are RTM_NEWLINK with down @@ -311,34 +368,31 @@ class TestWaitForMediaDisconnectConnect(CiTestCase): sees down->up scenario. 
So the call count for m_read_netlink_socket has to be 4 ignoring first 2 messages of RTM_GETLINK and last 2 messages of RTM_NEWLINK - ''' + """ ifname = "eth0" data_setlink_down = self._media_switch_data( ifname, RTM_SETLINK, OPER_DOWN ) - data_setlink_up = self._media_switch_data( - ifname, RTM_SETLINK, OPER_UP - ) + data_setlink_up = self._media_switch_data(ifname, RTM_SETLINK, OPER_UP) data_newlink_down = self._media_switch_data( ifname, RTM_NEWLINK, OPER_DOWN ) - data_newlink_up = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_UP - ) + data_newlink_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) m_read_netlink_socket.side_effect = [ data_setlink_down, data_setlink_up, data_newlink_down, data_newlink_up, data_newlink_down, - data_newlink_up + data_newlink_up, ] wait_for_media_disconnect_connect(m_socket, ifname) self.assertEqual(m_read_netlink_socket.call_count, 4) - def test_netlink_invalid_switch_scenario(self, m_read_netlink_socket, - m_socket): - '''returns only if it receives UP event after a DOWN event''' + def test_netlink_invalid_switch_scenario( + self, m_read_netlink_socket, m_socket + ): + """returns only if it receives UP event after a DOWN event""" ifname = "eth0" data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) @@ -358,114 +412,153 @@ class TestWaitForMediaDisconnectConnect(CiTestCase): ifname, RTM_NEWLINK, OPER_UNKNOWN ) m_read_netlink_socket.side_effect = [ - data_op_up, data_op_up, - data_op_dormant, data_op_up, - data_op_notpresent, data_op_up, - data_op_lowerdown, data_op_up, - data_op_testing, data_op_up, - data_op_unknown, data_op_up, - data_op_down, data_op_up + data_op_up, + data_op_up, + data_op_dormant, + data_op_up, + data_op_notpresent, + data_op_up, + data_op_lowerdown, + data_op_up, + data_op_testing, + data_op_up, + data_op_unknown, + data_op_up, + data_op_down, + data_op_up, ] wait_for_media_disconnect_connect(m_socket, ifname) self.assertEqual(m_read_netlink_socket.call_count, 14) - def test_netlink_valid_inbetween_transitions(self, m_read_netlink_socket, - m_socket): - '''wait_for_media_disconnect_connect handles in between transitions''' + def test_netlink_valid_inbetween_transitions( + self, m_read_netlink_socket, m_socket + ): + """wait_for_media_disconnect_connect handles in between transitions""" ifname = "eth0" data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) data_op_dormant = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_DORMANT) + ifname, RTM_NEWLINK, OPER_DORMANT + ) data_op_unknown = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_UNKNOWN) + ifname, RTM_NEWLINK, OPER_UNKNOWN + ) m_read_netlink_socket.side_effect = [ - data_op_down, data_op_dormant, - data_op_unknown, data_op_up + data_op_down, + data_op_dormant, + data_op_unknown, + data_op_up, ] wait_for_media_disconnect_connect(m_socket, ifname) self.assertEqual(m_read_netlink_socket.call_count, 4) def test_netlink_invalid_operstate(self, m_read_netlink_socket, m_socket): - '''wait_for_media_disconnect_connect should handle invalid operstates. + """wait_for_media_disconnect_connect should handle invalid operstates. The function should not fail and return even if it receives invalid operstates. It always should wait for down up sequence. 
- ''' + """ ifname = "eth0" data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) data_op_invalid = self._media_switch_data(ifname, RTM_NEWLINK, 7) m_read_netlink_socket.side_effect = [ - data_op_invalid, data_op_up, - data_op_down, data_op_invalid, - data_op_up + data_op_invalid, + data_op_up, + data_op_down, + data_op_invalid, + data_op_up, ] wait_for_media_disconnect_connect(m_socket, ifname) self.assertEqual(m_read_netlink_socket.call_count, 5) def test_wait_invalid_socket(self, m_read_netlink_socket, m_socket): - '''wait_for_media_disconnect_connect handle none netlink socket.''' + """wait_for_media_disconnect_connect handle none netlink socket.""" socket = None ifname = "eth0" with self.assertRaises(AssertionError) as context: wait_for_media_disconnect_connect(socket, ifname) - self.assertTrue('netlink socket is none' in str(context.exception)) + self.assertTrue("netlink socket is none" in str(context.exception)) def test_wait_invalid_ifname(self, m_read_netlink_socket, m_socket): - '''wait_for_media_disconnect_connect handle none interface name''' + """wait_for_media_disconnect_connect handle none interface name""" ifname = None with self.assertRaises(AssertionError) as context: wait_for_media_disconnect_connect(m_socket, ifname) - self.assertTrue('interface name is none' in str(context.exception)) + self.assertTrue("interface name is none" in str(context.exception)) ifname = "" with self.assertRaises(AssertionError) as context: wait_for_media_disconnect_connect(m_socket, ifname) - self.assertTrue('interface name cannot be empty' in - str(context.exception)) + self.assertTrue( + "interface name cannot be empty" in str(context.exception) + ) def test_wait_invalid_rta_attr(self, m_read_netlink_socket, m_socket): - ''' wait_for_media_disconnect_connect handles invalid rta data''' + """wait_for_media_disconnect_connect handles invalid rta data""" ifname = "eth0" data_invalid1 = self._media_switch_data(None, RTM_NEWLINK, OPER_DOWN) data_invalid2 = self._media_switch_data(ifname, RTM_NEWLINK, None) data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) m_read_netlink_socket.side_effect = [ - data_invalid1, data_invalid2, data_op_down, data_op_up + data_invalid1, + data_invalid2, + data_op_down, + data_op_up, ] wait_for_media_disconnect_connect(m_socket, ifname) self.assertEqual(m_read_netlink_socket.call_count, 4) def test_read_multiple_netlink_msgs(self, m_read_netlink_socket, m_socket): - '''Read multiple messages in single receive call''' + """Read multiple messages in single receive call""" ifname = "eth0" bytes = ifname.encode("utf-8") data = bytearray(96) struct.pack_into("=LHHLL", data, 0, 48, RTM_NEWLINK, 0, 0, 0) struct.pack_into( - "HH4sHHc", data, RTATTR_START_OFFSET, 8, 3, - bytes, 5, 16, int_to_bytes(OPER_DOWN) + "HH4sHHc", + data, + RTATTR_START_OFFSET, + 8, + 3, + bytes, + 5, + 16, + int_to_bytes(OPER_DOWN), ) struct.pack_into("=LHHLL", data, 48, 48, RTM_NEWLINK, 0, 0, 0) struct.pack_into( - "HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8, - 3, bytes, 5, 16, int_to_bytes(OPER_UP) + "HH4sHHc", + data, + 48 + RTATTR_START_OFFSET, + 8, + 3, + bytes, + 5, + 16, + int_to_bytes(OPER_UP), ) m_read_netlink_socket.return_value = data wait_for_media_disconnect_connect(m_socket, ifname) self.assertEqual(m_read_netlink_socket.call_count, 1) def test_read_partial_netlink_msgs(self, m_read_netlink_socket, m_socket): - '''Read 
partial messages in receive call''' + """Read partial messages in receive call""" ifname = "eth0" bytes = ifname.encode("utf-8") data1 = bytearray(112) data2 = bytearray(32) struct.pack_into("=LHHLL", data1, 0, 48, RTM_NEWLINK, 0, 0, 0) struct.pack_into( - "HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3, - bytes, 5, 16, int_to_bytes(OPER_DOWN) + "HH4sHHc", + data1, + RTATTR_START_OFFSET, + 8, + 3, + bytes, + 5, + 16, + int_to_bytes(OPER_DOWN), ) struct.pack_into("=LHHLL", data1, 48, 48, RTM_NEWLINK, 0, 0, 0) struct.pack_into( diff --git a/tests/unittests/sources/helpers/test_openstack.py b/tests/unittests/sources/helpers/test_openstack.py index 74743e7c..eb87b1ce 100644 --- a/tests/unittests/sources/helpers/test_openstack.py +++ b/tests/unittests/sources/helpers/test_openstack.py @@ -8,10 +8,9 @@ from tests.unittests import helpers as test_helpers @mock.patch( "cloudinit.net.is_openvswitch_internal_interface", - mock.Mock(return_value=False) + mock.Mock(return_value=False), ) class TestConvertNetJson(test_helpers.CiTestCase): - def test_phy_types(self): """Verify the different known physical types are handled.""" # network_data.json example from @@ -19,31 +18,45 @@ class TestConvertNetJson(test_helpers.CiTestCase): mac0 = "fa:16:3e:9c:bf:3d" net_json = { "links": [ - {"ethernet_mac_address": mac0, "id": "tapcd9f6d46-4a", - "mtu": None, "type": "bridge", - "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc"} + { + "ethernet_mac_address": mac0, + "id": "tapcd9f6d46-4a", + "mtu": None, + "type": "bridge", + "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc", + } ], "networks": [ - {"id": "network0", "link": "tapcd9f6d46-4a", - "network_id": "99e88329-f20d-4741-9593-25bf07847b16", - "type": "ipv4_dhcp"} + { + "id": "network0", + "link": "tapcd9f6d46-4a", + "network_id": "99e88329-f20d-4741-9593-25bf07847b16", + "type": "ipv4_dhcp", + } ], - "services": [{"address": "8.8.8.8", "type": "dns"}] + "services": [{"address": "8.8.8.8", "type": "dns"}], } - macs = {mac0: 'eth0'} + macs = {mac0: "eth0"} expected = { - 'version': 1, - 'config': [ - {'mac_address': 'fa:16:3e:9c:bf:3d', - 'mtu': None, 'name': 'eth0', - 'subnets': [{'type': 'dhcp4'}], - 'type': 'physical'}, - {'address': '8.8.8.8', 'type': 'nameserver'}]} + "version": 1, + "config": [ + { + "mac_address": "fa:16:3e:9c:bf:3d", + "mtu": None, + "name": "eth0", + "subnets": [{"type": "dhcp4"}], + "type": "physical", + }, + {"address": "8.8.8.8", "type": "nameserver"}, + ], + } for t in openstack.KNOWN_PHYSICAL_TYPES: net_json["links"][0]["type"] = t self.assertEqual( expected, - openstack.convert_net_json(network_json=net_json, - known_macs=macs)) + openstack.convert_net_json( + network_json=net_json, known_macs=macs + ), + ) diff --git a/tests/unittests/sources/test_aliyun.py b/tests/unittests/sources/test_aliyun.py index 00209913..8a61d5ee 100644 --- a/tests/unittests/sources/test_aliyun.py +++ b/tests/unittests/sources/test_aliyun.py @@ -1,38 +1,46 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import functools -import httpretty import os from unittest import mock +import httpretty + from cloudinit import helpers from cloudinit.sources import DataSourceAliYun as ay from cloudinit.sources.DataSourceEc2 import convert_ec2_metadata_network_config from tests.unittests import helpers as test_helpers DEFAULT_METADATA = { - 'instance-id': 'aliyun-test-vm-00', - 'eipv4': '10.0.0.1', - 'hostname': 'test-hostname', - 'image-id': 'm-test', - 'launch-index': '0', - 'mac': '00:16:3e:00:00:00', - 'network-type': 'vpc', - 'private-ipv4': '192.168.0.1', - 'serial-number': 'test-string', - 'vpc-cidr-block': '192.168.0.0/16', - 'vpc-id': 'test-vpc', - 'vswitch-id': 'test-vpc', - 'vswitch-cidr-block': '192.168.0.0/16', - 'zone-id': 'test-zone-1', - 'ntp-conf': {'ntp_servers': [ - 'ntp1.aliyun.com', - 'ntp2.aliyun.com', - 'ntp3.aliyun.com']}, - 'source-address': ['http://mirrors.aliyun.com', - 'http://mirrors.aliyuncs.com'], - 'public-keys': {'key-pair-1': {'openssh-key': 'ssh-rsa AAAAB3...'}, - 'key-pair-2': {'openssh-key': 'ssh-rsa AAAAB3...'}} + "instance-id": "aliyun-test-vm-00", + "eipv4": "10.0.0.1", + "hostname": "test-hostname", + "image-id": "m-test", + "launch-index": "0", + "mac": "00:16:3e:00:00:00", + "network-type": "vpc", + "private-ipv4": "192.168.0.1", + "serial-number": "test-string", + "vpc-cidr-block": "192.168.0.0/16", + "vpc-id": "test-vpc", + "vswitch-id": "test-vpc", + "vswitch-cidr-block": "192.168.0.0/16", + "zone-id": "test-zone-1", + "ntp-conf": { + "ntp_servers": [ + "ntp1.aliyun.com", + "ntp2.aliyun.com", + "ntp3.aliyun.com", + ] + }, + "source-address": [ + "http://mirrors.aliyun.com", + "http://mirrors.aliyuncs.com", + ], + "public-keys": { + "key-pair-1": {"openssh-key": "ssh-rsa AAAAB3..."}, + "key-pair-2": {"openssh-key": "ssh-rsa AAAAB3..."}, + }, } DEFAULT_USERDATA = """\ @@ -46,21 +54,22 @@ def register_mock_metaserver(base_url, data): if isinstance(body, str): register(base_url, body) elif isinstance(body, list): - register(base_url.rstrip('/'), '\n'.join(body) + '\n') + register(base_url.rstrip("/"), "\n".join(body) + "\n") elif isinstance(body, dict): if not body: - register(base_url.rstrip('/') + '/', 'not found', - status_code=404) + register( + base_url.rstrip("/") + "/", "not found", status_code=404 + ) vals = [] for k, v in body.items(): if isinstance(v, (str, list)): - suffix = k.rstrip('/') + suffix = k.rstrip("/") else: - suffix = k.rstrip('/') + '/' + suffix = k.rstrip("/") + "/" vals.append(suffix) - url = base_url.rstrip('/') + '/' + suffix + url = base_url.rstrip("/") + "/" + suffix register_helper(register, url, v) - register(base_url, '\n'.join(vals) + '\n') + register(base_url, "\n".join(vals) + "\n") register = functools.partial(httpretty.register_uri, httpretty.GET) register_helper(register, base_url, data) @@ -69,9 +78,9 @@ def register_mock_metaserver(base_url, data): class TestAliYunDatasource(test_helpers.HttprettyTestCase): def setUp(self): super(TestAliYunDatasource, self).setUp() - cfg = {'datasource': {'AliYun': {'timeout': '1', 'max_wait': '1'}}} + cfg = {"datasource": {"AliYun": {"timeout": "1", "max_wait": "1"}}} distro = {} - paths = helpers.Paths({'run_dir': self.tmp_dir()}) + paths = helpers.Paths({"run_dir": self.tmp_dir()}) self.ds = ay.DataSourceAliYun(cfg, distro, paths) self.metadata_address = self.ds.metadata_urls[0] @@ -85,15 +94,20 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): @property def metadata_url(self): - return os.path.join( - self.metadata_address, - self.ds.min_metadata_version, 'meta-data') + 
'/' + return ( + os.path.join( + self.metadata_address, + self.ds.min_metadata_version, + "meta-data", + ) + + "/" + ) @property def userdata_url(self): return os.path.join( - self.metadata_address, - self.ds.min_metadata_version, 'user-data') + self.metadata_address, self.ds.min_metadata_version, "user-data" + ) # EC2 provides an instance-identity document which must return 404 here # for this test to pass. @@ -103,9 +117,12 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): @property def identity_url(self): - return os.path.join(self.metadata_address, - self.ds.min_metadata_version, - 'dynamic', 'instance-identity') + return os.path.join( + self.metadata_address, + self.ds.min_metadata_version, + "dynamic", + "instance-identity", + ) def regist_default_server(self): register_mock_metaserver(self.metadata_url, self.default_metadata) @@ -114,21 +131,26 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): def _test_get_data(self): self.assertEqual(self.ds.metadata, self.default_metadata) - self.assertEqual(self.ds.userdata_raw, - self.default_userdata.encode('utf8')) + self.assertEqual( + self.ds.userdata_raw, self.default_userdata.encode("utf8") + ) def _test_get_sshkey(self): - pub_keys = [v['openssh-key'] for (_, v) in - self.default_metadata['public-keys'].items()] + pub_keys = [ + v["openssh-key"] + for (_, v) in self.default_metadata["public-keys"].items() + ] self.assertEqual(self.ds.get_public_ssh_keys(), pub_keys) def _test_get_iid(self): - self.assertEqual(self.default_metadata['instance-id'], - self.ds.get_instance_id()) + self.assertEqual( + self.default_metadata["instance-id"], self.ds.get_instance_id() + ) def _test_host_name(self): - self.assertEqual(self.default_metadata['hostname'], - self.ds.get_hostname()) + self.assertEqual( + self.default_metadata["hostname"], self.ds.get_hostname() + ) @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") def test_with_mock_server(self, m_is_aliyun): @@ -141,10 +163,11 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): self._test_get_sshkey() self._test_get_iid() self._test_host_name() - self.assertEqual('aliyun', self.ds.cloud_name) - self.assertEqual('ec2', self.ds.platform) + self.assertEqual("aliyun", self.ds.cloud_name) + self.assertEqual("ec2", self.ds.platform) self.assertEqual( - 'metadata (http://100.100.100.200)', self.ds.subplatform) + "metadata (http://100.100.100.200)", self.ds.subplatform + ) @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") def test_returns_false_when_not_on_aliyun(self, m_is_aliyun): @@ -159,30 +182,38 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): public_keys = {} self.assertEqual(ay.parse_public_keys(public_keys), []) - public_keys = {'key-pair-0': 'ssh-key-0'} - self.assertEqual(ay.parse_public_keys(public_keys), - [public_keys['key-pair-0']]) + public_keys = {"key-pair-0": "ssh-key-0"} + self.assertEqual( + ay.parse_public_keys(public_keys), [public_keys["key-pair-0"]] + ) - public_keys = {'key-pair-0': 'ssh-key-0', 'key-pair-1': 'ssh-key-1'} - self.assertEqual(set(ay.parse_public_keys(public_keys)), - set([public_keys['key-pair-0'], - public_keys['key-pair-1']])) + public_keys = {"key-pair-0": "ssh-key-0", "key-pair-1": "ssh-key-1"} + self.assertEqual( + set(ay.parse_public_keys(public_keys)), + set([public_keys["key-pair-0"], public_keys["key-pair-1"]]), + ) - public_keys = {'key-pair-0': ['ssh-key-0', 'ssh-key-1']} - self.assertEqual(ay.parse_public_keys(public_keys), - public_keys['key-pair-0']) + public_keys = {"key-pair-0": 
["ssh-key-0", "ssh-key-1"]} + self.assertEqual( + ay.parse_public_keys(public_keys), public_keys["key-pair-0"] + ) - public_keys = {'key-pair-0': {'openssh-key': []}} + public_keys = {"key-pair-0": {"openssh-key": []}} self.assertEqual(ay.parse_public_keys(public_keys), []) - public_keys = {'key-pair-0': {'openssh-key': 'ssh-key-0'}} - self.assertEqual(ay.parse_public_keys(public_keys), - [public_keys['key-pair-0']['openssh-key']]) + public_keys = {"key-pair-0": {"openssh-key": "ssh-key-0"}} + self.assertEqual( + ay.parse_public_keys(public_keys), + [public_keys["key-pair-0"]["openssh-key"]], + ) - public_keys = {'key-pair-0': {'openssh-key': ['ssh-key-0', - 'ssh-key-1']}} - self.assertEqual(ay.parse_public_keys(public_keys), - public_keys['key-pair-0']['openssh-key']) + public_keys = { + "key-pair-0": {"openssh-key": ["ssh-key-0", "ssh-key-1"]} + } + self.assertEqual( + ay.parse_public_keys(public_keys), + public_keys["key-pair-0"]["openssh-key"], + ) def test_route_metric_calculated_without_device_number(self): """Test that route-metric code works without `device-number` @@ -193,38 +224,43 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): LP: #1917875 """ netcfg = convert_ec2_metadata_network_config( - {"interfaces": {"macs": { - "06:17:04:d7:26:09": { - "interface-id": "eni-e44ef49e", - }, - "06:17:04:d7:26:08": { - "interface-id": "eni-e44ef49f", + { + "interfaces": { + "macs": { + "06:17:04:d7:26:09": { + "interface-id": "eni-e44ef49e", + }, + "06:17:04:d7:26:08": { + "interface-id": "eni-e44ef49f", + }, + } } - }}}, + }, macs_to_nics={ - '06:17:04:d7:26:09': 'eth0', - '06:17:04:d7:26:08': 'eth1', - } + "06:17:04:d7:26:09": "eth0", + "06:17:04:d7:26:08": "eth1", + }, ) - met0 = netcfg['ethernets']['eth0']['dhcp4-overrides']['route-metric'] - met1 = netcfg['ethernets']['eth1']['dhcp4-overrides']['route-metric'] + met0 = netcfg["ethernets"]["eth0"]["dhcp4-overrides"]["route-metric"] + met1 = netcfg["ethernets"]["eth1"]["dhcp4-overrides"]["route-metric"] # route-metric numbers should be 100 apart assert 100 == abs(met0 - met1) class TestIsAliYun(test_helpers.CiTestCase): - ALIYUN_PRODUCT = 'Alibaba Cloud ECS' - read_dmi_data_expected = [mock.call('system-product-name')] + ALIYUN_PRODUCT = "Alibaba Cloud ECS" + read_dmi_data_expected = [mock.call("system-product-name")] @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data") def test_true_on_aliyun_product(self, m_read_dmi_data): """Should return true if the dmi product data has expected value.""" m_read_dmi_data.return_value = self.ALIYUN_PRODUCT ret = ay._is_aliyun() - self.assertEqual(self.read_dmi_data_expected, - m_read_dmi_data.call_args_list) + self.assertEqual( + self.read_dmi_data_expected, m_read_dmi_data.call_args_list + ) self.assertEqual(True, ret) @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data") @@ -232,8 +268,9 @@ class TestIsAliYun(test_helpers.CiTestCase): """Should return false on empty value returned.""" m_read_dmi_data.return_value = "" ret = ay._is_aliyun() - self.assertEqual(self.read_dmi_data_expected, - m_read_dmi_data.call_args_list) + self.assertEqual( + self.read_dmi_data_expected, m_read_dmi_data.call_args_list + ) self.assertEqual(False, ret) @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data") @@ -241,8 +278,10 @@ class TestIsAliYun(test_helpers.CiTestCase): """Should return false on an unrelated string.""" m_read_dmi_data.return_value = "cubs win" ret = ay._is_aliyun() - self.assertEqual(self.read_dmi_data_expected, - m_read_dmi_data.call_args_list) 
+ self.assertEqual( + self.read_dmi_data_expected, m_read_dmi_data.call_args_list + ) self.assertEqual(False, ret) + # vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_altcloud.py b/tests/unittests/sources/test_altcloud.py index 7384c104..44dfafd9 100644 --- a/tests/unittests/sources/test_altcloud.py +++ b/tests/unittests/sources/test_altcloud.py @@ -6,54 +6,47 @@ # # This file is part of cloud-init. See LICENSE file for license information. -''' +""" This test file exercises the code in sources DataSourceAltCloud.py -''' +""" import os import shutil import tempfile -from cloudinit import dmi -from cloudinit import helpers -from cloudinit import subp -from cloudinit import util - -from tests.unittests.helpers import CiTestCase, mock - import cloudinit.sources.DataSourceAltCloud as dsac +from cloudinit import dmi, helpers, subp, util +from tests.unittests.helpers import CiTestCase, mock -OS_UNAME_ORIG = getattr(os, 'uname') +OS_UNAME_ORIG = getattr(os, "uname") def _write_user_data_files(mount_dir, value): - ''' + """ Populate the deltacloud_user_data_file the user_data_file which would be populated with user data. - ''' - deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt' - user_data_file = mount_dir + '/user-data.txt' + """ + deltacloud_user_data_file = mount_dir + "/deltacloud-user-data.txt" + user_data_file = mount_dir + "/user-data.txt" - udfile = open(deltacloud_user_data_file, 'w') + udfile = open(deltacloud_user_data_file, "w") udfile.write(value) udfile.close() os.chmod(deltacloud_user_data_file, 0o664) - udfile = open(user_data_file, 'w') + udfile = open(user_data_file, "w") udfile.write(value) udfile.close() os.chmod(user_data_file, 0o664) -def _remove_user_data_files(mount_dir, - dc_file=True, - non_dc_file=True): - ''' +def _remove_user_data_files(mount_dir, dc_file=True, non_dc_file=True): + """ Remove the test files: deltacloud_user_data_file and user_data_file - ''' - deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt' - user_data_file = mount_dir + '/user-data.txt' + """ + deltacloud_user_data_file = mount_dir + "/deltacloud-user-data.txt" + user_data_file = mount_dir + "/user-data.txt" # Ignore any failures removeing files that are already gone. 
if dc_file: @@ -70,9 +63,10 @@ def _remove_user_data_files(mount_dir, def _dmi_data(expected): - ''' + """ Spoof the data received over DMI - ''' + """ + def _data(key): return expected @@ -80,19 +74,19 @@ def _dmi_data(expected): class TestGetCloudType(CiTestCase): - '''Test to exercise method: DataSourceAltCloud.get_cloud_type()''' + """Test to exercise method: DataSourceAltCloud.get_cloud_type()""" with_logs = True def setUp(self): - '''Set up.''' + """Set up.""" super(TestGetCloudType, self).setUp() self.tmp = self.tmp_dir() - self.paths = helpers.Paths({'cloud_dir': self.tmp}) + self.paths = helpers.Paths({"cloud_dir": self.tmp}) self.dmi_data = dmi.read_dmi_data # We have a different code path for arm to deal with LP1243287 # We have to switch arch to x86_64 to avoid test failure - force_arch('x86_64') + force_arch("x86_64") def tearDown(self): # Reset @@ -101,216 +95,226 @@ class TestGetCloudType(CiTestCase): def test_cloud_info_file_ioerror(self): """Return UNKNOWN when /etc/sysconfig/cloud-info exists but errors.""" - self.assertEqual('/etc/sysconfig/cloud-info', dsac.CLOUD_INFO_FILE) + self.assertEqual("/etc/sysconfig/cloud-info", dsac.CLOUD_INFO_FILE) dsrc = dsac.DataSourceAltCloud({}, None, self.paths) # Attempting to read the directory generates IOError - with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.tmp): - self.assertEqual('UNKNOWN', dsrc.get_cloud_type()) + with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.tmp): + self.assertEqual("UNKNOWN", dsrc.get_cloud_type()) self.assertIn( - "[Errno 21] Is a directory: '%s'" % self.tmp, - self.logs.getvalue()) + "[Errno 21] Is a directory: '%s'" % self.tmp, self.logs.getvalue() + ) def test_cloud_info_file(self): """Return uppercase stripped content from /etc/sysconfig/cloud-info.""" dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - cloud_info = self.tmp_path('cloud-info', dir=self.tmp) - util.write_file(cloud_info, ' OverRiDdeN CloudType ') + cloud_info = self.tmp_path("cloud-info", dir=self.tmp) + util.write_file(cloud_info, " OverRiDdeN CloudType ") # Attempting to read the directory generates IOError - with mock.patch.object(dsac, 'CLOUD_INFO_FILE', cloud_info): - self.assertEqual('OVERRIDDEN CLOUDTYPE', dsrc.get_cloud_type()) + with mock.patch.object(dsac, "CLOUD_INFO_FILE", cloud_info): + self.assertEqual("OVERRIDDEN CLOUDTYPE", dsrc.get_cloud_type()) def test_rhev(self): - ''' + """ Test method get_cloud_type() for RHEVm systems. Forcing read_dmi_data return to match a RHEVm system: RHEV Hypervisor - ''' - dmi.read_dmi_data = _dmi_data('RHEV') + """ + dmi.read_dmi_data = _dmi_data("RHEV") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - self.assertEqual('RHEV', dsrc.get_cloud_type()) + self.assertEqual("RHEV", dsrc.get_cloud_type()) def test_vsphere(self): - ''' + """ Test method get_cloud_type() for vSphere systems. Forcing read_dmi_data return to match a vSphere system: RHEV Hypervisor - ''' - dmi.read_dmi_data = _dmi_data('VMware Virtual Platform') + """ + dmi.read_dmi_data = _dmi_data("VMware Virtual Platform") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - self.assertEqual('VSPHERE', dsrc.get_cloud_type()) + self.assertEqual("VSPHERE", dsrc.get_cloud_type()) def test_unknown(self): - ''' + """ Test method get_cloud_type() for unknown systems. Forcing read_dmi_data return to match an unrecognized return. 
- ''' - dmi.read_dmi_data = _dmi_data('Unrecognized Platform') + """ + dmi.read_dmi_data = _dmi_data("Unrecognized Platform") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - self.assertEqual('UNKNOWN', dsrc.get_cloud_type()) + self.assertEqual("UNKNOWN", dsrc.get_cloud_type()) class TestGetDataCloudInfoFile(CiTestCase): - ''' + """ Test to exercise method: DataSourceAltCloud.get_data() With a contrived CLOUD_INFO_FILE - ''' + """ + def setUp(self): - '''Set up.''' + """Set up.""" self.tmp = self.tmp_dir() self.paths = helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp}) - self.cloud_info_file = self.tmp_path('cloud-info', dir=self.tmp) + {"cloud_dir": self.tmp, "run_dir": self.tmp} + ) + self.cloud_info_file = self.tmp_path("cloud-info", dir=self.tmp) def test_rhev(self): - '''Success Test module get_data() forcing RHEV.''' + """Success Test module get_data() forcing RHEV.""" - util.write_file(self.cloud_info_file, 'RHEV') + util.write_file(self.cloud_info_file, "RHEV") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) dsrc.user_data_rhevm = lambda: True - with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): + with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file): self.assertEqual(True, dsrc.get_data()) - self.assertEqual('altcloud', dsrc.cloud_name) - self.assertEqual('altcloud', dsrc.platform_type) - self.assertEqual('rhev (/dev/fd0)', dsrc.subplatform) + self.assertEqual("altcloud", dsrc.cloud_name) + self.assertEqual("altcloud", dsrc.platform_type) + self.assertEqual("rhev (/dev/fd0)", dsrc.subplatform) def test_vsphere(self): - '''Success Test module get_data() forcing VSPHERE.''' + """Success Test module get_data() forcing VSPHERE.""" - util.write_file(self.cloud_info_file, 'VSPHERE') + util.write_file(self.cloud_info_file, "VSPHERE") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) dsrc.user_data_vsphere = lambda: True - with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): + with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file): self.assertEqual(True, dsrc.get_data()) - self.assertEqual('altcloud', dsrc.cloud_name) - self.assertEqual('altcloud', dsrc.platform_type) - self.assertEqual('vsphere (unknown)', dsrc.subplatform) + self.assertEqual("altcloud", dsrc.cloud_name) + self.assertEqual("altcloud", dsrc.platform_type) + self.assertEqual("vsphere (unknown)", dsrc.subplatform) def test_fail_rhev(self): - '''Failure Test module get_data() forcing RHEV.''' + """Failure Test module get_data() forcing RHEV.""" - util.write_file(self.cloud_info_file, 'RHEV') + util.write_file(self.cloud_info_file, "RHEV") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) dsrc.user_data_rhevm = lambda: False - with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): + with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file): self.assertEqual(False, dsrc.get_data()) def test_fail_vsphere(self): - '''Failure Test module get_data() forcing VSPHERE.''' + """Failure Test module get_data() forcing VSPHERE.""" - util.write_file(self.cloud_info_file, 'VSPHERE') + util.write_file(self.cloud_info_file, "VSPHERE") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) dsrc.user_data_vsphere = lambda: False - with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): + with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file): self.assertEqual(False, dsrc.get_data()) def test_unrecognized(self): - '''Failure Test module get_data() forcing unrecognized.''' + """Failure Test 
module get_data() forcing unrecognized.""" - util.write_file(self.cloud_info_file, 'unrecognized') + util.write_file(self.cloud_info_file, "unrecognized") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): + with mock.patch.object(dsac, "CLOUD_INFO_FILE", self.cloud_info_file): self.assertEqual(False, dsrc.get_data()) class TestGetDataNoCloudInfoFile(CiTestCase): - ''' + """ Test to exercise method: DataSourceAltCloud.get_data() Without a CLOUD_INFO_FILE - ''' + """ + def setUp(self): - '''Set up.''' + """Set up.""" self.tmp = self.tmp_dir() self.paths = helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp}) + {"cloud_dir": self.tmp, "run_dir": self.tmp} + ) self.dmi_data = dmi.read_dmi_data - dsac.CLOUD_INFO_FILE = \ - 'no such file' + dsac.CLOUD_INFO_FILE = "no such file" # We have a different code path for arm to deal with LP1243287 # We have to switch arch to x86_64 to avoid test failure - force_arch('x86_64') + force_arch("x86_64") def tearDown(self): # Reset - dsac.CLOUD_INFO_FILE = \ - '/etc/sysconfig/cloud-info' + dsac.CLOUD_INFO_FILE = "/etc/sysconfig/cloud-info" dmi.read_dmi_data = self.dmi_data # Return back to original arch force_arch() def test_rhev_no_cloud_file(self): - '''Test No cloud info file module get_data() forcing RHEV.''' + """Test No cloud info file module get_data() forcing RHEV.""" - dmi.read_dmi_data = _dmi_data('RHEV Hypervisor') + dmi.read_dmi_data = _dmi_data("RHEV Hypervisor") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) dsrc.user_data_rhevm = lambda: True self.assertEqual(True, dsrc.get_data()) def test_vsphere_no_cloud_file(self): - '''Test No cloud info file module get_data() forcing VSPHERE.''' + """Test No cloud info file module get_data() forcing VSPHERE.""" - dmi.read_dmi_data = _dmi_data('VMware Virtual Platform') + dmi.read_dmi_data = _dmi_data("VMware Virtual Platform") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) dsrc.user_data_vsphere = lambda: True self.assertEqual(True, dsrc.get_data()) def test_failure_no_cloud_file(self): - '''Test No cloud info file module get_data() forcing unrecognized.''' + """Test No cloud info file module get_data() forcing unrecognized.""" - dmi.read_dmi_data = _dmi_data('Unrecognized Platform') + dmi.read_dmi_data = _dmi_data("Unrecognized Platform") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.get_data()) class TestUserDataRhevm(CiTestCase): - ''' + """ Test to exercise method: DataSourceAltCloud.user_data_rhevm() - ''' + """ + def setUp(self): - '''Set up.''' - self.paths = helpers.Paths({'cloud_dir': '/tmp'}) + """Set up.""" + self.paths = helpers.Paths({"cloud_dir": "/tmp"}) self.mount_dir = self.tmp_dir() - _write_user_data_files(self.mount_dir, 'test user data') + _write_user_data_files(self.mount_dir, "test user data") self.add_patch( - 'cloudinit.sources.DataSourceAltCloud.modprobe_floppy', - 'm_modprobe_floppy', return_value=None) + "cloudinit.sources.DataSourceAltCloud.modprobe_floppy", + "m_modprobe_floppy", + return_value=None, + ) self.add_patch( - 'cloudinit.sources.DataSourceAltCloud.util.udevadm_settle', - 'm_udevadm_settle', return_value=('', '')) + "cloudinit.sources.DataSourceAltCloud.util.udevadm_settle", + "m_udevadm_settle", + return_value=("", ""), + ) self.add_patch( - 'cloudinit.sources.DataSourceAltCloud.util.mount_cb', - 'm_mount_cb') + "cloudinit.sources.DataSourceAltCloud.util.mount_cb", "m_mount_cb" + ) def test_mount_cb_fails(self): - '''Test 
user_data_rhevm() where mount_cb fails.''' + """Test user_data_rhevm() where mount_cb fails.""" self.m_mount_cb.side_effect = util.MountFailedError("Failed Mount") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) def test_modprobe_fails(self): - '''Test user_data_rhevm() where modprobe fails.''' + """Test user_data_rhevm() where modprobe fails.""" self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError( - "Failed modprobe") + "Failed modprobe" + ) dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) def test_no_modprobe_cmd(self): - '''Test user_data_rhevm() with no modprobe command.''' + """Test user_data_rhevm() with no modprobe command.""" self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError( - "No such file or dir") + "No such file or dir" + ) dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) def test_udevadm_fails(self): - '''Test user_data_rhevm() where udevadm fails.''' + """Test user_data_rhevm() where udevadm fails.""" self.m_udevadm_settle.side_effect = subp.ProcessExecutionError( - "Failed settle.") + "Failed settle." + ) dsrc = dsac.DataSourceAltCloud({}, None, self.paths) self.assertEqual(False, dsrc.user_data_rhevm()) def test_no_udevadm_cmd(self): - '''Test user_data_rhevm() with no udevadm command.''' + """Test user_data_rhevm() with no udevadm command.""" self.m_udevadm_settle.side_effect = OSError("No such file or dir") dsrc = dsac.DataSourceAltCloud({}, None, self.paths) @@ -318,16 +322,17 @@ class TestUserDataRhevm(CiTestCase): class TestUserDataVsphere(CiTestCase): - ''' + """ Test to exercise method: DataSourceAltCloud.user_data_vsphere() - ''' + """ + def setUp(self): - '''Set up.''' + """Set up.""" self.tmp = self.tmp_dir() - self.paths = helpers.Paths({'cloud_dir': self.tmp}) + self.paths = helpers.Paths({"cloud_dir": self.tmp}) self.mount_dir = tempfile.mkdtemp() - _write_user_data_files(self.mount_dir, 'test user data') + _write_user_data_files(self.mount_dir, "test user data") def tearDown(self): # Reset @@ -340,13 +345,12 @@ class TestUserDataVsphere(CiTestCase): except OSError: pass - dsac.CLOUD_INFO_FILE = \ - '/etc/sysconfig/cloud-info' + dsac.CLOUD_INFO_FILE = "/etc/sysconfig/cloud-info" @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with") @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb") def test_user_data_vsphere_no_cdrom(self, m_mount_cb, m_find_devs_with): - '''Test user_data_vsphere() where mount_cb fails.''' + """Test user_data_vsphere() where mount_cb fails.""" m_mount_cb.return_value = [] dsrc = dsac.DataSourceAltCloud({}, None, self.paths) @@ -356,7 +360,7 @@ class TestUserDataVsphere(CiTestCase): @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with") @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb") def test_user_data_vsphere_mcb_fail(self, m_mount_cb, m_find_devs_with): - '''Test user_data_vsphere() where mount_cb fails.''' + """Test user_data_vsphere() where mount_cb fails.""" m_find_devs_with.return_value = ["/dev/mock/cdrom"] m_mount_cb.side_effect = util.MountFailedError("Unable To mount") @@ -370,28 +374,30 @@ class TestUserDataVsphere(CiTestCase): def test_user_data_vsphere_success(self, m_mount_cb, m_find_devs_with): """Test user_data_vsphere() where successful.""" m_find_devs_with.return_value = ["/dev/mock/cdrom"] - m_mount_cb.return_value = 'raw userdata from cdrom' + m_mount_cb.return_value = "raw 
userdata from cdrom" dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - cloud_info = self.tmp_path('cloud-info', dir=self.tmp) - util.write_file(cloud_info, 'VSPHERE') + cloud_info = self.tmp_path("cloud-info", dir=self.tmp) + util.write_file(cloud_info, "VSPHERE") self.assertEqual(True, dsrc.user_data_vsphere()) - m_find_devs_with.assert_called_once_with('LABEL=CDROM') + m_find_devs_with.assert_called_once_with("LABEL=CDROM") m_mount_cb.assert_called_once_with( - '/dev/mock/cdrom', dsac.read_user_data_callback) - with mock.patch.object(dsrc, 'get_cloud_type', return_value='VSPHERE'): - self.assertEqual('vsphere (/dev/mock/cdrom)', dsrc.subplatform) + "/dev/mock/cdrom", dsac.read_user_data_callback + ) + with mock.patch.object(dsrc, "get_cloud_type", return_value="VSPHERE"): + self.assertEqual("vsphere (/dev/mock/cdrom)", dsrc.subplatform) class TestReadUserDataCallback(CiTestCase): - ''' + """ Test to exercise method: DataSourceAltCloud.read_user_data_callback() - ''' + """ + def setUp(self): - '''Set up.''' - self.paths = helpers.Paths({'cloud_dir': '/tmp'}) + """Set up.""" + self.paths = helpers.Paths({"cloud_dir": "/tmp"}) self.mount_dir = tempfile.mkdtemp() - _write_user_data_files(self.mount_dir, 'test user data') + _write_user_data_files(self.mount_dir, "test user data") def tearDown(self): # Reset @@ -405,46 +411,49 @@ class TestReadUserDataCallback(CiTestCase): pass def test_callback_both(self): - '''Test read_user_data_callback() with both files.''' + """Test read_user_data_callback() with both files.""" - self.assertEqual('test user data', - dsac.read_user_data_callback(self.mount_dir)) + self.assertEqual( + "test user data", dsac.read_user_data_callback(self.mount_dir) + ) def test_callback_dc(self): - '''Test read_user_data_callback() with only DC file.''' + """Test read_user_data_callback() with only DC file.""" - _remove_user_data_files(self.mount_dir, - dc_file=False, - non_dc_file=True) + _remove_user_data_files( + self.mount_dir, dc_file=False, non_dc_file=True + ) - self.assertEqual('test user data', - dsac.read_user_data_callback(self.mount_dir)) + self.assertEqual( + "test user data", dsac.read_user_data_callback(self.mount_dir) + ) def test_callback_non_dc(self): - '''Test read_user_data_callback() with only non-DC file.''' + """Test read_user_data_callback() with only non-DC file.""" - _remove_user_data_files(self.mount_dir, - dc_file=True, - non_dc_file=False) + _remove_user_data_files( + self.mount_dir, dc_file=True, non_dc_file=False + ) - self.assertEqual('test user data', - dsac.read_user_data_callback(self.mount_dir)) + self.assertEqual( + "test user data", dsac.read_user_data_callback(self.mount_dir) + ) def test_callback_none(self): - '''Test read_user_data_callback() no files are found.''' + """Test read_user_data_callback() no files are found.""" _remove_user_data_files(self.mount_dir) self.assertIsNone(dsac.read_user_data_callback(self.mount_dir)) def force_arch(arch=None): - def _os_uname(): - return ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', arch) + return ("LINUX", "NODENAME", "RELEASE", "VERSION", arch) if arch: - setattr(os, 'uname', _os_uname) + setattr(os, "uname", _os_uname) elif arch is None: - setattr(os, 'uname', OS_UNAME_ORIG) + setattr(os, "uname", OS_UNAME_ORIG) + # vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py index ad8be04b..8b0762b7 100644 --- a/tests/unittests/sources/test_azure.py +++ b/tests/unittests/sources/test_azure.py @@ -1,33 +1,47 @@ # This file is part of 
cloud-init. See LICENSE file for license information. -from cloudinit import distros -from cloudinit import helpers -from cloudinit import url_helper -from cloudinit.sources import ( - UNSET, DataSourceAzure as dsaz, InvalidMetaDataException) -from cloudinit.util import (b64e, decode_binary, load_file, write_file, - MountFailedError, json_dumps, load_json) -from cloudinit.version import version_string as vs -from tests.unittests.helpers import ( - HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, - ExitStack, resourceLocation) -from cloudinit.sources.helpers import netlink - import copy import crypt -import httpretty import json import os -import requests import stat import xml.etree.ElementTree as ET -import yaml +import httpretty +import requests +import yaml -def construct_valid_ovf_env(data=None, pubkeys=None, - userdata=None, platform_settings=None): +from cloudinit import distros, helpers, url_helper +from cloudinit.sources import UNSET +from cloudinit.sources import DataSourceAzure as dsaz +from cloudinit.sources import InvalidMetaDataException +from cloudinit.sources.helpers import netlink +from cloudinit.util import ( + MountFailedError, + b64e, + decode_binary, + json_dumps, + load_file, + load_json, + write_file, +) +from cloudinit.version import version_string as vs +from tests.unittests.helpers import ( + CiTestCase, + ExitStack, + HttprettyTestCase, + mock, + populate_dir, + resourceLocation, + wrap_and_call, +) + + +def construct_valid_ovf_env( + data=None, pubkeys=None, userdata=None, platform_settings=None +): if data is None: - data = {'HostName': 'FOOHOST'} + data = {"HostName": "FOOHOST"} if pubkeys is None: pubkeys = {} @@ -45,9 +59,14 @@ def construct_valid_ovf_env(data=None, pubkeys=None, """ for key, dval in data.items(): if isinstance(dval, dict): - val = dict(dval).get('text') - attrs = ' ' + ' '.join(["%s='%s'" % (k, v) for k, v - in dict(dval).items() if k != 'text']) + val = dict(dval).get("text") + attrs = " " + " ".join( + [ + "%s='%s'" % (k, v) + for k, v in dict(dval).items() + if k != "text" + ] + ) else: val = dval attrs = "" @@ -61,8 +80,10 @@ def construct_valid_ovf_env(data=None, pubkeys=None, for fp, path, value in pubkeys: content += " <PublicKey>" if fp and path: - content += ("<Fingerprint>%s</Fingerprint><Path>%s</Path>" % - (fp, path)) + content += "<Fingerprint>%s</Fingerprint><Path>%s</Path>" % ( + fp, + path, + ) if value: content += "<Value>%s</Value>" % value content += "</PublicKey>\n" @@ -106,300 +127,331 @@ NETWORK_METADATA = { "vmScaleSetName": "", "vmSize": "Standard_DS1_v2", "zone": "", - "publicKeys": [ - { - "keyData": "ssh-rsa key1", - "path": "path1" - } - ] + "publicKeys": [{"keyData": "ssh-rsa key1", "path": "path1"}], }, "network": { "interface": [ { "macAddress": "000D3A047598", - "ipv6": { - "ipAddress": [] - }, + "ipv6": {"ipAddress": []}, "ipv4": { - "subnet": [ - { - "prefix": "24", - "address": "10.0.0.0" - } - ], + "subnet": [{"prefix": "24", "address": "10.0.0.0"}], "ipAddress": [ { "privateIpAddress": "10.0.0.4", - "publicIpAddress": "104.46.124.81" + "publicIpAddress": "104.46.124.81", } - ] - } + ], + }, } ] - } + }, } SECONDARY_INTERFACE = { "macAddress": "220D3A047598", - "ipv6": { - "ipAddress": [] - }, + "ipv6": {"ipAddress": []}, "ipv4": { - "subnet": [ - { - "prefix": "24", - "address": "10.0.1.0" - } - ], + "subnet": [{"prefix": "24", "address": "10.0.1.0"}], "ipAddress": [ { "privateIpAddress": "10.0.1.5", } - ] - } + ], + }, } SECONDARY_INTERFACE_NO_IP = { "macAddress": "220D3A047598", - 
"ipv6": { - "ipAddress": [] - }, + "ipv6": {"ipAddress": []}, "ipv4": { - "subnet": [ - { - "prefix": "24", - "address": "10.0.1.0" - } - ], - "ipAddress": [] - } + "subnet": [{"prefix": "24", "address": "10.0.1.0"}], + "ipAddress": [], + }, } IMDS_NETWORK_METADATA = { "interface": [ { "macAddress": "000D3A047598", - "ipv6": { - "ipAddress": [] - }, + "ipv6": {"ipAddress": []}, "ipv4": { - "subnet": [ - { - "prefix": "24", - "address": "10.0.0.0" - } - ], + "subnet": [{"prefix": "24", "address": "10.0.0.0"}], "ipAddress": [ { "privateIpAddress": "10.0.0.4", - "publicIpAddress": "104.46.124.81" + "publicIpAddress": "104.46.124.81", } - ] - } + ], + }, } ] } -MOCKPATH = 'cloudinit.sources.DataSourceAzure.' -EXAMPLE_UUID = 'd0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8' +MOCKPATH = "cloudinit.sources.DataSourceAzure." +EXAMPLE_UUID = "d0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8" class TestParseNetworkConfig(CiTestCase): maxDiff = None fallback_config = { - 'version': 1, - 'config': [{ - 'type': 'physical', 'name': 'eth0', - 'mac_address': '00:11:22:33:44:55', - 'params': {'driver': 'hv_netsvc'}, - 'subnets': [{'type': 'dhcp'}], - }] + "version": 1, + "config": [ + { + "type": "physical", + "name": "eth0", + "mac_address": "00:11:22:33:44:55", + "params": {"driver": "hv_netsvc"}, + "subnets": [{"type": "dhcp"}], + } + ], } - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) + @mock.patch( + "cloudinit.sources.DataSourceAzure.device_driver", return_value=None + ) def test_single_ipv4_nic_configuration(self, m_driver): """parse_network_config emits dhcp on single nic with ipv4""" - expected = {'ethernets': { - 'eth0': {'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': False, - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'set-name': 'eth0'}}, 'version': 2} + expected = { + "ethernets": { + "eth0": { + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 100}, + "dhcp6": False, + "match": {"macaddress": "00:0d:3a:04:75:98"}, + "set-name": "eth0", + } + }, + "version": 2, + } self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA)) - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) + @mock.patch( + "cloudinit.sources.DataSourceAzure.device_driver", return_value=None + ) def test_increases_route_metric_for_non_primary_nics(self, m_driver): """parse_network_config increases route-metric for each nic""" - expected = {'ethernets': { - 'eth0': {'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': False, - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'set-name': 'eth0'}, - 'eth1': {'set-name': 'eth1', - 'match': {'macaddress': '22:0d:3a:04:75:98'}, - 'dhcp6': False, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 200}}, - 'eth2': {'set-name': 'eth2', - 'match': {'macaddress': '33:0d:3a:04:75:98'}, - 'dhcp6': False, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 300}}}, 'version': 2} + expected = { + "ethernets": { + "eth0": { + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 100}, + "dhcp6": False, + "match": {"macaddress": "00:0d:3a:04:75:98"}, + "set-name": "eth0", + }, + "eth1": { + "set-name": "eth1", + "match": {"macaddress": "22:0d:3a:04:75:98"}, + "dhcp6": False, + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 200}, + }, + "eth2": { + "set-name": "eth2", + "match": {"macaddress": "33:0d:3a:04:75:98"}, + "dhcp6": False, + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 300}, + }, + }, + "version": 2, + } imds_data = copy.deepcopy(NETWORK_METADATA) - 
imds_data['network']['interface'].append(SECONDARY_INTERFACE) + imds_data["network"]["interface"].append(SECONDARY_INTERFACE) third_intf = copy.deepcopy(SECONDARY_INTERFACE) - third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') - third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' - third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' - imds_data['network']['interface'].append(third_intf) + third_intf["macAddress"] = third_intf["macAddress"].replace("22", "33") + third_intf["ipv4"]["subnet"][0]["address"] = "10.0.2.0" + third_intf["ipv4"]["ipAddress"][0]["privateIpAddress"] = "10.0.2.6" + imds_data["network"]["interface"].append(third_intf) self.assertEqual(expected, dsaz.parse_network_config(imds_data)) - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) + @mock.patch( + "cloudinit.sources.DataSourceAzure.device_driver", return_value=None + ) def test_ipv4_and_ipv6_route_metrics_match_for_nics(self, m_driver): """parse_network_config emits matching ipv4 and ipv6 route-metrics.""" - expected = {'ethernets': { - 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/128'], - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': True, - 'dhcp6-overrides': {'route-metric': 100}, - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'set-name': 'eth0'}, - 'eth1': {'set-name': 'eth1', - 'match': {'macaddress': '22:0d:3a:04:75:98'}, - 'dhcp4': True, - 'dhcp6': False, - 'dhcp4-overrides': {'route-metric': 200}}, - 'eth2': {'set-name': 'eth2', - 'match': {'macaddress': '33:0d:3a:04:75:98'}, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 300}, - 'dhcp6': True, - 'dhcp6-overrides': {'route-metric': 300}}}, 'version': 2} + expected = { + "ethernets": { + "eth0": { + "addresses": ["10.0.0.5/24", "2001:dead:beef::2/128"], + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 100}, + "dhcp6": True, + "dhcp6-overrides": {"route-metric": 100}, + "match": {"macaddress": "00:0d:3a:04:75:98"}, + "set-name": "eth0", + }, + "eth1": { + "set-name": "eth1", + "match": {"macaddress": "22:0d:3a:04:75:98"}, + "dhcp4": True, + "dhcp6": False, + "dhcp4-overrides": {"route-metric": 200}, + }, + "eth2": { + "set-name": "eth2", + "match": {"macaddress": "33:0d:3a:04:75:98"}, + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 300}, + "dhcp6": True, + "dhcp6-overrides": {"route-metric": 300}, + }, + }, + "version": 2, + } imds_data = copy.deepcopy(NETWORK_METADATA) - nic1 = imds_data['network']['interface'][0] - nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + nic1 = imds_data["network"]["interface"][0] + nic1["ipv4"]["ipAddress"].append({"privateIpAddress": "10.0.0.5"}) - nic1['ipv6'] = { + nic1["ipv6"] = { "subnet": [{"address": "2001:dead:beef::16"}], - "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}, - {"privateIpAddress": "2001:dead:beef::2"}] + "ipAddress": [ + {"privateIpAddress": "2001:dead:beef::1"}, + {"privateIpAddress": "2001:dead:beef::2"}, + ], } - imds_data['network']['interface'].append(SECONDARY_INTERFACE) + imds_data["network"]["interface"].append(SECONDARY_INTERFACE) third_intf = copy.deepcopy(SECONDARY_INTERFACE) - third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') - third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' - third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' - third_intf['ipv6'] = { + third_intf["macAddress"] = third_intf["macAddress"].replace("22", "33") + third_intf["ipv4"]["subnet"][0]["address"] = "10.0.2.0" + 
third_intf["ipv4"]["ipAddress"][0]["privateIpAddress"] = "10.0.2.6" + third_intf["ipv6"] = { "subnet": [{"prefix": "64", "address": "2001:dead:beef::2"}], - "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}] + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}], } - imds_data['network']['interface'].append(third_intf) + imds_data["network"]["interface"].append(third_intf) self.assertEqual(expected, dsaz.parse_network_config(imds_data)) - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) + @mock.patch( + "cloudinit.sources.DataSourceAzure.device_driver", return_value=None + ) def test_ipv4_secondary_ips_will_be_static_addrs(self, m_driver): """parse_network_config emits primary ipv4 as dhcp others are static""" - expected = {'ethernets': { - 'eth0': {'addresses': ['10.0.0.5/24'], - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': True, - 'dhcp6-overrides': {'route-metric': 100}, - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'set-name': 'eth0'}}, 'version': 2} + expected = { + "ethernets": { + "eth0": { + "addresses": ["10.0.0.5/24"], + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 100}, + "dhcp6": True, + "dhcp6-overrides": {"route-metric": 100}, + "match": {"macaddress": "00:0d:3a:04:75:98"}, + "set-name": "eth0", + } + }, + "version": 2, + } imds_data = copy.deepcopy(NETWORK_METADATA) - nic1 = imds_data['network']['interface'][0] - nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + nic1 = imds_data["network"]["interface"][0] + nic1["ipv4"]["ipAddress"].append({"privateIpAddress": "10.0.0.5"}) - nic1['ipv6'] = { + nic1["ipv6"] = { "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}], - "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}] + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}], } self.assertEqual(expected, dsaz.parse_network_config(imds_data)) - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) + @mock.patch( + "cloudinit.sources.DataSourceAzure.device_driver", return_value=None + ) def test_ipv6_secondary_ips_will_be_static_cidrs(self, m_driver): """parse_network_config emits primary ipv6 as dhcp others are static""" - expected = {'ethernets': { - 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/10'], - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': True, - 'dhcp6-overrides': {'route-metric': 100}, - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'set-name': 'eth0'}}, 'version': 2} + expected = { + "ethernets": { + "eth0": { + "addresses": ["10.0.0.5/24", "2001:dead:beef::2/10"], + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 100}, + "dhcp6": True, + "dhcp6-overrides": {"route-metric": 100}, + "match": {"macaddress": "00:0d:3a:04:75:98"}, + "set-name": "eth0", + } + }, + "version": 2, + } imds_data = copy.deepcopy(NETWORK_METADATA) - nic1 = imds_data['network']['interface'][0] - nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + nic1 = imds_data["network"]["interface"][0] + nic1["ipv4"]["ipAddress"].append({"privateIpAddress": "10.0.0.5"}) # Secondary ipv6 addresses currently ignored/unconfigured - nic1['ipv6'] = { + nic1["ipv6"] = { "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}], - "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}, - {"privateIpAddress": "2001:dead:beef::2"}] + "ipAddress": [ + {"privateIpAddress": "2001:dead:beef::1"}, + {"privateIpAddress": "2001:dead:beef::2"}, + ], } self.assertEqual(expected, dsaz.parse_network_config(imds_data)) 
- @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value='hv_netvsc') + @mock.patch( + "cloudinit.sources.DataSourceAzure.device_driver", + return_value="hv_netvsc", + ) def test_match_driver_for_netvsc(self, m_driver): """parse_network_config emits driver when using netvsc.""" - expected = {'ethernets': { - 'eth0': { - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': False, - 'match': { - 'macaddress': '00:0d:3a:04:75:98', - 'driver': 'hv_netvsc', - }, - 'set-name': 'eth0' - }}, 'version': 2} + expected = { + "ethernets": { + "eth0": { + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 100}, + "dhcp6": False, + "match": { + "macaddress": "00:0d:3a:04:75:98", + "driver": "hv_netvsc", + }, + "set-name": "eth0", + } + }, + "version": 2, + } self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA)) - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - @mock.patch('cloudinit.net.generate_fallback_config') + @mock.patch( + "cloudinit.sources.DataSourceAzure.device_driver", return_value=None + ) + @mock.patch("cloudinit.net.generate_fallback_config") def test_parse_network_config_uses_fallback_cfg_when_no_network_metadata( - self, m_fallback_config, m_driver): + self, m_fallback_config, m_driver + ): """parse_network_config generates fallback network config when the IMDS instance metadata is corrupted/invalid, such as when network metadata is not present. """ imds_metadata_missing_network_metadata = copy.deepcopy( - NETWORK_METADATA) - del imds_metadata_missing_network_metadata['network'] + NETWORK_METADATA + ) + del imds_metadata_missing_network_metadata["network"] m_fallback_config.return_value = self.fallback_config self.assertEqual( self.fallback_config, - dsaz.parse_network_config( - imds_metadata_missing_network_metadata)) + dsaz.parse_network_config(imds_metadata_missing_network_metadata), + ) - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - @mock.patch('cloudinit.net.generate_fallback_config') + @mock.patch( + "cloudinit.sources.DataSourceAzure.device_driver", return_value=None + ) + @mock.patch("cloudinit.net.generate_fallback_config") def test_parse_network_config_uses_fallback_cfg_when_no_interface_metadata( - self, m_fallback_config, m_driver): + self, m_fallback_config, m_driver + ): """parse_network_config generates fallback network config when the IMDS instance metadata is corrupted/invalid, such as when network interface metadata is not present. 
""" imds_metadata_missing_interface_metadata = copy.deepcopy( - NETWORK_METADATA) - del imds_metadata_missing_interface_metadata['network']['interface'] + NETWORK_METADATA + ) + del imds_metadata_missing_interface_metadata["network"]["interface"] m_fallback_config.return_value = self.fallback_config self.assertEqual( self.fallback_config, dsaz.parse_network_config( - imds_metadata_missing_interface_metadata)) + imds_metadata_missing_interface_metadata + ), + ) class TestGetMetadataFromIMDS(HttprettyTestCase): @@ -412,175 +464,218 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): dsaz.IMDS_URL ) - @mock.patch(MOCKPATH + 'readurl') - @mock.patch(MOCKPATH + 'EphemeralDHCPv4', autospec=True) - @mock.patch(MOCKPATH + 'net.is_up', autospec=True) + @mock.patch(MOCKPATH + "readurl") + @mock.patch(MOCKPATH + "EphemeralDHCPv4", autospec=True) + @mock.patch(MOCKPATH + "net.is_up", autospec=True) def test_get_metadata_does_not_dhcp_if_network_is_up( - self, m_net_is_up, m_dhcp, m_readurl): + self, m_net_is_up, m_dhcp, m_readurl + ): """Do not perform DHCP setup when nic is already up.""" m_net_is_up.return_value = True m_readurl.return_value = url_helper.StringResponse( - json.dumps(NETWORK_METADATA).encode('utf-8')) + json.dumps(NETWORK_METADATA).encode("utf-8") + ) self.assertEqual( - NETWORK_METADATA, - dsaz.get_metadata_from_imds('eth9', retries=3)) + NETWORK_METADATA, dsaz.get_metadata_from_imds("eth9", retries=3) + ) - m_net_is_up.assert_called_with('eth9') + m_net_is_up.assert_called_with("eth9") m_dhcp.assert_not_called() self.assertIn( "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time - self.logs.getvalue()) + self.logs.getvalue(), + ) - @mock.patch(MOCKPATH + 'readurl', autospec=True) - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') - @mock.patch(MOCKPATH + 'net.is_up') + @mock.patch(MOCKPATH + "readurl", autospec=True) + @mock.patch(MOCKPATH + "EphemeralDHCPv4") + @mock.patch(MOCKPATH + "net.is_up") def test_get_metadata_uses_instance_url( - self, m_net_is_up, m_dhcp, m_readurl): + self, m_net_is_up, m_dhcp, m_readurl + ): """Make sure readurl is called with the correct url when accessing metadata""" m_net_is_up.return_value = True m_readurl.return_value = url_helper.StringResponse( - json.dumps(IMDS_NETWORK_METADATA).encode('utf-8')) + json.dumps(IMDS_NETWORK_METADATA).encode("utf-8") + ) dsaz.get_metadata_from_imds( - 'eth0', retries=3, md_type=dsaz.metadata_type.all) + "eth0", retries=3, md_type=dsaz.metadata_type.all + ) m_readurl.assert_called_with( - "http://169.254.169.254/metadata/instance?api-version=" - "2019-06-01", exception_cb=mock.ANY, - headers=mock.ANY, retries=mock.ANY, - timeout=mock.ANY, infinite=False) + "http://169.254.169.254/metadata/instance?api-version=2019-06-01", + exception_cb=mock.ANY, + headers=mock.ANY, + retries=mock.ANY, + timeout=mock.ANY, + infinite=False, + ) - @mock.patch(MOCKPATH + 'readurl', autospec=True) - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') - @mock.patch(MOCKPATH + 'net.is_up') + @mock.patch(MOCKPATH + "readurl", autospec=True) + @mock.patch(MOCKPATH + "EphemeralDHCPv4") + @mock.patch(MOCKPATH + "net.is_up") def test_get_network_metadata_uses_network_url( - self, m_net_is_up, m_dhcp, m_readurl): + self, m_net_is_up, m_dhcp, m_readurl + ): """Make sure readurl is called with the correct url when accessing network metadata""" m_net_is_up.return_value = True m_readurl.return_value = url_helper.StringResponse( - json.dumps(IMDS_NETWORK_METADATA).encode('utf-8')) + json.dumps(IMDS_NETWORK_METADATA).encode("utf-8") + ) 
dsaz.get_metadata_from_imds( - 'eth0', retries=3, md_type=dsaz.metadata_type.network) + "eth0", retries=3, md_type=dsaz.metadata_type.network + ) m_readurl.assert_called_with( "http://169.254.169.254/metadata/instance/network?api-version=" - "2019-06-01", exception_cb=mock.ANY, - headers=mock.ANY, retries=mock.ANY, - timeout=mock.ANY, infinite=False) + "2019-06-01", + exception_cb=mock.ANY, + headers=mock.ANY, + retries=mock.ANY, + timeout=mock.ANY, + infinite=False, + ) - @mock.patch(MOCKPATH + 'readurl', autospec=True) - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') - @mock.patch(MOCKPATH + 'net.is_up') + @mock.patch(MOCKPATH + "readurl", autospec=True) + @mock.patch(MOCKPATH + "EphemeralDHCPv4") + @mock.patch(MOCKPATH + "net.is_up") def test_get_default_metadata_uses_instance_url( - self, m_net_is_up, m_dhcp, m_readurl): + self, m_net_is_up, m_dhcp, m_readurl + ): """Make sure readurl is called with the correct url when accessing metadata""" m_net_is_up.return_value = True m_readurl.return_value = url_helper.StringResponse( - json.dumps(IMDS_NETWORK_METADATA).encode('utf-8')) + json.dumps(IMDS_NETWORK_METADATA).encode("utf-8") + ) - dsaz.get_metadata_from_imds( - 'eth0', retries=3) + dsaz.get_metadata_from_imds("eth0", retries=3) m_readurl.assert_called_with( - "http://169.254.169.254/metadata/instance?api-version=" - "2019-06-01", exception_cb=mock.ANY, - headers=mock.ANY, retries=mock.ANY, - timeout=mock.ANY, infinite=False) + "http://169.254.169.254/metadata/instance?api-version=2019-06-01", + exception_cb=mock.ANY, + headers=mock.ANY, + retries=mock.ANY, + timeout=mock.ANY, + infinite=False, + ) - @mock.patch(MOCKPATH + 'readurl', autospec=True) - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') - @mock.patch(MOCKPATH + 'net.is_up') + @mock.patch(MOCKPATH + "readurl", autospec=True) + @mock.patch(MOCKPATH + "EphemeralDHCPv4") + @mock.patch(MOCKPATH + "net.is_up") def test_get_metadata_uses_extended_url( - self, m_net_is_up, m_dhcp, m_readurl): + self, m_net_is_up, m_dhcp, m_readurl + ): """Make sure readurl is called with the correct url when accessing metadata""" m_net_is_up.return_value = True m_readurl.return_value = url_helper.StringResponse( - json.dumps(IMDS_NETWORK_METADATA).encode('utf-8')) + json.dumps(IMDS_NETWORK_METADATA).encode("utf-8") + ) dsaz.get_metadata_from_imds( - 'eth0', retries=3, md_type=dsaz.metadata_type.all, - api_version="2021-08-01") + "eth0", + retries=3, + md_type=dsaz.metadata_type.all, + api_version="2021-08-01", + ) m_readurl.assert_called_with( "http://169.254.169.254/metadata/instance?api-version=" - "2021-08-01&extended=true", exception_cb=mock.ANY, - headers=mock.ANY, retries=mock.ANY, - timeout=mock.ANY, infinite=False) + "2021-08-01&extended=true", + exception_cb=mock.ANY, + headers=mock.ANY, + retries=mock.ANY, + timeout=mock.ANY, + infinite=False, + ) - @mock.patch(MOCKPATH + 'readurl', autospec=True) - @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting', autospec=True) - @mock.patch(MOCKPATH + 'net.is_up', autospec=True) + @mock.patch(MOCKPATH + "readurl", autospec=True) + @mock.patch(MOCKPATH + "EphemeralDHCPv4WithReporting", autospec=True) + @mock.patch(MOCKPATH + "net.is_up", autospec=True) def test_get_metadata_performs_dhcp_when_network_is_down( - self, m_net_is_up, m_dhcp, m_readurl): + self, m_net_is_up, m_dhcp, m_readurl + ): """Perform DHCP setup when nic is not up.""" m_net_is_up.return_value = False m_readurl.return_value = url_helper.StringResponse( - json.dumps(NETWORK_METADATA).encode('utf-8')) + 
json.dumps(NETWORK_METADATA).encode("utf-8") + ) self.assertEqual( - NETWORK_METADATA, - dsaz.get_metadata_from_imds('eth9', retries=2)) + NETWORK_METADATA, dsaz.get_metadata_from_imds("eth9", retries=2) + ) - m_net_is_up.assert_called_with('eth9') - m_dhcp.assert_called_with(mock.ANY, 'eth9') + m_net_is_up.assert_called_with("eth9") + m_dhcp.assert_called_with(mock.ANY, "eth9") self.assertIn( "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time - self.logs.getvalue()) + self.logs.getvalue(), + ) m_readurl.assert_called_with( - self.network_md_url, exception_cb=mock.ANY, - headers={'Metadata': 'true'}, retries=2, - timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, infinite=False) + self.network_md_url, + exception_cb=mock.ANY, + headers={"Metadata": "true"}, + retries=2, + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, + infinite=False, + ) - @mock.patch('cloudinit.url_helper.time.sleep') - @mock.patch(MOCKPATH + 'net.is_up', autospec=True) + @mock.patch("cloudinit.url_helper.time.sleep") + @mock.patch(MOCKPATH + "net.is_up", autospec=True) def test_get_metadata_from_imds_empty_when_no_imds_present( - self, m_net_is_up, m_sleep): + self, m_net_is_up, m_sleep + ): """Return empty dict when IMDS network metadata is absent.""" httpretty.register_uri( httpretty.GET, - dsaz.IMDS_URL + '/instance?api-version=2017-12-01', - body={}, status=404) + dsaz.IMDS_URL + "/instance?api-version=2017-12-01", + body={}, + status=404, + ) m_net_is_up.return_value = True # skips dhcp - self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=2)) + self.assertEqual({}, dsaz.get_metadata_from_imds("eth9", retries=2)) - m_net_is_up.assert_called_with('eth9') + m_net_is_up.assert_called_with("eth9") self.assertEqual([mock.call(1), mock.call(1)], m_sleep.call_args_list) self.assertIn( "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time - self.logs.getvalue()) + self.logs.getvalue(), + ) - @mock.patch('requests.Session.request') - @mock.patch('cloudinit.url_helper.time.sleep') - @mock.patch(MOCKPATH + 'net.is_up', autospec=True) + @mock.patch("requests.Session.request") + @mock.patch("cloudinit.url_helper.time.sleep") + @mock.patch(MOCKPATH + "net.is_up", autospec=True) def test_get_metadata_from_imds_retries_on_timeout( - self, m_net_is_up, m_sleep, m_request): + self, m_net_is_up, m_sleep, m_request + ): """Retry IMDS network metadata on timeout errors.""" self.attempt = 0 - m_request.side_effect = requests.Timeout('Fake Connection Timeout') + m_request.side_effect = requests.Timeout("Fake Connection Timeout") def retry_callback(request, uri, headers): self.attempt += 1 - raise requests.Timeout('Fake connection timeout') + raise requests.Timeout("Fake connection timeout") httpretty.register_uri( httpretty.GET, - dsaz.IMDS_URL + 'instance?api-version=2017-12-01', - body=retry_callback) + dsaz.IMDS_URL + "instance?api-version=2017-12-01", + body=retry_callback, + ) m_net_is_up.return_value = True # skips dhcp - self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=3)) + self.assertEqual({}, dsaz.get_metadata_from_imds("eth9", retries=3)) - m_net_is_up.assert_called_with('eth9') - self.assertEqual([mock.call(1)]*3, m_sleep.call_args_list) + m_net_is_up.assert_called_with("eth9") + self.assertEqual([mock.call(1)] * 3, m_sleep.call_args_list) self.assertIn( "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time - self.logs.getvalue()) + self.logs.getvalue(), + ) class TestAzureDataSource(CiTestCase): @@ -593,25 +688,35 @@ class TestAzureDataSource(CiTestCase): # patch cloud_dir, 
so our 'seed_dir' is guaranteed empty self.paths = helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp}) - self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') + {"cloud_dir": self.tmp, "run_dir": self.tmp} + ) + self.waagent_d = os.path.join(self.tmp, "var", "lib", "waagent") self.patches = ExitStack() self.addCleanup(self.patches.close) - self.patches.enter_context(mock.patch.object( - dsaz, '_get_random_seed', return_value='wild')) + self.patches.enter_context( + mock.patch.object(dsaz, "_get_random_seed", return_value="wild") + ) self.m_get_metadata_from_imds = self.patches.enter_context( mock.patch.object( - dsaz, 'get_metadata_from_imds', - mock.MagicMock(return_value=NETWORK_METADATA))) + dsaz, + "get_metadata_from_imds", + mock.MagicMock(return_value=NETWORK_METADATA), + ) + ) self.m_fallback_nic = self.patches.enter_context( - mock.patch('cloudinit.sources.net.find_fallback_nic', - return_value='eth9')) + mock.patch( + "cloudinit.sources.net.find_fallback_nic", return_value="eth9" + ) + ) self.m_remove_ubuntu_network_scripts = self.patches.enter_context( mock.patch.object( - dsaz, 'maybe_remove_ubuntu_network_config_scripts', - mock.MagicMock())) + dsaz, + "maybe_remove_ubuntu_network_config_scripts", + mock.MagicMock(), + ) + ) super(TestAzureDataSource, self).setUp() def apply_patches(self, patches): @@ -619,15 +724,21 @@ class TestAzureDataSource(CiTestCase): self.patches.enter_context(mock.patch.object(module, name, new)) def _get_mockds(self): - sysctl_out = "dev.storvsc.3.%pnpinfo: "\ - "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\ - "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n" - sysctl_out += "dev.storvsc.2.%pnpinfo: "\ - "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\ - "deviceid=f8b3781a-1e82-4818-a1c3-63d806ec15bb\n" - sysctl_out += "dev.storvsc.1.%pnpinfo: "\ - "classid=32412632-86cb-44a2-9b5c-50d1417354f5 "\ - "deviceid=00000000-0001-8899-0000-000000000000\n" + sysctl_out = ( + "dev.storvsc.3.%pnpinfo: " + "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f " + "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n" + ) + sysctl_out += ( + "dev.storvsc.2.%pnpinfo: " + "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f " + "deviceid=f8b3781a-1e82-4818-a1c3-63d806ec15bb\n" + ) + sysctl_out += ( + "dev.storvsc.1.%pnpinfo: " + "classid=32412632-86cb-44a2-9b5c-50d1417354f5 " + "deviceid=00000000-0001-8899-0000-000000000000\n" + ) camctl_devbus = """ scbus0 on ata0 bus 0 scbus1 on ata1 bus 0 @@ -642,45 +753,57 @@ scbus-1 on xpt0 bus 0 <Msft Virtual Disk 1.0> at scbus2 target 0 lun 0 (da0,pass1) <Msft Virtual Disk 1.0> at scbus3 target 1 lun 0 (da1,pass2) """ - self.apply_patches([ - (dsaz, 'get_dev_storvsc_sysctl', mock.MagicMock( - return_value=sysctl_out)), - (dsaz, 'get_camcontrol_dev_bus', mock.MagicMock( - return_value=camctl_devbus)), - (dsaz, 'get_camcontrol_dev', mock.MagicMock( - return_value=camctl_dev)) - ]) + self.apply_patches( + [ + ( + dsaz, + "get_dev_storvsc_sysctl", + mock.MagicMock(return_value=sysctl_out), + ), + ( + dsaz, + "get_camcontrol_dev_bus", + mock.MagicMock(return_value=camctl_devbus), + ), + ( + dsaz, + "get_camcontrol_dev", + mock.MagicMock(return_value=camctl_dev), + ), + ] + ) return dsaz - def _get_ds(self, data, distro='ubuntu', - apply_network=None, instance_id=None): - + def _get_ds( + self, data, distro="ubuntu", apply_network=None, instance_id=None + ): def _wait_for_files(flist, _maxwait=None, _naplen=None): - data['waited'] = flist + data["waited"] = flist return [] def _load_possible_azure_ds(seed_dir, cache_dir): yield 
seed_dir yield dsaz.DEFAULT_PROVISIONING_ISO_DEV - yield from data.get('dsdevs', []) + yield from data.get("dsdevs", []) if cache_dir: yield cache_dir seed_dir = os.path.join(self.paths.seed_dir, "azure") - if data.get('ovfcontent') is not None: - populate_dir(seed_dir, - {'ovf-env.xml': data['ovfcontent']}) + if data.get("ovfcontent") is not None: + populate_dir(seed_dir, {"ovf-env.xml": data["ovfcontent"]}) - dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d self.m_is_platform_viable = mock.MagicMock(autospec=True) self.m_get_metadata_from_fabric = mock.MagicMock( - return_value={'public-keys': []}) + return_value={"public-keys": []} + ) self.m_report_failure_to_fabric = mock.MagicMock(autospec=True) self.m_ephemeral_dhcpv4 = mock.MagicMock() self.m_ephemeral_dhcpv4_with_reporting = mock.MagicMock() self.m_list_possible_azure_ds = mock.MagicMock( - side_effect=_load_possible_azure_ds) + side_effect=_load_possible_azure_ds + ) if instance_id: self.instance_id = instance_id @@ -688,39 +811,59 @@ scbus-1 on xpt0 bus 0 self.instance_id = EXAMPLE_UUID def _dmi_mocks(key): - if key == 'system-uuid': + if key == "system-uuid": return self.instance_id - elif key == 'chassis-asset-tag': - return '7783-7084-3265-9085-8269-3286-77' - - self.apply_patches([ - (dsaz, 'list_possible_azure_ds', - self.m_list_possible_azure_ds), - (dsaz, '_is_platform_viable', - self.m_is_platform_viable), - (dsaz, 'get_metadata_from_fabric', - self.m_get_metadata_from_fabric), - (dsaz, 'report_failure_to_fabric', - self.m_report_failure_to_fabric), - (dsaz, 'EphemeralDHCPv4', self.m_ephemeral_dhcpv4), - (dsaz, 'EphemeralDHCPv4WithReporting', - self.m_ephemeral_dhcpv4_with_reporting), - (dsaz, 'get_boot_telemetry', mock.MagicMock()), - (dsaz, 'get_system_info', mock.MagicMock()), - (dsaz.subp, 'which', lambda x: True), - (dsaz.dmi, 'read_dmi_data', mock.MagicMock( - side_effect=_dmi_mocks)), - (dsaz.util, 'wait_for_files', mock.MagicMock( - side_effect=_wait_for_files)), - ]) + elif key == "chassis-asset-tag": + return "7783-7084-3265-9085-8269-3286-77" + + self.apply_patches( + [ + ( + dsaz, + "list_possible_azure_ds", + self.m_list_possible_azure_ds, + ), + (dsaz, "_is_platform_viable", self.m_is_platform_viable), + ( + dsaz, + "get_metadata_from_fabric", + self.m_get_metadata_from_fabric, + ), + ( + dsaz, + "report_failure_to_fabric", + self.m_report_failure_to_fabric, + ), + (dsaz, "EphemeralDHCPv4", self.m_ephemeral_dhcpv4), + ( + dsaz, + "EphemeralDHCPv4WithReporting", + self.m_ephemeral_dhcpv4_with_reporting, + ), + (dsaz, "get_boot_telemetry", mock.MagicMock()), + (dsaz, "get_system_info", mock.MagicMock()), + (dsaz.subp, "which", lambda x: True), + ( + dsaz.dmi, + "read_dmi_data", + mock.MagicMock(side_effect=_dmi_mocks), + ), + ( + dsaz.util, + "wait_for_files", + mock.MagicMock(side_effect=_wait_for_files), + ), + ] + ) if isinstance(distro, str): distro_cls = distros.fetch(distro) - distro = distro_cls(distro, data.get('sys_cfg', {}), self.paths) + distro = distro_cls(distro, data.get("sys_cfg", {}), self.paths) dsrc = dsaz.DataSourceAzure( - data.get('sys_cfg', {}), distro=distro, paths=self.paths) + data.get("sys_cfg", {}), distro=distro, paths=self.paths + ) if apply_network is not None: - dsrc.ds_cfg['apply_network_config'] = apply_network + dsrc.ds_cfg["apply_network_config"] = apply_network return dsrc @@ -774,19 +917,18 @@ scbus-1 on xpt0 bus 0 data = {} dsrc = self._get_ds(data) self.m_is_platform_viable.return_value = False - with 
mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ - mock.patch.object(dsrc, '_report_failure') as m_report_failure: + with mock.patch.object( + dsrc, "crawl_metadata" + ) as m_crawl_metadata, mock.patch.object( + dsrc, "_report_failure" + ) as m_report_failure: ret = dsrc.get_data() self.m_is_platform_viable.assert_called_with(dsrc.seed_dir) self.assertFalse(ret) # Assert that for non viable platforms, # there is no communication with the Azure datasource. - self.assertEqual( - 0, - m_crawl_metadata.call_count) - self.assertEqual( - 0, - m_report_failure.call_count) + self.assertEqual(0, m_crawl_metadata.call_count) + self.assertEqual(0, m_report_failure.call_count) def test_platform_viable_but_no_devs_should_return_no_datasource(self): """For platforms where the Azure platform is viable @@ -797,170 +939,190 @@ scbus-1 on xpt0 bus 0 """ data = {} dsrc = self._get_ds(data) - with mock.patch.object(dsrc, '_report_failure') as m_report_failure: + with mock.patch.object(dsrc, "_report_failure") as m_report_failure: self.m_is_platform_viable.return_value = True ret = dsrc.get_data() self.m_is_platform_viable.assert_called_with(dsrc.seed_dir) self.assertFalse(ret) - self.assertEqual( - 1, - m_report_failure.call_count) + self.assertEqual(1, m_report_failure.call_count) def test_crawl_metadata_exception_returns_no_datasource(self): data = {} dsrc = self._get_ds(data) self.m_is_platform_viable.return_value = True - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: + with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata: m_crawl_metadata.side_effect = Exception ret = dsrc.get_data() self.m_is_platform_viable.assert_called_with(dsrc.seed_dir) - self.assertEqual( - 1, - m_crawl_metadata.call_count) + self.assertEqual(1, m_crawl_metadata.call_count) self.assertFalse(ret) def test_crawl_metadata_exception_should_report_failure_with_msg(self): data = {} dsrc = self._get_ds(data) self.m_is_platform_viable.return_value = True - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ - mock.patch.object(dsrc, '_report_failure') as m_report_failure: + with mock.patch.object( + dsrc, "crawl_metadata" + ) as m_crawl_metadata, mock.patch.object( + dsrc, "_report_failure" + ) as m_report_failure: m_crawl_metadata.side_effect = Exception dsrc.get_data() - self.assertEqual( - 1, - m_crawl_metadata.call_count) + self.assertEqual(1, m_crawl_metadata.call_count) m_report_failure.assert_called_once_with( - description=dsaz.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE) + description=dsaz.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE + ) def test_crawl_metadata_exc_should_log_could_not_crawl_msg(self): data = {} dsrc = self._get_ds(data) self.m_is_platform_viable.return_value = True - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: + with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata: m_crawl_metadata.side_effect = Exception dsrc.get_data() - self.assertEqual( - 1, - m_crawl_metadata.call_count) + self.assertEqual(1, m_crawl_metadata.call_count) self.assertIn( - "Could not crawl Azure metadata", - self.logs.getvalue()) + "Could not crawl Azure metadata", self.logs.getvalue() + ) def test_basic_seed_dir(self): - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} + odata = {"HostName": "myhost", "UserName": "myuser"} + data = { + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": {}, + } dsrc = self._get_ds(data) ret = dsrc.get_data() 
self.assertTrue(ret) self.assertEqual(dsrc.userdata_raw, "") - self.assertEqual(dsrc.metadata['local-hostname'], odata['HostName']) - self.assertTrue(os.path.isfile( - os.path.join(self.waagent_d, 'ovf-env.xml'))) - self.assertEqual('azure', dsrc.cloud_name) - self.assertEqual('azure', dsrc.platform_type) + self.assertEqual(dsrc.metadata["local-hostname"], odata["HostName"]) + self.assertTrue( + os.path.isfile(os.path.join(self.waagent_d, "ovf-env.xml")) + ) + self.assertEqual("azure", dsrc.cloud_name) + self.assertEqual("azure", dsrc.platform_type) self.assertEqual( - 'seed-dir (%s/seed/azure)' % self.tmp, dsrc.subplatform) + "seed-dir (%s/seed/azure)" % self.tmp, dsrc.subplatform + ) def test_basic_dev_file(self): """When a device path is used, present that in subplatform.""" - data = {'sys_cfg': {}, 'dsdevs': ['/dev/cd0']} + data = {"sys_cfg": {}, "dsdevs": ["/dev/cd0"]} dsrc = self._get_ds(data) # DSAzure will attempt to mount /dev/sr0 first, which should # fail with mount error since the list of devices doesn't have # /dev/sr0 - with mock.patch(MOCKPATH + 'util.mount_cb') as m_mount_cb: + with mock.patch(MOCKPATH + "util.mount_cb") as m_mount_cb: m_mount_cb.side_effect = [ MountFailedError("fail"), - ({'local-hostname': 'me'}, 'ud', {'cfg': ''}, {}) + ({"local-hostname": "me"}, "ud", {"cfg": ""}, {}), ] self.assertTrue(dsrc.get_data()) - self.assertEqual(dsrc.userdata_raw, 'ud') - self.assertEqual(dsrc.metadata['local-hostname'], 'me') - self.assertEqual('azure', dsrc.cloud_name) - self.assertEqual('azure', dsrc.platform_type) - self.assertEqual('config-disk (/dev/cd0)', dsrc.subplatform) + self.assertEqual(dsrc.userdata_raw, "ud") + self.assertEqual(dsrc.metadata["local-hostname"], "me") + self.assertEqual("azure", dsrc.cloud_name) + self.assertEqual("azure", dsrc.platform_type) + self.assertEqual("config-disk (/dev/cd0)", dsrc.subplatform) def test_get_data_non_ubuntu_will_not_remove_network_scripts(self): """get_data on non-Ubuntu will not remove ubuntu net scripts.""" - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} + odata = {"HostName": "myhost", "UserName": "myuser"} + data = { + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": {}, + } - dsrc = self._get_ds(data, distro='debian') + dsrc = self._get_ds(data, distro="debian") dsrc.get_data() self.m_remove_ubuntu_network_scripts.assert_not_called() def test_get_data_on_ubuntu_will_remove_network_scripts(self): """get_data will remove ubuntu net scripts on Ubuntu distro.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} + sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}} + odata = {"HostName": "myhost", "UserName": "myuser"} + data = { + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": sys_cfg, + } - dsrc = self._get_ds(data, distro='ubuntu') + dsrc = self._get_ds(data, distro="ubuntu") dsrc.get_data() self.m_remove_ubuntu_network_scripts.assert_called_once_with() def test_get_data_on_ubuntu_will_not_remove_network_scripts_disabled(self): """When apply_network_config false, do not remove scripts on Ubuntu.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} + sys_cfg = {"datasource": 
{"Azure": {"apply_network_config": False}}} + odata = {"HostName": "myhost", "UserName": "myuser"} + data = { + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": sys_cfg, + } - dsrc = self._get_ds(data, distro='ubuntu') + dsrc = self._get_ds(data, distro="ubuntu") dsrc.get_data() self.m_remove_ubuntu_network_scripts.assert_not_called() def test_crawl_metadata_returns_structured_data_and_caches_nothing(self): """Return all structured metadata and cache no class attributes.""" yaml_cfg = "" - odata = {'HostName': "myhost", 'UserName': "myuser", - 'UserData': {'text': 'FOOBAR', 'encoding': 'plain'}, - 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} + odata = { + "HostName": "myhost", + "UserName": "myuser", + "UserData": {"text": "FOOBAR", "encoding": "plain"}, + "dscfg": {"text": yaml_cfg, "encoding": "plain"}, + } + data = { + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": {}, + } dsrc = self._get_ds(data) expected_cfg = { - 'PreprovisionedVMType': None, - 'PreprovisionedVm': False, - 'datasource': {'Azure': {}}, - 'system_info': {'default_user': {'name': 'myuser'}}} + "PreprovisionedVMType": None, + "PreprovisionedVm": False, + "datasource": {"Azure": {}}, + "system_info": {"default_user": {"name": "myuser"}}, + } expected_metadata = { - 'azure_data': { - 'configurationsettype': 'LinuxProvisioningConfiguration'}, - 'imds': NETWORK_METADATA, - 'instance-id': EXAMPLE_UUID, - 'local-hostname': 'myhost', - 'random_seed': 'wild'} + "azure_data": { + "configurationsettype": "LinuxProvisioningConfiguration" + }, + "imds": NETWORK_METADATA, + "instance-id": EXAMPLE_UUID, + "local-hostname": "myhost", + "random_seed": "wild", + } crawled_metadata = dsrc.crawl_metadata() self.assertCountEqual( crawled_metadata.keys(), - ['cfg', 'files', 'metadata', 'userdata_raw']) - self.assertEqual(crawled_metadata['cfg'], expected_cfg) + ["cfg", "files", "metadata", "userdata_raw"], + ) + self.assertEqual(crawled_metadata["cfg"], expected_cfg) self.assertEqual( - list(crawled_metadata['files'].keys()), ['ovf-env.xml']) + list(crawled_metadata["files"].keys()), ["ovf-env.xml"] + ) self.assertIn( - b'<HostName>myhost</HostName>', - crawled_metadata['files']['ovf-env.xml']) - self.assertEqual(crawled_metadata['metadata'], expected_metadata) - self.assertEqual(crawled_metadata['userdata_raw'], 'FOOBAR') + b"<HostName>myhost</HostName>", + crawled_metadata["files"]["ovf-env.xml"], + ) + self.assertEqual(crawled_metadata["metadata"], expected_metadata) + self.assertEqual(crawled_metadata["userdata_raw"], "FOOBAR") self.assertEqual(dsrc.userdata_raw, None) self.assertEqual(dsrc.metadata, {}) self.assertEqual(dsrc._metadata_imds, UNSET) - self.assertFalse(os.path.isfile( - os.path.join(self.waagent_d, 'ovf-env.xml'))) + self.assertFalse( + os.path.isfile(os.path.join(self.waagent_d, "ovf-env.xml")) + ) def test_crawl_metadata_raises_invalid_metadata_on_error(self): """crawl_metadata raises an exception on invalid ovf-env.xml.""" - data = {'ovfcontent': "BOGUS", 'sys_cfg': {}} + data = {"ovfcontent": "BOGUS", "sys_cfg": {}} dsrc = self._get_ds(data) - error_msg = ('BrokenAzureDataSource: Invalid ovf-env.xml:' - ' syntax error: line 1, column 0') + error_msg = ( + "BrokenAzureDataSource: Invalid ovf-env.xml:" + " syntax error: line 1, column 0" + ) with self.assertRaises(InvalidMetaDataException) as cm: dsrc.crawl_metadata() self.assertEqual(str(cm.exception), error_msg) @@ -971,20 +1133,19 @@ scbus-1 on xpt0 bus 
0 platform_settings={"PreprovisionedVm": "False"} ) - data = { - 'ovfcontent': ovfenv, - 'sys_cfg': {} - } + data = {"ovfcontent": ovfenv, "sys_cfg": {}} dsrc = self._get_ds(data) dsrc.crawl_metadata() self.assertEqual(1, self.m_get_metadata_from_imds.call_count) @mock.patch( - 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting') - @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') + "cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting" + ) + @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file") @mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') - @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds') + "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready" + ) + @mock.patch("cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds") def test_crawl_metadata_call_imds_twice_with_reprovision( self, poll_imds_func, m_report_ready, m_write, m_dhcp ): @@ -993,21 +1154,20 @@ scbus-1 on xpt0 bus 0 platform_settings={"PreprovisionedVm": "True"} ) - data = { - 'ovfcontent': ovfenv, - 'sys_cfg': {} - } + data = {"ovfcontent": ovfenv, "sys_cfg": {}} dsrc = self._get_ds(data) poll_imds_func.return_value = ovfenv dsrc.crawl_metadata() self.assertEqual(2, self.m_get_metadata_from_imds.call_count) @mock.patch( - 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting') - @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') + "cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting" + ) + @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file") @mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') - @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds') + "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready" + ) + @mock.patch("cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds") def test_crawl_metadata_on_reprovision_reports_ready( self, poll_imds_func, m_report_ready, m_write, m_dhcp ): @@ -1016,37 +1176,36 @@ scbus-1 on xpt0 bus 0 platform_settings={"PreprovisionedVm": "True"} ) - data = { - 'ovfcontent': ovfenv, - 'sys_cfg': {} - } + data = {"ovfcontent": ovfenv, "sys_cfg": {}} dsrc = self._get_ds(data) poll_imds_func.return_value = ovfenv dsrc.crawl_metadata() self.assertEqual(1, m_report_ready.call_count) @mock.patch( - 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting') - @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') + "cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting" + ) + @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file") @mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') - @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds') + "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready" + ) + @mock.patch("cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds") @mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure.' - '_wait_for_all_nics_ready') + "cloudinit.sources.DataSourceAzure.DataSourceAzure." 
+ "_wait_for_all_nics_ready" + ) def test_crawl_metadata_waits_for_nic_on_savable_vms( self, detect_nics, poll_imds_func, report_ready_func, m_write, m_dhcp ): """If reprovisioning, report ready at the end""" ovfenv = construct_valid_ovf_env( - platform_settings={"PreprovisionedVMType": "Savable", - "PreprovisionedVm": "True"} + platform_settings={ + "PreprovisionedVMType": "Savable", + "PreprovisionedVm": "True", + } ) - data = { - 'ovfcontent': ovfenv, - 'sys_cfg': {} - } + data = {"ovfcontent": ovfenv, "sys_cfg": {}} dsrc = self._get_ds(data) poll_imds_func.return_value = ovfenv dsrc.crawl_metadata() @@ -1054,18 +1213,27 @@ scbus-1 on xpt0 bus 0 self.assertEqual(1, detect_nics.call_count) @mock.patch( - 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting') - @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') + "cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting" + ) + @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file") @mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') - @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds') + "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready" + ) + @mock.patch("cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds") @mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure.' - '_wait_for_all_nics_ready') - @mock.patch('os.path.isfile') + "cloudinit.sources.DataSourceAzure.DataSourceAzure." + "_wait_for_all_nics_ready" + ) + @mock.patch("os.path.isfile") def test_detect_nics_when_marker_present( - self, is_file, detect_nics, poll_imds_func, report_ready_func, m_write, - m_dhcp): + self, + is_file, + detect_nics, + poll_imds_func, + report_ready_func, + m_write, + m_dhcp, + ): """If reprovisioning, wait for nic attach if marker present""" def is_file_ret(key): @@ -1074,10 +1242,7 @@ scbus-1 on xpt0 bus 0 is_file.side_effect = is_file_ret ovfenv = construct_valid_ovf_env() - data = { - 'ovfcontent': ovfenv, - 'sys_cfg': {} - } + data = {"ovfcontent": ovfenv, "sys_cfg": {}} dsrc = self._get_ds(data) poll_imds_func.return_value = ovfenv @@ -1085,29 +1250,28 @@ scbus-1 on xpt0 bus 0 self.assertEqual(1, report_ready_func.call_count) self.assertEqual(1, detect_nics.call_count) - @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') - @mock.patch('cloudinit.sources.helpers.netlink.' 
- 'wait_for_media_disconnect_connect') + @mock.patch("cloudinit.sources.DataSourceAzure.util.write_file") @mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') - @mock.patch('cloudinit.sources.DataSourceAzure.readurl') + "cloudinit.sources.helpers.netlink.wait_for_media_disconnect_connect" + ) + @mock.patch( + "cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready" + ) + @mock.patch("cloudinit.sources.DataSourceAzure.readurl") def test_crawl_metadata_on_reprovision_reports_ready_using_lease( - self, m_readurl, m_report_ready, - m_media_switch, m_write + self, m_readurl, m_report_ready, m_media_switch, m_write ): """If reprovisioning, report ready using the obtained lease""" ovfenv = construct_valid_ovf_env( platform_settings={"PreprovisionedVm": "True"} ) - data = { - 'ovfcontent': ovfenv, - 'sys_cfg': {} - } + data = {"ovfcontent": ovfenv, "sys_cfg": {}} dsrc = self._get_ds(data) - with mock.patch.object(dsrc.distro.networking, 'is_up') \ - as m_dsrc_distro_networking_is_up: + with mock.patch.object( + dsrc.distro.networking, "is_up" + ) as m_dsrc_distro_networking_is_up: # For this mock, net should not be up, # so that cached ephemeral won't be used. @@ -1116,16 +1280,21 @@ scbus-1 on xpt0 bus 0 m_dsrc_distro_networking_is_up.return_value = False lease = { - 'interface': 'eth9', 'fixed-address': '192.168.2.9', - 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', - 'unknown-245': '624c3620'} - self.m_ephemeral_dhcpv4_with_reporting.return_value \ - .__enter__.return_value = lease + "interface": "eth9", + "fixed-address": "192.168.2.9", + "routers": "192.168.2.1", + "subnet-mask": "255.255.255.0", + "unknown-245": "624c3620", + } + self.m_ephemeral_dhcpv4_with_reporting.return_value.__enter__.return_value = ( # noqa: E501 + lease + ) m_media_switch.return_value = None reprovision_ovfenv = construct_valid_ovf_env() m_readurl.return_value = url_helper.StringResponse( - reprovision_ovfenv.encode('utf-8')) + reprovision_ovfenv.encode("utf-8") + ) dsrc.crawl_metadata() self.assertEqual(2, m_report_ready.call_count) @@ -1133,91 +1302,118 @@ scbus-1 on xpt0 bus 0 def test_waagent_d_has_0700_perms(self): # we expect /var/lib/waagent to be created 0700 - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()}) ret = dsrc.get_data() self.assertTrue(ret) self.assertTrue(os.path.isdir(self.waagent_d)) self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0o700) - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) + @mock.patch( + "cloudinit.sources.DataSourceAzure.device_driver", return_value=None + ) def test_network_config_set_from_imds(self, m_driver): """Datasource.network_config returns IMDS network data.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}} odata = {} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} + data = { + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": sys_cfg, + } expected_network_config = { - 'ethernets': { - 'eth0': {'set-name': 'eth0', - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'dhcp6': False, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}}}, - 'version': 2} + "ethernets": { + "eth0": { + "set-name": "eth0", + "match": {"macaddress": "00:0d:3a:04:75:98"}, + "dhcp6": False, + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 100}, + } + }, + "version": 
2, + } dsrc = self._get_ds(data) dsrc.get_data() self.assertEqual(expected_network_config, dsrc.network_config) - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) + @mock.patch( + "cloudinit.sources.DataSourceAzure.device_driver", return_value=None + ) def test_network_config_set_from_imds_route_metric_for_secondary_nic( - self, m_driver): + self, m_driver + ): """Datasource.network_config adds route-metric to secondary nics.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}} odata = {} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} + data = { + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": sys_cfg, + } expected_network_config = { - 'ethernets': { - 'eth0': {'set-name': 'eth0', - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'dhcp6': False, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}}, - 'eth1': {'set-name': 'eth1', - 'match': {'macaddress': '22:0d:3a:04:75:98'}, - 'dhcp6': False, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 200}}, - 'eth2': {'set-name': 'eth2', - 'match': {'macaddress': '33:0d:3a:04:75:98'}, - 'dhcp6': False, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 300}}}, - 'version': 2} + "ethernets": { + "eth0": { + "set-name": "eth0", + "match": {"macaddress": "00:0d:3a:04:75:98"}, + "dhcp6": False, + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 100}, + }, + "eth1": { + "set-name": "eth1", + "match": {"macaddress": "22:0d:3a:04:75:98"}, + "dhcp6": False, + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 200}, + }, + "eth2": { + "set-name": "eth2", + "match": {"macaddress": "33:0d:3a:04:75:98"}, + "dhcp6": False, + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 300}, + }, + }, + "version": 2, + } imds_data = copy.deepcopy(NETWORK_METADATA) - imds_data['network']['interface'].append(SECONDARY_INTERFACE) + imds_data["network"]["interface"].append(SECONDARY_INTERFACE) third_intf = copy.deepcopy(SECONDARY_INTERFACE) - third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') - third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' - third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' - imds_data['network']['interface'].append(third_intf) + third_intf["macAddress"] = third_intf["macAddress"].replace("22", "33") + third_intf["ipv4"]["subnet"][0]["address"] = "10.0.2.0" + third_intf["ipv4"]["ipAddress"][0]["privateIpAddress"] = "10.0.2.6" + imds_data["network"]["interface"].append(third_intf) self.m_get_metadata_from_imds.return_value = imds_data dsrc = self._get_ds(data) dsrc.get_data() self.assertEqual(expected_network_config, dsrc.network_config) - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) + @mock.patch( + "cloudinit.sources.DataSourceAzure.device_driver", return_value=None + ) def test_network_config_set_from_imds_for_secondary_nic_no_ip( - self, m_driver): + self, m_driver + ): """If an IP address is empty then there should no config for it.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}} odata = {} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} + data = { + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": sys_cfg, + } expected_network_config = { - 'ethernets': { - 'eth0': {'set-name': 'eth0', - 'match': {'macaddress': 
'00:0d:3a:04:75:98'}, - 'dhcp6': False, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}}}, - 'version': 2} + "ethernets": { + "eth0": { + "set-name": "eth0", + "match": {"macaddress": "00:0d:3a:04:75:98"}, + "dhcp6": False, + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 100}, + } + }, + "version": 2, + } imds_data = copy.deepcopy(NETWORK_METADATA) - imds_data['network']['interface'].append(SECONDARY_INTERFACE_NO_IP) + imds_data["network"]["interface"].append(SECONDARY_INTERFACE_NO_IP) self.m_get_metadata_from_imds.return_value = imds_data dsrc = self._get_ds(data) dsrc.get_data() @@ -1225,91 +1421,110 @@ scbus-1 on xpt0 bus 0 def test_availability_zone_set_from_imds(self): """Datasource.availability returns IMDS platformFaultDomain.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}} odata = {} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} + data = { + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": sys_cfg, + } dsrc = self._get_ds(data) dsrc.get_data() - self.assertEqual('0', dsrc.availability_zone) + self.assertEqual("0", dsrc.availability_zone) def test_region_set_from_imds(self): """Datasource.region returns IMDS region location.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}} odata = {} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} + data = { + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": sys_cfg, + } dsrc = self._get_ds(data) dsrc.get_data() - self.assertEqual('eastus2', dsrc.region) + self.assertEqual("eastus2", dsrc.region) def test_sys_cfg_set_never_destroy_ntfs(self): - sys_cfg = {'datasource': {'Azure': { - 'never_destroy_ntfs': 'user-supplied-value'}}} - data = {'ovfcontent': construct_valid_ovf_env(data={}), - 'sys_cfg': sys_cfg} + sys_cfg = { + "datasource": { + "Azure": {"never_destroy_ntfs": "user-supplied-value"} + } + } + data = { + "ovfcontent": construct_valid_ovf_env(data={}), + "sys_cfg": sys_cfg, + } dsrc = self._get_ds(data) ret = self._get_and_setup(dsrc) self.assertTrue(ret) - self.assertEqual(dsrc.ds_cfg.get(dsaz.DS_CFG_KEY_PRESERVE_NTFS), - 'user-supplied-value') + self.assertEqual( + dsrc.ds_cfg.get(dsaz.DS_CFG_KEY_PRESERVE_NTFS), + "user-supplied-value", + ) def test_username_used(self): - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + odata = {"HostName": "myhost", "UserName": "myuser"} + data = {"ovfcontent": construct_valid_ovf_env(data=odata)} dsrc = self._get_ds(data) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEqual(dsrc.cfg['system_info']['default_user']['name'], - "myuser") + self.assertEqual( + dsrc.cfg["system_info"]["default_user"]["name"], "myuser" + ) def test_password_given(self): - odata = {'HostName': "myhost", 'UserName': "myuser", - 'UserPassword': "mypass"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + odata = { + "HostName": "myhost", + "UserName": "myuser", + "UserPassword": "mypass", + } + data = {"ovfcontent": construct_valid_ovf_env(data=odata)} dsrc = self._get_ds(data) ret = dsrc.get_data() self.assertTrue(ret) - self.assertIn('default_user', dsrc.cfg['system_info']) - defuser = dsrc.cfg['system_info']['default_user'] + self.assertIn("default_user", dsrc.cfg["system_info"]) + defuser = dsrc.cfg["system_info"]["default_user"] # 
default user should be updated username and should not be locked. - self.assertEqual(defuser['name'], odata['UserName']) - self.assertFalse(defuser['lock_passwd']) + self.assertEqual(defuser["name"], odata["UserName"]) + self.assertFalse(defuser["lock_passwd"]) # passwd is crypt formated string $id$salt$encrypted # encrypting plaintext with salt value of everything up to final '$' # should equal that after the '$' - pos = defuser['passwd'].rfind("$") + 1 - self.assertEqual(defuser['passwd'], - crypt.crypt(odata['UserPassword'], - defuser['passwd'][0:pos])) + pos = defuser["passwd"].rfind("$") + 1 + self.assertEqual( + defuser["passwd"], + crypt.crypt(odata["UserPassword"], defuser["passwd"][0:pos]), + ) # the same hashed value should also be present in cfg['password'] - self.assertEqual(defuser['passwd'], dsrc.cfg['password']) + self.assertEqual(defuser["passwd"], dsrc.cfg["password"]) def test_user_not_locked_if_password_redacted(self): - odata = {'HostName': "myhost", 'UserName': "myuser", - 'UserPassword': dsaz.DEF_PASSWD_REDACTION} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + odata = { + "HostName": "myhost", + "UserName": "myuser", + "UserPassword": dsaz.DEF_PASSWD_REDACTION, + } + data = {"ovfcontent": construct_valid_ovf_env(data=odata)} dsrc = self._get_ds(data) ret = dsrc.get_data() self.assertTrue(ret) - self.assertIn('default_user', dsrc.cfg['system_info']) - defuser = dsrc.cfg['system_info']['default_user'] + self.assertIn("default_user", dsrc.cfg["system_info"]) + defuser = dsrc.cfg["system_info"]["default_user"] # default user should be updated username and should not be locked. - self.assertEqual(defuser['name'], odata['UserName']) - self.assertIn('lock_passwd', defuser) - self.assertFalse(defuser['lock_passwd']) + self.assertEqual(defuser["name"], odata["UserName"]) + self.assertIn("lock_passwd", defuser) + self.assertFalse(defuser["lock_passwd"]) def test_userdata_plain(self): mydata = "FOOBAR" - odata = {'UserData': {'text': mydata, 'encoding': 'plain'}} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + odata = {"UserData": {"text": mydata, "encoding": "plain"}} + data = {"ovfcontent": construct_valid_ovf_env(data=odata)} dsrc = self._get_ds(data) ret = dsrc.get_data() @@ -1318,72 +1533,86 @@ scbus-1 on xpt0 bus 0 def test_userdata_found(self): mydata = "FOOBAR" - odata = {'UserData': {'text': b64e(mydata), 'encoding': 'base64'}} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + odata = {"UserData": {"text": b64e(mydata), "encoding": "base64"}} + data = {"ovfcontent": construct_valid_ovf_env(data=odata)} dsrc = self._get_ds(data) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEqual(dsrc.userdata_raw, mydata.encode('utf-8')) + self.assertEqual(dsrc.userdata_raw, mydata.encode("utf-8")) def test_default_ephemeral_configs_ephemeral_exists(self): # make sure the ephemeral configs are correct if disk present odata = {} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} + data = { + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": {}, + } orig_exists = dsaz.os.path.exists def changed_exists(path): - return True if path == dsaz.RESOURCE_DISK_PATH else orig_exists( - path) + return ( + True if path == dsaz.RESOURCE_DISK_PATH else orig_exists(path) + ) - with mock.patch(MOCKPATH + 'os.path.exists', new=changed_exists): + with mock.patch(MOCKPATH + "os.path.exists", new=changed_exists): dsrc = self._get_ds(data) ret = dsrc.get_data() self.assertTrue(ret) cfg = dsrc.get_config_obj() - 
self.assertEqual(dsrc.device_name_to_device("ephemeral0"), - dsaz.RESOURCE_DISK_PATH) - assert 'disk_setup' in cfg - assert 'fs_setup' in cfg - self.assertIsInstance(cfg['disk_setup'], dict) - self.assertIsInstance(cfg['fs_setup'], list) + self.assertEqual( + dsrc.device_name_to_device("ephemeral0"), + dsaz.RESOURCE_DISK_PATH, + ) + assert "disk_setup" in cfg + assert "fs_setup" in cfg + self.assertIsInstance(cfg["disk_setup"], dict) + self.assertIsInstance(cfg["fs_setup"], list) def test_default_ephemeral_configs_ephemeral_does_not_exist(self): # make sure the ephemeral configs are correct if disk not present odata = {} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} + data = { + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": {}, + } orig_exists = dsaz.os.path.exists def changed_exists(path): - return False if path == dsaz.RESOURCE_DISK_PATH else orig_exists( - path) + return ( + False if path == dsaz.RESOURCE_DISK_PATH else orig_exists(path) + ) - with mock.patch(MOCKPATH + 'os.path.exists', new=changed_exists): + with mock.patch(MOCKPATH + "os.path.exists", new=changed_exists): dsrc = self._get_ds(data) ret = dsrc.get_data() self.assertTrue(ret) cfg = dsrc.get_config_obj() - assert 'disk_setup' not in cfg - assert 'fs_setup' not in cfg + assert "disk_setup" not in cfg + assert "fs_setup" not in cfg def test_provide_disk_aliases(self): # Make sure that user can affect disk aliases - dscfg = {'disk_aliases': {'ephemeral0': '/dev/sdc'}} - odata = {'HostName': "myhost", 'UserName': "myuser", - 'dscfg': {'text': b64e(yaml.dump(dscfg)), - 'encoding': 'base64'}} - usercfg = {'disk_setup': {'/dev/sdc': {'something': '...'}, - 'ephemeral0': False}} - userdata = '#cloud-config' + yaml.dump(usercfg) + "\n" + dscfg = {"disk_aliases": {"ephemeral0": "/dev/sdc"}} + odata = { + "HostName": "myhost", + "UserName": "myuser", + "dscfg": {"text": b64e(yaml.dump(dscfg)), "encoding": "base64"}, + } + usercfg = { + "disk_setup": { + "/dev/sdc": {"something": "..."}, + "ephemeral0": False, + } + } + userdata = "#cloud-config" + yaml.dump(usercfg) + "\n" ovfcontent = construct_valid_ovf_env(data=odata, userdata=userdata) - data = {'ovfcontent': ovfcontent, 'sys_cfg': {}} + data = {"ovfcontent": ovfcontent, "sys_cfg": {}} dsrc = self._get_ds(data) ret = dsrc.get_data() @@ -1394,92 +1623,95 @@ scbus-1 on xpt0 bus 0 def test_userdata_arrives(self): userdata = "This is my user-data" xml = construct_valid_ovf_env(data={}, userdata=userdata) - data = {'ovfcontent': xml} + data = {"ovfcontent": xml} dsrc = self._get_ds(data) dsrc.get_data() - self.assertEqual(userdata.encode('us-ascii'), dsrc.userdata_raw) + self.assertEqual(userdata.encode("us-ascii"), dsrc.userdata_raw) def test_password_redacted_in_ovf(self): - odata = {'HostName': "myhost", 'UserName': "myuser", - 'UserPassword': "mypass"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + odata = { + "HostName": "myhost", + "UserName": "myuser", + "UserPassword": "mypass", + } + data = {"ovfcontent": construct_valid_ovf_env(data=odata)} dsrc = self._get_ds(data) ret = dsrc.get_data() self.assertTrue(ret) - ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml') + ovf_env_path = os.path.join(self.waagent_d, "ovf-env.xml") # The XML should not be same since the user password is redacted on_disk_ovf = load_file(ovf_env_path) - self.xml_notequals(data['ovfcontent'], on_disk_ovf) + self.xml_notequals(data["ovfcontent"], on_disk_ovf) # Make sure that the redacted password on disk is not used by CI - 
self.assertNotEqual(dsrc.cfg.get('password'), - dsaz.DEF_PASSWD_REDACTION) + self.assertNotEqual( + dsrc.cfg.get("password"), dsaz.DEF_PASSWD_REDACTION + ) # Make sure that the password was really encrypted et = ET.fromstring(on_disk_ovf) for elem in et.iter(): - if 'UserPassword' in elem.tag: + if "UserPassword" in elem.tag: self.assertEqual(dsaz.DEF_PASSWD_REDACTION, elem.text) def test_ovf_env_arrives_in_waagent_dir(self): xml = construct_valid_ovf_env(data={}, userdata="FOODATA") - dsrc = self._get_ds({'ovfcontent': xml}) + dsrc = self._get_ds({"ovfcontent": xml}) dsrc.get_data() # 'data_dir' is '/var/lib/waagent' (walinux-agent's state dir) # we expect that the ovf-env.xml file is copied there. - ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml') + ovf_env_path = os.path.join(self.waagent_d, "ovf-env.xml") self.assertTrue(os.path.exists(ovf_env_path)) self.xml_equals(xml, load_file(ovf_env_path)) def test_ovf_can_include_unicode(self): xml = construct_valid_ovf_env(data={}) - xml = '\ufeff{0}'.format(xml) - dsrc = self._get_ds({'ovfcontent': xml}) + xml = "\ufeff{0}".format(xml) + dsrc = self._get_ds({"ovfcontent": xml}) dsrc.get_data() - def test_dsaz_report_ready_returns_true_when_report_succeeds( - self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + def test_dsaz_report_ready_returns_true_when_report_succeeds(self): + dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()}) self.assertTrue(dsrc._report_ready(lease=mock.MagicMock())) - def test_dsaz_report_ready_returns_false_and_does_not_propagate_exc( - self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + def test_dsaz_report_ready_returns_false_and_does_not_propagate_exc(self): + dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()}) self.m_get_metadata_from_fabric.side_effect = Exception self.assertFalse(dsrc._report_ready(lease=mock.MagicMock())) def test_dsaz_report_failure_returns_true_when_report_succeeds(self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()}) - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: + with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata: # mock crawl metadata failure to cause report failure m_crawl_metadata.side_effect = Exception self.assertTrue(dsrc._report_failure()) - self.assertEqual( - 1, - self.m_report_failure_to_fabric.call_count) + self.assertEqual(1, self.m_report_failure_to_fabric.call_count) def test_dsaz_report_failure_returns_false_and_does_not_propagate_exc( - self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ - mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \ - as m_ephemeral_dhcp_ctx, \ - mock.patch.object(dsrc.distro.networking, 'is_up') \ - as m_dsrc_distro_networking_is_up: + self, + ): + dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()}) + + with mock.patch.object( + dsrc, "crawl_metadata" + ) as m_crawl_metadata, mock.patch.object( + dsrc, "_ephemeral_dhcp_ctx" + ) as m_ephemeral_dhcp_ctx, mock.patch.object( + dsrc.distro.networking, "is_up" + ) as m_dsrc_distro_networking_is_up: # mock crawl metadata failure to cause report failure m_crawl_metadata.side_effect = Exception # setup mocks to allow using cached ephemeral dhcp lease m_dsrc_distro_networking_is_up.return_value = True - test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245' - test_lease = {'unknown-245': 
test_lease_dhcp_option_245} + test_lease_dhcp_option_245 = "test_lease_dhcp_option_245" + test_lease = {"unknown-245": test_lease_dhcp_option_245} m_ephemeral_dhcp_ctx.lease = test_lease # We expect 3 calls to report_failure_to_fabric, @@ -1490,91 +1722,97 @@ scbus-1 on xpt0 bus 0 # 3. Using fallback lease to report failure to Azure self.m_report_failure_to_fabric.side_effect = Exception self.assertFalse(dsrc._report_failure()) - self.assertEqual( - 3, - self.m_report_failure_to_fabric.call_count) + self.assertEqual(3, self.m_report_failure_to_fabric.call_count) def test_dsaz_report_failure_description_msg(self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()}) - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: + with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata: # mock crawl metadata failure to cause report failure m_crawl_metadata.side_effect = Exception - test_msg = 'Test report failure description message' + test_msg = "Test report failure description message" self.assertTrue(dsrc._report_failure(description=test_msg)) self.m_report_failure_to_fabric.assert_called_once_with( - dhcp_opts=mock.ANY, description=test_msg) + dhcp_opts=mock.ANY, description=test_msg + ) def test_dsaz_report_failure_no_description_msg(self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()}) - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: + with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata: m_crawl_metadata.side_effect = Exception self.assertTrue(dsrc._report_failure()) # no description msg self.m_report_failure_to_fabric.assert_called_once_with( - dhcp_opts=mock.ANY, description=None) + dhcp_opts=mock.ANY, description=None + ) def test_dsaz_report_failure_uses_cached_ephemeral_dhcp_ctx_lease(self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ - mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \ - as m_ephemeral_dhcp_ctx, \ - mock.patch.object(dsrc.distro.networking, 'is_up') \ - as m_dsrc_distro_networking_is_up: + dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()}) + + with mock.patch.object( + dsrc, "crawl_metadata" + ) as m_crawl_metadata, mock.patch.object( + dsrc, "_ephemeral_dhcp_ctx" + ) as m_ephemeral_dhcp_ctx, mock.patch.object( + dsrc.distro.networking, "is_up" + ) as m_dsrc_distro_networking_is_up: # mock crawl metadata failure to cause report failure m_crawl_metadata.side_effect = Exception # setup mocks to allow using cached ephemeral dhcp lease m_dsrc_distro_networking_is_up.return_value = True - test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245' - test_lease = {'unknown-245': test_lease_dhcp_option_245} + test_lease_dhcp_option_245 = "test_lease_dhcp_option_245" + test_lease = {"unknown-245": test_lease_dhcp_option_245} m_ephemeral_dhcp_ctx.lease = test_lease self.assertTrue(dsrc._report_failure()) # ensure called with cached ephemeral dhcp lease option 245 self.m_report_failure_to_fabric.assert_called_once_with( - description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245) + description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245 + ) # ensure cached ephemeral is cleaned - self.assertEqual( - 1, - m_ephemeral_dhcp_ctx.clean_network.call_count) + self.assertEqual(1, m_ephemeral_dhcp_ctx.clean_network.call_count) def 
test_dsaz_report_failure_no_net_uses_new_ephemeral_dhcp_lease(self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()}) - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ - mock.patch.object(dsrc.distro.networking, 'is_up') \ - as m_dsrc_distro_networking_is_up: + with mock.patch.object( + dsrc, "crawl_metadata" + ) as m_crawl_metadata, mock.patch.object( + dsrc.distro.networking, "is_up" + ) as m_dsrc_distro_networking_is_up: # mock crawl metadata failure to cause report failure m_crawl_metadata.side_effect = Exception # net is not up and cannot use cached ephemeral dhcp m_dsrc_distro_networking_is_up.return_value = False # setup ephemeral dhcp lease discovery mock - test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245' - test_lease = {'unknown-245': test_lease_dhcp_option_245} - self.m_ephemeral_dhcpv4_with_reporting.return_value \ - .__enter__.return_value = test_lease + test_lease_dhcp_option_245 = "test_lease_dhcp_option_245" + test_lease = {"unknown-245": test_lease_dhcp_option_245} + self.m_ephemeral_dhcpv4_with_reporting.return_value.__enter__.return_value = ( # noqa: E501 + test_lease + ) self.assertTrue(dsrc._report_failure()) # ensure called with the newly discovered # ephemeral dhcp lease option 245 self.m_report_failure_to_fabric.assert_called_once_with( - description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245) + description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245 + ) - def test_dsaz_report_failure_no_net_and_no_dhcp_uses_fallback_lease( - self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + def test_dsaz_report_failure_no_net_and_no_dhcp_uses_fallback_lease(self): + dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()}) - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ - mock.patch.object(dsrc.distro.networking, 'is_up') \ - as m_dsrc_distro_networking_is_up: + with mock.patch.object( + dsrc, "crawl_metadata" + ) as m_crawl_metadata, mock.patch.object( + dsrc.distro.networking, "is_up" + ) as m_dsrc_distro_networking_is_up: # mock crawl metadata failure to cause report failure m_crawl_metadata.side_effect = Exception @@ -1582,29 +1820,31 @@ scbus-1 on xpt0 bus 0 m_dsrc_distro_networking_is_up.return_value = False # ephemeral dhcp discovery failure, # so cannot use a new ephemeral dhcp - self.m_ephemeral_dhcpv4_with_reporting.return_value \ - .__enter__.side_effect = Exception + self.m_ephemeral_dhcpv4_with_reporting.return_value.__enter__.side_effect = ( # noqa: E501 + Exception + ) self.assertTrue(dsrc._report_failure()) # ensure called with fallback lease self.m_report_failure_to_fabric.assert_called_once_with( description=mock.ANY, - fallback_lease_file=dsrc.dhclient_lease_file) + fallback_lease_file=dsrc.dhclient_lease_file, + ) def test_exception_fetching_fabric_data_doesnt_propagate(self): """Errors communicating with fabric should warn, but return True.""" - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()}) self.m_get_metadata_from_fabric.side_effect = Exception ret = self._get_and_setup(dsrc) self.assertTrue(ret) def test_fabric_data_included_in_metadata(self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - self.m_get_metadata_from_fabric.return_value = {'test': 'value'} + dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()}) + self.m_get_metadata_from_fabric.return_value = {"test": "value"} ret = 
self._get_and_setup(dsrc) self.assertTrue(ret) - self.assertEqual('value', dsrc.metadata['test']) + self.assertEqual("value", dsrc.metadata["test"]) def test_instance_id_case_insensitive(self): """Return the previous iid when current is a case-insensitive match.""" @@ -1612,152 +1852,180 @@ scbus-1 on xpt0 bus 0 upper_iid = EXAMPLE_UUID.upper() # lowercase current UUID ds = self._get_ds( - {'ovfcontent': construct_valid_ovf_env()}, instance_id=lower_iid + {"ovfcontent": construct_valid_ovf_env()}, instance_id=lower_iid ) # UPPERCASE previous write_file( - os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), - upper_iid) + os.path.join(self.paths.cloud_dir, "data", "instance-id"), + upper_iid, + ) ds.get_data() - self.assertEqual(upper_iid, ds.metadata['instance-id']) + self.assertEqual(upper_iid, ds.metadata["instance-id"]) # UPPERCASE current UUID ds = self._get_ds( - {'ovfcontent': construct_valid_ovf_env()}, instance_id=upper_iid + {"ovfcontent": construct_valid_ovf_env()}, instance_id=upper_iid ) # lowercase previous write_file( - os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), - lower_iid) + os.path.join(self.paths.cloud_dir, "data", "instance-id"), + lower_iid, + ) ds.get_data() - self.assertEqual(lower_iid, ds.metadata['instance-id']) + self.assertEqual(lower_iid, ds.metadata["instance-id"]) def test_instance_id_endianness(self): """Return the previous iid when dmi uuid is the byteswapped iid.""" - ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + ds = self._get_ds({"ovfcontent": construct_valid_ovf_env()}) # byte-swapped previous write_file( - os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), - '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') + os.path.join(self.paths.cloud_dir, "data", "instance-id"), + "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8", + ) ds.get_data() self.assertEqual( - '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8', ds.metadata['instance-id']) + "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8", ds.metadata["instance-id"] + ) # not byte-swapped previous write_file( - os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), - '644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') + os.path.join(self.paths.cloud_dir, "data", "instance-id"), + "644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8", + ) ds.get_data() - self.assertEqual(self.instance_id, ds.metadata['instance-id']) + self.assertEqual(self.instance_id, ds.metadata["instance-id"]) def test_instance_id_from_dmidecode_used(self): - ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + ds = self._get_ds({"ovfcontent": construct_valid_ovf_env()}) ds.get_data() - self.assertEqual(self.instance_id, ds.metadata['instance-id']) + self.assertEqual(self.instance_id, ds.metadata["instance-id"]) def test_instance_id_from_dmidecode_used_for_builtin(self): - ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + ds = self._get_ds({"ovfcontent": construct_valid_ovf_env()}) ds.get_data() - self.assertEqual(self.instance_id, ds.metadata['instance-id']) + self.assertEqual(self.instance_id, ds.metadata["instance-id"]) - @mock.patch(MOCKPATH + 'util.is_FreeBSD') - @mock.patch(MOCKPATH + '_check_freebsd_cdrom') - def test_list_possible_azure_ds(self, m_check_fbsd_cdrom, - m_is_FreeBSD): + @mock.patch(MOCKPATH + "util.is_FreeBSD") + @mock.patch(MOCKPATH + "_check_freebsd_cdrom") + def test_list_possible_azure_ds(self, m_check_fbsd_cdrom, m_is_FreeBSD): """On FreeBSD, possible devs should show /dev/cd0.""" m_is_FreeBSD.return_value = True m_check_fbsd_cdrom.return_value = True possible_ds = [] - for src in 
dsaz.list_possible_azure_ds( - "seed_dir", "cache_dir"): + for src in dsaz.list_possible_azure_ds("seed_dir", "cache_dir"): possible_ds.append(src) - self.assertEqual(possible_ds, ["seed_dir", - dsaz.DEFAULT_PROVISIONING_ISO_DEV, - "/dev/cd0", - "cache_dir"]) self.assertEqual( - [mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list) + possible_ds, + [ + "seed_dir", + dsaz.DEFAULT_PROVISIONING_ISO_DEV, + "/dev/cd0", + "cache_dir", + ], + ) + self.assertEqual( + [mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list + ) - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - @mock.patch('cloudinit.net.generate_fallback_config') + @mock.patch( + "cloudinit.sources.DataSourceAzure.device_driver", return_value=None + ) + @mock.patch("cloudinit.net.generate_fallback_config") def test_imds_network_config(self, mock_fallback, m_driver): """Network config is generated from IMDS network data when present.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} + sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}} + odata = {"HostName": "myhost", "UserName": "myuser"} + data = { + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": sys_cfg, + } dsrc = self._get_ds(data) ret = dsrc.get_data() self.assertTrue(ret) expected_cfg = { - 'ethernets': { - 'eth0': {'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': False, - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'set-name': 'eth0'}}, - 'version': 2} + "ethernets": { + "eth0": { + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 100}, + "dhcp6": False, + "match": {"macaddress": "00:0d:3a:04:75:98"}, + "set-name": "eth0", + } + }, + "version": 2, + } self.assertEqual(expected_cfg, dsrc.network_config) mock_fallback.assert_not_called() - @mock.patch('cloudinit.net.get_interface_mac') - @mock.patch('cloudinit.net.get_devicelist') - @mock.patch('cloudinit.net.device_driver') - @mock.patch('cloudinit.net.generate_fallback_config') + @mock.patch("cloudinit.net.get_interface_mac") + @mock.patch("cloudinit.net.get_devicelist") + @mock.patch("cloudinit.net.device_driver") + @mock.patch("cloudinit.net.generate_fallback_config") def test_imds_network_ignored_when_apply_network_config_false( - self, mock_fallback, mock_dd, mock_devlist, mock_get_mac): + self, mock_fallback, mock_dd, mock_devlist, mock_get_mac + ): """When apply_network_config is False, use fallback instead of IMDS.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} + sys_cfg = {"datasource": {"Azure": {"apply_network_config": False}}} + odata = {"HostName": "myhost", "UserName": "myuser"} + data = { + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": sys_cfg, + } fallback_config = { - 'version': 1, - 'config': [{ - 'type': 'physical', 'name': 'eth0', - 'mac_address': '00:11:22:33:44:55', - 'params': {'driver': 'hv_netsvc'}, - 'subnets': [{'type': 'dhcp'}], - }] + "version": 1, + "config": [ + { + "type": "physical", + "name": "eth0", + "mac_address": "00:11:22:33:44:55", + "params": {"driver": "hv_netsvc"}, + "subnets": [{"type": "dhcp"}], + } + ], } mock_fallback.return_value = fallback_config - mock_devlist.return_value = ['eth0'] - mock_dd.return_value = ['hv_netsvc'] - 
mock_get_mac.return_value = '00:11:22:33:44:55' + mock_devlist.return_value = ["eth0"] + mock_dd.return_value = ["hv_netsvc"] + mock_get_mac.return_value = "00:11:22:33:44:55" dsrc = self._get_ds(data) self.assertTrue(dsrc.get_data()) self.assertEqual(dsrc.network_config, fallback_config) - @mock.patch('cloudinit.net.get_interface_mac') - @mock.patch('cloudinit.net.get_devicelist') - @mock.patch('cloudinit.net.device_driver') - @mock.patch('cloudinit.net.generate_fallback_config', autospec=True) - def test_fallback_network_config(self, mock_fallback, mock_dd, - mock_devlist, mock_get_mac): + @mock.patch("cloudinit.net.get_interface_mac") + @mock.patch("cloudinit.net.get_devicelist") + @mock.patch("cloudinit.net.device_driver") + @mock.patch("cloudinit.net.generate_fallback_config", autospec=True) + def test_fallback_network_config( + self, mock_fallback, mock_dd, mock_devlist, mock_get_mac + ): """On absent IMDS network data, generate network fallback config.""" - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} + odata = {"HostName": "myhost", "UserName": "myuser"} + data = { + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": {}, + } fallback_config = { - 'version': 1, - 'config': [{ - 'type': 'physical', 'name': 'eth0', - 'mac_address': '00:11:22:33:44:55', - 'params': {'driver': 'hv_netsvc'}, - 'subnets': [{'type': 'dhcp'}], - }] + "version": 1, + "config": [ + { + "type": "physical", + "name": "eth0", + "mac_address": "00:11:22:33:44:55", + "params": {"driver": "hv_netsvc"}, + "subnets": [{"type": "dhcp"}], + } + ], } mock_fallback.return_value = fallback_config - mock_devlist.return_value = ['eth0'] - mock_dd.return_value = ['hv_netsvc'] - mock_get_mac.return_value = '00:11:22:33:44:55' + mock_devlist.return_value = ["eth0"] + mock_dd.return_value = ["hv_netsvc"] + mock_get_mac.return_value = "00:11:22:33:44:55" dsrc = self._get_ds(data) # Represent empty response from network imds @@ -1768,37 +2036,41 @@ scbus-1 on xpt0 bus 0 netconfig = dsrc.network_config self.assertEqual(netconfig, fallback_config) mock_fallback.assert_called_with( - blacklist_drivers=['mlx4_core', 'mlx5_core'], - config_driver=True) + blacklist_drivers=["mlx4_core", "mlx5_core"], config_driver=True + ) - @mock.patch(MOCKPATH + 'net.get_interfaces', autospec=True) - def test_blacklist_through_distro( - self, m_net_get_interfaces): + @mock.patch(MOCKPATH + "net.get_interfaces", autospec=True) + def test_blacklist_through_distro(self, m_net_get_interfaces): """Verify Azure DS updates blacklist drivers in the distro's - networking object.""" - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} + networking object.""" + odata = {"HostName": "myhost", "UserName": "myuser"} + data = { + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": {}, + } - distro_cls = distros.fetch('ubuntu') - distro = distro_cls('ubuntu', {}, self.paths) + distro_cls = distros.fetch("ubuntu") + distro = distro_cls("ubuntu", {}, self.paths) dsrc = self._get_ds(data, distro=distro) dsrc.get_data() - self.assertEqual(distro.networking.blacklist_drivers, - dsaz.BLACKLIST_DRIVERS) + self.assertEqual( + distro.networking.blacklist_drivers, dsaz.BLACKLIST_DRIVERS + ) distro.networking.get_interfaces_by_mac() m_net_get_interfaces.assert_called_with( - blacklist_drivers=dsaz.BLACKLIST_DRIVERS) + blacklist_drivers=dsaz.BLACKLIST_DRIVERS + ) @mock.patch( - 
'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates') + "cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates" + ) def test_get_public_ssh_keys_with_imds(self, m_parse_certificates): - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} + sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}} + odata = {"HostName": "myhost", "UserName": "myuser"} data = { - 'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": sys_cfg, } dsrc = self._get_ds(data) dsrc.get_data() @@ -1808,32 +2080,32 @@ scbus-1 on xpt0 bus 0 self.assertEqual(m_parse_certificates.call_count, 0) def test_key_without_crlf_valid(self): - test_key = 'ssh-rsa somerandomkeystuff some comment' + test_key = "ssh-rsa somerandomkeystuff some comment" assert True is dsaz._key_is_openssh_formatted(test_key) def test_key_with_crlf_invalid(self): - test_key = 'ssh-rsa someran\r\ndomkeystuff some comment' + test_key = "ssh-rsa someran\r\ndomkeystuff some comment" assert False is dsaz._key_is_openssh_formatted(test_key) def test_key_endswith_crlf_valid(self): - test_key = 'ssh-rsa somerandomkeystuff some comment\r\n' + test_key = "ssh-rsa somerandomkeystuff some comment\r\n" assert True is dsaz._key_is_openssh_formatted(test_key) @mock.patch( - 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates') - @mock.patch(MOCKPATH + 'get_metadata_from_imds') + "cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates" + ) + @mock.patch(MOCKPATH + "get_metadata_from_imds") def test_get_public_ssh_keys_with_no_openssh_format( - self, - m_get_metadata_from_imds, - m_parse_certificates): + self, m_get_metadata_from_imds, m_parse_certificates + ): imds_data = copy.deepcopy(NETWORK_METADATA) - imds_data['compute']['publicKeys'][0]['keyData'] = 'no-openssh-format' + imds_data["compute"]["publicKeys"][0]["keyData"] = "no-openssh-format" m_get_metadata_from_imds.return_value = imds_data - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} + sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}} + odata = {"HostName": "myhost", "UserName": "myuser"} data = { - 'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": sys_cfg, } dsrc = self._get_ds(data) dsrc.get_data() @@ -1842,38 +2114,37 @@ scbus-1 on xpt0 bus 0 self.assertEqual(ssh_keys, []) self.assertEqual(m_parse_certificates.call_count, 0) - @mock.patch(MOCKPATH + 'get_metadata_from_imds') - def test_get_public_ssh_keys_without_imds( - self, - m_get_metadata_from_imds): + @mock.patch(MOCKPATH + "get_metadata_from_imds") + def test_get_public_ssh_keys_without_imds(self, m_get_metadata_from_imds): m_get_metadata_from_imds.return_value = dict() - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} + sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}} + odata = {"HostName": "myhost", "UserName": "myuser"} data = { - 'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": sys_cfg, } dsrc = self._get_ds(data) - dsaz.get_metadata_from_fabric.return_value = {'public-keys': ['key2']} + dsaz.get_metadata_from_fabric.return_value = {"public-keys": ["key2"]} 
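        # What this exercises, as a simplified sketch (illustrative names,
        # not the datasource's actual internals): with no usable key data
        # from IMDS, get_public_ssh_keys() falls back to the fabric metadata
        # gathered during get_data(), roughly:
        #
        #   keys = imds_public_keys or self.metadata.get("public-keys", [])
        #
        # hence the ["key2"] assertion below.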
dsrc.get_data() dsrc.setup(True) ssh_keys = dsrc.get_public_ssh_keys() - self.assertEqual(ssh_keys, ['key2']) + self.assertEqual(ssh_keys, ["key2"]) - @mock.patch(MOCKPATH + 'get_metadata_from_imds') + @mock.patch(MOCKPATH + "get_metadata_from_imds") def test_imds_api_version_wanted_nonexistent( - self, - m_get_metadata_from_imds): + self, m_get_metadata_from_imds + ): def get_metadata_from_imds_side_eff(*args, **kwargs): - if kwargs['api_version'] == dsaz.IMDS_VER_WANT: + if kwargs["api_version"] == dsaz.IMDS_VER_WANT: raise url_helper.UrlError("No IMDS version", code=400) return NETWORK_METADATA + m_get_metadata_from_imds.side_effect = get_metadata_from_imds_side_eff - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} + sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}} + odata = {"HostName": "myhost", "UserName": "myuser"} data = { - 'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": sys_cfg, } dsrc = self._get_ds(data) dsrc.get_data() @@ -1881,86 +2152,86 @@ scbus-1 on xpt0 bus 0 self.assertTrue(dsrc.failed_desired_api_version) @mock.patch( - MOCKPATH + 'get_metadata_from_imds', return_value=NETWORK_METADATA) + MOCKPATH + "get_metadata_from_imds", return_value=NETWORK_METADATA + ) def test_imds_api_version_wanted_exists(self, m_get_metadata_from_imds): - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} + sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}} + odata = {"HostName": "myhost", "UserName": "myuser"} data = { - 'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": sys_cfg, } dsrc = self._get_ds(data) dsrc.get_data() self.assertIsNotNone(dsrc.metadata) self.assertFalse(dsrc.failed_desired_api_version) - @mock.patch(MOCKPATH + 'get_metadata_from_imds') + @mock.patch(MOCKPATH + "get_metadata_from_imds") def test_hostname_from_imds(self, m_get_metadata_from_imds): - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} + sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}} + odata = {"HostName": "myhost", "UserName": "myuser"} data = { - 'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": sys_cfg, } imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA) imds_data_with_os_profile["compute"]["osProfile"] = dict( adminUsername="username1", computerName="hostname1", - disablePasswordAuthentication="true" + disablePasswordAuthentication="true", ) m_get_metadata_from_imds.return_value = imds_data_with_os_profile dsrc = self._get_ds(data) dsrc.get_data() self.assertEqual(dsrc.metadata["local-hostname"], "hostname1") - @mock.patch(MOCKPATH + 'get_metadata_from_imds') + @mock.patch(MOCKPATH + "get_metadata_from_imds") def test_username_from_imds(self, m_get_metadata_from_imds): - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} + sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}} + odata = {"HostName": "myhost", "UserName": "myuser"} data = { - 'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": 
sys_cfg, } imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA) imds_data_with_os_profile["compute"]["osProfile"] = dict( adminUsername="username1", computerName="hostname1", - disablePasswordAuthentication="true" + disablePasswordAuthentication="true", ) m_get_metadata_from_imds.return_value = imds_data_with_os_profile dsrc = self._get_ds(data) dsrc.get_data() self.assertEqual( - dsrc.cfg["system_info"]["default_user"]["name"], - "username1" + dsrc.cfg["system_info"]["default_user"]["name"], "username1" ) - @mock.patch(MOCKPATH + 'get_metadata_from_imds') + @mock.patch(MOCKPATH + "get_metadata_from_imds") def test_disable_password_from_imds(self, m_get_metadata_from_imds): - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} + sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}} + odata = {"HostName": "myhost", "UserName": "myuser"} data = { - 'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": sys_cfg, } imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA) imds_data_with_os_profile["compute"]["osProfile"] = dict( adminUsername="username1", computerName="hostname1", - disablePasswordAuthentication="true" + disablePasswordAuthentication="true", ) m_get_metadata_from_imds.return_value = imds_data_with_os_profile dsrc = self._get_ds(data) dsrc.get_data() self.assertTrue(dsrc.metadata["disable_password"]) - @mock.patch(MOCKPATH + 'get_metadata_from_imds') + @mock.patch(MOCKPATH + "get_metadata_from_imds") def test_userdata_from_imds(self, m_get_metadata_from_imds): - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} + sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}} + odata = {"HostName": "myhost", "UserName": "myuser"} data = { - 'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": sys_cfg, } userdata = "userdataImds" imds_data = copy.deepcopy(NETWORK_METADATA) @@ -1974,20 +2245,22 @@ scbus-1 on xpt0 bus 0 dsrc = self._get_ds(data) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEqual(dsrc.userdata_raw, userdata.encode('utf-8')) + self.assertEqual(dsrc.userdata_raw, userdata.encode("utf-8")) - @mock.patch(MOCKPATH + 'get_metadata_from_imds') + @mock.patch(MOCKPATH + "get_metadata_from_imds") def test_userdata_from_imds_with_customdata_from_OVF( - self, m_get_metadata_from_imds): + self, m_get_metadata_from_imds + ): userdataOVF = "userdataOVF" odata = { - 'HostName': "myhost", 'UserName': "myuser", - 'UserData': {'text': b64e(userdataOVF), 'encoding': 'base64'} + "HostName": "myhost", + "UserName": "myuser", + "UserData": {"text": b64e(userdataOVF), "encoding": "base64"}, } - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}} data = { - 'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg + "ovfcontent": construct_valid_ovf_env(data=odata), + "sys_cfg": sys_cfg, } userdataImds = "userdataImds" @@ -2002,7 +2275,7 @@ scbus-1 on xpt0 bus 0 dsrc = self._get_ds(data) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEqual(dsrc.userdata_raw, userdataOVF.encode('utf-8')) + self.assertEqual(dsrc.userdata_raw, userdataOVF.encode("utf-8")) class TestLoadAzureDsDir(CiTestCase): @@ -2017,39 +2290,40 @@ class 
TestLoadAzureDsDir(CiTestCase): with self.assertRaises(dsaz.NonAzureDataSource) as context_manager: dsaz.load_azure_ds_dir(self.source_dir) self.assertEqual( - 'No ovf-env file found', - str(context_manager.exception)) + "No ovf-env file found", str(context_manager.exception) + ) def test_wb_invalid_ovf_env_xml_calls_read_azure_ovf(self): """load_azure_ds_dir calls read_azure_ovf to parse the xml.""" - ovf_path = os.path.join(self.source_dir, 'ovf-env.xml') - with open(ovf_path, 'wb') as stream: - stream.write(b'invalid xml') + ovf_path = os.path.join(self.source_dir, "ovf-env.xml") + with open(ovf_path, "wb") as stream: + stream.write(b"invalid xml") with self.assertRaises(dsaz.BrokenAzureDataSource) as context_manager: dsaz.load_azure_ds_dir(self.source_dir) self.assertEqual( - 'Invalid ovf-env.xml: syntax error: line 1, column 0', - str(context_manager.exception)) + "Invalid ovf-env.xml: syntax error: line 1, column 0", + str(context_manager.exception), + ) class TestReadAzureOvf(CiTestCase): - def test_invalid_xml_raises_non_azure_ds(self): invalid_xml = "<foo>" + construct_valid_ovf_env(data={}) - self.assertRaises(dsaz.BrokenAzureDataSource, - dsaz.read_azure_ovf, invalid_xml) + self.assertRaises( + dsaz.BrokenAzureDataSource, dsaz.read_azure_ovf, invalid_xml + ) def test_load_with_pubkeys(self): - mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}] - pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] + mypklist = [{"fingerprint": "fp1", "path": "path1", "value": ""}] + pubkeys = [(x["fingerprint"], x["path"], x["value"]) for x in mypklist] content = construct_valid_ovf_env(pubkeys=pubkeys) (_md, _ud, cfg) = dsaz.read_azure_ovf(content) for mypk in mypklist: - self.assertIn(mypk, cfg['_pubkeys']) + self.assertIn(mypk, cfg["_pubkeys"]) class TestCanDevBeReformatted(CiTestCase): - warning_file = 'dataloss_warning_readme.txt' + warning_file = "dataloss_warning_readme.txt" def _domock(self, mockpath, sattr=None): patcher = mock.patch(mockpath) @@ -2060,42 +2334,42 @@ class TestCanDevBeReformatted(CiTestCase): bypath = {} for path, data in devs.items(): bypath[path] = data - if 'realpath' in data: - bypath[data['realpath']] = data - for ppath, pdata in data.get('partitions', {}).items(): + if "realpath" in data: + bypath[data["realpath"]] = data + for ppath, pdata in data.get("partitions", {}).items(): bypath[ppath] = pdata - if 'realpath' in data: - bypath[pdata['realpath']] = pdata + if "realpath" in data: + bypath[pdata["realpath"]] = pdata def realpath(d): - return bypath[d].get('realpath', d) + return bypath[d].get("realpath", d) def partitions_on_device(devpath): - parts = bypath.get(devpath, {}).get('partitions', {}) + parts = bypath.get(devpath, {}).get("partitions", {}) ret = [] for path, data in parts.items(): - ret.append((data.get('num'), realpath(path))) + ret.append((data.get("num"), realpath(path))) # return sorted by partition number return sorted(ret, key=lambda d: d[0]) def mount_cb(device, callback, mtype, update_env_for_mount): - self.assertEqual('ntfs', mtype) - self.assertEqual('C', update_env_for_mount.get('LANG')) + self.assertEqual("ntfs", mtype) + self.assertEqual("C", update_env_for_mount.get("LANG")) p = self.tmp_dir() - for f in bypath.get(device).get('files', []): + for f in bypath.get(device).get("files", []): write_file(os.path.join(p, f), content=f) return callback(p) def has_ntfs_fs(device): - return bypath.get(device, {}).get('fs') == 'ntfs' + return bypath.get(device, {}).get("fs") == "ntfs" p = MOCKPATH - 
self._domock(p + "_partitions_on_device", 'm_partitions_on_device') - self._domock(p + "_has_ntfs_filesystem", 'm_has_ntfs_filesystem') - self._domock(p + "util.mount_cb", 'm_mount_cb') - self._domock(p + "os.path.realpath", 'm_realpath') - self._domock(p + "os.path.exists", 'm_exists') - self._domock(p + "util.SeLinuxGuard", 'm_selguard') + self._domock(p + "_partitions_on_device", "m_partitions_on_device") + self._domock(p + "_has_ntfs_filesystem", "m_has_ntfs_filesystem") + self._domock(p + "util.mount_cb", "m_mount_cb") + self._domock(p + "os.path.realpath", "m_realpath") + self._domock(p + "os.path.exists", "m_exists") + self._domock(p + "util.SeLinuxGuard", "m_selguard") self.m_exists.side_effect = lambda p: p in bypath self.m_realpath.side_effect = realpath @@ -2107,330 +2381,433 @@ class TestCanDevBeReformatted(CiTestCase): def test_three_partitions_is_false(self): """A disk with 3 partitions can not be formatted.""" - self.patchup({ - '/dev/sda': { - 'partitions': { - '/dev/sda1': {'num': 1}, - '/dev/sda2': {'num': 2}, - '/dev/sda3': {'num': 3}, - }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda", - preserve_ntfs=False) + self.patchup( + { + "/dev/sda": { + "partitions": { + "/dev/sda1": {"num": 1}, + "/dev/sda2": {"num": 2}, + "/dev/sda3": {"num": 3}, + } + } + } + ) + value, msg = dsaz.can_dev_be_reformatted( + "/dev/sda", preserve_ntfs=False + ) self.assertFalse(value) self.assertIn("3 or more", msg.lower()) def test_no_partitions_is_false(self): """A disk with no partitions can not be formatted.""" - self.patchup({'/dev/sda': {}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda", - preserve_ntfs=False) + self.patchup({"/dev/sda": {}}) + value, msg = dsaz.can_dev_be_reformatted( + "/dev/sda", preserve_ntfs=False + ) self.assertFalse(value) self.assertIn("not partitioned", msg.lower()) def test_two_partitions_not_ntfs_false(self): """2 partitions and 2nd not ntfs can not be formatted.""" - self.patchup({ - '/dev/sda': { - 'partitions': { - '/dev/sda1': {'num': 1}, - '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []}, - }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda", - preserve_ntfs=False) + self.patchup( + { + "/dev/sda": { + "partitions": { + "/dev/sda1": {"num": 1}, + "/dev/sda2": {"num": 2, "fs": "ext4", "files": []}, + } + } + } + ) + value, msg = dsaz.can_dev_be_reformatted( + "/dev/sda", preserve_ntfs=False + ) self.assertFalse(value) self.assertIn("not ntfs", msg.lower()) def test_two_partitions_ntfs_populated_false(self): """2 partitions and populated ntfs fs on 2nd can not be formatted.""" - self.patchup({ - '/dev/sda': { - 'partitions': { - '/dev/sda1': {'num': 1}, - '/dev/sda2': {'num': 2, 'fs': 'ntfs', - 'files': ['secret.txt']}, - }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda", - preserve_ntfs=False) + self.patchup( + { + "/dev/sda": { + "partitions": { + "/dev/sda1": {"num": 1}, + "/dev/sda2": { + "num": 2, + "fs": "ntfs", + "files": ["secret.txt"], + }, + } + } + } + ) + value, msg = dsaz.can_dev_be_reformatted( + "/dev/sda", preserve_ntfs=False + ) self.assertFalse(value) self.assertIn("files on it", msg.lower()) def test_two_partitions_ntfs_empty_is_true(self): """2 partitions and empty ntfs fs on 2nd can be formatted.""" - self.patchup({ - '/dev/sda': { - 'partitions': { - '/dev/sda1': {'num': 1}, - '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': []}, - }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda", - preserve_ntfs=False) + self.patchup( + { + "/dev/sda": { + "partitions": { + "/dev/sda1": {"num": 1}, + 
"/dev/sda2": {"num": 2, "fs": "ntfs", "files": []}, + } + } + } + ) + value, msg = dsaz.can_dev_be_reformatted( + "/dev/sda", preserve_ntfs=False + ) self.assertTrue(value) self.assertIn("safe for", msg.lower()) def test_one_partition_not_ntfs_false(self): """1 partition witih fs other than ntfs can not be formatted.""" - self.patchup({ - '/dev/sda': { - 'partitions': { - '/dev/sda1': {'num': 1, 'fs': 'zfs'}, - }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda", - preserve_ntfs=False) + self.patchup( + { + "/dev/sda": { + "partitions": { + "/dev/sda1": {"num": 1, "fs": "zfs"}, + } + } + } + ) + value, msg = dsaz.can_dev_be_reformatted( + "/dev/sda", preserve_ntfs=False + ) self.assertFalse(value) self.assertIn("not ntfs", msg.lower()) def test_one_partition_ntfs_populated_false(self): """1 mountable ntfs partition with many files can not be formatted.""" - self.patchup({ - '/dev/sda': { - 'partitions': { - '/dev/sda1': {'num': 1, 'fs': 'ntfs', - 'files': ['file1.txt', 'file2.exe']}, - }}}) - with mock.patch.object(dsaz.LOG, 'warning') as warning: - value, msg = dsaz.can_dev_be_reformatted("/dev/sda", - preserve_ntfs=False) + self.patchup( + { + "/dev/sda": { + "partitions": { + "/dev/sda1": { + "num": 1, + "fs": "ntfs", + "files": ["file1.txt", "file2.exe"], + }, + } + } + } + ) + with mock.patch.object(dsaz.LOG, "warning") as warning: + value, msg = dsaz.can_dev_be_reformatted( + "/dev/sda", preserve_ntfs=False + ) wmsg = warning.call_args[0][0] - self.assertIn("looks like you're using NTFS on the ephemeral disk", - wmsg) + self.assertIn( + "looks like you're using NTFS on the ephemeral disk", wmsg + ) self.assertFalse(value) self.assertIn("files on it", msg.lower()) def test_one_partition_ntfs_empty_is_true(self): """1 mountable ntfs partition and no files can be formatted.""" - self.patchup({ - '/dev/sda': { - 'partitions': { - '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []} - }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda", - preserve_ntfs=False) + self.patchup( + { + "/dev/sda": { + "partitions": { + "/dev/sda1": {"num": 1, "fs": "ntfs", "files": []} + } + } + } + ) + value, msg = dsaz.can_dev_be_reformatted( + "/dev/sda", preserve_ntfs=False + ) self.assertTrue(value) self.assertIn("safe for", msg.lower()) def test_one_partition_ntfs_empty_with_dataloss_file_is_true(self): """1 mountable ntfs partition and only warn file can be formatted.""" - self.patchup({ - '/dev/sda': { - 'partitions': { - '/dev/sda1': {'num': 1, 'fs': 'ntfs', - 'files': ['dataloss_warning_readme.txt']} - }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda", - preserve_ntfs=False) + self.patchup( + { + "/dev/sda": { + "partitions": { + "/dev/sda1": { + "num": 1, + "fs": "ntfs", + "files": ["dataloss_warning_readme.txt"], + } + } + } + } + ) + value, msg = dsaz.can_dev_be_reformatted( + "/dev/sda", preserve_ntfs=False + ) self.assertTrue(value) self.assertIn("safe for", msg.lower()) def test_one_partition_through_realpath_is_true(self): """A symlink to a device with 1 ntfs partition can be formatted.""" - epath = '/dev/disk/cloud/azure_resource' - self.patchup({ - epath: { - 'realpath': '/dev/sdb', - 'partitions': { - epath + '-part1': { - 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], - 'realpath': '/dev/sdb1'} - }}}) - value, msg = dsaz.can_dev_be_reformatted(epath, - preserve_ntfs=False) + epath = "/dev/disk/cloud/azure_resource" + self.patchup( + { + epath: { + "realpath": "/dev/sdb", + "partitions": { + epath + + "-part1": { + "num": 1, + "fs": "ntfs", + "files": 
[self.warning_file], + "realpath": "/dev/sdb1", + } + }, + } + } + ) + value, msg = dsaz.can_dev_be_reformatted(epath, preserve_ntfs=False) self.assertTrue(value) self.assertIn("safe for", msg.lower()) def test_three_partition_through_realpath_is_false(self): """A symlink to a device with 3 partitions can not be formatted.""" - epath = '/dev/disk/cloud/azure_resource' - self.patchup({ - epath: { - 'realpath': '/dev/sdb', - 'partitions': { - epath + '-part1': { - 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], - 'realpath': '/dev/sdb1'}, - epath + '-part2': {'num': 2, 'fs': 'ext3', - 'realpath': '/dev/sdb2'}, - epath + '-part3': {'num': 3, 'fs': 'ext', - 'realpath': '/dev/sdb3'} - }}}) - value, msg = dsaz.can_dev_be_reformatted(epath, - preserve_ntfs=False) + epath = "/dev/disk/cloud/azure_resource" + self.patchup( + { + epath: { + "realpath": "/dev/sdb", + "partitions": { + epath + + "-part1": { + "num": 1, + "fs": "ntfs", + "files": [self.warning_file], + "realpath": "/dev/sdb1", + }, + epath + + "-part2": { + "num": 2, + "fs": "ext3", + "realpath": "/dev/sdb2", + }, + epath + + "-part3": { + "num": 3, + "fs": "ext", + "realpath": "/dev/sdb3", + }, + }, + } + } + ) + value, msg = dsaz.can_dev_be_reformatted(epath, preserve_ntfs=False) self.assertFalse(value) self.assertIn("3 or more", msg.lower()) def test_ntfs_mount_errors_true(self): """can_dev_be_reformatted does not fail if NTFS is unknown fstype.""" - self.patchup({ - '/dev/sda': { - 'partitions': { - '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []} - }}}) + self.patchup( + { + "/dev/sda": { + "partitions": { + "/dev/sda1": {"num": 1, "fs": "ntfs", "files": []} + } + } + } + ) error_msgs = [ "Stderr: mount: unknown filesystem type 'ntfs'", # RHEL - "Stderr: mount: /dev/sdb1: unknown filesystem type 'ntfs'" # SLES + "Stderr: mount: /dev/sdb1: unknown filesystem type 'ntfs'", # SLES ] for err_msg in error_msgs: self.m_mount_cb.side_effect = MountFailedError( - "Failed mounting %s to %s due to: \nUnexpected.\n%s" % - ('/dev/sda', '/fake-tmp/dir', err_msg)) + "Failed mounting %s to %s due to: \nUnexpected.\n%s" + % ("/dev/sda", "/fake-tmp/dir", err_msg) + ) - value, msg = dsaz.can_dev_be_reformatted('/dev/sda', - preserve_ntfs=False) + value, msg = dsaz.can_dev_be_reformatted( + "/dev/sda", preserve_ntfs=False + ) self.assertTrue(value) - self.assertIn('cannot mount NTFS, assuming', msg) + self.assertIn("cannot mount NTFS, assuming", msg) def test_never_destroy_ntfs_config_false(self): """Normally formattable situation with never_destroy_ntfs set.""" - self.patchup({ - '/dev/sda': { - 'partitions': { - '/dev/sda1': {'num': 1, 'fs': 'ntfs', - 'files': ['dataloss_warning_readme.txt']} - }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda", - preserve_ntfs=True) + self.patchup( + { + "/dev/sda": { + "partitions": { + "/dev/sda1": { + "num": 1, + "fs": "ntfs", + "files": ["dataloss_warning_readme.txt"], + } + } + } + } + ) + value, msg = dsaz.can_dev_be_reformatted( + "/dev/sda", preserve_ntfs=True + ) self.assertFalse(value) - self.assertIn("config says to never destroy NTFS " - "(datasource.Azure.never_destroy_ntfs)", msg) + self.assertIn( + "config says to never destroy NTFS " + "(datasource.Azure.never_destroy_ntfs)", + msg, + ) class TestClearCachedData(CiTestCase): - def test_clear_cached_attrs_clears_imds(self): """All class attributes are reset to defaults, including imds data.""" tmp = self.tmp_dir() - paths = helpers.Paths( - {'cloud_dir': tmp, 'run_dir': tmp}) + paths = helpers.Paths({"cloud_dir": tmp, "run_dir": 
tmp}) dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=paths) clean_values = [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds] - dsrc.metadata = 'md' - dsrc.userdata = 'ud' - dsrc._metadata_imds = 'imds' + dsrc.metadata = "md" + dsrc.userdata = "ud" + dsrc._metadata_imds = "imds" dsrc._dirty_cache = True dsrc.clear_cached_attrs() self.assertEqual( - [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds], - clean_values) + [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds], clean_values + ) class TestAzureNetExists(CiTestCase): - def test_azure_net_must_exist_for_legacy_objpkl(self): """DataSourceAzureNet must exist for old obj.pkl files - that reference it.""" + that reference it.""" self.assertTrue(hasattr(dsaz, "DataSourceAzureNet")) class TestPreprovisioningReadAzureOvfFlag(CiTestCase): - def test_read_azure_ovf_with_true_flag(self): """The read_azure_ovf method should set the PreprovisionedVM - cfg flag if the proper setting is present.""" + cfg flag if the proper setting is present.""" content = construct_valid_ovf_env( - platform_settings={"PreprovisionedVm": "True"}) + platform_settings={"PreprovisionedVm": "True"} + ) ret = dsaz.read_azure_ovf(content) cfg = ret[2] - self.assertTrue(cfg['PreprovisionedVm']) + self.assertTrue(cfg["PreprovisionedVm"]) def test_read_azure_ovf_with_false_flag(self): """The read_azure_ovf method should set the PreprovisionedVM - cfg flag to false if the proper setting is false.""" + cfg flag to false if the proper setting is false.""" content = construct_valid_ovf_env( - platform_settings={"PreprovisionedVm": "False"}) + platform_settings={"PreprovisionedVm": "False"} + ) ret = dsaz.read_azure_ovf(content) cfg = ret[2] - self.assertFalse(cfg['PreprovisionedVm']) + self.assertFalse(cfg["PreprovisionedVm"]) def test_read_azure_ovf_without_flag(self): """The read_azure_ovf method should not set the - PreprovisionedVM cfg flag.""" + PreprovisionedVM cfg flag.""" content = construct_valid_ovf_env() ret = dsaz.read_azure_ovf(content) cfg = ret[2] - self.assertFalse(cfg['PreprovisionedVm']) + self.assertFalse(cfg["PreprovisionedVm"]) self.assertEqual(None, cfg["PreprovisionedVMType"]) def test_read_azure_ovf_with_running_type(self): """The read_azure_ovf method should set PreprovisionedVMType - cfg flag to Running.""" + cfg flag to Running.""" content = construct_valid_ovf_env( - platform_settings={"PreprovisionedVMType": "Running", - "PreprovisionedVm": "True"}) + platform_settings={ + "PreprovisionedVMType": "Running", + "PreprovisionedVm": "True", + } + ) ret = dsaz.read_azure_ovf(content) cfg = ret[2] - self.assertTrue(cfg['PreprovisionedVm']) - self.assertEqual("Running", cfg['PreprovisionedVMType']) + self.assertTrue(cfg["PreprovisionedVm"]) + self.assertEqual("Running", cfg["PreprovisionedVMType"]) def test_read_azure_ovf_with_savable_type(self): """The read_azure_ovf method should set PreprovisionedVMType - cfg flag to Savable.""" + cfg flag to Savable.""" content = construct_valid_ovf_env( - platform_settings={"PreprovisionedVMType": "Savable", - "PreprovisionedVm": "True"}) + platform_settings={ + "PreprovisionedVMType": "Savable", + "PreprovisionedVm": "True", + } + ) ret = dsaz.read_azure_ovf(content) cfg = ret[2] - self.assertTrue(cfg['PreprovisionedVm']) - self.assertEqual("Savable", cfg['PreprovisionedVMType']) + self.assertTrue(cfg["PreprovisionedVm"]) + self.assertEqual("Savable", cfg["PreprovisionedVMType"]) -@mock.patch('os.path.isfile') +@mock.patch("os.path.isfile") class TestPreprovisioningShouldReprovision(CiTestCase): - def 
setUp(self): super(TestPreprovisioningShouldReprovision, self).setUp() tmp = self.tmp_dir() - self.waagent_d = self.tmp_path('/var/lib/waagent', tmp) - self.paths = helpers.Paths({'cloud_dir': tmp}) - dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + self.waagent_d = self.tmp_path("/var/lib/waagent", tmp) + self.paths = helpers.Paths({"cloud_dir": tmp}) + dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d - @mock.patch(MOCKPATH + 'util.write_file') + @mock.patch(MOCKPATH + "util.write_file") def test__should_reprovision_with_true_cfg(self, isfile, write_f): """The _should_reprovision method should return true with config - flag present.""" + flag present.""" isfile.return_value = False dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) - self.assertTrue(dsa._should_reprovision( - (None, None, {'PreprovisionedVm': True}, None))) + self.assertTrue( + dsa._should_reprovision( + (None, None, {"PreprovisionedVm": True}, None) + ) + ) def test__should_reprovision_with_file_existing(self, isfile): """The _should_reprovision method should return True if the sentinel - exists.""" + exists.""" isfile.return_value = True dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) - self.assertTrue(dsa._should_reprovision( - (None, None, {'preprovisionedvm': False}, None))) + self.assertTrue( + dsa._should_reprovision( + (None, None, {"preprovisionedvm": False}, None) + ) + ) def test__should_reprovision_returns_false(self, isfile): """The _should_reprovision method should return False - if config and sentinel are not present.""" + if config and sentinel are not present.""" isfile.return_value = False dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) self.assertFalse(dsa._should_reprovision((None, None, {}, None))) - @mock.patch(MOCKPATH + 'util.write_file', autospec=True) + @mock.patch(MOCKPATH + "util.write_file", autospec=True) def test__should_reprovision_uses_imds_md(self, write_file, isfile): """The _should_reprovision method should be able to retrieve the preprovisioning VM type from imds metadata""" isfile.return_value = False dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) - self.assertTrue(dsa._should_reprovision( - (None, None, {}, None), - {'extended': {'compute': {'ppsType': 'Running'}}})) - self.assertFalse(dsa._should_reprovision( - (None, None, {}, None), - {})) - self.assertFalse(dsa._should_reprovision( - (None, None, {}, None), - {'extended': {'compute': {"hasCustomData": False}}})) - - @mock.patch(MOCKPATH + 'DataSourceAzure._poll_imds') + self.assertTrue( + dsa._should_reprovision( + (None, None, {}, None), + {"extended": {"compute": {"ppsType": "Running"}}}, + ) + ) + self.assertFalse(dsa._should_reprovision((None, None, {}, None), {})) + self.assertFalse( + dsa._should_reprovision( + (None, None, {}, None), + {"extended": {"compute": {"hasCustomData": False}}}, + ) + ) + + @mock.patch(MOCKPATH + "DataSourceAzure._poll_imds") def test_reprovision_calls__poll_imds(self, _poll_imds, isfile): """_reprovision will poll IMDS.""" isfile.return_value = False hostname = "myhost" username = "myuser" - odata = {'HostName': hostname, 'UserName': username} + odata = {"HostName": hostname, "UserName": username} _poll_imds.return_value = construct_valid_ovf_env(data=odata) dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) dsa._reprovision() @@ -2438,18 +2815,19 @@ class TestPreprovisioningShouldReprovision(CiTestCase): class TestPreprovisioningHotAttachNics(CiTestCase): - def setUp(self):
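        # Background for these hot-attach tests, as a simplified sketch (the
        # control flow shown is an approximation, not the module's code): on
        # preprovisioned VMs the platform detaches the provisioning NIC and
        # later hot-attaches the final NIC(s), so _wait_for_all_nics_ready()
        # behaves roughly like:
        #
        #   if not os.path.isfile(dsaz.REPORTED_READY_MARKER_FILE):
        #       self._report_ready(lease=lease)  # tell the fabric we are up
        #       util.write_file(dsaz.REPORTED_READY_MARKER_FILE, ...)
        #   if not os.path.isfile(dsaz.REPROVISION_NIC_DETACHED_MARKER_FILE):
        #       self._wait_for_nic_detach(nl_sock)  # netlink detach event
        #   # then wait for attach events and probe each NIC via IMDS to
        #   # find the primary interface before provisioning continues.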
@@ -2438,18 +2815,19 @@ class TestPreprovisioningShouldReprovision(CiTestCase):


class TestPreprovisioningHotAttachNics(CiTestCase):
-
def setUp(self):
super(TestPreprovisioningHotAttachNics, self).setUp()
self.tmp = self.tmp_dir()
- self.waagent_d = self.tmp_path('/var/lib/waagent', self.tmp)
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
- dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
-
- @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_detach_event',
- autospec=True)
- @mock.patch(MOCKPATH + 'util.write_file', autospec=True)
+ self.waagent_d = self.tmp_path("/var/lib/waagent", self.tmp)
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
+
+ @mock.patch(
+ "cloudinit.sources.helpers.netlink.wait_for_nic_detach_event",
+ autospec=True,
+ )
+ @mock.patch(MOCKPATH + "util.write_file", autospec=True)
def test_nic_detach_writes_marker(self, m_writefile, m_detach):
"""When we detect that a nic gets detached, we write a marker for it"""
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
@@ -2458,16 +2836,17 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
m_detach.assert_called_with(nl_sock)
self.assertEqual(1, m_detach.call_count)
m_writefile.assert_called_with(
- dsaz.REPROVISION_NIC_DETACHED_MARKER_FILE, mock.ANY)
+ dsaz.REPROVISION_NIC_DETACHED_MARKER_FILE, mock.ANY
+ )

- @mock.patch(MOCKPATH + 'util.write_file', autospec=True)
- @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting')
- @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
- @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
+ @mock.patch(MOCKPATH + "util.write_file", autospec=True)
+ @mock.patch(MOCKPATH + "DataSourceAzure.fallback_interface")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4WithReporting")
+ @mock.patch(MOCKPATH + "DataSourceAzure._report_ready")
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
def test_detect_nic_attach_reports_ready_and_waits_for_detach(
- self, m_detach, m_report_ready, m_dhcp, m_fallback_if,
- m_writefile):
+ self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_writefile
+ ):
"""Report ready first and then wait for nic detach"""
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
dsa._wait_for_all_nics_ready()
@@ -2476,16 +2855,18 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
self.assertEqual(1, m_detach.call_count)
self.assertEqual(1, m_writefile.call_count)
self.assertEqual(1, m_dhcp.call_count)
- m_writefile.assert_called_with(dsaz.REPORTED_READY_MARKER_FILE,
- mock.ANY)
-
- @mock.patch('os.path.isfile')
- @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting')
- @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
- @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
+ m_writefile.assert_called_with(
+ dsaz.REPORTED_READY_MARKER_FILE, mock.ANY
+ )
+
+ @mock.patch("os.path.isfile")
+ @mock.patch(MOCKPATH + "DataSourceAzure.fallback_interface")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4WithReporting")
+ @mock.patch(MOCKPATH + "DataSourceAzure._report_ready")
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
def test_detect_nic_attach_skips_report_ready_when_marker_present(
- self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile):
+ self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile
+ ):
"""Skip reporting ready if we already have a marker file."""
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
@@ -2499,13 +2880,14 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
self.assertEqual(0, m_dhcp.call_count)
self.assertEqual(1, m_detach.call_count)

- @mock.patch('os.path.isfile')
- @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting')
- @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
- @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
+ @mock.patch("os.path.isfile")
+ @mock.patch(MOCKPATH + "DataSourceAzure.fallback_interface")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4WithReporting")
+ @mock.patch(MOCKPATH + "DataSourceAzure._report_ready")
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
def test_detect_nic_attach_skips_nic_detach_when_marker_present(
- self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile):
+ self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile
+ ):
"""Skip wait for nic detach if it already happened."""
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
@@ -2516,22 +2898,32 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
self.assertEqual(0, m_dhcp.call_count)
self.assertEqual(0, m_detach.call_count)

- @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up', autospec=True)
- @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event')
- @mock.patch('cloudinit.sources.net.find_fallback_nic')
- @mock.patch(MOCKPATH + 'get_metadata_from_imds')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
- @mock.patch('os.path.isfile')
+ @mock.patch(MOCKPATH + "DataSourceAzure.wait_for_link_up", autospec=True)
+ @mock.patch("cloudinit.sources.helpers.netlink.wait_for_nic_attach_event")
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
+ @mock.patch(MOCKPATH + "get_metadata_from_imds")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4")
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
+ @mock.patch("os.path.isfile")
def test_wait_for_nic_attach_if_no_fallback_interface(
- self, m_isfile, m_detach, m_dhcpv4, m_imds, m_fallback_if,
- m_attach, m_link_up):
+ self,
+ m_isfile,
+ m_detach,
+ m_dhcpv4,
+ m_imds,
+ m_fallback_if,
+ m_attach,
+ m_link_up,
+ ):
"""Wait for nic attach if we do not have a fallback interface"""
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
lease = {
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
m_isfile.return_value = True
m_attach.return_value = "eth0"
@@ -2550,22 +2942,32 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
self.assertEqual(1, m_link_up.call_count)
m_link_up.assert_called_with(mock.ANY, "eth0")

- @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up')
- @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event')
- @mock.patch('cloudinit.sources.net.find_fallback_nic')
- @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
- @mock.patch('os.path.isfile')
+ @mock.patch(MOCKPATH + "DataSourceAzure.wait_for_link_up")
+ @mock.patch("cloudinit.sources.helpers.netlink.wait_for_nic_attach_event")
+ @mock.patch("cloudinit.sources.net.find_fallback_nic")
+ @mock.patch(MOCKPATH + "DataSourceAzure.get_imds_data_with_api_fallback")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4")
+ @mock.patch(MOCKPATH + "DataSourceAzure._wait_for_nic_detach")
+ @mock.patch("os.path.isfile")
def test_wait_for_nic_attach_multinic_attach(
- self, m_isfile, m_detach, m_dhcpv4, m_imds, m_fallback_if,
- m_attach, m_link_up):
+ self,
+ m_isfile,
+ m_detach,
+ m_dhcpv4,
+ m_imds,
+ m_fallback_if,
+ m_attach,
+ m_link_up,
+ ):
"""Wait for nic attach if we do not have a fallback interface"""
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
lease = {
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
m_attach_call_count = 0

def nic_attach_ret(nl_sock, nics_found):
@@ -2580,15 +2982,15 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
# Simulate two NICs by adding the same one twice.
md = {
"interface": [
- IMDS_NETWORK_METADATA['interface'][0],
- IMDS_NETWORK_METADATA['interface'][0]
+ IMDS_NETWORK_METADATA["interface"][0],
+ IMDS_NETWORK_METADATA["interface"][0],
]
}

def network_metadata_ret(ifname, retries, type, exc_cb, infinite):
if ifname == "eth0":
return md
- raise requests.Timeout('Fake connection timeout')
+ raise requests.Timeout("Fake connection timeout")
m_isfile.return_value = True
m_attach.side_effect = nic_attach_ret
if _ < 5: - cause = requests.Timeout('Fake connection timeout') + cause = requests.Timeout("Fake connection timeout") else: - cause = requests.ConnectionError('Network Unreachable') + cause = requests.ConnectionError("Network Unreachable") error = url_helper.UrlError(cause=cause) eth1Retries.append(exc_cb("Connection timeout", error)) # Should stop retrying after 10 retries @@ -2679,31 +3085,31 @@ class TestPreprovisioningHotAttachNics(CiTestCase): self.assertTrue(eth1Retries[i]) self.assertFalse(eth1Retries[10]) - @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up') - def test_wait_for_link_up_returns_if_already_up( - self, m_is_link_up): + @mock.patch("cloudinit.distros.networking.LinuxNetworking.try_set_link_up") + def test_wait_for_link_up_returns_if_already_up(self, m_is_link_up): """Waiting for link to be up should return immediately if the link is - already up.""" + already up.""" - distro_cls = distros.fetch('ubuntu') - distro = distro_cls('ubuntu', {}, self.paths) + distro_cls = distros.fetch("ubuntu") + distro = distro_cls("ubuntu", {}, self.paths) dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths) m_is_link_up.return_value = True dsa.wait_for_link_up("eth0") self.assertEqual(1, m_is_link_up.call_count) - @mock.patch(MOCKPATH + 'net.is_up', autospec=True) - @mock.patch(MOCKPATH + 'util.write_file') - @mock.patch('cloudinit.net.read_sys_net') - @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up') + @mock.patch(MOCKPATH + "net.is_up", autospec=True) + @mock.patch(MOCKPATH + "util.write_file") + @mock.patch("cloudinit.net.read_sys_net") + @mock.patch("cloudinit.distros.networking.LinuxNetworking.try_set_link_up") def test_wait_for_link_up_checks_link_after_sleep( - self, m_try_set_link_up, m_read_sys_net, m_writefile, m_is_up): + self, m_try_set_link_up, m_read_sys_net, m_writefile, m_is_up + ): """Waiting for link to be up should return immediately if the link is - already up.""" + already up.""" - distro_cls = distros.fetch('ubuntu') - distro = distro_cls('ubuntu', {}, self.paths) + distro_cls = distros.fetch("ubuntu") + distro = distro_cls("ubuntu", {}, self.paths) dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths) m_try_set_link_up.return_value = False @@ -2718,21 +3124,22 @@ class TestPreprovisioningHotAttachNics(CiTestCase): m_is_up.side_effect = is_up_mock - with mock.patch('cloudinit.sources.DataSourceAzure.sleep'): + with mock.patch("cloudinit.sources.DataSourceAzure.sleep"): dsa.wait_for_link_up("eth0") self.assertEqual(2, m_try_set_link_up.call_count) self.assertEqual(2, m_is_up.call_count) - @mock.patch(MOCKPATH + 'util.write_file') - @mock.patch('cloudinit.net.read_sys_net') - @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up') + @mock.patch(MOCKPATH + "util.write_file") + @mock.patch("cloudinit.net.read_sys_net") + @mock.patch("cloudinit.distros.networking.LinuxNetworking.try_set_link_up") def test_wait_for_link_up_writes_to_device_file( - self, m_is_link_up, m_read_sys_net, m_writefile): + self, m_is_link_up, m_read_sys_net, m_writefile + ): """Waiting for link to be up should return immediately if the link is - already up.""" + already up.""" - distro_cls = distros.fetch('ubuntu') - distro = distro_cls('ubuntu', {}, self.paths) + distro_cls = distros.fetch("ubuntu") + distro = distro_cls("ubuntu", {}, self.paths) dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths) callcount = 0 @@ -2751,48 +3158,59 @@ class TestPreprovisioningHotAttachNics(CiTestCase): 
self.assertEqual(1, m_read_sys_net.call_count)
self.assertEqual(2, m_writefile.call_count)

- @mock.patch('cloudinit.sources.helpers.netlink.'
- 'create_bound_netlink_socket')
+ @mock.patch(
+ "cloudinit.sources.helpers.netlink.create_bound_netlink_socket"
+ )
def test_wait_for_all_nics_ready_raises_if_socket_fails(self, m_socket):
"""Waiting for all nics should raise exception if netlink socket
- creation fails."""
+ creation fails."""
m_socket.side_effect = netlink.NetlinkCreateSocketError
- distro_cls = distros.fetch('ubuntu')
- distro = distro_cls('ubuntu', {}, self.paths)
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", {}, self.paths)
dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
- self.assertRaises(netlink.NetlinkCreateSocketError,
- dsa._wait_for_all_nics_ready)
+ self.assertRaises(
+ netlink.NetlinkCreateSocketError, dsa._wait_for_all_nics_ready
+ )
# dsa._wait_for_all_nics_ready()


-@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
-@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
-@mock.patch('cloudinit.sources.helpers.netlink.'
- 'wait_for_media_disconnect_connect')
-@mock.patch('requests.Session.request')
-@mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
+@mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+@mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+@mock.patch(
+ "cloudinit.sources.helpers.netlink.wait_for_media_disconnect_connect"
+)
+@mock.patch("requests.Session.request")
+@mock.patch(MOCKPATH + "DataSourceAzure._report_ready")
class TestPreprovisioningPollIMDS(CiTestCase):
-
def setUp(self):
super(TestPreprovisioningPollIMDS, self).setUp()
self.tmp = self.tmp_dir()
- self.waagent_d = self.tmp_path('/var/lib/waagent', self.tmp)
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
- dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-
- @mock.patch('time.sleep', mock.MagicMock())
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- def test_poll_imds_re_dhcp_on_timeout(self, m_dhcpv4, m_report_ready,
- m_request, m_media_switch, m_dhcp,
- m_net):
+ self.waagent_d = self.tmp_path("/var/lib/waagent", self.tmp)
+ self.paths = helpers.Paths({"cloud_dir": self.tmp})
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
+
+ @mock.patch("time.sleep", mock.MagicMock())
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4")
+ def test_poll_imds_re_dhcp_on_timeout(
+ self,
+ m_dhcpv4,
+ m_report_ready,
+ m_request,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ ):
"""The poll_imds will retry DHCP on IMDS timeout."""
- report_file = self.tmp_path('report_marker', self.tmp)
- lease = {
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}
+ report_file = self.tmp_path("report_marker", self.tmp)
+ lease = {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
m_dhcp.return_value = [lease]
m_media_switch.return_value = None
dhcp_ctx = mock.MagicMock(lease=lease)
@@ -2804,7 +3222,7 @@ class TestPreprovisioningPollIMDS(CiTestCase):
def fake_timeout_once(**kwargs):
self.tries += 1
if self.tries == 1:
- raise requests.Timeout('Fake connection timeout')
+ raise requests.Timeout("Fake connection timeout")
elif self.tries in (2, 3):
response = requests.Response()
response.status_code = 404 if self.tries == 2 else 410
@@ -2817,41 +3235,54 @@ class TestPreprovisioningPollIMDS(CiTestCase):
m_request.side_effect = fake_timeout_once
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
dsa._poll_imds()

self.assertEqual(m_report_ready.call_count, 1)
m_report_ready.assert_called_with(lease=lease)
- self.assertEqual(3, m_dhcpv4.call_count, 'Expected 3 DHCP calls')
- self.assertEqual(4, self.tries, 'Expected 4 total reads from IMDS')
+ self.assertEqual(3, m_dhcpv4.call_count, "Expected 3 DHCP calls")
+ self.assertEqual(4, self.tries, "Expected 4 total reads from IMDS")

- @mock.patch('os.path.isfile')
+ @mock.patch("os.path.isfile")
def test_poll_imds_skips_dhcp_if_ctx_present(
- self, m_isfile, report_ready_func, fake_resp, m_media_switch,
- m_dhcp, m_net):
+ self,
+ m_isfile,
+ report_ready_func,
+ fake_resp,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ ):
"""The poll_imds function should reuse the dhcp ctx if it is already
- present. This happens when we wait for nic to be hot-attached before
- polling for reprovisiondata. Note that if this ctx is set when
- _poll_imds is called, then it is not expected to be waiting for
- media_disconnect_connect either."""
- report_file = self.tmp_path('report_marker', self.tmp)
+ present. This happens when we wait for nic to be hot-attached before
+ polling for reprovisiondata. Note that if this ctx is set when
+ _poll_imds is called, then it is not expected to be waiting for
+ media_disconnect_connect either."""
+ report_file = self.tmp_path("report_marker", self.tmp)
m_isfile.return_value = True
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
dsa._ephemeral_dhcp_ctx = "Dummy dhcp ctx"
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
dsa._poll_imds()
self.assertEqual(0, m_dhcp.call_count)
self.assertEqual(0, m_media_switch.call_count)

- @mock.patch('os.path.isfile')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
+ @mock.patch("os.path.isfile")
+ @mock.patch(MOCKPATH + "EphemeralDHCPv4")
def test_poll_imds_does_dhcp_on_retries_if_ctx_present(
- self, m_ephemeral_dhcpv4, m_isfile, report_ready_func, m_request,
- m_media_switch, m_dhcp, m_net):
+ self,
+ m_ephemeral_dhcpv4,
+ m_isfile,
+ report_ready_func,
+ m_request,
+ m_media_switch,
+ m_dhcp,
+ m_net,
+ ):
"""The poll_imds function should reuse the dhcp ctx if it is already
- present. This happens when we wait for nic to be hot-attached before
- polling for reprovisiondata. Note that if this ctx is set when
- _poll_imds is called, then it is not expected to be waiting for
- media_disconnect_connect either."""
+ present. This happens when we wait for nic to be hot-attached before
+ polling for reprovisiondata. Note that if this ctx is set when
+ _poll_imds is called, then it is not expected to be waiting for
+ media_disconnect_connect either."""

tries = 0

@@ -2859,15 +3290,16 @@ class TestPreprovisioningPollIMDS(CiTestCase):
nonlocal tries
tries += 1
if tries == 1:
- raise requests.Timeout('Fake connection timeout')
+ raise requests.Timeout("Fake connection timeout")
return mock.MagicMock(status_code=200, text="good", content="good")

m_request.side_effect = fake_timeout_once
- report_file = self.tmp_path('report_marker', self.tmp)
+ report_file = self.tmp_path("report_marker", self.tmp)
m_isfile.return_value = True
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file),\
- mock.patch.object(dsa, '_ephemeral_dhcp_ctx') as m_dhcp_ctx:
+ with mock.patch(
+ MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file
+ ), mock.patch.object(dsa, "_ephemeral_dhcp_ctx") as m_dhcp_ctx:
m_dhcp_ctx.obtain_lease.return_value = "Dummy lease"
dsa._ephemeral_dhcp_ctx = m_dhcp_ctx
dsa._poll_imds()
@@ -2877,145 +3309,189 @@ class TestPreprovisioningPollIMDS(CiTestCase):
self.assertEqual(2, m_request.call_count)
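A detail worth noting in fake_timeout_once above: a plain function assigned to side_effect can keep state across calls (here via nonlocal), which is how these tests fail the first request and succeed afterwards. A generic, self-contained version of the pattern:

from unittest import mock

import requests


def make_flaky_responder(failures=1):
    """Return a side_effect that times out `failures` times, then succeeds."""
    calls = 0

    def responder(**kwargs):
        nonlocal calls
        calls += 1
        if calls <= failures:
            raise requests.Timeout("Fake connection timeout")
        return mock.MagicMock(status_code=200, text="good", content="good")

    return responder


m_request = mock.MagicMock(side_effect=make_flaky_responder())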
def test_does_not_poll_imds_report_ready_when_marker_file_exists(
- self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
+ self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net
+ ):
"""poll_imds should not call report ready when the reported ready
marker file exists"""
- report_file = self.tmp_path('report_marker', self.tmp)
- write_file(report_file, content='dont run report_ready :)')
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}]
+ report_file = self.tmp_path("report_marker", self.tmp)
+ write_file(report_file, content="dont run report_ready :)")
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ ]
m_media_switch.return_value = None
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
dsa._poll_imds()
self.assertEqual(m_report_ready.call_count, 0)

def test_poll_imds_report_ready_success_writes_marker_file(
- self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
+ self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net
+ ):
"""poll_imds should write the report_ready marker file if
reporting ready succeeds"""
- report_file = self.tmp_path('report_marker', self.tmp)
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}]
+ report_file = self.tmp_path("report_marker", self.tmp)
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ ]
m_media_switch.return_value = None
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
self.assertFalse(os.path.exists(report_file))
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
dsa._poll_imds()
self.assertEqual(m_report_ready.call_count, 1)
self.assertTrue(os.path.exists(report_file))

def test_poll_imds_report_ready_failure_raises_exc_and_doesnt_write_marker(
- self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
+ self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net
+ ):
"""poll_imds should write the report_ready marker file if
reporting ready succeeds"""
- report_file = self.tmp_path('report_marker', self.tmp)
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}]
+ report_file = self.tmp_path("report_marker", self.tmp)
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ ]
m_media_switch.return_value = None
m_report_ready.return_value = False
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
self.assertFalse(os.path.exists(report_file))
- with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
- self.assertRaises(
- InvalidMetaDataException,
- dsa._poll_imds)
+ with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ self.assertRaises(InvalidMetaDataException, dsa._poll_imds)
self.assertEqual(m_report_ready.call_count, 1)
self.assertFalse(os.path.exists(report_file))


-@mock.patch(MOCKPATH + 'DataSourceAzure._report_ready', mock.MagicMock())
-@mock.patch(MOCKPATH + 'subp.subp', mock.MagicMock())
-@mock.patch(MOCKPATH + 'util.write_file', mock.MagicMock())
-@mock.patch('cloudinit.sources.helpers.netlink.'
- 'wait_for_media_disconnect_connect')
-@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network', autospec=True)
-@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
-@mock.patch('requests.Session.request')
+@mock.patch(MOCKPATH + "DataSourceAzure._report_ready", mock.MagicMock())
+@mock.patch(MOCKPATH + "subp.subp", mock.MagicMock())
+@mock.patch(MOCKPATH + "util.write_file", mock.MagicMock())
+@mock.patch(
+ "cloudinit.sources.helpers.netlink.wait_for_media_disconnect_connect"
+)
+@mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network", autospec=True)
+@mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+@mock.patch("requests.Session.request")
class TestAzureDataSourcePreprovisioning(CiTestCase):
-
def setUp(self):
super(TestAzureDataSourcePreprovisioning, self).setUp()
tmp = self.tmp_dir()
- self.waagent_d = self.tmp_path('/var/lib/waagent', tmp)
- self.paths = helpers.Paths({'cloud_dir': tmp})
- dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
+ self.waagent_d = self.tmp_path("/var/lib/waagent", tmp)
+ self.paths = helpers.Paths({"cloud_dir": tmp})
+ dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d

- def test_poll_imds_returns_ovf_env(self, m_request,
- m_dhcp, m_net,
- m_media_switch):
+ def test_poll_imds_returns_ovf_env(
+ self, m_request, m_dhcp, m_net, m_media_switch
+ ):
"""The _poll_imds method should return the ovf_env.xml."""
m_media_switch.return_value = None
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0'}]
- url = 'http://{0}/metadata/reprovisiondata?api-version=2019-06-01'
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ }
+ ]
+ url = "http://{0}/metadata/reprovisiondata?api-version=2019-06-01"
host = "169.254.169.254"
full_url = url.format(host)
- m_request.return_value = mock.MagicMock(status_code=200, text="ovf",
- content="ovf")
+ m_request.return_value = mock.MagicMock(
+ status_code=200, text="ovf", content="ovf"
+ )
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
self.assertTrue(len(dsa._poll_imds()) > 0)
- self.assertEqual(m_request.call_args_list,
- [mock.call(allow_redirects=True,
- headers={'Metadata': 'true',
- 'User-Agent':
- 'Cloud-Init/%s' % vs()
- }, method='GET',
- timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
- url=full_url)])
+ self.assertEqual(
+ m_request.call_args_list,
+ [
+ mock.call(
+ allow_redirects=True,
+ headers={
+ "Metadata": "true",
+ "User-Agent": "Cloud-Init/%s" % vs(),
+ },
+ method="GET",
+ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
+ url=full_url,
+ )
+ ],
+ )
self.assertEqual(m_dhcp.call_count, 2)
m_net.assert_any_call(
- broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9',
- prefix_or_mask='255.255.255.0', router='192.168.2.1',
- static_routes=None)
+ broadcast="192.168.2.255",
+ interface="eth9",
+ ip="192.168.2.9",
+ prefix_or_mask="255.255.255.0",
+ router="192.168.2.1",
+ static_routes=None,
+ )
self.assertEqual(m_net.call_count, 2)

- def test__reprovision_calls__poll_imds(self, m_request,
- m_dhcp, m_net,
- m_media_switch):
+ def test__reprovision_calls__poll_imds(
+ self, m_request, m_dhcp, m_net, m_media_switch
+ ):
"""The _reprovision method should call poll IMDS."""
m_media_switch.return_value = None
- m_dhcp.return_value = [{
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}]
- url = 'http://{0}/metadata/reprovisiondata?api-version=2019-06-01'
+ m_dhcp.return_value = [
+ {
+ "interface": "eth9",
+ "fixed-address": "192.168.2.9",
+ "routers": "192.168.2.1",
+ "subnet-mask": "255.255.255.0",
+ "unknown-245": "624c3620",
+ }
+ ]
+ url = "http://{0}/metadata/reprovisiondata?api-version=2019-06-01"
host = "169.254.169.254"
full_url = url.format(host)
hostname = "myhost"
username = "myuser"
- odata = {'HostName': hostname, 'UserName': username}
+ odata = {"HostName": hostname, "UserName": username}
content = construct_valid_ovf_env(data=odata)
- m_request.return_value = mock.MagicMock(status_code=200, text=content,
- content=content)
+ m_request.return_value = mock.MagicMock(
+ status_code=200, text=content, content=content
+ )
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
md, _ud, cfg, _d = dsa._reprovision()
- self.assertEqual(md['local-hostname'], hostname)
- self.assertEqual(cfg['system_info']['default_user']['name'], username)
+ self.assertEqual(md["local-hostname"], hostname)
+ self.assertEqual(cfg["system_info"]["default_user"]["name"], username)
self.assertIn(
mock.call(
allow_redirects=True,
headers={
- 'Metadata': 'true',
- 'User-Agent': 'Cloud-Init/%s' % vs()
+ "Metadata": "true",
+ "User-Agent": "Cloud-Init/%s" % vs(),
},
- method='GET',
+ method="GET",
timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
- url=full_url
+ url=full_url,
),
- m_request.call_args_list)
+ m_request.call_args_list,
+ )
self.assertEqual(m_dhcp.call_count, 2)
m_net.assert_any_call(
- broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9',
- prefix_or_mask='255.255.255.0', router='192.168.2.1',
- static_routes=None)
+ broadcast="192.168.2.255",
+ interface="eth9",
+ ip="192.168.2.9",
+ prefix_or_mask="255.255.255.0",
+ router="192.168.2.1",
+ static_routes=None,
+ )
self.assertEqual(m_net.call_count, 2)

@@ -3029,36 +3505,42 @@ class TestRemoveUbuntuNetworkConfigScripts(CiTestCase):

def test_remove_network_scripts_removes_both_files_and_directories(self):
"""Any files or directories in paths are removed when present."""
- file1 = self.tmp_path('file1', dir=self.tmp)
- subdir = self.tmp_path('sub1', dir=self.tmp)
- subfile = self.tmp_path('leaf1', dir=subdir)
- write_file(file1, 'file1content')
- write_file(subfile, 'leafcontent')
+ file1 = self.tmp_path("file1", dir=self.tmp)
+ subdir = self.tmp_path("sub1", dir=self.tmp)
+ subfile = self.tmp_path("leaf1", dir=subdir)
+ write_file(file1, "file1content")
+ write_file(subfile, "leafcontent")
dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[subdir, file1])

for path in (file1, subdir, subfile):
- self.assertFalse(os.path.exists(path),
- 'Found unremoved: %s' % path)
+ self.assertFalse(
+ os.path.exists(path), "Found unremoved: %s" % path
+ )

expected_logs = [
- 'INFO: Removing Ubuntu extended network scripts because cloud-init'
- ' updates Azure network configuration on the following events:'
+ "INFO: Removing Ubuntu extended network scripts because cloud-init"
+ " updates Azure network configuration on the following events:"
" ['boot', 'boot-legacy']",
- 'Recursively deleting %s' % subdir,
- 'Attempting to remove %s' % file1]
+ "Recursively deleting %s" % subdir,
+ "Attempting to remove %s" % file1,
+ ]
for log in expected_logs:
self.assertIn(log, self.logs.getvalue())

def test_remove_network_scripts_only_attempts_removal_if_path_exists(self):
"""Any files or directories absent are skipped without error."""
- dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[
- self.tmp_path('nodirhere/', dir=self.tmp),
- self.tmp_path('notfilehere', dir=self.tmp)])
- self.assertNotIn('/not/a', self.logs.getvalue())  # No delete logs
-
- @mock.patch(MOCKPATH + 'os.path.exists')
- def test_remove_network_scripts_default_removes_stock_scripts(self,
- m_exists):
+ dsaz.maybe_remove_ubuntu_network_config_scripts(
+ paths=[
+ self.tmp_path("nodirhere/", dir=self.tmp),
+ self.tmp_path("notfilehere", dir=self.tmp),
+ ]
+ )
+ self.assertNotIn("/not/a", self.logs.getvalue())  # No delete logs
+
+ @mock.patch(MOCKPATH + "os.path.exists")
+ def test_remove_network_scripts_default_removes_stock_scripts(
+ self, m_exists
+ ):
"""Azure's stock ubuntu image scripts and artifacts are removed."""
# Report path absent on all to avoid delete operation
m_exists.return_value = False
@@ -3070,24 +3552,25 @@ class TestRemoveUbuntuNetworkConfigScripts(CiTestCase):

class TestWBIsPlatformViable(CiTestCase):
"""White box tests for _is_platform_viable."""
+
with_logs = True

- @mock.patch(MOCKPATH + 'dmi.read_dmi_data')
+ @mock.patch(MOCKPATH + "dmi.read_dmi_data")
def test_true_on_non_azure_chassis(self, m_read_dmi_data):
"""Return True if DMI chassis-asset-tag is AZURE_CHASSIS_ASSET_TAG."""
m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG
- self.assertTrue(dsaz._is_platform_viable('doesnotmatter'))
+ self.assertTrue(dsaz._is_platform_viable("doesnotmatter"))

- @mock.patch(MOCKPATH + 'os.path.exists')
- @mock.patch(MOCKPATH + 'dmi.read_dmi_data')
+ @mock.patch(MOCKPATH + "os.path.exists")
+ @mock.patch(MOCKPATH + "dmi.read_dmi_data")
def test_true_on_azure_ovf_env_in_seed_dir(self, m_read_dmi_data, m_exist):
"""Return True if ovf-env.xml exists in known seed dirs."""
# Non-matching Azure chassis-asset-tag
- m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + 'X'
+ m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + "X"

m_exist.return_value = True
- self.assertTrue(dsaz._is_platform_viable('/some/seed/dir'))
- m_exist.called_once_with('/other/seed/dir')
+ self.assertTrue(dsaz._is_platform_viable("/some/seed/dir"))
+ m_exist.called_once_with("/other/seed/dir")

def test_false_on_no_matching_azure_criteria(self):
"""Report non-azure on unmatched asset tag, ovf-env absent and no dev.
@@ -3096,17 +3579,25 @@ class TestWBIsPlatformViable(CiTestCase):
AZURE_CHASSIS_ASSET_TAG, no ovf-env.xml files exist in known seed dirs
and no devices have a label starting with prefix 'rd_rdfe_'.
"""
- self.assertFalse(wrap_and_call(
- MOCKPATH,
- {'os.path.exists': False,
- # Non-matching Azure chassis-asset-tag
- 'dmi.read_dmi_data': dsaz.AZURE_CHASSIS_ASSET_TAG + 'X',
- 'subp.which': None},
- dsaz._is_platform_viable, 'doesnotmatter'))
+ self.assertFalse(
+ wrap_and_call(
+ MOCKPATH,
+ {
+ "os.path.exists": False,
+ # Non-matching Azure chassis-asset-tag
+ "dmi.read_dmi_data": dsaz.AZURE_CHASSIS_ASSET_TAG + "X",
+ "subp.which": None,
+ },
+ dsaz._is_platform_viable,
+ "doesnotmatter",
+ )
+ )
self.assertIn(
"DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format(
- dsaz.AZURE_CHASSIS_ASSET_TAG + 'X'),
- self.logs.getvalue())
+ dsaz.AZURE_CHASSIS_ASSET_TAG + "X"
+ ),
+ self.logs.getvalue(),
+ )


class TestRandomSeed(CiTestCase):
@@ -3120,13 +3611,14 @@ class TestRandomSeed(CiTestCase):
path = resourceLocation("azure/non_unicode_random_string")
result = dsaz._get_random_seed(path)

- obj = {'seed': result}
+ obj = {"seed": result}
try:
serialized = json_dumps(obj)
deserialized = load_json(serialized)
except UnicodeDecodeError:
self.fail("Non-serializable random seed returned")

- self.assertEqual(deserialized['seed'], result)
+ self.assertEqual(deserialized["seed"], result)
+

# vi: ts=4 expandtab
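The first hunk of the next file is pure isort output: imports are regrouped into standard-library, third-party, and first-party blocks, alphabetized within each block. A generic illustration of that ordering; the cloudinit lines are taken from the hunk below, while the exact grouping rules depend on the project's isort configuration:

# 1. Standard library, alphabetized.
import copy
import os
import re

# 2. Third-party packages.
import requests

# 3. First-party / project imports.
from cloudinit.sources.helpers import azure as azure_helper
from cloudinit.util import load_file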
diff --git a/tests/unittests/sources/test_azure_helper.py b/tests/unittests/sources/test_azure_helper.py
index 24c582c2..6f7f2890 100644
--- a/tests/unittests/sources/test_azure_helper.py
+++ b/tests/unittests/sources/test_azure_helper.py
@@ -9,10 +9,9 @@ from xml.etree import ElementTree
from xml.sax.saxutils import escape, unescape

from cloudinit.sources.helpers import azure as azure_helper
-from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir
-
-from cloudinit.util import load_file
from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim
+from cloudinit.util import load_file
+from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir

GOAL_STATE_TEMPLATE = """\
<?xml version="1.0" encoding="utf-8"?>
@@ -52,7 +51,7 @@ GOAL_STATE_TEMPLATE = """\
</GoalState>
"""

-HEALTH_REPORT_XML_TEMPLATE = '''\
+HEALTH_REPORT_XML_TEMPLATE = """\
<?xml version="1.0" encoding="utf-8"?>
<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
@@ -70,14 +69,16 @@ HEALTH_REPORT_XML_TEMPLATE = '''\
</RoleInstanceList>
</Container>
</Health>
-'''
+"""

-HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = dedent('''\
+HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = dedent(
+ """\
<Details>
<SubStatus>{health_substatus}</SubStatus>
<Description>{health_description}</Description>
</Details>
- ''')
+ """
+)

HEALTH_REPORT_DESCRIPTION_TRIM_LEN = 512

@@ -87,24 +88,27 @@ class SentinelException(Exception):


class TestFindEndpoint(CiTestCase):
-
def setUp(self):
super(TestFindEndpoint, self).setUp()
patches = ExitStack()
self.addCleanup(patches.close)

self.load_file = patches.enter_context(
- mock.patch.object(azure_helper.util, 'load_file'))
+ mock.patch.object(azure_helper.util, "load_file")
+ )
self.dhcp_options = patches.enter_context(
- mock.patch.object(wa_shim, '_load_dhclient_json'))
+ mock.patch.object(wa_shim, "_load_dhclient_json")
+ )
self.networkd_leases = patches.enter_context(
- mock.patch.object(wa_shim, '_networkd_get_value_from_leases'))
+ mock.patch.object(wa_shim, "_networkd_get_value_from_leases")
+ )
self.networkd_leases.return_value = None

def test_missing_file(self):
- """wa_shim find_endpoint uses default endpoint if leasefile not found
+ """wa_shim find_endpoint uses default endpoint if
+ leasefile not found
"""
self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16")

@@ -112,80 +116,93 @@ class TestFindEndpoint(CiTestCase):
"""wa_shim find_endpoint uses default endpoint if leasefile is found
but does not contain DHCP Option 245 (whose value is the endpoint)
"""
- self.load_file.return_value = ''
- self.dhcp_options.return_value = {'eth0': {'key': 'value'}}
+ self.load_file.return_value = ""
+ self.dhcp_options.return_value = {"eth0": {"key": "value"}}
self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16")

@staticmethod
def _build_lease_content(encoded_address):
endpoint = azure_helper._get_dhcp_endpoint_option_name()
- return '\n'.join([
- 'lease {',
- ' interface "eth0";',
- ' option {0} {1};'.format(endpoint, encoded_address),
- '}'])
+ return "\n".join(
+ [
+ "lease {",
+ ' interface "eth0";',
+ " option {0} {1};".format(endpoint, encoded_address),
+ "}",
+ ]
+ )

def test_from_dhcp_client(self):
self.dhcp_options.return_value = {"eth0": {"unknown_245": "5:4:3:2"}}
- self.assertEqual('5.4.3.2', wa_shim.find_endpoint(None))
+ self.assertEqual("5.4.3.2", wa_shim.find_endpoint(None))

def test_latest_lease_used(self):
- encoded_addresses = ['5:4:3:2', '4:3:2:1']
- file_content = '\n'.join([self._build_lease_content(encoded_address)
- for encoded_address in encoded_addresses])
+ encoded_addresses = ["5:4:3:2", "4:3:2:1"]
+ file_content = "\n".join(
+ [
+ self._build_lease_content(encoded_address)
+ for encoded_address in encoded_addresses
+ ]
+ )
self.load_file.return_value = file_content
- self.assertEqual(encoded_addresses[-1].replace(':', '.'),
- wa_shim.find_endpoint("foobar"))
+ self.assertEqual(
+ encoded_addresses[-1].replace(":", "."),
+ wa_shim.find_endpoint("foobar"),
+ )


class TestExtractIpAddressFromLeaseValue(CiTestCase):
-
def test_hex_string(self):
- ip_address, encoded_address = '98.76.54.32', '62:4c:36:20'
+ ip_address, encoded_address = "98.76.54.32", "62:4c:36:20"
self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )

def test_hex_string_with_single_character_part(self):
- ip_address, encoded_address = '4.3.2.1', '4:3:2:1'
+ ip_address, encoded_address = "4.3.2.1", "4:3:2:1"
self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )

def test_packed_string(self):
- ip_address, encoded_address = '98.76.54.32', 'bL6 '
+ ip_address, encoded_address = "98.76.54.32", "bL6 "
self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )

def test_packed_string_with_escaped_quote(self):
- ip_address, encoded_address = '100.72.34.108', 'dH\\"l'
+ ip_address, encoded_address = "100.72.34.108", 'dH\\"l'
self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )

def test_packed_string_containing_a_colon(self):
- ip_address, encoded_address = '100.72.58.108', 'dH:l'
+ ip_address, encoded_address = "100.72.58.108", "dH:l"
self.assertEqual(
- ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+ ip_address, wa_shim.get_ip_from_lease_value(encoded_address)
+ )
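These tests pin down the two on-disk encodings of DHCP option 245, which carries the Azure wireserver endpoint: colon-separated hex bytes ('62:4c:36:20') or a raw packed 4-character string ('bL6 '), both decoding to 98.76.54.32. A rough sketch of that decoding, for illustration only and not wa_shim's actual code:

def decode_option_245(value):
    """Decode a DHCP option 245 lease value into a dotted-quad string."""
    parts = value.split(":")
    try:
        # Four hex components -> treat as hex bytes.
        if len(parts) != 4:
            raise ValueError(value)
        octets = [int(part, 16) for part in parts]
    except ValueError:
        # Anything else (including packed strings that happen to contain
        # a colon, like 'dH:l') is read as raw characters.
        octets = [ord(char) for char in value]
    return ".".join(str(octet) for octet in octets)


assert decode_option_245("62:4c:36:20") == "98.76.54.32"
assert decode_option_245("4:3:2:1") == "4.3.2.1"
assert decode_option_245("bL6 ") == "98.76.54.32"
assert decode_option_245("dH:l") == "100.72.58.108"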
encoded_address = "100.72.58.108", "dH:l" self.assertEqual( - ip_address, wa_shim.get_ip_from_lease_value(encoded_address)) + ip_address, wa_shim.get_ip_from_lease_value(encoded_address) + ) class TestGoalStateParsing(CiTestCase): default_parameters = { - 'incarnation': 1, - 'container_id': 'MyContainerId', - 'instance_id': 'MyInstanceId', - 'certificates_url': 'MyCertificatesUrl', + "incarnation": 1, + "container_id": "MyContainerId", + "instance_id": "MyInstanceId", + "certificates_url": "MyCertificatesUrl", } def _get_formatted_goal_state_xml_string(self, **kwargs): parameters = self.default_parameters.copy() parameters.update(kwargs) xml = GOAL_STATE_TEMPLATE.format(**parameters) - if parameters['certificates_url'] is None: + if parameters["certificates_url"] is None: new_xml_lines = [] for line in xml.splitlines(): - if 'Certificates' in line: + if "Certificates" in line: continue new_xml_lines.append(line) - xml = '\n'.join(new_xml_lines) + xml = "\n".join(new_xml_lines) return xml def _get_goal_state(self, m_azure_endpoint_client=None, **kwargs): @@ -195,17 +212,17 @@ class TestGoalStateParsing(CiTestCase): return azure_helper.GoalState(xml, m_azure_endpoint_client) def test_incarnation_parsed_correctly(self): - incarnation = '123' + incarnation = "123" goal_state = self._get_goal_state(incarnation=incarnation) self.assertEqual(incarnation, goal_state.incarnation) def test_container_id_parsed_correctly(self): - container_id = 'TestContainerId' + container_id = "TestContainerId" goal_state = self._get_goal_state(container_id=container_id) self.assertEqual(container_id, goal_state.container_id) def test_instance_id_parsed_correctly(self): - instance_id = 'TestInstanceId' + instance_id = "TestInstanceId" goal_state = self._get_goal_state(instance_id=instance_id) self.assertEqual(instance_id, goal_state.instance_id) @@ -214,67 +231,72 @@ class TestGoalStateParsing(CiTestCase): previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" current_iid = "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8" self.assertTrue( - azure_helper.is_byte_swapped(previous_iid, current_iid)) + azure_helper.is_byte_swapped(previous_iid, current_iid) + ) def test_instance_id_no_byte_swap_same_instance_id(self): previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" current_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" self.assertFalse( - azure_helper.is_byte_swapped(previous_iid, current_iid)) + azure_helper.is_byte_swapped(previous_iid, current_iid) + ) def test_instance_id_no_byte_swap_diff_instance_id(self): previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" current_iid = "G0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" self.assertFalse( - azure_helper.is_byte_swapped(previous_iid, current_iid)) + azure_helper.is_byte_swapped(previous_iid, current_iid) + ) def test_certificates_xml_parsed_and_fetched_correctly(self): m_azure_endpoint_client = mock.MagicMock() - certificates_url = 'TestCertificatesUrl' + certificates_url = "TestCertificatesUrl" goal_state = self._get_goal_state( m_azure_endpoint_client=m_azure_endpoint_client, - certificates_url=certificates_url) + certificates_url=certificates_url, + ) certificates_xml = goal_state.certificates_xml self.assertEqual(1, m_azure_endpoint_client.get.call_count) self.assertEqual( - certificates_url, - m_azure_endpoint_client.get.call_args[0][0]) + certificates_url, m_azure_endpoint_client.get.call_args[0][0] + ) self.assertTrue( - m_azure_endpoint_client.get.call_args[1].get( - 'secure', False)) + m_azure_endpoint_client.get.call_args[1].get("secure", False) + ) self.assertEqual( - 
m_azure_endpoint_client.get.return_value.contents, - certificates_xml) + m_azure_endpoint_client.get.return_value.contents, certificates_xml + ) def test_missing_certificates_skips_http_get(self): m_azure_endpoint_client = mock.MagicMock() goal_state = self._get_goal_state( m_azure_endpoint_client=m_azure_endpoint_client, - certificates_url=None) + certificates_url=None, + ) certificates_xml = goal_state.certificates_xml self.assertEqual(0, m_azure_endpoint_client.get.call_count) self.assertIsNone(certificates_xml) def test_invalid_goal_state_xml_raises_parse_error(self): - xml = 'random non-xml data' + xml = "random non-xml data" with self.assertRaises(ElementTree.ParseError): azure_helper.GoalState(xml, mock.MagicMock()) def test_missing_container_id_in_goal_state_xml_raises_exc(self): xml = self._get_formatted_goal_state_xml_string() - xml = re.sub('<ContainerId>.*</ContainerId>', '', xml) + xml = re.sub("<ContainerId>.*</ContainerId>", "", xml) with self.assertRaises(azure_helper.InvalidGoalStateXMLException): azure_helper.GoalState(xml, mock.MagicMock()) def test_missing_instance_id_in_goal_state_xml_raises_exc(self): xml = self._get_formatted_goal_state_xml_string() - xml = re.sub('<InstanceId>.*</InstanceId>', '', xml) + xml = re.sub("<InstanceId>.*</InstanceId>", "", xml) with self.assertRaises(azure_helper.InvalidGoalStateXMLException): azure_helper.GoalState(xml, mock.MagicMock()) def test_missing_incarnation_in_goal_state_xml_raises_exc(self): xml = self._get_formatted_goal_state_xml_string() - xml = re.sub('<Incarnation>.*</Incarnation>', '', xml) + xml = re.sub("<Incarnation>.*</Incarnation>", "", xml) with self.assertRaises(azure_helper.InvalidGoalStateXMLException): azure_helper.GoalState(xml, mock.MagicMock()) @@ -282,8 +304,8 @@ class TestGoalStateParsing(CiTestCase): class TestAzureEndpointHttpClient(CiTestCase): regular_headers = { - 'x-ms-agent-name': 'WALinuxAgent', - 'x-ms-version': '2012-11-30', + "x-ms-agent-name": "WALinuxAgent", + "x-ms-version": "2012-11-30", } def setUp(self): @@ -291,43 +313,48 @@ class TestAzureEndpointHttpClient(CiTestCase): patches = ExitStack() self.addCleanup(patches.close) self.m_http_with_retries = patches.enter_context( - mock.patch.object(azure_helper, 'http_with_retries')) + mock.patch.object(azure_helper, "http_with_retries") + ) def test_non_secure_get(self): client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) - url = 'MyTestUrl' + url = "MyTestUrl" response = client.get(url, secure=False) self.assertEqual(1, self.m_http_with_retries.call_count) self.assertEqual(self.m_http_with_retries.return_value, response) self.assertEqual( mock.call(url, headers=self.regular_headers), - self.m_http_with_retries.call_args) + self.m_http_with_retries.call_args, + ) def test_non_secure_get_raises_exception(self): client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) - url = 'MyTestUrl' + url = "MyTestUrl" self.m_http_with_retries.side_effect = SentinelException self.assertRaises(SentinelException, client.get, url, secure=False) self.assertEqual(1, self.m_http_with_retries.call_count) def test_secure_get(self): - url = 'MyTestUrl' + url = "MyTestUrl" m_certificate = mock.MagicMock() expected_headers = self.regular_headers.copy() - expected_headers.update({ - "x-ms-cipher-name": "DES_EDE3_CBC", - "x-ms-guest-agent-public-x509-cert": m_certificate, - }) + expected_headers.update( + { + "x-ms-cipher-name": "DES_EDE3_CBC", + "x-ms-guest-agent-public-x509-cert": m_certificate, + } + ) client = 
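The setUp just above repeats a pattern used throughout this file: an ExitStack gathers several mock.patch context managers so that a single addCleanup call unwinds them all. A self-contained sketch of the pattern:

import os.path
from contextlib import ExitStack
from unittest import TestCase, mock


class ExitStackPatchingExample(TestCase):
    def setUp(self):
        patches = ExitStack()
        # One cleanup call unwinds every patch entered on the stack.
        self.addCleanup(patches.close)
        self.m_isfile = patches.enter_context(
            mock.patch.object(os.path, "isfile")
        )

    def test_patched(self):
        self.m_isfile.return_value = True
        self.assertTrue(os.path.isfile("/nonexistent"))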
def test_non_secure_get(self):
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
- url = 'MyTestUrl'
+ url = "MyTestUrl"
response = client.get(url, secure=False)
self.assertEqual(1, self.m_http_with_retries.call_count)
self.assertEqual(self.m_http_with_retries.return_value, response)
self.assertEqual(
mock.call(url, headers=self.regular_headers),
- self.m_http_with_retries.call_args)
+ self.m_http_with_retries.call_args,
+ )

def test_non_secure_get_raises_exception(self):
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
- url = 'MyTestUrl'
+ url = "MyTestUrl"
self.m_http_with_retries.side_effect = SentinelException
self.assertRaises(SentinelException, client.get, url, secure=False)
self.assertEqual(1, self.m_http_with_retries.call_count)

def test_secure_get(self):
- url = 'MyTestUrl'
+ url = "MyTestUrl"
m_certificate = mock.MagicMock()
expected_headers = self.regular_headers.copy()
- expected_headers.update({
- "x-ms-cipher-name": "DES_EDE3_CBC",
- "x-ms-guest-agent-public-x509-cert": m_certificate,
- })
+ expected_headers.update(
+ {
+ "x-ms-cipher-name": "DES_EDE3_CBC",
+ "x-ms-guest-agent-public-x509-cert": m_certificate,
+ }
+ )
client = azure_helper.AzureEndpointHttpClient(m_certificate)
response = client.get(url, secure=True)
self.assertEqual(1, self.m_http_with_retries.call_count)
self.assertEqual(self.m_http_with_retries.return_value, response)
self.assertEqual(
mock.call(url, headers=expected_headers),
- self.m_http_with_retries.call_args)
+ self.m_http_with_retries.call_args,
+ )

def test_secure_get_raises_exception(self):
- url = 'MyTestUrl'
+ url = "MyTestUrl"
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
self.m_http_with_retries.side_effect = SentinelException
self.assertRaises(SentinelException, client.get, url, secure=True)
@@ -335,44 +362,50 @@ class TestAzureEndpointHttpClient(CiTestCase):

def test_post(self):
m_data = mock.MagicMock()
- url = 'MyTestUrl'
+ url = "MyTestUrl"
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
response = client.post(url, data=m_data)
self.assertEqual(1, self.m_http_with_retries.call_count)
self.assertEqual(self.m_http_with_retries.return_value, response)
self.assertEqual(
mock.call(url, data=m_data, headers=self.regular_headers),
- self.m_http_with_retries.call_args)
+ self.m_http_with_retries.call_args,
+ )

def test_post_raises_exception(self):
m_data = mock.MagicMock()
- url = 'MyTestUrl'
+ url = "MyTestUrl"
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
self.m_http_with_retries.side_effect = SentinelException
self.assertRaises(SentinelException, client.post, url, data=m_data)
self.assertEqual(1, self.m_http_with_retries.call_count)

def test_post_with_extra_headers(self):
- url = 'MyTestUrl'
+ url = "MyTestUrl"
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
- extra_headers = {'test': 'header'}
+ extra_headers = {"test": "header"}
client.post(url, extra_headers=extra_headers)
expected_headers = self.regular_headers.copy()
expected_headers.update(extra_headers)
self.assertEqual(1, self.m_http_with_retries.call_count)
self.assertEqual(
mock.call(url, data=mock.ANY, headers=expected_headers),
- self.m_http_with_retries.call_args)
+ self.m_http_with_retries.call_args,
+ )

def test_post_with_sleep_with_extra_headers_raises_exception(self):
m_data = mock.MagicMock()
- url = 'MyTestUrl'
- extra_headers = {'test': 'header'}
+ url = "MyTestUrl"
+ extra_headers = {"test": "header"}
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
self.m_http_with_retries.side_effect = SentinelException
self.assertRaises(
- SentinelException, client.post,
- url, data=m_data, extra_headers=extra_headers)
+ SentinelException,
+ client.post,
+ url,
+ data=m_data,
+ extra_headers=extra_headers,
+ )
self.assertEqual(1, self.m_http_with_retries.call_count)


@@ -392,128 +425,139 @@ class TestAzureHelperHttpWithRetries(CiTestCase):

self.m_readurl = patches.enter_context(
mock.patch.object(
- azure_helper.url_helper, 'readurl', mock.MagicMock()))
+ azure_helper.url_helper, "readurl", mock.MagicMock()
+ )
+ )
self.m_sleep = patches.enter_context(
- mock.patch.object(azure_helper.time, 'sleep', autospec=True))
+ mock.patch.object(azure_helper.time, "sleep", autospec=True)
+ )

def test_http_with_retries(self):
- self.m_readurl.return_value = 'TestResp'
+ self.m_readurl.return_value = "TestResp"
self.assertEqual(
- azure_helper.http_with_retries('testurl'),
- self.m_readurl.return_value)
+ azure_helper.http_with_retries("testurl"),
+ self.m_readurl.return_value,
+ )
self.assertEqual(self.m_readurl.call_count, 1)

- def test_http_with_retries_propagates_readurl_exc_and_logs_exc(
- self):
+ def test_http_with_retries_propagates_readurl_exc_and_logs_exc(self):
self.m_readurl.side_effect = SentinelException

self.assertRaises(
- SentinelException, azure_helper.http_with_retries, 'testurl')
+ SentinelException, azure_helper.http_with_retries, "testurl"
+ )
self.assertEqual(self.m_readurl.call_count, self.max_readurl_attempts)

self.assertIsNotNone(
re.search(
- r'Failed HTTP request with Azure endpoint \S* during '
- r'attempt \d+ with exception: \S*',
- self.logs.getvalue()))
+ r"Failed HTTP request with Azure endpoint \S* during "
+ r"attempt \d+ with exception: \S*",
+ self.logs.getvalue(),
+ )
+ )
self.assertIsNone(
re.search(
- r'Successful HTTP request with Azure endpoint \S* after '
- r'\d+ attempts',
- self.logs.getvalue()))
+ r"Successful HTTP request with Azure endpoint \S* after "
+ r"\d+ attempts",
+ self.logs.getvalue(),
+ )
+ )

def test_http_with_retries_delayed_success_due_to_temporary_readurl_exc(
- self):
- self.m_readurl.side_effect = \
- [SentinelException] * self.periodic_logging_attempts + \
- ['TestResp']
- self.m_readurl.return_value = 'TestResp'
-
- response = azure_helper.http_with_retries('testurl')
- self.assertEqual(
- response,
- self.m_readurl.return_value)
+ self,
+ ):
+ self.m_readurl.side_effect = [
+ SentinelException
+ ] * self.periodic_logging_attempts + ["TestResp"]
+ self.m_readurl.return_value = "TestResp"
+
+ response = azure_helper.http_with_retries("testurl")
+ self.assertEqual(response, self.m_readurl.return_value)
self.assertEqual(
- self.m_readurl.call_count,
- self.periodic_logging_attempts + 1)
+ self.m_readurl.call_count, self.periodic_logging_attempts + 1
+ )

# Ensure that cloud-init did sleep between each failed request
self.assertEqual(
- self.m_sleep.call_count,
- self.periodic_logging_attempts)
+ self.m_sleep.call_count, self.periodic_logging_attempts
+ )
self.m_sleep.assert_called_with(self.sleep_duration_between_retries)
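Taken together, these tests specify a retry loop: sleep between attempts, log failures only periodically, and log one success line once a request goes through. A hypothetical sketch of such a wrapper; the attempt counts, delays, and messages are stand-ins, not cloud-init's actual values or implementation:

import logging
import time

LOG = logging.getLogger(__name__)


def http_with_retries_sketch(fetch, max_attempts=240, delay=5, log_every=12):
    """Call fetch() until it succeeds, sleeping between failed attempts."""
    for attempt in range(1, max_attempts + 1):
        try:
            response = fetch()
            LOG.debug("Successful HTTP request after %d attempts", attempt)
            return response
        except Exception as exc:
            if attempt == max_attempts:
                raise
            # Log only every log_every-th failure to avoid flooding logs.
            if attempt % log_every == 0:
                LOG.debug(
                    "Failed HTTP request during attempt %d: %s", attempt, exc
                )
            time.sleep(delay)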
def test_http_with_retries_long_delay_logs_periodic_failure_msg(self):
- self.m_readurl.side_effect = \
- [SentinelException] * self.periodic_logging_attempts + \
- ['TestResp']
- self.m_readurl.return_value = 'TestResp'
+ self.m_readurl.side_effect = [
+ SentinelException
+ ] * self.periodic_logging_attempts + ["TestResp"]
+ self.m_readurl.return_value = "TestResp"

- azure_helper.http_with_retries('testurl')
+ azure_helper.http_with_retries("testurl")

self.assertEqual(
- self.m_readurl.call_count,
- self.periodic_logging_attempts + 1)
+ self.m_readurl.call_count, self.periodic_logging_attempts + 1
+ )

self.assertIsNotNone(
re.search(
- r'Failed HTTP request with Azure endpoint \S* during '
- r'attempt \d+ with exception: \S*',
- self.logs.getvalue()))
+ r"Failed HTTP request with Azure endpoint \S* during "
+ r"attempt \d+ with exception: \S*",
+ self.logs.getvalue(),
+ )
+ )
self.assertIsNotNone(
re.search(
- r'Successful HTTP request with Azure endpoint \S* after '
- r'\d+ attempts',
- self.logs.getvalue()))
+ r"Successful HTTP request with Azure endpoint \S* after "
+ r"\d+ attempts",
+ self.logs.getvalue(),
+ )
+ )

def test_http_with_retries_short_delay_does_not_log_periodic_failure_msg(
- self):
- self.m_readurl.side_effect = \
- [SentinelException] * \
- (self.periodic_logging_attempts - 1) + \
- ['TestResp']
- self.m_readurl.return_value = 'TestResp'
-
- azure_helper.http_with_retries('testurl')
+ self,
+ ):
+ self.m_readurl.side_effect = [SentinelException] * (
+ self.periodic_logging_attempts - 1
+ ) + ["TestResp"]
+ self.m_readurl.return_value = "TestResp"
+
+ azure_helper.http_with_retries("testurl")
self.assertEqual(
- self.m_readurl.call_count,
- self.periodic_logging_attempts)
+ self.m_readurl.call_count, self.periodic_logging_attempts
+ )

self.assertIsNone(
re.search(
- r'Failed HTTP request with Azure endpoint \S* during '
- r'attempt \d+ with exception: \S*',
- self.logs.getvalue()))
+ r"Failed HTTP request with Azure endpoint \S* during "
+ r"attempt \d+ with exception: \S*",
+ self.logs.getvalue(),
+ )
+ )
self.assertIsNotNone(
re.search(
- r'Successful HTTP request with Azure endpoint \S* after '
- r'\d+ attempts',
- self.logs.getvalue()))
+ r"Successful HTTP request with Azure endpoint \S* after "
+ r"\d+ attempts",
+ self.logs.getvalue(),
+ )
+ )

def test_http_with_retries_calls_url_helper_readurl_with_args_kwargs(self):
testurl = mock.MagicMock()
kwargs = {
- 'headers': mock.MagicMock(),
- 'data': mock.MagicMock(),
+ "headers": mock.MagicMock(),
+ "data": mock.MagicMock(),
# timeout kwarg should not be modified or deleted if present
- 'timeout': mock.MagicMock()
+ "timeout": mock.MagicMock(),
}
azure_helper.http_with_retries(testurl, **kwargs)
self.m_readurl.assert_called_once_with(testurl, **kwargs)

def test_http_with_retries_adds_timeout_kwarg_if_not_present(self):
testurl = mock.MagicMock()
- kwargs = {
- 'headers': mock.MagicMock(),
- 'data': mock.MagicMock()
- }
+ kwargs = {"headers": mock.MagicMock(), "data": mock.MagicMock()}
expected_kwargs = copy.deepcopy(kwargs)
- expected_kwargs['timeout'] = self.default_readurl_timeout
+ expected_kwargs["timeout"] = self.default_readurl_timeout

azure_helper.http_with_retries(testurl, **kwargs)
self.m_readurl.assert_called_once_with(testurl, **expected_kwargs)
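The last two tests above pin down argument hygiene for the wrapper: a default timeout is injected when the caller omits one, while retry-control kwargs are stripped (and the stripping logged) because the wrapper owns retrying. A hypothetical sketch of that sanitizing step; the default value and log wording are stand-ins:

import logging

LOG = logging.getLogger(__name__)

DEFAULT_READURL_TIMEOUT = 30  # stand-in value


def sanitize_readurl_kwargs(kwargs):
    """Prepare kwargs for a single readurl call inside a retry wrapper."""
    kwargs.setdefault("timeout", DEFAULT_READURL_TIMEOUT)
    # The wrapper implements its own retry loop; per-call retry controls
    # would fight it, so they are dropped if a caller passed them in.
    for key in ("retries", "infinite"):
        if kwargs.pop(key, None) is not None:
            LOG.warning(
                "%s kwarg passed in for communication with Azure endpoint.",
                key,
            )
    return kwargs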
http_with_retries should delete kwargs that @@ -521,44 +565,44 @@ class TestAzureHelperHttpWithRetries(CiTestCase): """ testurl = mock.MagicMock() kwargs = { - 'headers': mock.MagicMock(), - 'data': mock.MagicMock(), - 'timeout': mock.MagicMock(), - 'retries': mock.MagicMock(), - 'infinite': mock.MagicMock() + "headers": mock.MagicMock(), + "data": mock.MagicMock(), + "timeout": mock.MagicMock(), + "retries": mock.MagicMock(), + "infinite": mock.MagicMock(), } expected_kwargs = copy.deepcopy(kwargs) - expected_kwargs.pop('retries', None) - expected_kwargs.pop('infinite', None) + expected_kwargs.pop("retries", None) + expected_kwargs.pop("infinite", None) azure_helper.http_with_retries(testurl, **kwargs) self.m_readurl.assert_called_once_with(testurl, **expected_kwargs) self.assertIn( - 'retries kwarg passed in for communication with Azure endpoint.', - self.logs.getvalue()) + "retries kwarg passed in for communication with Azure endpoint.", + self.logs.getvalue(), + ) self.assertIn( - 'infinite kwarg passed in for communication with Azure endpoint.', - self.logs.getvalue()) + "infinite kwarg passed in for communication with Azure endpoint.", + self.logs.getvalue(), + ) class TestOpenSSLManager(CiTestCase): - def setUp(self): super(TestOpenSSLManager, self).setUp() patches = ExitStack() self.addCleanup(patches.close) self.subp = patches.enter_context( - mock.patch.object(azure_helper.subp, 'subp')) + mock.patch.object(azure_helper.subp, "subp") + ) try: - self.open = patches.enter_context( - mock.patch('__builtin__.open')) + self.open = patches.enter_context(mock.patch("__builtin__.open")) except ImportError: - self.open = patches.enter_context( - mock.patch('builtins.open')) + self.open = patches.enter_context(mock.patch("builtins.open")) - @mock.patch.object(azure_helper, 'cd', mock.MagicMock()) - @mock.patch.object(azure_helper.temp_utils, 'mkdtemp') + @mock.patch.object(azure_helper, "cd", mock.MagicMock()) + @mock.patch.object(azure_helper.temp_utils, "mkdtemp") def test_openssl_manager_creates_a_tmpdir(self, mkdtemp): manager = azure_helper.OpenSSLManager() self.assertEqual(mkdtemp.return_value, manager.tmpdir) @@ -567,16 +611,16 @@ class TestOpenSSLManager(CiTestCase): subp_directory = {} def capture_directory(*args, **kwargs): - subp_directory['path'] = os.getcwd() + subp_directory["path"] = os.getcwd() self.subp.side_effect = capture_directory manager = azure_helper.OpenSSLManager() - self.assertEqual(manager.tmpdir, subp_directory['path']) + self.assertEqual(manager.tmpdir, subp_directory["path"]) manager.clean_up() - @mock.patch.object(azure_helper, 'cd', mock.MagicMock()) - @mock.patch.object(azure_helper.temp_utils, 'mkdtemp', mock.MagicMock()) - @mock.patch.object(azure_helper.util, 'del_dir') + @mock.patch.object(azure_helper, "cd", mock.MagicMock()) + @mock.patch.object(azure_helper.temp_utils, "mkdtemp", mock.MagicMock()) + @mock.patch.object(azure_helper.util, "del_dir") def test_clean_up(self, del_dir): manager = azure_helper.OpenSSLManager() manager.clean_up() @@ -584,43 +628,42 @@ class TestOpenSSLManager(CiTestCase): class TestOpenSSLManagerActions(CiTestCase): - def setUp(self): super(TestOpenSSLManagerActions, self).setUp() self.allowed_subp = True def _data_file(self, name): - path = 'tests/data/azure' + path = "tests/data/azure" return os.path.join(path, name) @unittest.skip("todo move to cloud_test") def test_pubkey_extract(self): - cert = load_file(self._data_file('pubkey_extract_cert')) - good_key = load_file(self._data_file('pubkey_extract_ssh_key')) + cert = 
load_file(self._data_file("pubkey_extract_cert")) + good_key = load_file(self._data_file("pubkey_extract_ssh_key")) sslmgr = azure_helper.OpenSSLManager() key = sslmgr._get_ssh_key_from_cert(cert) self.assertEqual(good_key, key) - good_fingerprint = '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' + good_fingerprint = "073E19D14D1C799224C6A0FD8DDAB6A8BF27D473" fingerprint = sslmgr._get_fingerprint_from_cert(cert) self.assertEqual(good_fingerprint, fingerprint) @unittest.skip("todo move to cloud_test") - @mock.patch.object(azure_helper.OpenSSLManager, '_decrypt_certs_from_xml') + @mock.patch.object(azure_helper.OpenSSLManager, "_decrypt_certs_from_xml") def test_parse_certificates(self, mock_decrypt_certs): """Azure control plane puts private keys as well as certificates - into the Certificates XML object. Make sure only the public keys - from certs are extracted and that fingerprints are converted to - the form specified in the ovf-env.xml file. + into the Certificates XML object. Make sure only the public keys + from certs are extracted and that fingerprints are converted to + the form specified in the ovf-env.xml file. """ - cert_contents = load_file(self._data_file('parse_certificates_pem')) - fingerprints = load_file(self._data_file( - 'parse_certificates_fingerprints') + cert_contents = load_file(self._data_file("parse_certificates_pem")) + fingerprints = load_file( + self._data_file("parse_certificates_fingerprints") ).splitlines() mock_decrypt_certs.return_value = cert_contents sslmgr = azure_helper.OpenSSLManager() - keys_by_fp = sslmgr.parse_certificates('') + keys_by_fp = sslmgr.parse_certificates("") for fp in keys_by_fp.keys(): self.assertIn(fp, fingerprints) for fp in fingerprints: @@ -632,21 +675,23 @@ class TestGoalStateHealthReporter(CiTestCase): maxDiff = None default_parameters = { - 'incarnation': 1634, - 'container_id': 'MyContainerId', - 'instance_id': 'MyInstanceId' + "incarnation": 1634, + "container_id": "MyContainerId", + "instance_id": "MyInstanceId", } - test_azure_endpoint = 'TestEndpoint' - test_health_report_url = 'http://{0}/machine?comp=health'.format( - test_azure_endpoint) - test_default_headers = {'Content-Type': 'text/xml; charset=utf-8'} + test_azure_endpoint = "TestEndpoint" + test_health_report_url = "http://{0}/machine?comp=health".format( + test_azure_endpoint + ) + test_default_headers = {"Content-Type": "text/xml; charset=utf-8"} - provisioning_success_status = 'Ready' - provisioning_not_ready_status = 'NotReady' - provisioning_failure_substatus = 'ProvisioningFailed' + provisioning_success_status = "Ready" + provisioning_not_ready_status = "NotReady" + provisioning_failure_substatus = "ProvisioningFailed" provisioning_failure_err_description = ( - 'Test error message containing provisioning failure details') + "Test error message containing provisioning failure details" + ) def setUp(self): super(TestGoalStateHealthReporter, self).setUp() @@ -654,22 +699,28 @@ class TestGoalStateHealthReporter(CiTestCase): self.addCleanup(patches.close) patches.enter_context( - mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock())) + mock.patch.object(azure_helper.time, "sleep", mock.MagicMock()) + ) self.read_file_or_url = patches.enter_context( - mock.patch.object(azure_helper.url_helper, 'read_file_or_url')) + mock.patch.object(azure_helper.url_helper, "read_file_or_url") + ) self.post = patches.enter_context( - mock.patch.object(azure_helper.AzureEndpointHttpClient, - 'post')) + mock.patch.object(azure_helper.AzureEndpointHttpClient, "post") + ) 
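# A hedged sketch (assumed names, not the cloud-init implementation) of the
# reporter contract the TestGoalStateHealthReporter cases pin down: render an
# XML health document from the goal-state identity fields, then POST it to
# the wire-server health URL with an XML content-type header.
class GoalStateHealthReporterSketch:
    def __init__(self, goal_state, http_client, endpoint):
        self._goal_state = goal_state
        self._http_client = http_client
        self._health_url = "http://{0}/machine?comp=health".format(endpoint)

    def build_report(self, incarnation, container_id, instance_id,
                     status, substatus=None, description=None):
        # Assumed to render the health XML template with escaped values;
        # the tests mock this method, so only its call shape matters here.
        raise NotImplementedError

    def send_ready_signal(self):
        document = self.build_report(
            incarnation=self._goal_state.incarnation,
            container_id=self._goal_state.container_id,
            instance_id=self._goal_state.instance_id,
            status="Ready",
        )
        self._http_client.post(
            self._health_url,
            data=document,
            extra_headers={"Content-Type": "text/xml; charset=utf-8"},
        )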
self.GoalState = patches.enter_context( - mock.patch.object(azure_helper, 'GoalState')) - self.GoalState.return_value.container_id = \ - self.default_parameters['container_id'] - self.GoalState.return_value.instance_id = \ - self.default_parameters['instance_id'] - self.GoalState.return_value.incarnation = \ - self.default_parameters['incarnation'] + mock.patch.object(azure_helper, "GoalState") + ) + self.GoalState.return_value.container_id = self.default_parameters[ + "container_id" + ] + self.GoalState.return_value.instance_id = self.default_parameters[ + "instance_id" + ] + self.GoalState.return_value.incarnation = self.default_parameters[ + "incarnation" + ] def _text_from_xpath_in_xroot(self, xroot, xpath): element = xroot.find(xpath) @@ -685,34 +736,41 @@ class TestGoalStateHealthReporter(CiTestCase): def _get_report_ready_health_document(self): return self._get_formatted_health_report_xml_string( - incarnation=escape(str(self.default_parameters['incarnation'])), - container_id=escape(self.default_parameters['container_id']), - instance_id=escape(self.default_parameters['instance_id']), + incarnation=escape(str(self.default_parameters["incarnation"])), + container_id=escape(self.default_parameters["container_id"]), + instance_id=escape(self.default_parameters["instance_id"]), health_status=escape(self.provisioning_success_status), - health_detail_subsection='') + health_detail_subsection="", + ) def _get_report_failure_health_document(self): - health_detail_subsection = \ + health_detail_subsection = ( self._get_formatted_health_detail_subsection_xml_string( health_substatus=escape(self.provisioning_failure_substatus), health_description=escape( - self.provisioning_failure_err_description)) + self.provisioning_failure_err_description + ), + ) + ) return self._get_formatted_health_report_xml_string( - incarnation=escape(str(self.default_parameters['incarnation'])), - container_id=escape(self.default_parameters['container_id']), - instance_id=escape(self.default_parameters['instance_id']), + incarnation=escape(str(self.default_parameters["incarnation"])), + container_id=escape(self.default_parameters["container_id"]), + instance_id=escape(self.default_parameters["instance_id"]), health_status=escape(self.provisioning_not_ready_status), - health_detail_subsection=health_detail_subsection) + health_detail_subsection=health_detail_subsection, + ) def test_send_ready_signal_sends_post_request(self): with mock.patch.object( - azure_helper.GoalStateHealthReporter, - 'build_report') as m_build_report: + azure_helper.GoalStateHealthReporter, "build_report" + ) as m_build_report: client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) reporter = azure_helper.GoalStateHealthReporter( azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), - client, self.test_azure_endpoint) + client, + self.test_azure_endpoint, + ) reporter.send_ready_signal() self.assertEqual(1, self.post.call_count) @@ -720,73 +778,94 @@ class TestGoalStateHealthReporter(CiTestCase): mock.call( self.test_health_report_url, data=m_build_report.return_value, - extra_headers=self.test_default_headers), - self.post.call_args) + extra_headers=self.test_default_headers, + ), + self.post.call_args, + ) def test_send_failure_signal_sends_post_request(self): with mock.patch.object( - azure_helper.GoalStateHealthReporter, - 'build_report') as m_build_report: + azure_helper.GoalStateHealthReporter, "build_report" + ) as m_build_report: client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) reporter = 
azure_helper.GoalStateHealthReporter( azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), - client, self.test_azure_endpoint) + client, + self.test_azure_endpoint, + ) reporter.send_failure_signal( - description=self.provisioning_failure_err_description) + description=self.provisioning_failure_err_description + ) self.assertEqual(1, self.post.call_count) self.assertEqual( mock.call( self.test_health_report_url, data=m_build_report.return_value, - extra_headers=self.test_default_headers), - self.post.call_args) + extra_headers=self.test_default_headers, + ), + self.post.call_args, + ) def test_build_report_for_ready_signal_health_document(self): health_document = self._get_report_ready_health_document() reporter = azure_helper.GoalStateHealthReporter( azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), azure_helper.AzureEndpointHttpClient(mock.MagicMock()), - self.test_azure_endpoint) + self.test_azure_endpoint, + ) generated_health_document = reporter.build_report( - incarnation=self.default_parameters['incarnation'], - container_id=self.default_parameters['container_id'], - instance_id=self.default_parameters['instance_id'], - status=self.provisioning_success_status) + incarnation=self.default_parameters["incarnation"], + container_id=self.default_parameters["container_id"], + instance_id=self.default_parameters["instance_id"], + status=self.provisioning_success_status, + ) self.assertEqual(health_document, generated_health_document) generated_xroot = ElementTree.fromstring(generated_health_document) self.assertEqual( self._text_from_xpath_in_xroot( - generated_xroot, './GoalStateIncarnation'), - str(self.default_parameters['incarnation'])) + generated_xroot, "./GoalStateIncarnation" + ), + str(self.default_parameters["incarnation"]), + ) self.assertEqual( self._text_from_xpath_in_xroot( - generated_xroot, './Container/ContainerId'), - str(self.default_parameters['container_id'])) + generated_xroot, "./Container/ContainerId" + ), + str(self.default_parameters["container_id"]), + ) self.assertEqual( self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/InstanceId'), - str(self.default_parameters['instance_id'])) + generated_xroot, "./Container/RoleInstanceList/Role/InstanceId" + ), + str(self.default_parameters["instance_id"]), + ) self.assertEqual( self._text_from_xpath_in_xroot( generated_xroot, - './Container/RoleInstanceList/Role/Health/State'), - escape(self.provisioning_success_status)) + "./Container/RoleInstanceList/Role/Health/State", + ), + escape(self.provisioning_success_status), + ) self.assertIsNone( self._text_from_xpath_in_xroot( generated_xroot, - './Container/RoleInstanceList/Role/Health/Details')) + "./Container/RoleInstanceList/Role/Health/Details", + ) + ) self.assertIsNone( self._text_from_xpath_in_xroot( generated_xroot, - './Container/RoleInstanceList/Role/Health/Details/SubStatus')) + "./Container/RoleInstanceList/Role/Health/Details/SubStatus", + ) + ) self.assertIsNone( self._text_from_xpath_in_xroot( generated_xroot, - './Container/RoleInstanceList/Role/Health/Details/Description') + "./Container/RoleInstanceList/Role/Health/Details/Description", + ) ) def test_build_report_for_failure_signal_health_document(self): @@ -794,120 +873,143 @@ class TestGoalStateHealthReporter(CiTestCase): reporter = azure_helper.GoalStateHealthReporter( azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), azure_helper.AzureEndpointHttpClient(mock.MagicMock()), - self.test_azure_endpoint) + self.test_azure_endpoint, + ) 
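# For orientation while reading the XPath assertions in these build_report
# tests, the generated health document has roughly this element layout. It is
# reconstructed from the XPaths alone; the root tag name and any attributes
# are assumptions, not copied from cloud-init's HEALTH_REPORT_XML_TEMPLATE.
HEALTH_DOCUMENT_SHAPE = """\
<Health>
  <GoalStateIncarnation>...</GoalStateIncarnation>
  <Container>
    <ContainerId>...</ContainerId>
    <RoleInstanceList>
      <Role>
        <InstanceId>...</InstanceId>
        <Health>
          <State>Ready or NotReady</State>
          <!-- Details appears only in failure reports; the ready-signal
               tests assert it is absent -->
          <Details>
            <SubStatus>ProvisioningFailed</SubStatus>
            <Description>trimmed, XML-escaped error text</Description>
          </Details>
        </Health>
      </Role>
    </RoleInstanceList>
  </Container>
</Health>
"""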
generated_health_document = reporter.build_report( - incarnation=self.default_parameters['incarnation'], - container_id=self.default_parameters['container_id'], - instance_id=self.default_parameters['instance_id'], + incarnation=self.default_parameters["incarnation"], + container_id=self.default_parameters["container_id"], + instance_id=self.default_parameters["instance_id"], status=self.provisioning_not_ready_status, substatus=self.provisioning_failure_substatus, - description=self.provisioning_failure_err_description) + description=self.provisioning_failure_err_description, + ) self.assertEqual(health_document, generated_health_document) generated_xroot = ElementTree.fromstring(generated_health_document) self.assertEqual( self._text_from_xpath_in_xroot( - generated_xroot, './GoalStateIncarnation'), - str(self.default_parameters['incarnation'])) + generated_xroot, "./GoalStateIncarnation" + ), + str(self.default_parameters["incarnation"]), + ) self.assertEqual( self._text_from_xpath_in_xroot( - generated_xroot, './Container/ContainerId'), - self.default_parameters['container_id']) + generated_xroot, "./Container/ContainerId" + ), + self.default_parameters["container_id"], + ) self.assertEqual( self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/InstanceId'), - self.default_parameters['instance_id']) + generated_xroot, "./Container/RoleInstanceList/Role/InstanceId" + ), + self.default_parameters["instance_id"], + ) self.assertEqual( self._text_from_xpath_in_xroot( generated_xroot, - './Container/RoleInstanceList/Role/Health/State'), - escape(self.provisioning_not_ready_status)) + "./Container/RoleInstanceList/Role/Health/State", + ), + escape(self.provisioning_not_ready_status), + ) self.assertEqual( self._text_from_xpath_in_xroot( generated_xroot, - './Container/RoleInstanceList/Role/Health/Details/' - 'SubStatus'), - escape(self.provisioning_failure_substatus)) + "./Container/RoleInstanceList/Role/Health/Details/SubStatus", + ), + escape(self.provisioning_failure_substatus), + ) self.assertEqual( self._text_from_xpath_in_xroot( generated_xroot, - './Container/RoleInstanceList/Role/Health/Details/' - 'Description'), - escape(self.provisioning_failure_err_description)) + "./Container/RoleInstanceList/Role/Health/Details/Description", + ), + escape(self.provisioning_failure_err_description), + ) def test_send_ready_signal_calls_build_report(self): with mock.patch.object( - azure_helper.GoalStateHealthReporter, 'build_report' + azure_helper.GoalStateHealthReporter, "build_report" ) as m_build_report: reporter = azure_helper.GoalStateHealthReporter( azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), azure_helper.AzureEndpointHttpClient(mock.MagicMock()), - self.test_azure_endpoint) + self.test_azure_endpoint, + ) reporter.send_ready_signal() self.assertEqual(1, m_build_report.call_count) self.assertEqual( mock.call( - incarnation=self.default_parameters['incarnation'], - container_id=self.default_parameters['container_id'], - instance_id=self.default_parameters['instance_id'], - status=self.provisioning_success_status), - m_build_report.call_args) + incarnation=self.default_parameters["incarnation"], + container_id=self.default_parameters["container_id"], + instance_id=self.default_parameters["instance_id"], + status=self.provisioning_success_status, + ), + m_build_report.call_args, + ) def test_send_failure_signal_calls_build_report(self): with mock.patch.object( - azure_helper.GoalStateHealthReporter, 'build_report' + 
azure_helper.GoalStateHealthReporter, "build_report" ) as m_build_report: reporter = azure_helper.GoalStateHealthReporter( azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), azure_helper.AzureEndpointHttpClient(mock.MagicMock()), - self.test_azure_endpoint) + self.test_azure_endpoint, + ) reporter.send_failure_signal( - description=self.provisioning_failure_err_description) + description=self.provisioning_failure_err_description + ) self.assertEqual(1, m_build_report.call_count) self.assertEqual( mock.call( - incarnation=self.default_parameters['incarnation'], - container_id=self.default_parameters['container_id'], - instance_id=self.default_parameters['instance_id'], + incarnation=self.default_parameters["incarnation"], + container_id=self.default_parameters["container_id"], + instance_id=self.default_parameters["instance_id"], status=self.provisioning_not_ready_status, substatus=self.provisioning_failure_substatus, - description=self.provisioning_failure_err_description), - m_build_report.call_args) + description=self.provisioning_failure_err_description, + ), + m_build_report.call_args, + ) def test_build_report_escapes_chars(self): - incarnation = 'jd8\'9*&^<\'A><A[p&o+\"SD()*&&&LKAJSD23' - container_id = '&&<\"><><ds8\'9+7&d9a86!@($09asdl;<>' - instance_id = 'Opo>>>jas\'&d;[p&fp\"a<<!!@&&' - health_status = '&<897\"6&>&aa\'sd!@&!)((*<&>' - health_substatus = '&as\"d<<a&s>d<\'^@!5&6<7' - health_description = '&&&>!#$\"&&<as\'1!@$d&>><>&\"sd<67<]>>' - - health_detail_subsection = \ + incarnation = "jd8'9*&^<'A><A[p&o+\"SD()*&&&LKAJSD23" + container_id = "&&<\"><><ds8'9+7&d9a86!@($09asdl;<>" + instance_id = "Opo>>>jas'&d;[p&fp\"a<<!!@&&" + health_status = "&<897\"6&>&aa'sd!@&!)((*<&>" + health_substatus = "&as\"d<<a&s>d<'^@!5&6<7" + health_description = '&&&>!#$"&&<as\'1!@$d&>><>&"sd<67<]>>' + + health_detail_subsection = ( self._get_formatted_health_detail_subsection_xml_string( health_substatus=escape(health_substatus), - health_description=escape(health_description)) + health_description=escape(health_description), + ) + ) health_document = self._get_formatted_health_report_xml_string( incarnation=escape(incarnation), container_id=escape(container_id), instance_id=escape(instance_id), health_status=escape(health_status), - health_detail_subsection=health_detail_subsection) + health_detail_subsection=health_detail_subsection, + ) reporter = azure_helper.GoalStateHealthReporter( azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), azure_helper.AzureEndpointHttpClient(mock.MagicMock()), - self.test_azure_endpoint) + self.test_azure_endpoint, + ) generated_health_document = reporter.build_report( incarnation=incarnation, container_id=container_id, instance_id=instance_id, status=health_status, substatus=health_substatus, - description=health_description) + description=health_description, + ) self.assertEqual(health_document, generated_health_document) @@ -915,26 +1017,31 @@ class TestGoalStateHealthReporter(CiTestCase): reporter = azure_helper.GoalStateHealthReporter( azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), azure_helper.AzureEndpointHttpClient(mock.MagicMock()), - self.test_azure_endpoint) - long_err_msg = 'a9&ea8>>>e as1< d\"q2*&(^%\'a=5<' * 100 + self.test_azure_endpoint, + ) + long_err_msg = "a9&ea8>>>e as1< d\"q2*&(^%'a=5<" * 100 generated_health_document = reporter.build_report( - incarnation=self.default_parameters['incarnation'], - container_id=self.default_parameters['container_id'], - instance_id=self.default_parameters['instance_id'], + 
incarnation=self.default_parameters["incarnation"], + container_id=self.default_parameters["container_id"], + instance_id=self.default_parameters["instance_id"], status=self.provisioning_not_ready_status, substatus=self.provisioning_failure_substatus, - description=long_err_msg) + description=long_err_msg, + ) generated_xroot = ElementTree.fromstring(generated_health_document) generated_health_report_description = self._text_from_xpath_in_xroot( generated_xroot, - './Container/RoleInstanceList/Role/Health/Details/Description') + "./Container/RoleInstanceList/Role/Health/Details/Description", + ) self.assertEqual( len(unescape(generated_health_report_description)), - HEALTH_REPORT_DESCRIPTION_TRIM_LEN) + HEALTH_REPORT_DESCRIPTION_TRIM_LEN, + ) def test_trim_description_then_escape_conforms_to_len_limits_worst_case( - self): + self, + ): """When unescaped characters are XML-escaped, the length increases. Char Escape String < < @@ -963,46 +1070,53 @@ class TestGoalStateHealthReporter(CiTestCase): reporter = azure_helper.GoalStateHealthReporter( azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), azure_helper.AzureEndpointHttpClient(mock.MagicMock()), - self.test_azure_endpoint) - long_err_msg = '\'\"' * 10000 + self.test_azure_endpoint, + ) + long_err_msg = "'\"" * 10000 generated_health_document = reporter.build_report( - incarnation=self.default_parameters['incarnation'], - container_id=self.default_parameters['container_id'], - instance_id=self.default_parameters['instance_id'], + incarnation=self.default_parameters["incarnation"], + container_id=self.default_parameters["container_id"], + instance_id=self.default_parameters["instance_id"], status=self.provisioning_not_ready_status, substatus=self.provisioning_failure_substatus, - description=long_err_msg) + description=long_err_msg, + ) generated_xroot = ElementTree.fromstring(generated_health_document) generated_health_report_description = self._text_from_xpath_in_xroot( generated_xroot, - './Container/RoleInstanceList/Role/Health/Details/Description') + "./Container/RoleInstanceList/Role/Health/Details/Description", + ) # The escaped description string should be less than # the Azure platform limit for the escaped description string. 
self.assertLessEqual(len(generated_health_report_description), 4096) class TestWALinuxAgentShim(CiTestCase): - def setUp(self): super(TestWALinuxAgentShim, self).setUp() patches = ExitStack() self.addCleanup(patches.close) self.AzureEndpointHttpClient = patches.enter_context( - mock.patch.object(azure_helper, 'AzureEndpointHttpClient')) + mock.patch.object(azure_helper, "AzureEndpointHttpClient") + ) self.find_endpoint = patches.enter_context( - mock.patch.object(wa_shim, 'find_endpoint')) + mock.patch.object(wa_shim, "find_endpoint") + ) self.GoalState = patches.enter_context( - mock.patch.object(azure_helper, 'GoalState')) + mock.patch.object(azure_helper, "GoalState") + ) self.OpenSSLManager = patches.enter_context( - mock.patch.object(azure_helper, 'OpenSSLManager', autospec=True)) + mock.patch.object(azure_helper, "OpenSSLManager", autospec=True) + ) patches.enter_context( - mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock())) + mock.patch.object(azure_helper.time, "sleep", mock.MagicMock()) + ) - self.test_incarnation = 'TestIncarnation' - self.test_container_id = 'TestContainerId' - self.test_instance_id = 'TestInstanceId' + self.test_incarnation = "TestIncarnation" + self.test_container_id = "TestContainerId" + self.test_instance_id = "TestInstanceId" self.GoalState.return_value.incarnation = self.test_incarnation self.GoalState.return_value.container_id = self.test_container_id self.GoalState.return_value.instance_id = self.test_instance_id @@ -1010,7 +1124,7 @@ class TestWALinuxAgentShim(CiTestCase): def test_eject_iso_is_called(self): shim = wa_shim() with mock.patch.object( - shim, 'eject_iso', autospec=True + shim, "eject_iso", autospec=True ) as m_eject_iso: shim.register_with_azure_and_fetch_data(iso_dev="/dev/sr0") m_eject_iso.assert_called_once_with("/dev/sr0") @@ -1019,102 +1133,113 @@ class TestWALinuxAgentShim(CiTestCase): shim = wa_shim() shim.register_with_azure_and_fetch_data() self.assertEqual( - [mock.call(None)], - self.AzureEndpointHttpClient.call_args_list) + [mock.call(None)], self.AzureEndpointHttpClient.call_args_list + ) def test_http_client_does_not_use_certificate_for_report_failure(self): shim = wa_shim() - shim.register_with_azure_and_report_failure(description='TestDesc') + shim.register_with_azure_and_report_failure(description="TestDesc") self.assertEqual( - [mock.call(None)], - self.AzureEndpointHttpClient.call_args_list) + [mock.call(None)], self.AzureEndpointHttpClient.call_args_list + ) def test_correct_url_used_for_goalstate_during_report_ready(self): - self.find_endpoint.return_value = 'test_endpoint' + self.find_endpoint.return_value = "test_endpoint" shim = wa_shim() shim.register_with_azure_and_fetch_data() m_get = self.AzureEndpointHttpClient.return_value.get self.assertEqual( - [mock.call('http://test_endpoint/machine/?comp=goalstate')], - m_get.call_args_list) + [mock.call("http://test_endpoint/machine/?comp=goalstate")], + m_get.call_args_list, + ) self.assertEqual( - [mock.call( - m_get.return_value.contents, - self.AzureEndpointHttpClient.return_value, - False - )], - self.GoalState.call_args_list) + [ + mock.call( + m_get.return_value.contents, + self.AzureEndpointHttpClient.return_value, + False, + ) + ], + self.GoalState.call_args_list, + ) def test_correct_url_used_for_goalstate_during_report_failure(self): - self.find_endpoint.return_value = 'test_endpoint' + self.find_endpoint.return_value = "test_endpoint" shim = wa_shim() - shim.register_with_azure_and_report_failure(description='TestDesc') + 
shim.register_with_azure_and_report_failure(description="TestDesc") m_get = self.AzureEndpointHttpClient.return_value.get self.assertEqual( - [mock.call('http://test_endpoint/machine/?comp=goalstate')], - m_get.call_args_list) + [mock.call("http://test_endpoint/machine/?comp=goalstate")], + m_get.call_args_list, + ) self.assertEqual( - [mock.call( - m_get.return_value.contents, - self.AzureEndpointHttpClient.return_value, - False - )], - self.GoalState.call_args_list) + [ + mock.call( + m_get.return_value.contents, + self.AzureEndpointHttpClient.return_value, + False, + ) + ], + self.GoalState.call_args_list, + ) def test_certificates_used_to_determine_public_keys(self): # if register_with_azure_and_fetch_data() isn't passed some info about # the user's public keys, there's no point in even trying to parse the # certificates shim = wa_shim() - mypk = [{'fingerprint': 'fp1', 'path': 'path1'}, - {'fingerprint': 'fp3', 'path': 'path3', 'value': ''}] - certs = {'fp1': 'expected-key', - 'fp2': 'should-not-be-found', - 'fp3': 'expected-no-value-key', - } + mypk = [ + {"fingerprint": "fp1", "path": "path1"}, + {"fingerprint": "fp3", "path": "path3", "value": ""}, + ] + certs = { + "fp1": "expected-key", + "fp2": "should-not-be-found", + "fp3": "expected-no-value-key", + } sslmgr = self.OpenSSLManager.return_value sslmgr.parse_certificates.return_value = certs data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) self.assertEqual( [mock.call(self.GoalState.return_value.certificates_xml)], - sslmgr.parse_certificates.call_args_list) - self.assertIn('expected-key', data['public-keys']) - self.assertIn('expected-no-value-key', data['public-keys']) - self.assertNotIn('should-not-be-found', data['public-keys']) + sslmgr.parse_certificates.call_args_list, + ) + self.assertIn("expected-key", data["public-keys"]) + self.assertIn("expected-no-value-key", data["public-keys"]) + self.assertNotIn("should-not-be-found", data["public-keys"]) def test_absent_certificates_produces_empty_public_keys(self): - mypk = [{'fingerprint': 'fp1', 'path': 'path1'}] + mypk = [{"fingerprint": "fp1", "path": "path1"}] self.GoalState.return_value.certificates_xml = None shim = wa_shim() data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) - self.assertEqual([], data['public-keys']) + self.assertEqual([], data["public-keys"]) def test_correct_url_used_for_report_ready(self): - self.find_endpoint.return_value = 'test_endpoint' + self.find_endpoint.return_value = "test_endpoint" shim = wa_shim() shim.register_with_azure_and_fetch_data() - expected_url = 'http://test_endpoint/machine?comp=health' + expected_url = "http://test_endpoint/machine?comp=health" self.assertEqual( [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)], - self.AzureEndpointHttpClient.return_value.post - .call_args_list) + self.AzureEndpointHttpClient.return_value.post.call_args_list, + ) def test_correct_url_used_for_report_failure(self): - self.find_endpoint.return_value = 'test_endpoint' + self.find_endpoint.return_value = "test_endpoint" shim = wa_shim() - shim.register_with_azure_and_report_failure(description='TestDesc') - expected_url = 'http://test_endpoint/machine?comp=health' + shim.register_with_azure_and_report_failure(description="TestDesc") + expected_url = "http://test_endpoint/machine?comp=health" self.assertEqual( [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)], - self.AzureEndpointHttpClient.return_value.post - .call_args_list) + 
self.AzureEndpointHttpClient.return_value.post.call_args_list, + ) def test_goal_state_values_used_for_report_ready(self): shim = wa_shim() shim.register_with_azure_and_fetch_data() posted_document = ( - self.AzureEndpointHttpClient.return_value.post - .call_args[1]['data'] + self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"] ) self.assertIn(self.test_incarnation, posted_document) self.assertIn(self.test_container_id, posted_document) @@ -1122,10 +1247,9 @@ class TestWALinuxAgentShim(CiTestCase): def test_goal_state_values_used_for_report_failure(self): shim = wa_shim() - shim.register_with_azure_and_report_failure(description='TestDesc') + shim.register_with_azure_and_report_failure(description="TestDesc") posted_document = ( - self.AzureEndpointHttpClient.return_value.post - .call_args[1]['data'] + self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"] ) self.assertIn(self.test_incarnation, posted_document) self.assertIn(self.test_container_id, posted_document) @@ -1138,57 +1262,66 @@ class TestWALinuxAgentShim(CiTestCase): incarnation=escape(self.test_incarnation), container_id=escape(self.test_container_id), instance_id=escape(self.test_instance_id), - health_status=escape('Ready'), - health_detail_subsection='') + health_status=escape("Ready"), + health_detail_subsection="", + ) posted_document = ( - self.AzureEndpointHttpClient.return_value.post - .call_args[1]['data']) + self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"] + ) self.assertEqual(health_document, posted_document) def test_xml_elems_in_report_failure_post(self): shim = wa_shim() - shim.register_with_azure_and_report_failure(description='TestDesc') + shim.register_with_azure_and_report_failure(description="TestDesc") health_document = HEALTH_REPORT_XML_TEMPLATE.format( incarnation=escape(self.test_incarnation), container_id=escape(self.test_container_id), instance_id=escape(self.test_instance_id), - health_status=escape('NotReady'), - health_detail_subsection=HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE - .format( - health_substatus=escape('ProvisioningFailed'), - health_description=escape('TestDesc'))) + health_status=escape("NotReady"), + health_detail_subsection=( + HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format( + health_substatus=escape("ProvisioningFailed"), + health_description=escape("TestDesc"), + ) + ), + ) posted_document = ( - self.AzureEndpointHttpClient.return_value.post - .call_args[1]['data']) + self.AzureEndpointHttpClient.return_value.post.call_args[1]["data"] + ) self.assertEqual(health_document, posted_document) - @mock.patch.object(azure_helper, 'GoalStateHealthReporter', autospec=True) + @mock.patch.object(azure_helper, "GoalStateHealthReporter", autospec=True) def test_register_with_azure_and_fetch_data_calls_send_ready_signal( - self, m_goal_state_health_reporter): + self, m_goal_state_health_reporter + ): shim = wa_shim() shim.register_with_azure_and_fetch_data() self.assertEqual( 1, - m_goal_state_health_reporter.return_value.send_ready_signal - .call_count) + m_goal_state_health_reporter.return_value.send_ready_signal.call_count, # noqa: E501 + ) - @mock.patch.object(azure_helper, 'GoalStateHealthReporter', autospec=True) + @mock.patch.object(azure_helper, "GoalStateHealthReporter", autospec=True) def test_register_with_azure_and_report_failure_calls_send_failure_signal( - self, m_goal_state_health_reporter): + self, m_goal_state_health_reporter + ): shim = wa_shim() - shim.register_with_azure_and_report_failure(description='TestDesc') - 
m_goal_state_health_reporter.return_value.send_failure_signal \ - .assert_called_once_with(description='TestDesc') + shim.register_with_azure_and_report_failure(description="TestDesc") + m_goal_state_health_reporter.return_value.send_failure_signal.assert_called_once_with( # noqa: E501 + description="TestDesc" + ) def test_register_with_azure_and_report_failure_does_not_need_certificates( - self): + self, + ): shim = wa_shim() with mock.patch.object( - shim, '_fetch_goal_state_from_azure', autospec=True + shim, "_fetch_goal_state_from_azure", autospec=True ) as m_fetch_goal_state_from_azure: - shim.register_with_azure_and_report_failure(description='TestDesc') + shim.register_with_azure_and_report_failure(description="TestDesc") m_fetch_goal_state_from_azure.assert_called_once_with( - need_certificate=False) + need_certificate=False + ) def test_clean_up_can_be_called_at_any_time(self): shim = wa_shim() @@ -1197,7 +1330,7 @@ class TestWALinuxAgentShim(CiTestCase): def test_openssl_manager_not_instantiated_by_shim_report_status(self): shim = wa_shim() shim.register_with_azure_and_fetch_data() - shim.register_with_azure_and_report_failure(description='TestDesc') + shim.register_with_azure_and_report_failure(description="TestDesc") shim.clean_up() self.OpenSSLManager.assert_not_called() @@ -1209,178 +1342,204 @@ class TestWALinuxAgentShim(CiTestCase): def test_clean_up_after_report_failure(self): shim = wa_shim() - shim.register_with_azure_and_report_failure(description='TestDesc') + shim.register_with_azure_and_report_failure(description="TestDesc") shim.clean_up() self.OpenSSLManager.return_value.clean_up.assert_not_called() def test_fetch_goalstate_during_report_ready_raises_exc_on_get_exc(self): - self.AzureEndpointHttpClient.return_value.get \ - .side_effect = SentinelException + self.AzureEndpointHttpClient.return_value.get.side_effect = ( + SentinelException + ) shim = wa_shim() - self.assertRaises(SentinelException, - shim.register_with_azure_and_fetch_data) + self.assertRaises( + SentinelException, shim.register_with_azure_and_fetch_data + ) def test_fetch_goalstate_during_report_failure_raises_exc_on_get_exc(self): - self.AzureEndpointHttpClient.return_value.get \ - .side_effect = SentinelException + self.AzureEndpointHttpClient.return_value.get.side_effect = ( + SentinelException + ) shim = wa_shim() - self.assertRaises(SentinelException, - shim.register_with_azure_and_report_failure, - description='TestDesc') + self.assertRaises( + SentinelException, + shim.register_with_azure_and_report_failure, + description="TestDesc", + ) def test_fetch_goalstate_during_report_ready_raises_exc_on_parse_exc(self): self.GoalState.side_effect = SentinelException shim = wa_shim() - self.assertRaises(SentinelException, - shim.register_with_azure_and_fetch_data) + self.assertRaises( + SentinelException, shim.register_with_azure_and_fetch_data + ) def test_fetch_goalstate_during_report_failure_raises_exc_on_parse_exc( - self): + self, + ): self.GoalState.side_effect = SentinelException shim = wa_shim() - self.assertRaises(SentinelException, - shim.register_with_azure_and_report_failure, - description='TestDesc') + self.assertRaises( + SentinelException, + shim.register_with_azure_and_report_failure, + description="TestDesc", + ) def test_failure_to_send_report_ready_health_doc_bubbles_up(self): - self.AzureEndpointHttpClient.return_value.post \ - .side_effect = SentinelException + self.AzureEndpointHttpClient.return_value.post.side_effect = ( + SentinelException + ) shim = wa_shim() - 
self.assertRaises(SentinelException, - shim.register_with_azure_and_fetch_data) + self.assertRaises( + SentinelException, shim.register_with_azure_and_fetch_data + ) def test_failure_to_send_report_failure_health_doc_bubbles_up(self): - self.AzureEndpointHttpClient.return_value.post \ - .side_effect = SentinelException + self.AzureEndpointHttpClient.return_value.post.side_effect = ( + SentinelException + ) shim = wa_shim() - self.assertRaises(SentinelException, - shim.register_with_azure_and_report_failure, - description='TestDesc') + self.assertRaises( + SentinelException, + shim.register_with_azure_and_report_failure, + description="TestDesc", + ) class TestGetMetadataGoalStateXMLAndReportReadyToFabric(CiTestCase): - def setUp(self): super(TestGetMetadataGoalStateXMLAndReportReadyToFabric, self).setUp() patches = ExitStack() self.addCleanup(patches.close) self.m_shim = patches.enter_context( - mock.patch.object(azure_helper, 'WALinuxAgentShim')) + mock.patch.object(azure_helper, "WALinuxAgentShim") + ) def test_data_from_shim_returned(self): ret = azure_helper.get_metadata_from_fabric() self.assertEqual( - self.m_shim.return_value.register_with_azure_and_fetch_data - .return_value, - ret) + self.m_shim.return_value.register_with_azure_and_fetch_data.return_value, # noqa: E501 + ret, + ) def test_success_calls_clean_up(self): azure_helper.get_metadata_from_fabric() self.assertEqual(1, self.m_shim.return_value.clean_up.call_count) - def test_failure_in_registration_propagates_exc_and_calls_clean_up( - self): - self.m_shim.return_value.register_with_azure_and_fetch_data \ - .side_effect = SentinelException - self.assertRaises(SentinelException, - azure_helper.get_metadata_from_fabric) + def test_failure_in_registration_propagates_exc_and_calls_clean_up(self): + self.m_shim.return_value.register_with_azure_and_fetch_data.side_effect = ( # noqa: E501 + SentinelException + ) + self.assertRaises( + SentinelException, azure_helper.get_metadata_from_fabric + ) self.assertEqual(1, self.m_shim.return_value.clean_up.call_count) def test_calls_shim_register_with_azure_and_fetch_data(self): m_pubkey_info = mock.MagicMock() azure_helper.get_metadata_from_fabric( - pubkey_info=m_pubkey_info, iso_dev="/dev/sr0") + pubkey_info=m_pubkey_info, iso_dev="/dev/sr0" + ) self.assertEqual( 1, - self.m_shim.return_value - .register_with_azure_and_fetch_data.call_count) + self.m_shim.return_value.register_with_azure_and_fetch_data.call_count, # noqa: E501 + ) self.assertEqual( mock.call(iso_dev="/dev/sr0", pubkey_info=m_pubkey_info), - self.m_shim.return_value - .register_with_azure_and_fetch_data.call_args) + self.m_shim.return_value.register_with_azure_and_fetch_data.call_args, # noqa: E501 + ) def test_instantiates_shim_with_kwargs(self): m_fallback_lease_file = mock.MagicMock() m_dhcp_options = mock.MagicMock() azure_helper.get_metadata_from_fabric( - fallback_lease_file=m_fallback_lease_file, - dhcp_opts=m_dhcp_options) + fallback_lease_file=m_fallback_lease_file, dhcp_opts=m_dhcp_options + ) self.assertEqual(1, self.m_shim.call_count) self.assertEqual( mock.call( fallback_lease_file=m_fallback_lease_file, - dhcp_options=m_dhcp_options), - self.m_shim.call_args) + dhcp_options=m_dhcp_options, + ), + self.m_shim.call_args, + ) class TestGetMetadataGoalStateXMLAndReportFailureToFabric(CiTestCase): - def setUp(self): super( - TestGetMetadataGoalStateXMLAndReportFailureToFabric, self).setUp() + TestGetMetadataGoalStateXMLAndReportFailureToFabric, self + ).setUp() patches = ExitStack() 
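# A hedged sketch (not the cloud-init source) of the wrapper contract the
# get_metadata_from_fabric tests above assert: the shim is constructed with
# the lease/DHCP kwargs, and clean_up() runs whether registration succeeds
# or raises. (azure_helper is the module already imported by these tests.)
def get_metadata_from_fabric_sketch(fallback_lease_file=None, dhcp_opts=None,
                                    pubkey_info=None, iso_dev=None):
    shim = azure_helper.WALinuxAgentShim(
        fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts
    )
    try:
        return shim.register_with_azure_and_fetch_data(
            pubkey_info=pubkey_info, iso_dev=iso_dev
        )
    finally:
        shim.clean_up()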
self.addCleanup(patches.close) self.m_shim = patches.enter_context( - mock.patch.object(azure_helper, 'WALinuxAgentShim')) + mock.patch.object(azure_helper, "WALinuxAgentShim") + ) def test_success_calls_clean_up(self): azure_helper.report_failure_to_fabric() - self.assertEqual( - 1, - self.m_shim.return_value.clean_up.call_count) + self.assertEqual(1, self.m_shim.return_value.clean_up.call_count) def test_failure_in_shim_report_failure_propagates_exc_and_calls_clean_up( - self): - self.m_shim.return_value.register_with_azure_and_report_failure \ - .side_effect = SentinelException - self.assertRaises(SentinelException, - azure_helper.report_failure_to_fabric) - self.assertEqual( - 1, - self.m_shim.return_value.clean_up.call_count) + self, + ): + self.m_shim.return_value.register_with_azure_and_report_failure.side_effect = ( # noqa: E501 + SentinelException + ) + self.assertRaises( + SentinelException, azure_helper.report_failure_to_fabric + ) + self.assertEqual(1, self.m_shim.return_value.clean_up.call_count) def test_report_failure_to_fabric_with_desc_calls_shim_report_failure( - self): - azure_helper.report_failure_to_fabric(description='TestDesc') - self.m_shim.return_value.register_with_azure_and_report_failure \ - .assert_called_once_with(description='TestDesc') + self, + ): + azure_helper.report_failure_to_fabric(description="TestDesc") + self.m_shim.return_value.register_with_azure_and_report_failure.assert_called_once_with( # noqa: E501 + description="TestDesc" + ) def test_report_failure_to_fabric_with_no_desc_calls_shim_report_failure( - self): + self, + ): azure_helper.report_failure_to_fabric() # default err message description should be shown to the user # if no description is passed in - self.m_shim.return_value.register_with_azure_and_report_failure \ - .assert_called_once_with( - description=azure_helper - .DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE) + self.m_shim.return_value.register_with_azure_and_report_failure.assert_called_once_with( # noqa: E501 + description=( + azure_helper.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE + ) + ) def test_report_failure_to_fabric_empty_desc_calls_shim_report_failure( - self): - azure_helper.report_failure_to_fabric(description='') + self, + ): + azure_helper.report_failure_to_fabric(description="") # default err message description should be shown to the user # if an empty description is passed in - self.m_shim.return_value.register_with_azure_and_report_failure \ - .assert_called_once_with( - description=azure_helper - .DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE) + self.m_shim.return_value.register_with_azure_and_report_failure.assert_called_once_with( # noqa: E501 + description=( + azure_helper.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE + ) + ) def test_instantiates_shim_with_kwargs(self): m_fallback_lease_file = mock.MagicMock() m_dhcp_options = mock.MagicMock() azure_helper.report_failure_to_fabric( - fallback_lease_file=m_fallback_lease_file, - dhcp_opts=m_dhcp_options) + fallback_lease_file=m_fallback_lease_file, dhcp_opts=m_dhcp_options + ) self.m_shim.assert_called_once_with( fallback_lease_file=m_fallback_lease_file, - dhcp_options=m_dhcp_options) + dhcp_options=m_dhcp_options, + ) class TestExtractIpAddressFromNetworkd(CiTestCase): - azure_lease = dedent("""\ + azure_lease = dedent( + """\ # This is private data. Do not parse. 
ADDRESS=10.132.0.5 NETMASK=255.255.255.255 @@ -1399,7 +1558,8 @@ class TestExtractIpAddressFromNetworkd(CiTestCase): ROUTES=10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1 CLIENTID=ff405663a200020000ab11332859494d7a8b4c OPTION_245=624c3620 - """) + """ + ) def setUp(self): super(TestExtractIpAddressFromNetworkd, self).setUp() @@ -1408,21 +1568,25 @@ class TestExtractIpAddressFromNetworkd(CiTestCase): def test_no_valid_leases_is_none(self): """No valid leases should return None.""" self.assertIsNone( - wa_shim._networkd_get_value_from_leases(self.lease_d)) + wa_shim._networkd_get_value_from_leases(self.lease_d) + ) def test_option_245_is_found_in_single(self): """A single valid lease with 245 option should return it.""" - populate_dir(self.lease_d, {'9': self.azure_lease}) + populate_dir(self.lease_d, {"9": self.azure_lease}) self.assertEqual( - '624c3620', wa_shim._networkd_get_value_from_leases(self.lease_d)) + "624c3620", wa_shim._networkd_get_value_from_leases(self.lease_d) + ) def test_option_245_not_found_returns_None(self): """A valid lease, but no option 245 should return None.""" populate_dir( self.lease_d, - {'9': self.azure_lease.replace("OPTION_245", "OPTION_999")}) + {"9": self.azure_lease.replace("OPTION_245", "OPTION_999")}, + ) self.assertIsNone( - wa_shim._networkd_get_value_from_leases(self.lease_d)) + wa_shim._networkd_get_value_from_leases(self.lease_d) + ) def test_multiple_returns_first(self): """Somewhat arbitrarily return the first address when multiple. @@ -1432,10 +1596,14 @@ class TestExtractIpAddressFromNetworkd(CiTestCase): myval = "624c3601" populate_dir( self.lease_d, - {'9': self.azure_lease, - '2': self.azure_lease.replace("624c3620", myval)}) + { + "9": self.azure_lease, + "2": self.azure_lease.replace("624c3620", myval), + }, + ) self.assertEqual( - myval, wa_shim._networkd_get_value_from_leases(self.lease_d)) + myval, wa_shim._networkd_get_value_from_leases(self.lease_d) + ) # vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_cloudsigma.py b/tests/unittests/sources/test_cloudsigma.py index 2eae16ee..a2f26245 100644 --- a/tests/unittests/sources/test_cloudsigma.py +++ b/tests/unittests/sources/test_cloudsigma.py @@ -2,12 +2,9 @@ import copy +from cloudinit import distros, helpers, sources from cloudinit.cs_utils import Cepko -from cloudinit import distros -from cloudinit import helpers -from cloudinit import sources from cloudinit.sources import DataSourceCloudSigma - from tests.unittests import helpers as test_helpers SERVER_CONTEXT = { @@ -28,10 +25,10 @@ SERVER_CONTEXT = { "vendor_data": { "location": "zrh", "cloudinit": "#cloud-config\n\n...", - } + }, } -DS_PATH = 'cloudinit.sources.DataSourceCloudSigma.DataSourceCloudSigma' +DS_PATH = "cloudinit.sources.DataSourceCloudSigma.DataSourceCloudSigma" class CepkoMock(Cepko): @@ -45,41 +42,48 @@ class CepkoMock(Cepko): class DataSourceCloudSigmaTest(test_helpers.CiTestCase): def setUp(self): super(DataSourceCloudSigmaTest, self).setUp() - self.paths = helpers.Paths({'run_dir': self.tmp_dir()}) - self.add_patch(DS_PATH + '.is_running_in_cloudsigma', - "m_is_container", return_value=True) + self.paths = helpers.Paths({"run_dir": self.tmp_dir()}) + self.add_patch( + DS_PATH + ".is_running_in_cloudsigma", + "m_is_container", + return_value=True, + ) distro_cls = distros.fetch("ubuntu") distro = distro_cls("ubuntu", cfg={}, paths=self.paths) self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( - sys_cfg={}, distro=distro, paths=self.paths) + sys_cfg={}, distro=distro, paths=self.paths + ) 
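# A hedged sketch (assumed iteration order and parsing; not the cloud-init
# implementation) of the lease-scan behaviour TestExtractIpAddressFromNetworkd
# pins down above: scan systemd-networkd lease files for OPTION_245 and
# return the first value found, or None when absent.
import os

def networkd_get_value_from_leases_sketch(leases_d):
    # Assumes the directory holds only lease files, as the tests populate it.
    for lease_file in sorted(os.listdir(leases_d)):
        with open(os.path.join(leases_d, lease_file)) as stream:
            for line in stream:
                key, _, value = line.strip().partition("=")
                if key == "OPTION_245":
                    return value
    return None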
self.datasource.cepko = CepkoMock(SERVER_CONTEXT) def test_get_hostname(self): self.datasource.get_data() self.assertEqual("test_server", self.datasource.get_hostname()) - self.datasource.metadata['name'] = '' + self.datasource.metadata["name"] = "" self.assertEqual("65b2fb23", self.datasource.get_hostname()) - utf8_hostname = b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82'.decode('utf-8') - self.datasource.metadata['name'] = utf8_hostname + utf8_hostname = b"\xd1\x82\xd0\xb5\xd1\x81\xd1\x82".decode("utf-8") + self.datasource.metadata["name"] = utf8_hostname self.assertEqual("65b2fb23", self.datasource.get_hostname()) def test_get_public_ssh_keys(self): self.datasource.get_data() - self.assertEqual([SERVER_CONTEXT['meta']['ssh_public_key']], - self.datasource.get_public_ssh_keys()) + self.assertEqual( + [SERVER_CONTEXT["meta"]["ssh_public_key"]], + self.datasource.get_public_ssh_keys(), + ) def test_get_instance_id(self): self.datasource.get_data() - self.assertEqual(SERVER_CONTEXT['uuid'], - self.datasource.get_instance_id()) + self.assertEqual( + SERVER_CONTEXT["uuid"], self.datasource.get_instance_id() + ) def test_platform(self): """All platform-related attributes are set.""" self.datasource.get_data() - self.assertEqual(self.datasource.cloud_name, 'cloudsigma') - self.assertEqual(self.datasource.platform_type, 'cloudsigma') - self.assertEqual(self.datasource.subplatform, 'cepko (/dev/ttyS1)') + self.assertEqual(self.datasource.cloud_name, "cloudsigma") + self.assertEqual(self.datasource.platform_type, "cloudsigma") + self.assertEqual(self.datasource.subplatform, "cepko (/dev/ttyS1)") def test_metadata(self): self.datasource.get_data() @@ -87,22 +91,26 @@ class DataSourceCloudSigmaTest(test_helpers.CiTestCase): def test_user_data(self): self.datasource.get_data() - self.assertEqual(self.datasource.userdata_raw, - SERVER_CONTEXT['meta']['cloudinit-user-data']) + self.assertEqual( + self.datasource.userdata_raw, + SERVER_CONTEXT["meta"]["cloudinit-user-data"], + ) def test_encoded_user_data(self): encoded_context = copy.deepcopy(SERVER_CONTEXT) - encoded_context['meta']['base64_fields'] = 'cloudinit-user-data' - encoded_context['meta']['cloudinit-user-data'] = 'aGkgd29ybGQK' + encoded_context["meta"]["base64_fields"] = "cloudinit-user-data" + encoded_context["meta"]["cloudinit-user-data"] = "aGkgd29ybGQK" self.datasource.cepko = CepkoMock(encoded_context) self.datasource.get_data() - self.assertEqual(self.datasource.userdata_raw, b'hi world\n') + self.assertEqual(self.datasource.userdata_raw, b"hi world\n") def test_vendor_data(self): self.datasource.get_data() - self.assertEqual(self.datasource.vendordata_raw, - SERVER_CONTEXT['vendor_data']['cloudinit']) + self.assertEqual( + self.datasource.vendordata_raw, + SERVER_CONTEXT["vendor_data"]["cloudinit"], + ) def test_lack_of_vendor_data(self): stripped_context = copy.deepcopy(SERVER_CONTEXT) @@ -125,13 +133,13 @@ class DsLoads(test_helpers.TestCase): def test_get_datasource_list_returns_in_local(self): deps = (sources.DEP_FILESYSTEM,) ds_list = DataSourceCloudSigma.get_datasource_list(deps) - self.assertEqual(ds_list, - [DataSourceCloudSigma.DataSourceCloudSigma]) + self.assertEqual(ds_list, [DataSourceCloudSigma.DataSourceCloudSigma]) def test_list_sources_finds_ds(self): found = sources.list_sources( - ['CloudSigma'], (sources.DEP_FILESYSTEM,), ['cloudinit.sources']) - self.assertEqual([DataSourceCloudSigma.DataSourceCloudSigma], - found) + ["CloudSigma"], (sources.DEP_FILESYSTEM,), ["cloudinit.sources"] + ) + 
self.assertEqual([DataSourceCloudSigma.DataSourceCloudSigma], found) + # vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_cloudstack.py b/tests/unittests/sources/test_cloudstack.py index 2b1a1b70..f7c69f91 100644 --- a/tests/unittests/sources/test_cloudstack.py +++ b/tests/unittests/sources/test_cloudstack.py @@ -1,80 +1,90 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit import helpers -from cloudinit import util -from cloudinit.sources.DataSourceCloudStack import ( - DataSourceCloudStack, get_latest_lease) - -from tests.unittests.helpers import CiTestCase, ExitStack, mock - import os import time -MOD_PATH = 'cloudinit.sources.DataSourceCloudStack' -DS_PATH = MOD_PATH + '.DataSourceCloudStack' +from cloudinit import helpers, util +from cloudinit.sources.DataSourceCloudStack import ( + DataSourceCloudStack, + get_latest_lease, +) +from tests.unittests.helpers import CiTestCase, ExitStack, mock + +MOD_PATH = "cloudinit.sources.DataSourceCloudStack" +DS_PATH = MOD_PATH + ".DataSourceCloudStack" class TestCloudStackPasswordFetching(CiTestCase): - def setUp(self): super(TestCloudStackPasswordFetching, self).setUp() self.patches = ExitStack() self.addCleanup(self.patches.close) mod_name = MOD_PATH - self.patches.enter_context(mock.patch('{0}.ec2'.format(mod_name))) - self.patches.enter_context(mock.patch('{0}.uhelp'.format(mod_name))) + self.patches.enter_context(mock.patch("{0}.ec2".format(mod_name))) + self.patches.enter_context(mock.patch("{0}.uhelp".format(mod_name))) default_gw = "192.201.20.0" get_latest_lease = mock.MagicMock(return_value=None) - self.patches.enter_context(mock.patch( - mod_name + '.get_latest_lease', get_latest_lease)) + self.patches.enter_context( + mock.patch(mod_name + ".get_latest_lease", get_latest_lease) + ) get_default_gw = mock.MagicMock(return_value=default_gw) - self.patches.enter_context(mock.patch( - mod_name + '.get_default_gateway', get_default_gw)) + self.patches.enter_context( + mock.patch(mod_name + ".get_default_gateway", get_default_gw) + ) get_networkd_server_address = mock.MagicMock(return_value=None) - self.patches.enter_context(mock.patch( - mod_name + '.dhcp.networkd_get_option_from_leases', - get_networkd_server_address)) + self.patches.enter_context( + mock.patch( + mod_name + ".dhcp.networkd_get_option_from_leases", + get_networkd_server_address, + ) + ) self.tmp = self.tmp_dir() def _set_password_server_response(self, response_string): - subp = mock.MagicMock(return_value=(response_string, '')) + subp = mock.MagicMock(return_value=(response_string, "")) self.patches.enter_context( - mock.patch('cloudinit.sources.DataSourceCloudStack.subp.subp', - subp)) + mock.patch( + "cloudinit.sources.DataSourceCloudStack.subp.subp", subp + ) + ) return subp def test_empty_password_doesnt_create_config(self): - self._set_password_server_response('') + self._set_password_server_response("") ds = DataSourceCloudStack( - {}, None, helpers.Paths({'run_dir': self.tmp})) + {}, None, helpers.Paths({"run_dir": self.tmp}) + ) ds.get_data() self.assertEqual({}, ds.get_config_obj()) def test_saved_password_doesnt_create_config(self): - self._set_password_server_response('saved_password') + self._set_password_server_response("saved_password") ds = DataSourceCloudStack( - {}, None, helpers.Paths({'run_dir': self.tmp})) + {}, None, helpers.Paths({"run_dir": self.tmp}) + ) ds.get_data() self.assertEqual({}, ds.get_config_obj()) - @mock.patch(DS_PATH + '.wait_for_metadata_service') + @mock.patch(DS_PATH + 
".wait_for_metadata_service") def test_password_sets_password(self, m_wait): m_wait.return_value = True - password = 'SekritSquirrel' + password = "SekritSquirrel" self._set_password_server_response(password) ds = DataSourceCloudStack( - {}, None, helpers.Paths({'run_dir': self.tmp})) + {}, None, helpers.Paths({"run_dir": self.tmp}) + ) ds.get_data() - self.assertEqual(password, ds.get_config_obj()['password']) + self.assertEqual(password, ds.get_config_obj()["password"]) - @mock.patch(DS_PATH + '.wait_for_metadata_service') + @mock.patch(DS_PATH + ".wait_for_metadata_service") def test_bad_request_doesnt_stop_ds_from_working(self, m_wait): m_wait.return_value = True - self._set_password_server_response('bad_request') + self._set_password_server_response("bad_request") ds = DataSourceCloudStack( - {}, None, helpers.Paths({'run_dir': self.tmp})) + {}, None, helpers.Paths({"run_dir": self.tmp}) + ) self.assertTrue(ds.get_data()) def assertRequestTypesSent(self, subp, expected_request_types): @@ -82,42 +92,44 @@ class TestCloudStackPasswordFetching(CiTestCase): for call in subp.call_args_list: args = call[0][0] for arg in args: - if arg.startswith('DomU_Request'): + if arg.startswith("DomU_Request"): request_types.append(arg.split()[1]) self.assertEqual(expected_request_types, request_types) - @mock.patch(DS_PATH + '.wait_for_metadata_service') + @mock.patch(DS_PATH + ".wait_for_metadata_service") def test_valid_response_means_password_marked_as_saved(self, m_wait): m_wait.return_value = True - password = 'SekritSquirrel' + password = "SekritSquirrel" subp = self._set_password_server_response(password) ds = DataSourceCloudStack( - {}, None, helpers.Paths({'run_dir': self.tmp})) + {}, None, helpers.Paths({"run_dir": self.tmp}) + ) ds.get_data() - self.assertRequestTypesSent(subp, - ['send_my_password', 'saved_password']) + self.assertRequestTypesSent( + subp, ["send_my_password", "saved_password"] + ) def _check_password_not_saved_for(self, response_string): subp = self._set_password_server_response(response_string) ds = DataSourceCloudStack( - {}, None, helpers.Paths({'run_dir': self.tmp})) - with mock.patch(DS_PATH + '.wait_for_metadata_service') as m_wait: + {}, None, helpers.Paths({"run_dir": self.tmp}) + ) + with mock.patch(DS_PATH + ".wait_for_metadata_service") as m_wait: m_wait.return_value = True ds.get_data() - self.assertRequestTypesSent(subp, ['send_my_password']) + self.assertRequestTypesSent(subp, ["send_my_password"]) def test_password_not_saved_if_empty(self): - self._check_password_not_saved_for('') + self._check_password_not_saved_for("") def test_password_not_saved_if_already_saved(self): - self._check_password_not_saved_for('saved_password') + self._check_password_not_saved_for("saved_password") def test_password_not_saved_if_bad_request(self): - self._check_password_not_saved_for('bad_request') + self._check_password_not_saved_for("bad_request") class TestGetLatestLease(CiTestCase): - def _populate_dir_list(self, bdir, files): """populate_dir_list([(name, data), (name, data)]) @@ -133,8 +145,9 @@ class TestGetLatestLease(CiTestCase): def _pop_and_test(self, files, expected): lease_d = self.tmp_dir() self._populate_dir_list(lease_d, files) - self.assertEqual(self.tmp_path(expected, lease_d), - get_latest_lease(lease_d)) + self.assertEqual( + self.tmp_path(expected, lease_d), get_latest_lease(lease_d) + ) def test_skips_dhcpv6_files(self): """files started with dhclient6 should be skipped.""" @@ -161,9 +174,15 @@ class TestGetLatestLease(CiTestCase): def 
test_ignores_by_extension(self): """only .lease or .leases file should be considered.""" - self._pop_and_test(["dhclient.lease", "dhclient.lease.bk", - "dhclient.lease-old", "dhclient.leaselease"], - "dhclient.lease") + self._pop_and_test( + [ + "dhclient.lease", + "dhclient.lease.bk", + "dhclient.lease-old", + "dhclient.leaselease", + ], + "dhclient.lease", + ) def test_selects_newest_matching(self): """If multiple files match, the newest written should be used.""" diff --git a/tests/unittests/sources/test_common.py b/tests/unittests/sources/test_common.py index bb8fa530..a5bdb629 100644 --- a/tests/unittests/sources/test_common.py +++ b/tests/unittests/sources/test_common.py @@ -1,39 +1,34 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit import settings -from cloudinit import sources -from cloudinit import type_utils -from cloudinit.sources import ( - DataSource, - DataSourceAliYun as AliYun, - DataSourceAltCloud as AltCloud, - DataSourceAzure as Azure, - DataSourceBigstep as Bigstep, - DataSourceCloudSigma as CloudSigma, - DataSourceCloudStack as CloudStack, - DataSourceConfigDrive as ConfigDrive, - DataSourceDigitalOcean as DigitalOcean, - DataSourceEc2 as Ec2, - DataSourceExoscale as Exoscale, - DataSourceGCE as GCE, - DataSourceHetzner as Hetzner, - DataSourceIBMCloud as IBMCloud, - DataSourceLXD as LXD, - DataSourceMAAS as MAAS, - DataSourceNoCloud as NoCloud, - DataSourceOpenNebula as OpenNebula, - DataSourceOpenStack as OpenStack, - DataSourceOracle as Oracle, - DataSourceOVF as OVF, - DataSourceRbxCloud as RbxCloud, - DataSourceScaleway as Scaleway, - DataSourceSmartOS as SmartOS, - DataSourceUpCloud as UpCloud, - DataSourceVultr as Vultr, - DataSourceVMware as VMware, -) +from cloudinit import settings, sources, type_utils +from cloudinit.sources import DataSource +from cloudinit.sources import DataSourceAliYun as AliYun +from cloudinit.sources import DataSourceAltCloud as AltCloud +from cloudinit.sources import DataSourceAzure as Azure +from cloudinit.sources import DataSourceBigstep as Bigstep +from cloudinit.sources import DataSourceCloudSigma as CloudSigma +from cloudinit.sources import DataSourceCloudStack as CloudStack +from cloudinit.sources import DataSourceConfigDrive as ConfigDrive +from cloudinit.sources import DataSourceDigitalOcean as DigitalOcean +from cloudinit.sources import DataSourceEc2 as Ec2 +from cloudinit.sources import DataSourceExoscale as Exoscale +from cloudinit.sources import DataSourceGCE as GCE +from cloudinit.sources import DataSourceHetzner as Hetzner +from cloudinit.sources import DataSourceIBMCloud as IBMCloud +from cloudinit.sources import DataSourceLXD as LXD +from cloudinit.sources import DataSourceMAAS as MAAS +from cloudinit.sources import DataSourceNoCloud as NoCloud from cloudinit.sources import DataSourceNone as DSNone - +from cloudinit.sources import DataSourceOpenNebula as OpenNebula +from cloudinit.sources import DataSourceOpenStack as OpenStack +from cloudinit.sources import DataSourceOracle as Oracle +from cloudinit.sources import DataSourceOVF as OVF +from cloudinit.sources import DataSourceRbxCloud as RbxCloud +from cloudinit.sources import DataSourceScaleway as Scaleway +from cloudinit.sources import DataSourceSmartOS as SmartOS +from cloudinit.sources import DataSourceUpCloud as UpCloud +from cloudinit.sources import DataSourceVMware as VMware +from cloudinit.sources import DataSourceVultr as Vultr from tests.unittests import helpers as test_helpers DEFAULT_LOCAL = [ @@ -78,24 +73,27 
@@ DEFAULT_NETWORK = [ class ExpectedDataSources(test_helpers.TestCase): - builtin_list = settings.CFG_BUILTIN['datasource_list'] + builtin_list = settings.CFG_BUILTIN["datasource_list"] deps_local = [sources.DEP_FILESYSTEM] deps_network = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK] pkg_list = [type_utils.obj_name(sources)] def test_expected_default_local_sources_found(self): found = sources.list_sources( - self.builtin_list, self.deps_local, self.pkg_list) + self.builtin_list, self.deps_local, self.pkg_list + ) self.assertEqual(set(DEFAULT_LOCAL), set(found)) def test_expected_default_network_sources_found(self): found = sources.list_sources( - self.builtin_list, self.deps_network, self.pkg_list) + self.builtin_list, self.deps_network, self.pkg_list + ) self.assertEqual(set(DEFAULT_NETWORK), set(found)) def test_expected_nondefault_network_sources_found(self): found = sources.list_sources( - ['AliYun'], self.deps_network, self.pkg_list) + ["AliYun"], self.deps_network, self.pkg_list + ) self.assertEqual(set([AliYun.DataSourceAliYun]), set(found)) @@ -103,19 +101,23 @@ class TestDataSourceInvariants(test_helpers.TestCase): def test_data_sources_have_valid_network_config_sources(self): for ds in DEFAULT_LOCAL + DEFAULT_NETWORK: for cfg_src in ds.network_config_sources: - fail_msg = ('{} has an invalid network_config_sources entry:' - ' {}'.format(str(ds), cfg_src)) - self.assertTrue(hasattr(sources.NetworkConfigSource, cfg_src), - fail_msg) + fail_msg = ( + "{} has an invalid network_config_sources entry:" + " {}".format(str(ds), cfg_src) + ) + self.assertTrue( + hasattr(sources.NetworkConfigSource, cfg_src), fail_msg + ) def test_expected_dsname_defined(self): for ds in DEFAULT_LOCAL + DEFAULT_NETWORK: fail_msg = ( - '{} has an invalid / missing dsname property: {}'.format( + "{} has an invalid / missing dsname property: {}".format( str(ds), str(ds.dsname) ) ) self.assertNotEqual(ds.dsname, DataSource.dsname, fail_msg) self.assertIsNotNone(ds.dsname) + # vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_configdrive.py b/tests/unittests/sources/test_configdrive.py index 775d0622..1fc40a0e 100644 --- a/tests/unittests/sources/test_configdrive.py +++ b/tests/unittests/sources/test_configdrive.py @@ -1,139 +1,229 @@ # This file is part of cloud-init. See LICENSE file for license information. 
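A note on the import churn in the test_common.py hunk just above: isort only combines aliased imports when combine_as_imports is enabled, so the old parenthesized "from cloudinit.sources import (... as ...)" block is rewritten as one "from cloudinit.sources import X as Y" statement per name, an expansion Black then leaves alone. The following minimal sketch reproduces that style through isort's public Python API; the "black" profile is an assumption here, since the project's actual isort settings are not part of this hunk.

import isort

combined = (
    "from cloudinit.sources import (\n"
    "    DataSource,\n"
    "    DataSourceAliYun as AliYun,\n"
    "    DataSourceAzure as Azure,\n"
    ")\n"
)

# With the default combine_as_imports=False, isort emits each aliased
# import as its own statement, matching the hunk above:
#   from cloudinit.sources import DataSource
#   from cloudinit.sources import DataSourceAliYun as AliYun
#   from cloudinit.sources import DataSourceAzure as Azure
print(isort.code(combined, profile="black", combine_as_imports=False))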
-from copy import copy, deepcopy import json import os +from copy import copy, deepcopy -from cloudinit import helpers -from cloudinit.net import eni -from cloudinit.net import network_state -from cloudinit import settings +from cloudinit import helpers, settings, util +from cloudinit.net import eni, network_state from cloudinit.sources import DataSourceConfigDrive as ds from cloudinit.sources.helpers import openstack -from cloudinit import util - from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir - -PUBKEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' +PUBKEY = "ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n" EC2_META = { - 'ami-id': 'ami-00000001', - 'ami-launch-index': 0, - 'ami-manifest-path': 'FIXME', - 'block-device-mapping': { - 'ami': 'sda1', - 'ephemeral0': 'sda2', - 'root': '/dev/sda1', - 'swap': 'sda3'}, - 'hostname': 'sm-foo-test.novalocal', - 'instance-action': 'none', - 'instance-id': 'i-00000001', - 'instance-type': 'm1.tiny', - 'local-hostname': 'sm-foo-test.novalocal', - 'local-ipv4': None, - 'placement': {'availability-zone': 'nova'}, - 'public-hostname': 'sm-foo-test.novalocal', - 'public-ipv4': '', - 'public-keys': {'0': {'openssh-key': PUBKEY}}, - 'reservation-id': 'r-iru5qm4m', - 'security-groups': ['default'] + "ami-id": "ami-00000001", + "ami-launch-index": 0, + "ami-manifest-path": "FIXME", + "block-device-mapping": { + "ami": "sda1", + "ephemeral0": "sda2", + "root": "/dev/sda1", + "swap": "sda3", + }, + "hostname": "sm-foo-test.novalocal", + "instance-action": "none", + "instance-id": "i-00000001", + "instance-type": "m1.tiny", + "local-hostname": "sm-foo-test.novalocal", + "local-ipv4": None, + "placement": {"availability-zone": "nova"}, + "public-hostname": "sm-foo-test.novalocal", + "public-ipv4": "", + "public-keys": {"0": {"openssh-key": PUBKEY}}, + "reservation-id": "r-iru5qm4m", + "security-groups": ["default"], } -USER_DATA = b'#!/bin/sh\necho This is user data\n' +USER_DATA = b"#!/bin/sh\necho This is user data\n" OSTACK_META = { - 'availability_zone': 'nova', - 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'}, - {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}], - 'hostname': 'sm-foo-test.novalocal', - 'meta': {'dsmode': 'local', 'my-meta': 'my-value'}, - 'name': 'sm-foo-test', - 'public_keys': {'mykey': PUBKEY}, - 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'} - -CONTENT_0 = b'This is contents of /etc/foo.cfg\n' -CONTENT_1 = b'# this is /etc/bar/bar.cfg\n' + "availability_zone": "nova", + "files": [ + {"content_path": "/content/0000", "path": "/etc/foo.cfg"}, + {"content_path": "/content/0001", "path": "/etc/bar/bar.cfg"}, + ], + "hostname": "sm-foo-test.novalocal", + "meta": {"dsmode": "local", "my-meta": "my-value"}, + "name": "sm-foo-test", + "public_keys": {"mykey": PUBKEY}, + "uuid": "b0fa911b-69d4-4476-bbe2-1c92bff6535c", +} + +CONTENT_0 = b"This is contents of /etc/foo.cfg\n" +CONTENT_1 = b"# this is /etc/bar/bar.cfg\n" NETWORK_DATA = { - 'services': [ - {'type': 'dns', 'address': '199.204.44.24'}, - {'type': 'dns', 'address': '199.204.47.54'} + "services": [ + {"type": "dns", "address": "199.204.44.24"}, + {"type": "dns", "address": "199.204.47.54"}, + ], + "links": [ + { + "vif_id": "2ecc7709-b3f7-4448-9580-e1ec32d75bbd", + "ethernet_mac_address": "fa:16:3e:69:b0:58", + "type": "ovs", + "mtu": None, + "id": "tap2ecc7709-b3", + }, + { + "vif_id": "2f88d109-5b57-40e6-af32-2472df09dc33", + "ethernet_mac_address": "fa:16:3e:d4:57:ad", + "type": 
"ovs", + "mtu": None, + "id": "tap2f88d109-5b", + }, + { + "vif_id": "1a5382f8-04c5-4d75-ab98-d666c1ef52cc", + "ethernet_mac_address": "fa:16:3e:05:30:fe", + "type": "ovs", + "mtu": None, + "id": "tap1a5382f8-04", + "name": "nic0", + }, ], - 'links': [ - {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd', - 'ethernet_mac_address': 'fa:16:3e:69:b0:58', - 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'}, - {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33', - 'ethernet_mac_address': 'fa:16:3e:d4:57:ad', - 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'}, - {'vif_id': '1a5382f8-04c5-4d75-ab98-d666c1ef52cc', - 'ethernet_mac_address': 'fa:16:3e:05:30:fe', - 'type': 'ovs', 'mtu': None, 'id': 'tap1a5382f8-04', 'name': 'nic0'} + "networks": [ + { + "link": "tap2ecc7709-b3", + "type": "ipv4_dhcp", + "network_id": "6d6357ac-0f70-4afa-8bd7-c274cc4ea235", + "id": "network0", + }, + { + "link": "tap2f88d109-5b", + "type": "ipv4_dhcp", + "network_id": "d227a9b3-6960-4d94-8976-ee5788b44f54", + "id": "network1", + }, + { + "link": "tap1a5382f8-04", + "type": "ipv4_dhcp", + "network_id": "dab2ba57-cae2-4311-a5ed-010b263891f5", + "id": "network2", + }, ], - 'networks': [ - {'link': 'tap2ecc7709-b3', 'type': 'ipv4_dhcp', - 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235', - 'id': 'network0'}, - {'link': 'tap2f88d109-5b', 'type': 'ipv4_dhcp', - 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54', - 'id': 'network1'}, - {'link': 'tap1a5382f8-04', 'type': 'ipv4_dhcp', - 'network_id': 'dab2ba57-cae2-4311-a5ed-010b263891f5', - 'id': 'network2'} - ] } NETWORK_DATA_2 = { "services": [ {"type": "dns", "address": "1.1.1.191"}, - {"type": "dns", "address": "1.1.1.4"}], + {"type": "dns", "address": "1.1.1.4"}, + ], "networks": [ - {"network_id": "d94bbe94-7abc-48d4-9c82-4628ea26164a", "type": "ipv4", - "netmask": "255.255.255.248", "link": "eth0", - "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", - "gateway": "2.2.2.9"}], - "ip_address": "2.2.2.10", "id": "network0-ipv4"}, - {"network_id": "ca447c83-6409-499b-aaef-6ad1ae995348", "type": "ipv4", - "netmask": "255.255.255.224", "link": "eth1", - "routes": [], "ip_address": "3.3.3.24", "id": "network1-ipv4"}], + { + "network_id": "d94bbe94-7abc-48d4-9c82-4628ea26164a", + "type": "ipv4", + "netmask": "255.255.255.248", + "link": "eth0", + "routes": [ + { + "netmask": "0.0.0.0", + "network": "0.0.0.0", + "gateway": "2.2.2.9", + } + ], + "ip_address": "2.2.2.10", + "id": "network0-ipv4", + }, + { + "network_id": "ca447c83-6409-499b-aaef-6ad1ae995348", + "type": "ipv4", + "netmask": "255.255.255.224", + "link": "eth1", + "routes": [], + "ip_address": "3.3.3.24", + "id": "network1-ipv4", + }, + ], "links": [ - {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": 1500, - "type": "vif", "id": "eth0", "vif_id": "vif-foo1"}, - {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": 1500, - "type": "vif", "id": "eth1", "vif_id": "vif-foo2"}] + { + "ethernet_mac_address": "fa:16:3e:dd:50:9a", + "mtu": 1500, + "type": "vif", + "id": "eth0", + "vif_id": "vif-foo1", + }, + { + "ethernet_mac_address": "fa:16:3e:a8:14:69", + "mtu": 1500, + "type": "vif", + "id": "eth1", + "vif_id": "vif-foo2", + }, + ], } # This network data ha 'tap' or null type for a link. 
NETWORK_DATA_3 = { - "services": [{"type": "dns", "address": "172.16.36.11"}, - {"type": "dns", "address": "172.16.36.12"}], + "services": [ + {"type": "dns", "address": "172.16.36.11"}, + {"type": "dns", "address": "172.16.36.12"}, + ], "networks": [ - {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e", - "type": "ipv4", "netmask": "255.255.255.128", - "link": "tap77a0dc5b-72", "ip_address": "172.17.48.18", - "id": "network0", - "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", - "gateway": "172.17.48.1"}]}, - {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e", - "type": "ipv6", "netmask": "ffff:ffff:ffff:ffff::", - "link": "tap77a0dc5b-72", - "ip_address": "fdb8:52d0:9d14:0:f816:3eff:fe9f:70d", - "id": "network1", - "routes": [{"netmask": "::", "network": "::", - "gateway": "fdb8:52d0:9d14::1"}]}, - {"network_id": "1f53cb0e-72d3-47c7-94b9-ff4397c5fe54", - "type": "ipv4", "netmask": "255.255.255.128", - "link": "tap7d6b7bec-93", "ip_address": "172.16.48.13", - "id": "network2", - "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", - "gateway": "172.16.48.1"}, - {"netmask": "255.255.0.0", "network": "172.16.0.0", - "gateway": "172.16.48.1"}]}], + { + "network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e", + "type": "ipv4", + "netmask": "255.255.255.128", + "link": "tap77a0dc5b-72", + "ip_address": "172.17.48.18", + "id": "network0", + "routes": [ + { + "netmask": "0.0.0.0", + "network": "0.0.0.0", + "gateway": "172.17.48.1", + } + ], + }, + { + "network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e", + "type": "ipv6", + "netmask": "ffff:ffff:ffff:ffff::", + "link": "tap77a0dc5b-72", + "ip_address": "fdb8:52d0:9d14:0:f816:3eff:fe9f:70d", + "id": "network1", + "routes": [ + { + "netmask": "::", + "network": "::", + "gateway": "fdb8:52d0:9d14::1", + } + ], + }, + { + "network_id": "1f53cb0e-72d3-47c7-94b9-ff4397c5fe54", + "type": "ipv4", + "netmask": "255.255.255.128", + "link": "tap7d6b7bec-93", + "ip_address": "172.16.48.13", + "id": "network2", + "routes": [ + { + "netmask": "0.0.0.0", + "network": "0.0.0.0", + "gateway": "172.16.48.1", + }, + { + "netmask": "255.255.0.0", + "network": "172.16.0.0", + "gateway": "172.16.48.1", + }, + ], + }, + ], "links": [ - {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": None, - "type": "tap", "id": "tap77a0dc5b-72", - "vif_id": "77a0dc5b-720e-41b7-bfa7-1b2ff62e0d48"}, - {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": None, - "type": None, "id": "tap7d6b7bec-93", - "vif_id": "7d6b7bec-93e6-4c03-869a-ddc5014892d5"} - ] + { + "ethernet_mac_address": "fa:16:3e:dd:50:9a", + "mtu": None, + "type": "tap", + "id": "tap77a0dc5b-72", + "vif_id": "77a0dc5b-720e-41b7-bfa7-1b2ff62e0d48", + }, + { + "ethernet_mac_address": "fa:16:3e:a8:14:69", + "mtu": None, + "type": None, + "id": "tap7d6b7bec-93", + "vif_id": "7d6b7bec-93e6-4c03-869a-ddc5014892d5", + }, + ], } BOND_MAC = "fa:16:3e:b3:72:36" @@ -143,122 +233,182 @@ NETWORK_DATA_BOND = { {"type": "dns", "address": "1.1.1.4"}, ], "networks": [ - {"id": "network2-ipv4", "ip_address": "2.2.2.13", - "link": "vlan2", "netmask": "255.255.255.248", - "network_id": "4daf5ce8-38cf-4240-9f1a-04e86d7c6117", - "type": "ipv4", - "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", - "gateway": "2.2.2.9"}]}, - {"id": "network3-ipv4", "ip_address": "10.0.1.5", - "link": "vlan3", "netmask": "255.255.255.248", - "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d", - "type": "ipv4", - "routes": [{"netmask": "255.255.255.255", - "network": "192.168.1.0", "gateway": "10.0.1.1"}]} + { + "id": "network2-ipv4", + 
"ip_address": "2.2.2.13", + "link": "vlan2", + "netmask": "255.255.255.248", + "network_id": "4daf5ce8-38cf-4240-9f1a-04e86d7c6117", + "type": "ipv4", + "routes": [ + { + "netmask": "0.0.0.0", + "network": "0.0.0.0", + "gateway": "2.2.2.9", + } + ], + }, + { + "id": "network3-ipv4", + "ip_address": "10.0.1.5", + "link": "vlan3", + "netmask": "255.255.255.248", + "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d", + "type": "ipv4", + "routes": [ + { + "netmask": "255.255.255.255", + "network": "192.168.1.0", + "gateway": "10.0.1.1", + } + ], + }, ], "links": [ - {"ethernet_mac_address": "0c:c4:7a:34:6e:3c", - "id": "eth0", "mtu": 1500, "type": "phy"}, - {"ethernet_mac_address": "0c:c4:7a:34:6e:3d", - "id": "eth1", "mtu": 1500, "type": "phy"}, - {"bond_links": ["eth0", "eth1"], - "bond_miimon": 100, "bond_mode": "4", - "bond_xmit_hash_policy": "layer3+4", - "ethernet_mac_address": BOND_MAC, - "id": "bond0", "type": "bond"}, - {"ethernet_mac_address": "fa:16:3e:b3:72:30", - "id": "vlan2", "type": "vlan", "vlan_id": 602, - "vlan_link": "bond0", "vlan_mac_address": "fa:16:3e:b3:72:30"}, - {"ethernet_mac_address": "fa:16:3e:66:ab:a6", - "id": "vlan3", "type": "vlan", "vlan_id": 612, "vlan_link": "bond0", - "vlan_mac_address": "fa:16:3e:66:ab:a6"} - ] + { + "ethernet_mac_address": "0c:c4:7a:34:6e:3c", + "id": "eth0", + "mtu": 1500, + "type": "phy", + }, + { + "ethernet_mac_address": "0c:c4:7a:34:6e:3d", + "id": "eth1", + "mtu": 1500, + "type": "phy", + }, + { + "bond_links": ["eth0", "eth1"], + "bond_miimon": 100, + "bond_mode": "4", + "bond_xmit_hash_policy": "layer3+4", + "ethernet_mac_address": BOND_MAC, + "id": "bond0", + "type": "bond", + }, + { + "ethernet_mac_address": "fa:16:3e:b3:72:30", + "id": "vlan2", + "type": "vlan", + "vlan_id": 602, + "vlan_link": "bond0", + "vlan_mac_address": "fa:16:3e:b3:72:30", + }, + { + "ethernet_mac_address": "fa:16:3e:66:ab:a6", + "id": "vlan3", + "type": "vlan", + "vlan_id": 612, + "vlan_link": "bond0", + "vlan_mac_address": "fa:16:3e:66:ab:a6", + }, + ], } NETWORK_DATA_VLAN = { "services": [{"type": "dns", "address": "1.1.1.191"}], "networks": [ - {"id": "network1-ipv4", "ip_address": "10.0.1.5", - "link": "vlan1", "netmask": "255.255.255.248", - "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d", - "type": "ipv4", - "routes": [{"netmask": "255.255.255.255", - "network": "192.168.1.0", "gateway": "10.0.1.1"}]} + { + "id": "network1-ipv4", + "ip_address": "10.0.1.5", + "link": "vlan1", + "netmask": "255.255.255.248", + "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d", + "type": "ipv4", + "routes": [ + { + "netmask": "255.255.255.255", + "network": "192.168.1.0", + "gateway": "10.0.1.1", + } + ], + } ], "links": [ - {"ethernet_mac_address": "fa:16:3e:69:b0:58", - "id": "eth0", "mtu": 1500, "type": "phy"}, - {"ethernet_mac_address": "fa:16:3e:b3:72:30", - "id": "vlan1", "type": "vlan", "vlan_id": 602, - "vlan_link": "eth0", "vlan_mac_address": "fa:16:3e:b3:72:30"}, - ] + { + "ethernet_mac_address": "fa:16:3e:69:b0:58", + "id": "eth0", + "mtu": 1500, + "type": "phy", + }, + { + "ethernet_mac_address": "fa:16:3e:b3:72:30", + "id": "vlan1", + "type": "vlan", + "vlan_id": 602, + "vlan_link": "eth0", + "vlan_mac_address": "fa:16:3e:b3:72:30", + }, + ], } KNOWN_MACS = { - 'fa:16:3e:69:b0:58': 'enp0s1', - 'fa:16:3e:d4:57:ad': 'enp0s2', - 'fa:16:3e:dd:50:9a': 'foo1', - 'fa:16:3e:a8:14:69': 'foo2', - 'fa:16:3e:ed:9a:59': 'foo3', - '0c:c4:7a:34:6e:3d': 'oeth1', - '0c:c4:7a:34:6e:3c': 'oeth0', + "fa:16:3e:69:b0:58": "enp0s1", + "fa:16:3e:d4:57:ad": "enp0s2", + 
"fa:16:3e:dd:50:9a": "foo1", + "fa:16:3e:a8:14:69": "foo2", + "fa:16:3e:ed:9a:59": "foo3", + "0c:c4:7a:34:6e:3d": "oeth1", + "0c:c4:7a:34:6e:3c": "oeth0", } CFG_DRIVE_FILES_V2 = { - 'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META), - 'ec2/2009-04-04/user-data': USER_DATA, - 'ec2/latest/meta-data.json': json.dumps(EC2_META), - 'ec2/latest/user-data': USER_DATA, - 'openstack/2012-08-10/meta_data.json': json.dumps(OSTACK_META), - 'openstack/2012-08-10/user_data': USER_DATA, - 'openstack/content/0000': CONTENT_0, - 'openstack/content/0001': CONTENT_1, - 'openstack/latest/meta_data.json': json.dumps(OSTACK_META), - 'openstack/latest/user_data': USER_DATA, - 'openstack/latest/network_data.json': json.dumps(NETWORK_DATA), - 'openstack/2015-10-15/meta_data.json': json.dumps(OSTACK_META), - 'openstack/2015-10-15/user_data': USER_DATA, - 'openstack/2015-10-15/network_data.json': json.dumps(NETWORK_DATA)} + "ec2/2009-04-04/meta-data.json": json.dumps(EC2_META), + "ec2/2009-04-04/user-data": USER_DATA, + "ec2/latest/meta-data.json": json.dumps(EC2_META), + "ec2/latest/user-data": USER_DATA, + "openstack/2012-08-10/meta_data.json": json.dumps(OSTACK_META), + "openstack/2012-08-10/user_data": USER_DATA, + "openstack/content/0000": CONTENT_0, + "openstack/content/0001": CONTENT_1, + "openstack/latest/meta_data.json": json.dumps(OSTACK_META), + "openstack/latest/user_data": USER_DATA, + "openstack/latest/network_data.json": json.dumps(NETWORK_DATA), + "openstack/2015-10-15/meta_data.json": json.dumps(OSTACK_META), + "openstack/2015-10-15/user_data": USER_DATA, + "openstack/2015-10-15/network_data.json": json.dumps(NETWORK_DATA), +} M_PATH = "cloudinit.sources.DataSourceConfigDrive." class TestConfigDriveDataSource(CiTestCase): - def setUp(self): super(TestConfigDriveDataSource, self).setUp() self.add_patch( - M_PATH + "util.find_devs_with", - "m_find_devs_with", return_value=[]) + M_PATH + "util.find_devs_with", "m_find_devs_with", return_value=[] + ) self.tmp = self.tmp_dir() def test_ec2_metadata(self): populate_dir(self.tmp, CFG_DRIVE_FILES_V2) found = ds.read_config_drive(self.tmp) - self.assertTrue('ec2-metadata' in found) - ec2_md = found['ec2-metadata'] + self.assertTrue("ec2-metadata" in found) + ec2_md = found["ec2-metadata"] self.assertEqual(EC2_META, ec2_md) def test_dev_os_remap(self): populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, - None, - helpers.Paths({})) + cfg_ds = ds.DataSourceConfigDrive( + settings.CFG_BUILTIN, None, helpers.Paths({}) + ) found = ds.read_config_drive(self.tmp) - cfg_ds.metadata = found['metadata'] + cfg_ds.metadata = found["metadata"] name_tests = { - 'ami': '/dev/vda1', - 'root': '/dev/vda1', - 'ephemeral0': '/dev/vda2', - 'swap': '/dev/vda3', + "ami": "/dev/vda1", + "root": "/dev/vda1", + "ephemeral0": "/dev/vda2", + "swap": "/dev/vda3", } for name, dev_name in name_tests.items(): with ExitStack() as mocks: - provided_name = dev_name[len('/dev/'):] + provided_name = dev_name[len("/dev/") :] provided_name = "s" + provided_name[1:] find_mock = mocks.enter_context( - mock.patch.object(util, 'find_devs_with', - return_value=[provided_name])) + mock.patch.object( + util, "find_devs_with", return_value=[provided_name] + ) + ) # We want os.path.exists() to return False on its first call, # and True on its second call. We use a handy generator as # the mock side effect for this. 
The mocked function returns @@ -267,9 +417,12 @@ class TestConfigDriveDataSource(CiTestCase): def exists_side_effect(): yield False yield True + exists_mock = mocks.enter_context( - mock.patch.object(os.path, 'exists', - side_effect=exists_side_effect())) + mock.patch.object( + os.path, "exists", side_effect=exists_side_effect() + ) + ) self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) find_mock.assert_called_once_with(mock.ANY) @@ -277,26 +430,28 @@ class TestConfigDriveDataSource(CiTestCase): def test_dev_os_map(self): populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, - None, - helpers.Paths({})) + cfg_ds = ds.DataSourceConfigDrive( + settings.CFG_BUILTIN, None, helpers.Paths({}) + ) found = ds.read_config_drive(self.tmp) - os_md = found['metadata'] + os_md = found["metadata"] cfg_ds.metadata = os_md name_tests = { - 'ami': '/dev/vda1', - 'root': '/dev/vda1', - 'ephemeral0': '/dev/vda2', - 'swap': '/dev/vda3', + "ami": "/dev/vda1", + "root": "/dev/vda1", + "ephemeral0": "/dev/vda2", + "swap": "/dev/vda3", } for name, dev_name in name_tests.items(): with ExitStack() as mocks: find_mock = mocks.enter_context( - mock.patch.object(util, 'find_devs_with', - return_value=[dev_name])) + mock.patch.object( + util, "find_devs_with", return_value=[dev_name] + ) + ) exists_mock = mocks.enter_context( - mock.patch.object(os.path, 'exists', - return_value=True)) + mock.patch.object(os.path, "exists", return_value=True) + ) self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) find_mock.assert_called_once_with(mock.ANY) @@ -304,22 +459,22 @@ class TestConfigDriveDataSource(CiTestCase): def test_dev_ec2_remap(self): populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, - None, - helpers.Paths({})) + cfg_ds = ds.DataSourceConfigDrive( + settings.CFG_BUILTIN, None, helpers.Paths({}) + ) found = ds.read_config_drive(self.tmp) - ec2_md = found['ec2-metadata'] - os_md = found['metadata'] + ec2_md = found["ec2-metadata"] + os_md = found["metadata"] cfg_ds.ec2_metadata = ec2_md cfg_ds.metadata = os_md name_tests = { - 'ami': '/dev/vda1', - 'root': '/dev/vda1', - 'ephemeral0': '/dev/vda2', - 'swap': '/dev/vda3', + "ami": "/dev/vda1", + "root": "/dev/vda1", + "ephemeral0": "/dev/vda2", + "swap": "/dev/vda3", None: None, - 'bob': None, - 'root2k': None, + "bob": None, + "root2k": None, } for name, dev_name in name_tests.items(): # We want os.path.exists() to return False on its first call, @@ -329,8 +484,10 @@ class TestConfigDriveDataSource(CiTestCase): def exists_side_effect(): yield False yield True - with mock.patch.object(os.path, 'exists', - side_effect=exists_side_effect()): + + with mock.patch.object( + os.path, "exists", side_effect=exists_side_effect() + ): self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) # We don't assert the call count for os.path.exists() because # not all of the entries in name_tests results in two calls to @@ -339,25 +496,25 @@ class TestConfigDriveDataSource(CiTestCase): def test_dev_ec2_map(self): populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, - None, - helpers.Paths({})) + cfg_ds = ds.DataSourceConfigDrive( + settings.CFG_BUILTIN, None, helpers.Paths({}) + ) found = ds.read_config_drive(self.tmp) - ec2_md = found['ec2-metadata'] - os_md = found['metadata'] + ec2_md = found["ec2-metadata"] + os_md = found["metadata"] cfg_ds.ec2_metadata = ec2_md cfg_ds.metadata = os_md name_tests = { - 'ami': 
'/dev/sda1', - 'root': '/dev/sda1', - 'ephemeral0': '/dev/sda2', - 'swap': '/dev/sda3', + "ami": "/dev/sda1", + "root": "/dev/sda1", + "ephemeral0": "/dev/sda2", + "swap": "/dev/sda3", None: None, - 'bob': None, - 'root2k': None, + "bob": None, + "root2k": None, } for name, dev_name in name_tests.items(): - with mock.patch.object(os.path, 'exists', return_value=True): + with mock.patch.object(os.path, "exists", return_value=True): self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) def test_dir_valid(self): @@ -368,14 +525,14 @@ class TestConfigDriveDataSource(CiTestCase): found = ds.read_config_drive(self.tmp) expected_md = copy(OSTACK_META) - expected_md['instance-id'] = expected_md['uuid'] - expected_md['local-hostname'] = expected_md['hostname'] + expected_md["instance-id"] = expected_md["uuid"] + expected_md["local-hostname"] = expected_md["hostname"] - self.assertEqual(USER_DATA, found['userdata']) - self.assertEqual(expected_md, found['metadata']) - self.assertEqual(NETWORK_DATA, found['networkdata']) - self.assertEqual(found['files']['/etc/foo.cfg'], CONTENT_0) - self.assertEqual(found['files']['/etc/bar/bar.cfg'], CONTENT_1) + self.assertEqual(USER_DATA, found["userdata"]) + self.assertEqual(expected_md, found["metadata"]) + self.assertEqual(NETWORK_DATA, found["networkdata"]) + self.assertEqual(found["files"]["/etc/foo.cfg"], CONTENT_0) + self.assertEqual(found["files"]["/etc/bar/bar.cfg"], CONTENT_1) def test_seed_dir_valid_extra(self): """Verify extra files do not affect datasource validity.""" @@ -389,10 +546,10 @@ class TestConfigDriveDataSource(CiTestCase): found = ds.read_config_drive(self.tmp) expected_md = copy(OSTACK_META) - expected_md['instance-id'] = expected_md['uuid'] - expected_md['local-hostname'] = expected_md['hostname'] + expected_md["instance-id"] = expected_md["uuid"] + expected_md["local-hostname"] = expected_md["hostname"] - self.assertEqual(expected_md, found['metadata']) + self.assertEqual(expected_md, found["metadata"]) def test_seed_dir_bad_json_metadata(self): """Verify that bad json in metadata raises BrokenConfigDriveDir.""" @@ -404,8 +561,9 @@ class TestConfigDriveDataSource(CiTestCase): populate_dir(self.tmp, data) - self.assertRaises(openstack.BrokenMetadata, - ds.read_config_drive, self.tmp) + self.assertRaises( + openstack.BrokenMetadata, ds.read_config_drive, self.tmp + ) def test_seed_dir_no_configdrive(self): """Verify that no metadata raises NonConfigDriveDir.""" @@ -416,20 +574,18 @@ class TestConfigDriveDataSource(CiTestCase): data["openstack/latest/random-file.txt"] = "random-content" data["content/foo"] = "foocontent" - self.assertRaises(openstack.NonReadable, - ds.read_config_drive, my_d) + self.assertRaises(openstack.NonReadable, ds.read_config_drive, my_d) def test_seed_dir_missing(self): """Verify that missing seed_dir raises NonConfigDriveDir.""" my_d = os.path.join(self.tmp, "nonexistantdirectory") - self.assertRaises(openstack.NonReadable, - ds.read_config_drive, my_d) + self.assertRaises(openstack.NonReadable, ds.read_config_drive, my_d) def test_find_candidates(self): devs_with_answers = {} def my_devs_with(*args, **kwargs): - criteria = args[0] if len(args) else kwargs.pop('criteria', None) + criteria = args[0] if len(args) else kwargs.pop("criteria", None) return devs_with_answers.get(criteria, []) def my_is_partition(dev): @@ -442,60 +598,67 @@ class TestConfigDriveDataSource(CiTestCase): orig_is_partition = util.is_partition util.is_partition = my_is_partition - devs_with_answers = {"TYPE=vfat": [], - 
"TYPE=iso9660": ["/dev/vdb"], - "LABEL=config-2": ["/dev/vdb"]} + devs_with_answers = { + "TYPE=vfat": [], + "TYPE=iso9660": ["/dev/vdb"], + "LABEL=config-2": ["/dev/vdb"], + } self.assertEqual(["/dev/vdb"], ds.find_candidate_devs()) # add a vfat item # zdd reverse sorts after vdb, but config-2 label is preferred - devs_with_answers['TYPE=vfat'] = ["/dev/zdd"] - self.assertEqual(["/dev/vdb", "/dev/zdd"], - ds.find_candidate_devs()) + devs_with_answers["TYPE=vfat"] = ["/dev/zdd"] + self.assertEqual( + ["/dev/vdb", "/dev/zdd"], ds.find_candidate_devs() + ) # verify that partitions are considered, that have correct label. - devs_with_answers = {"TYPE=vfat": ["/dev/sda1"], - "TYPE=iso9660": [], - "LABEL=config-2": ["/dev/vdb3"]} - self.assertEqual(["/dev/vdb3"], - ds.find_candidate_devs()) + devs_with_answers = { + "TYPE=vfat": ["/dev/sda1"], + "TYPE=iso9660": [], + "LABEL=config-2": ["/dev/vdb3"], + } + self.assertEqual(["/dev/vdb3"], ds.find_candidate_devs()) # Verify that uppercase labels are also found. - devs_with_answers = {"TYPE=vfat": [], - "TYPE=iso9660": ["/dev/vdb"], - "LABEL=CONFIG-2": ["/dev/vdb"]} + devs_with_answers = { + "TYPE=vfat": [], + "TYPE=iso9660": ["/dev/vdb"], + "LABEL=CONFIG-2": ["/dev/vdb"], + } self.assertEqual(["/dev/vdb"], ds.find_candidate_devs()) finally: util.find_devs_with = orig_find_devs_with util.is_partition = orig_is_partition - @mock.patch(M_PATH + 'on_first_boot') + @mock.patch(M_PATH + "on_first_boot") def test_pubkeys_v2(self, on_first_boot): """Verify that public-keys work in config-drive-v2.""" myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) - self.assertEqual(myds.get_public_ssh_keys(), - [OSTACK_META['public_keys']['mykey']]) - self.assertEqual('configdrive', myds.cloud_name) - self.assertEqual('openstack', myds.platform) - self.assertEqual('seed-dir (%s/seed)' % self.tmp, myds.subplatform) + self.assertEqual( + myds.get_public_ssh_keys(), [OSTACK_META["public_keys"]["mykey"]] + ) + self.assertEqual("configdrive", myds.cloud_name) + self.assertEqual("openstack", myds.platform) + self.assertEqual("seed-dir (%s/seed)" % self.tmp, myds.subplatform) def test_subplatform_config_drive_when_starts_with_dev(self): """subplatform reports config-drive when source starts with /dev/.""" - cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, - None, - helpers.Paths({})) - with mock.patch(M_PATH + 'find_candidate_devs') as m_find_devs: - with mock.patch(M_PATH + 'util.mount_cb'): - with mock.patch(M_PATH + 'on_first_boot'): - m_find_devs.return_value = ['/dev/anything'] + cfg_ds = ds.DataSourceConfigDrive( + settings.CFG_BUILTIN, None, helpers.Paths({}) + ) + with mock.patch(M_PATH + "find_candidate_devs") as m_find_devs: + with mock.patch(M_PATH + "util.mount_cb"): + with mock.patch(M_PATH + "on_first_boot"): + m_find_devs.return_value = ["/dev/anything"] self.assertEqual(True, cfg_ds.get_data()) - self.assertEqual('config-disk (/dev/anything)', cfg_ds.subplatform) + self.assertEqual("config-disk (/dev/anything)", cfg_ds.subplatform) @mock.patch( "cloudinit.net.is_openvswitch_internal_interface", - mock.Mock(return_value=False) + mock.Mock(return_value=False), ) class TestNetJson(CiTestCase): def setUp(self): @@ -503,55 +666,74 @@ class TestNetJson(CiTestCase): self.tmp = self.tmp_dir() self.maxDiff = None - @mock.patch(M_PATH + 'on_first_boot') + @mock.patch(M_PATH + "on_first_boot") def test_network_data_is_found(self, on_first_boot): """Verify that network_data is present in ds in config-drive-v2.""" myds = cfg_ds_from_dir(self.tmp, 
files=CFG_DRIVE_FILES_V2) self.assertIsNotNone(myds.network_json) - @mock.patch(M_PATH + 'on_first_boot') + @mock.patch(M_PATH + "on_first_boot") def test_network_config_is_converted(self, on_first_boot): """Verify that network_data is converted and present on ds object.""" myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) - network_config = openstack.convert_net_json(NETWORK_DATA, - known_macs=KNOWN_MACS) + network_config = openstack.convert_net_json( + NETWORK_DATA, known_macs=KNOWN_MACS + ) self.assertEqual(myds.network_config, network_config) def test_network_config_conversion_dhcp6(self): """Test some ipv6 input network json and check the expected - conversions.""" + conversions.""" in_data = { - 'links': [ - {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd', - 'ethernet_mac_address': 'fa:16:3e:69:b0:58', - 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'}, - {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33', - 'ethernet_mac_address': 'fa:16:3e:d4:57:ad', - 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'}, + "links": [ + { + "vif_id": "2ecc7709-b3f7-4448-9580-e1ec32d75bbd", + "ethernet_mac_address": "fa:16:3e:69:b0:58", + "type": "ovs", + "mtu": None, + "id": "tap2ecc7709-b3", + }, + { + "vif_id": "2f88d109-5b57-40e6-af32-2472df09dc33", + "ethernet_mac_address": "fa:16:3e:d4:57:ad", + "type": "ovs", + "mtu": None, + "id": "tap2f88d109-5b", + }, + ], + "networks": [ + { + "link": "tap2ecc7709-b3", + "type": "ipv6_dhcpv6-stateless", + "network_id": "6d6357ac-0f70-4afa-8bd7-c274cc4ea235", + "id": "network0", + }, + { + "link": "tap2f88d109-5b", + "type": "ipv6_dhcpv6-stateful", + "network_id": "d227a9b3-6960-4d94-8976-ee5788b44f54", + "id": "network1", + }, ], - 'networks': [ - {'link': 'tap2ecc7709-b3', 'type': 'ipv6_dhcpv6-stateless', - 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235', - 'id': 'network0'}, - {'link': 'tap2f88d109-5b', 'type': 'ipv6_dhcpv6-stateful', - 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54', - 'id': 'network1'}, - ] } out_data = { - 'version': 1, - 'config': [ - {'mac_address': 'fa:16:3e:69:b0:58', - 'mtu': None, - 'name': 'enp0s1', - 'subnets': [{'type': 'ipv6_dhcpv6-stateless'}], - 'type': 'physical'}, - {'mac_address': 'fa:16:3e:d4:57:ad', - 'mtu': None, - 'name': 'enp0s2', - 'subnets': [{'type': 'ipv6_dhcpv6-stateful'}], - 'type': 'physical', - 'accept-ra': True} + "version": 1, + "config": [ + { + "mac_address": "fa:16:3e:69:b0:58", + "mtu": None, + "name": "enp0s1", + "subnets": [{"type": "ipv6_dhcpv6-stateless"}], + "type": "physical", + }, + { + "mac_address": "fa:16:3e:d4:57:ad", + "mtu": None, + "name": "enp0s2", + "subnets": [{"type": "ipv6_dhcpv6-stateful"}], + "type": "physical", + "accept-ra": True, + }, ], } conv_data = openstack.convert_net_json(in_data, known_macs=KNOWN_MACS) @@ -559,107 +741,115 @@ class TestNetJson(CiTestCase): def test_network_config_conversions(self): """Tests a bunch of input network json and checks the - expected conversions.""" + expected conversions.""" in_datas = [ NETWORK_DATA, { - 'services': [{'type': 'dns', 'address': '172.19.0.12'}], - 'networks': [{ - 'network_id': 'dacd568d-5be6-4786-91fe-750c374b78b4', - 'type': 'ipv4', - 'netmask': '255.255.252.0', - 'link': 'tap1a81968a-79', - 'routes': [{ - 'netmask': '0.0.0.0', - 'network': '0.0.0.0', - 'gateway': '172.19.3.254', - }], - 'ip_address': '172.19.1.34', - 'id': 'network0', - }], - 'links': [{ - 'type': 'bridge', - 'vif_id': '1a81968a-797a-400f-8a80-567f997eb93f', - 'ethernet_mac_address': 'fa:16:3e:ed:9a:59', - 'id': 'tap1a81968a-79', - 
'mtu': None, - }], + "services": [{"type": "dns", "address": "172.19.0.12"}], + "networks": [ + { + "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4", + "type": "ipv4", + "netmask": "255.255.252.0", + "link": "tap1a81968a-79", + "routes": [ + { + "netmask": "0.0.0.0", + "network": "0.0.0.0", + "gateway": "172.19.3.254", + } + ], + "ip_address": "172.19.1.34", + "id": "network0", + } + ], + "links": [ + { + "type": "bridge", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f", + "ethernet_mac_address": "fa:16:3e:ed:9a:59", + "id": "tap1a81968a-79", + "mtu": None, + } + ], }, ] out_datas = [ { - 'version': 1, - 'config': [ + "version": 1, + "config": [ { - 'subnets': [{'type': 'dhcp4'}], - 'type': 'physical', - 'mac_address': 'fa:16:3e:69:b0:58', - 'name': 'enp0s1', - 'mtu': None, + "subnets": [{"type": "dhcp4"}], + "type": "physical", + "mac_address": "fa:16:3e:69:b0:58", + "name": "enp0s1", + "mtu": None, }, { - 'subnets': [{'type': 'dhcp4'}], - 'type': 'physical', - 'mac_address': 'fa:16:3e:d4:57:ad', - 'name': 'enp0s2', - 'mtu': None, + "subnets": [{"type": "dhcp4"}], + "type": "physical", + "mac_address": "fa:16:3e:d4:57:ad", + "name": "enp0s2", + "mtu": None, }, { - 'subnets': [{'type': 'dhcp4'}], - 'type': 'physical', - 'mac_address': 'fa:16:3e:05:30:fe', - 'name': 'nic0', - 'mtu': None, + "subnets": [{"type": "dhcp4"}], + "type": "physical", + "mac_address": "fa:16:3e:05:30:fe", + "name": "nic0", + "mtu": None, }, { - 'type': 'nameserver', - 'address': '199.204.44.24', + "type": "nameserver", + "address": "199.204.44.24", }, { - 'type': 'nameserver', - 'address': '199.204.47.54', - } + "type": "nameserver", + "address": "199.204.47.54", + }, ], - }, { - 'version': 1, - 'config': [ + "version": 1, + "config": [ { - 'name': 'foo3', - 'mac_address': 'fa:16:3e:ed:9a:59', - 'mtu': None, - 'type': 'physical', - 'subnets': [ + "name": "foo3", + "mac_address": "fa:16:3e:ed:9a:59", + "mtu": None, + "type": "physical", + "subnets": [ { - 'address': '172.19.1.34', - 'netmask': '255.255.252.0', - 'type': 'static', - 'ipv4': True, - 'routes': [{ - 'gateway': '172.19.3.254', - 'netmask': '0.0.0.0', - 'network': '0.0.0.0', - }], + "address": "172.19.1.34", + "netmask": "255.255.252.0", + "type": "static", + "ipv4": True, + "routes": [ + { + "gateway": "172.19.3.254", + "netmask": "0.0.0.0", + "network": "0.0.0.0", + } + ], } - ] + ], }, { - 'type': 'nameserver', - 'address': '172.19.0.12', - } + "type": "nameserver", + "address": "172.19.0.12", + }, ], }, ] for in_data, out_data in zip(in_datas, out_datas): - conv_data = openstack.convert_net_json(in_data, - known_macs=KNOWN_MACS) + conv_data = openstack.convert_net_json( + in_data, known_macs=KNOWN_MACS + ) self.assertEqual(out_data, conv_data) @mock.patch( "cloudinit.net.is_openvswitch_internal_interface", - mock.Mock(return_value=False) + mock.Mock(return_value=False), ) class TestConvertNetworkData(CiTestCase): @@ -670,86 +860,105 @@ class TestConvertNetworkData(CiTestCase): self.tmp = self.tmp_dir() def _getnames_in_config(self, ncfg): - return set([n['name'] for n in ncfg['config'] - if n['type'] == 'physical']) + return set( + [n["name"] for n in ncfg["config"] if n["type"] == "physical"] + ) def test_conversion_fills_names(self): ncfg = openstack.convert_net_json(NETWORK_DATA, known_macs=KNOWN_MACS) - expected = set(['nic0', 'enp0s1', 'enp0s2']) + expected = set(["nic0", "enp0s1", "enp0s2"]) found = self._getnames_in_config(ncfg) self.assertEqual(found, expected) - @mock.patch('cloudinit.net.get_interfaces_by_mac') + 
@mock.patch("cloudinit.net.get_interfaces_by_mac") def test_convert_reads_system_prefers_name(self, get_interfaces_by_mac): macs = KNOWN_MACS.copy() - macs.update({'fa:16:3e:05:30:fe': 'foonic1', - 'fa:16:3e:69:b0:58': 'ens1'}) + macs.update( + {"fa:16:3e:05:30:fe": "foonic1", "fa:16:3e:69:b0:58": "ens1"} + ) get_interfaces_by_mac.return_value = macs ncfg = openstack.convert_net_json(NETWORK_DATA) - expected = set(['nic0', 'ens1', 'enp0s2']) + expected = set(["nic0", "ens1", "enp0s2"]) found = self._getnames_in_config(ncfg) self.assertEqual(found, expected) def test_convert_raises_value_error_on_missing_name(self): - macs = {'aa:aa:aa:aa:aa:00': 'ens1'} - self.assertRaises(ValueError, openstack.convert_net_json, - NETWORK_DATA, known_macs=macs) + macs = {"aa:aa:aa:aa:aa:00": "ens1"} + self.assertRaises( + ValueError, + openstack.convert_net_json, + NETWORK_DATA, + known_macs=macs, + ) def test_conversion_with_route(self): - ncfg = openstack.convert_net_json(NETWORK_DATA_2, - known_macs=KNOWN_MACS) + ncfg = openstack.convert_net_json( + NETWORK_DATA_2, known_macs=KNOWN_MACS + ) # not the best test, but see that we get a route in the # network config and that it gets rendered to an ENI file routes = [] - for n in ncfg['config']: - for s in n.get('subnets', []): - routes.extend(s.get('routes', [])) + for n in ncfg["config"]: + for s in n.get("subnets", []): + routes.extend(s.get("routes", [])) self.assertIn( - {'network': '0.0.0.0', 'netmask': '0.0.0.0', 'gateway': '2.2.2.9'}, - routes) + {"network": "0.0.0.0", "netmask": "0.0.0.0", "gateway": "2.2.2.9"}, + routes, + ) eni_renderer = eni.Renderer() eni_renderer.render_network_state( - network_state.parse_net_config_data(ncfg), target=self.tmp) - with open(os.path.join(self.tmp, "etc", - "network", "interfaces"), 'r') as f: + network_state.parse_net_config_data(ncfg), target=self.tmp + ) + with open( + os.path.join(self.tmp, "etc", "network", "interfaces"), "r" + ) as f: eni_rendering = f.read() self.assertIn("route add default gw 2.2.2.9", eni_rendering) def test_conversion_with_tap(self): - ncfg = openstack.convert_net_json(NETWORK_DATA_3, - known_macs=KNOWN_MACS) + ncfg = openstack.convert_net_json( + NETWORK_DATA_3, known_macs=KNOWN_MACS + ) physicals = set() - for i in ncfg['config']: - if i.get('type') == "physical": - physicals.add(i['name']) - self.assertEqual(physicals, set(('foo1', 'foo2'))) + for i in ncfg["config"]: + if i.get("type") == "physical": + physicals.add(i["name"]) + self.assertEqual(physicals, set(("foo1", "foo2"))) def test_bond_conversion(self): # light testing of bond conversion and eni rendering of bond - ncfg = openstack.convert_net_json(NETWORK_DATA_BOND, - known_macs=KNOWN_MACS) + ncfg = openstack.convert_net_json( + NETWORK_DATA_BOND, known_macs=KNOWN_MACS + ) eni_renderer = eni.Renderer() eni_renderer.render_network_state( - network_state.parse_net_config_data(ncfg), target=self.tmp) - with open(os.path.join(self.tmp, "etc", - "network", "interfaces"), 'r') as f: + network_state.parse_net_config_data(ncfg), target=self.tmp + ) + with open( + os.path.join(self.tmp, "etc", "network", "interfaces"), "r" + ) as f: eni_rendering = f.read() # Verify there are expected interfaces in the net config. 
interfaces = sorted( - [i['name'] for i in ncfg['config'] - if i['type'] in ('vlan', 'bond', 'physical')]) + [ + i["name"] + for i in ncfg["config"] + if i["type"] in ("vlan", "bond", "physical") + ] + ) self.assertEqual( sorted(["oeth0", "oeth1", "bond0", "bond0.602", "bond0.612"]), - interfaces) + interfaces, + ) words = eni_rendering.split() # 'eth0' and 'eth1' are the ids. Because their mac addresses # map to other names, we should not see them in the ENI - self.assertNotIn('eth0', words) - self.assertNotIn('eth1', words) + self.assertNotIn("eth0", words) + self.assertNotIn("eth1", words) # oeth0 and oeth1 are the interface names for eni. # bond0 will be generated for the bond. Each should be auto. @@ -762,13 +971,16 @@ class TestConvertNetworkData(CiTestCase): def test_vlan(self): # light testing of vlan config conversion and eni rendering - ncfg = openstack.convert_net_json(NETWORK_DATA_VLAN, - known_macs=KNOWN_MACS) + ncfg = openstack.convert_net_json( + NETWORK_DATA_VLAN, known_macs=KNOWN_MACS + ) eni_renderer = eni.Renderer() eni_renderer.render_network_state( - network_state.parse_net_config_data(ncfg), target=self.tmp) + network_state.parse_net_config_data(ncfg), target=self.tmp + ) - with open(os.path.join(self.tmp, "etc", - "network", "interfaces"), 'r') as f: + with open( + os.path.join(self.tmp, "etc", "network", "interfaces"), "r" + ) as f: eni_rendering = f.read() self.assertIn("iface enp0s1", eni_rendering) @@ -778,52 +990,63 @@ class TestConvertNetworkData(CiTestCase): def test_mac_addrs_can_be_upper_case(self): # input mac addresses on Rackspace may be upper case my_netdata = deepcopy(NETWORK_DATA) - for link in my_netdata['links']: - link['ethernet_mac_address'] = link['ethernet_mac_address'].upper() + for link in my_netdata["links"]: + link["ethernet_mac_address"] = link["ethernet_mac_address"].upper() ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS) config_name2mac = {} - for n in ncfg['config']: - if n['type'] == 'physical': - config_name2mac[n['name']] = n['mac_address'] - - expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58', - 'enp0s2': 'fa:16:3e:d4:57:ad'} + for n in ncfg["config"]: + if n["type"] == "physical": + config_name2mac[n["name"]] = n["mac_address"] + + expected = { + "nic0": "fa:16:3e:05:30:fe", + "enp0s1": "fa:16:3e:69:b0:58", + "enp0s2": "fa:16:3e:d4:57:ad", + } self.assertEqual(expected, config_name2mac) def test_unknown_device_types_accepted(self): # If we don't recognise a link, we should treat it as physical for a # best-effort boot my_netdata = deepcopy(NETWORK_DATA) - my_netdata['links'][0]['type'] = 'my-special-link-type' + my_netdata["links"][0]["type"] = "my-special-link-type" ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS) config_name2mac = {} - for n in ncfg['config']: - if n['type'] == 'physical': - config_name2mac[n['name']] = n['mac_address'] - - expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58', - 'enp0s2': 'fa:16:3e:d4:57:ad'} + for n in ncfg["config"]: + if n["type"] == "physical": + config_name2mac[n["name"]] = n["mac_address"] + + expected = { + "nic0": "fa:16:3e:05:30:fe", + "enp0s1": "fa:16:3e:69:b0:58", + "enp0s2": "fa:16:3e:d4:57:ad", + } self.assertEqual(expected, config_name2mac) # We should, however, warn the user that we don't recognise the type - self.assertIn('Unknown network_data link type (my-special-link-type)', - self.logs.getvalue()) + self.assertIn( + "Unknown network_data link type (my-special-link-type)", + self.logs.getvalue(), + )
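The device_name_to_device tests in TestConfigDriveDataSource above patch os.path.exists with a generator as the mock side_effect, so that the first call reports the device missing and the second reports it present. Here is a standalone sketch of that unittest.mock idiom; nothing in it is cloud-init specific, and the /dev/vda1 path is just illustrative.

import os.path
from unittest import mock

def exists_side_effect():
    # When side_effect is an iterable, each call to the mock returns the
    # next value from it: first call -> False, second call -> True.
    yield False
    yield True

with mock.patch.object(os.path, "exists", side_effect=exists_side_effect()):
    assert os.path.exists("/dev/vda1") is False  # first call
    assert os.path.exists("/dev/vda1") is True  # second call

# A third call inside the patch would raise StopIteration, which is why
# the tests above arrange to hit the mocked exists() exactly twice.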
def cfg_ds_from_dir(base_d, files=None): run = os.path.join(base_d, "run") os.mkdir(run) cfg_ds = ds.DataSourceConfigDrive( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': run})) + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": run}) + ) cfg_ds.seed_dir = os.path.join(base_d, "seed") if files: populate_dir(cfg_ds.seed_dir, files) cfg_ds.known_macs = KNOWN_MACS.copy() if not cfg_ds.get_data(): - raise RuntimeError("Data source did not extract itself from" - " seed directory %s" % cfg_ds.seed_dir) + raise RuntimeError( + "Data source did not extract itself from seed directory %s" + % cfg_ds.seed_dir + ) return cfg_ds @@ -832,13 +1055,14 @@ def populate_ds_from_read_config(cfg_ds, source, results): read_config_drive_dir hopefully in line with what it would have if cfg_ds.get_data had been successfully called""" cfg_ds.source = source - cfg_ds.metadata = results.get('metadata') - cfg_ds.ec2_metadata = results.get('ec2-metadata') - cfg_ds.userdata_raw = results.get('userdata') - cfg_ds.version = results.get('version') - cfg_ds.network_json = results.get('networkdata') + cfg_ds.metadata = results.get("metadata") + cfg_ds.ec2_metadata = results.get("ec2-metadata") + cfg_ds.userdata_raw = results.get("userdata") + cfg_ds.version = results.get("version") + cfg_ds.network_json = results.get("networkdata") cfg_ds._network_config = openstack.convert_net_json( - cfg_ds.network_json, known_macs=KNOWN_MACS) + cfg_ds.network_json, known_macs=KNOWN_MACS + ) # vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_digitalocean.py b/tests/unittests/sources/test_digitalocean.py index 351bf7ba..f3e6224e 100644 --- a/tests/unittests/sources/test_digitalocean.py +++ b/tests/unittests/sources/test_digitalocean.py @@ -8,19 +8,20 @@ import json -from cloudinit import helpers -from cloudinit import settings +from cloudinit import helpers, settings from cloudinit.sources import DataSourceDigitalOcean from cloudinit.sources.helpers import digitalocean +from tests.unittests.helpers import CiTestCase, mock -from tests.unittests.helpers import mock, CiTestCase - -DO_MULTIPLE_KEYS = ["ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co", - "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co"] +DO_MULTIPLE_KEYS = [ + "ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co", + "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co", +] DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... 
test@do.co" # the following JSON was taken from droplet (that's why its a string) -DO_META = json.loads(""" +DO_META = json.loads( + """ { "droplet_id": "22532410", "hostname": "utl-96268", @@ -76,89 +77,94 @@ DO_META = json.loads(""" ] } } -""") +""" +) # This has no private interface DO_META_2 = { "droplet_id": 27223699, "hostname": "smtest1", - "vendor_data": "\n".join([ - ('"Content-Type: multipart/mixed; ' - 'boundary=\"===============8645434374073493512==\"'), - 'MIME-Version: 1.0', - '', - '--===============8645434374073493512==', - 'MIME-Version: 1.0' - 'Content-Type: text/cloud-config; charset="us-ascii"' - 'Content-Transfer-Encoding: 7bit' - 'Content-Disposition: attachment; filename="cloud-config"' - '', - '#cloud-config', - 'disable_root: false', - 'manage_etc_hosts: true', - '', - '', - '--===============8645434374073493512==' - ]), - "public_keys": [ - "ssh-rsa AAAAB3NzaN...N3NtHw== smoser@brickies" - ], + "vendor_data": "\n".join( + [ + '"Content-Type: multipart/mixed; ' + 'boundary="===============8645434374073493512=="', + "MIME-Version: 1.0", + "", + "--===============8645434374073493512==", + "MIME-Version: 1.0" + 'Content-Type: text/cloud-config; charset="us-ascii"' + "Content-Transfer-Encoding: 7bit" + 'Content-Disposition: attachment; filename="cloud-config"' + "", + "#cloud-config", + "disable_root: false", + "manage_etc_hosts: true", + "", + "", + "--===============8645434374073493512==", + ] + ), + "public_keys": ["ssh-rsa AAAAB3NzaN...N3NtHw== smoser@brickies"], "auth_key": "88888888888888888888888888888888", "region": "nyc3", "interfaces": { - "public": [{ - "ipv4": { - "ip_address": "45.55.249.133", - "netmask": "255.255.192.0", - "gateway": "45.55.192.1" - }, - "anchor_ipv4": { - "ip_address": "10.17.0.5", - "netmask": "255.255.0.0", - "gateway": "10.17.0.1" - }, - "mac": "ae:cc:08:7c:88:00", - "type": "public" - }] + "public": [ + { + "ipv4": { + "ip_address": "45.55.249.133", + "netmask": "255.255.192.0", + "gateway": "45.55.192.1", + }, + "anchor_ipv4": { + "ip_address": "10.17.0.5", + "netmask": "255.255.0.0", + "gateway": "10.17.0.1", + }, + "mac": "ae:cc:08:7c:88:00", + "type": "public", + } + ] }, "floating_ip": {"ipv4": {"active": True, "ip_address": "138.197.59.92"}}, "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]}, "tags": None, } -DO_META['public_keys'] = DO_SINGLE_KEY +DO_META["public_keys"] = DO_SINGLE_KEY -MD_URL = 'http://169.254.169.254/metadata/v1.json' +MD_URL = "http://169.254.169.254/metadata/v1.json" def _mock_dmi(): - return (True, DO_META.get('id')) + return (True, DO_META.get("id")) class TestDataSourceDigitalOcean(CiTestCase): """ Test reading the meta-data """ + def setUp(self): super(TestDataSourceDigitalOcean, self).setUp() self.tmp = self.tmp_dir() def get_ds(self, get_sysinfo=_mock_dmi): ds = DataSourceDigitalOcean.DataSourceDigitalOcean( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) + ) ds.use_ip4LL = False if get_sysinfo is not None: ds._get_sysinfo = get_sysinfo return ds - @mock.patch('cloudinit.sources.helpers.digitalocean.read_sysinfo') + @mock.patch("cloudinit.sources.helpers.digitalocean.read_sysinfo") def test_returns_false_not_on_docean(self, m_read_sysinfo): m_read_sysinfo.return_value = (False, None) ds = self.get_ds(get_sysinfo=None) self.assertEqual(False, ds.get_data()) self.assertTrue(m_read_sysinfo.called) - @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata') + 
@mock.patch("cloudinit.sources.helpers.digitalocean.read_metadata") def test_metadata(self, mock_readmd): mock_readmd.return_value = DO_META.copy() @@ -168,22 +174,23 @@ class TestDataSourceDigitalOcean(CiTestCase): self.assertTrue(mock_readmd.called) - self.assertEqual(DO_META.get('user_data'), ds.get_userdata_raw()) - self.assertEqual(DO_META.get('vendor_data'), ds.get_vendordata_raw()) - self.assertEqual(DO_META.get('region'), ds.availability_zone) - self.assertEqual(DO_META.get('droplet_id'), ds.get_instance_id()) - self.assertEqual(DO_META.get('hostname'), ds.get_hostname()) + self.assertEqual(DO_META.get("user_data"), ds.get_userdata_raw()) + self.assertEqual(DO_META.get("vendor_data"), ds.get_vendordata_raw()) + self.assertEqual(DO_META.get("region"), ds.availability_zone) + self.assertEqual(DO_META.get("droplet_id"), ds.get_instance_id()) + self.assertEqual(DO_META.get("hostname"), ds.get_hostname()) # Single key - self.assertEqual([DO_META.get('public_keys')], - ds.get_public_ssh_keys()) + self.assertEqual( + [DO_META.get("public_keys")], ds.get_public_ssh_keys() + ) self.assertIsInstance(ds.get_public_ssh_keys(), list) - @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata') + @mock.patch("cloudinit.sources.helpers.digitalocean.read_metadata") def test_multiple_ssh_keys(self, mock_readmd): metadata = DO_META.copy() - metadata['public_keys'] = DO_MULTIPLE_KEYS + metadata["public_keys"] = DO_MULTIPLE_KEYS mock_readmd.return_value = metadata.copy() ds = self.get_ds() @@ -193,38 +200,39 @@ class TestDataSourceDigitalOcean(CiTestCase): self.assertTrue(mock_readmd.called) # Multiple keys - self.assertEqual(metadata['public_keys'], ds.get_public_ssh_keys()) + self.assertEqual(metadata["public_keys"], ds.get_public_ssh_keys()) self.assertIsInstance(ds.get_public_ssh_keys(), list) class TestNetworkConvert(CiTestCase): - def _get_networking(self): self.m_get_by_mac.return_value = { - '04:01:57:d1:9e:01': 'ens1', - '04:01:57:d1:9e:02': 'ens2', - 'b8:ae:ed:75:5f:9a': 'enp0s25', - 'ae:cc:08:7c:88:00': 'meta2p1'} + "04:01:57:d1:9e:01": "ens1", + "04:01:57:d1:9e:02": "ens2", + "b8:ae:ed:75:5f:9a": "enp0s25", + "ae:cc:08:7c:88:00": "meta2p1", + } netcfg = digitalocean.convert_network_configuration( - DO_META['interfaces'], DO_META['dns']['nameservers']) - self.assertIn('config', netcfg) + DO_META["interfaces"], DO_META["dns"]["nameservers"] + ) + self.assertIn("config", netcfg) return netcfg def setUp(self): super(TestNetworkConvert, self).setUp() - self.add_patch('cloudinit.net.get_interfaces_by_mac', 'm_get_by_mac') + self.add_patch("cloudinit.net.get_interfaces_by_mac", "m_get_by_mac") def test_networking_defined(self): netcfg = self._get_networking() self.assertIsNotNone(netcfg) dns_defined = False - for part in netcfg.get('config'): - n_type = part.get('type') + for part in netcfg.get("config"): + n_type = part.get("type") print("testing part ", n_type, "\n", json.dumps(part, indent=3)) - if n_type == 'nameserver': - n_address = part.get('address') + if n_type == "nameserver": + n_address = part.get("address") self.assertIsNotNone(n_address) self.assertEqual(len(n_address), 3) @@ -234,9 +242,9 @@ class TestNetworkConvert(CiTestCase): dns_defined = True else: - n_subnets = part.get('type') - n_name = part.get('name') - n_mac = part.get('mac_address') + n_subnets = part.get("type") + n_name = part.get("name") + n_mac = part.get("mac_address") self.assertIsNotNone(n_type) self.assertIsNotNone(n_subnets) @@ -247,21 +255,21 @@ class TestNetworkConvert(CiTestCase): def 
_get_nic_definition(self, int_type, expected_name): """helper function to return if_type (i.e. public) and the expected - name used by cloud-init (i.e. eth0)""" + name used by cloud-init (i.e. eth0)""" netcfg = self._get_networking() - meta_def = (DO_META.get('interfaces')).get(int_type)[0] + meta_def = (DO_META.get("interfaces")).get(int_type)[0] - self.assertEqual(int_type, meta_def.get('type')) + self.assertEqual(int_type, meta_def.get("type")) - for nic_def in netcfg.get('config'): + for nic_def in netcfg.get("config"): print(nic_def) - if nic_def.get('name') == expected_name: + if nic_def.get("name") == expected_name: return nic_def, meta_def def _get_match_subn(self, subnets, ip_addr): """get the matching subnet definition based on ip address""" for subn in subnets: - address = subn.get('address') + address = subn.get("address") self.assertIsNotNone(address) # equals won't work because of ipv6 addressing being in @@ -274,99 +282,108 @@ class TestNetworkConvert(CiTestCase): """test to make sure the eth0 ipv4 and ipv6 gateways are defined""" netcfg = self._get_networking() gateways = [] - for nic_def in netcfg.get('config'): - if nic_def.get('type') != 'physical': + for nic_def in netcfg.get("config"): + if nic_def.get("type") != "physical": continue - for subn in nic_def.get('subnets'): - if 'gateway' in subn: - gateways.append(subn.get('gateway')) + for subn in nic_def.get("subnets"): + if "gateway" in subn: + gateways.append(subn.get("gateway")) # we should have two gateways, one ipv4 and one ipv6 self.assertEqual(len(gateways), 2) # make sure the ipv4 gateway is there - (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') - ipv4_def = meta_def.get('ipv4') - self.assertIn(ipv4_def.get('gateway'), gateways) + (nic_def, meta_def) = self._get_nic_definition("public", "eth0") + ipv4_def = meta_def.get("ipv4") + self.assertIn(ipv4_def.get("gateway"), gateways) # make sure the ipv6 gateway is there - ipv6_def = meta_def.get('ipv6') - self.assertIn(ipv6_def.get('gateway'), gateways) + ipv6_def = meta_def.get("ipv6") + self.assertIn(ipv6_def.get("gateway"), gateways) def test_public_interface_defined(self): """test that the public interface is defined as eth0""" - (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') - self.assertEqual('eth0', nic_def.get('name')) - self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address')) - self.assertEqual('physical', nic_def.get('type')) + (nic_def, meta_def) = self._get_nic_definition("public", "eth0") + self.assertEqual("eth0", nic_def.get("name")) + self.assertEqual(meta_def.get("mac"), nic_def.get("mac_address")) + self.assertEqual("physical", nic_def.get("type")) def test_private_interface_defined(self): """test that the private interface is defined as eth1""" - (nic_def, meta_def) = self._get_nic_definition('private', 'eth1') - self.assertEqual('eth1', nic_def.get('name')) - self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address')) - self.assertEqual('physical', nic_def.get('type')) + (nic_def, meta_def) = self._get_nic_definition("private", "eth1") + self.assertEqual("eth1", nic_def.get("name")) + self.assertEqual(meta_def.get("mac"), nic_def.get("mac_address")) + self.assertEqual("physical", nic_def.get("type")) def test_public_interface_ipv6(self): """test public ipv6 addressing""" - (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') - ipv6_def = meta_def.get('ipv6')
self.assertIsNotNone(ipv6_def) - subn_def = self._get_match_subn(nic_def.get('subnets'), - ipv6_def.get('ip_address')) + subn_def = self._get_match_subn( + nic_def.get("subnets"), ipv6_def.get("ip_address") + ) - cidr_notated_address = "{0}/{1}".format(ipv6_def.get('ip_address'), - ipv6_def.get('cidr')) + cidr_notated_address = "{0}/{1}".format( + ipv6_def.get("ip_address"), ipv6_def.get("cidr") + ) - self.assertEqual(cidr_notated_address, subn_def.get('address')) - self.assertEqual(ipv6_def.get('gateway'), subn_def.get('gateway')) + self.assertEqual(cidr_notated_address, subn_def.get("address")) + self.assertEqual(ipv6_def.get("gateway"), subn_def.get("gateway")) def test_public_interface_ipv4(self): """test public ipv4 addressing""" - (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') - ipv4_def = meta_def.get('ipv4') + (nic_def, meta_def) = self._get_nic_definition("public", "eth0") + ipv4_def = meta_def.get("ipv4") self.assertIsNotNone(ipv4_def) - subn_def = self._get_match_subn(nic_def.get('subnets'), - ipv4_def.get('ip_address')) + subn_def = self._get_match_subn( + nic_def.get("subnets"), ipv4_def.get("ip_address") + ) - self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask')) - self.assertEqual(ipv4_def.get('gateway'), subn_def.get('gateway')) + self.assertEqual(ipv4_def.get("netmask"), subn_def.get("netmask")) + self.assertEqual(ipv4_def.get("gateway"), subn_def.get("gateway")) def test_public_interface_anchor_ipv4(self): """test public ipv4 addressing""" - (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') - ipv4_def = meta_def.get('anchor_ipv4') + (nic_def, meta_def) = self._get_nic_definition("public", "eth0") + ipv4_def = meta_def.get("anchor_ipv4") self.assertIsNotNone(ipv4_def) - subn_def = self._get_match_subn(nic_def.get('subnets'), - ipv4_def.get('ip_address')) + subn_def = self._get_match_subn( + nic_def.get("subnets"), ipv4_def.get("ip_address") + ) - self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask')) - self.assertNotIn('gateway', subn_def) + self.assertEqual(ipv4_def.get("netmask"), subn_def.get("netmask")) + self.assertNotIn("gateway", subn_def) - @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch("cloudinit.net.get_interfaces_by_mac") def test_convert_without_private(self, m_get_by_mac): m_get_by_mac.return_value = { - 'b8:ae:ed:75:5f:9a': 'enp0s25', - 'ae:cc:08:7c:88:00': 'meta2p1'} + "b8:ae:ed:75:5f:9a": "enp0s25", + "ae:cc:08:7c:88:00": "meta2p1", + } netcfg = digitalocean.convert_network_configuration( - DO_META_2['interfaces'], DO_META_2['dns']['nameservers']) + DO_META_2["interfaces"], DO_META_2["dns"]["nameservers"] + ) # print(netcfg) byname = {} - for i in netcfg['config']: - if 'name' in i: - if i['name'] in byname: - raise ValueError("name '%s' in config twice: %s" % - (i['name'], netcfg)) - byname[i['name']] = i - self.assertTrue('eth0' in byname) - self.assertTrue('subnets' in byname['eth0']) - eth0 = byname['eth0'] + for i in netcfg["config"]: + if "name" in i: + if i["name"] in byname: + raise ValueError( + "name '%s' in config twice: %s" % (i["name"], netcfg) + ) + byname[i["name"]] = i + self.assertTrue("eth0" in byname) + self.assertTrue("subnets" in byname["eth0"]) + eth0 = byname["eth0"] self.assertEqual( - sorted(['45.55.249.133', '10.17.0.5']), - sorted([i['address'] for i in eth0['subnets']])) + sorted(["45.55.249.133", "10.17.0.5"]), + sorted([i["address"] for i in eth0["subnets"]]), + ) + # vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_ec2.py 
b/tests/unittests/sources/test_ec2.py index 19c2bbcd..b376660d 100644 --- a/tests/unittests/sources/test_ec2.py +++ b/tests/unittests/sources/test_ec2.py @@ -1,35 +1,37 @@ # This file is part of cloud-init. See LICENSE file for license information. import copy -import httpretty import json -import requests from unittest import mock +import httpretty +import requests + from cloudinit import helpers from cloudinit.sources import DataSourceEc2 as ec2 from tests.unittests import helpers as test_helpers - DYNAMIC_METADATA = { "instance-identity": { - "document": json.dumps({ - "devpayProductCodes": None, - "marketplaceProductCodes": ["1abc2defghijklm3nopqrs4tu"], - "availabilityZone": "us-west-2b", - "privateIp": "10.158.112.84", - "version": "2017-09-30", - "instanceId": "my-identity-id", - "billingProducts": None, - "instanceType": "t2.micro", - "accountId": "123456789012", - "imageId": "ami-5fb8c835", - "pendingTime": "2016-11-19T16:32:11Z", - "architecture": "x86_64", - "kernelId": None, - "ramdiskId": None, - "region": "us-west-2" - }) + "document": json.dumps( + { + "devpayProductCodes": None, + "marketplaceProductCodes": ["1abc2defghijklm3nopqrs4tu"], + "availabilityZone": "us-west-2b", + "privateIp": "10.158.112.84", + "version": "2017-09-30", + "instanceId": "my-identity-id", + "billingProducts": None, + "instanceType": "t2.micro", + "accountId": "123456789012", + "imageId": "ami-5fb8c835", + "pendingTime": "2016-11-19T16:32:11Z", + "architecture": "x86_64", + "kernelId": None, + "ramdiskId": None, + "region": "us-west-2", + } + ) } } @@ -52,7 +54,7 @@ DEFAULT_METADATA = { "local-hostname": "ip-172-3-3-15.us-east-2.compute.internal", "local-ipv4": "172.3.3.15", "mac": "06:17:04:d7:26:09", - "metrics": {"vhostmd": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"}, + "metrics": {"vhostmd": '<?xml version="1.0" encoding="UTF-8"?>'}, "network": { "interfaces": { "macs": { @@ -61,13 +63,15 @@ DEFAULT_METADATA = { "interface-id": "eni-e44ef49e", "ipv4-associations": {"13.59.77.202": "172.3.3.15"}, "ipv6s": "2600:1f16:aeb:b20b:9d87:a4af:5cc9:73dc", - "local-hostname": ("ip-172-3-3-15.us-east-2." - "compute.internal"), + "local-hostname": ( + "ip-172-3-3-15.us-east-2.compute.internal" + ), "local-ipv4s": "172.3.3.15", "mac": "06:17:04:d7:26:09", "owner-id": "950047163771", - "public-hostname": ("ec2-13-59-77-202.us-east-2." - "compute.amazonaws.com"), + "public-hostname": ( + "ec2-13-59-77-202.us-east-2.compute.amazonaws.com" + ), "public-ipv4s": "13.59.77.202", "security-group-ids": "sg-5a61d333", "security-groups": "wide-open", @@ -77,20 +81,22 @@ DEFAULT_METADATA = { "vpc-id": "vpc-87e72bee", "vpc-ipv4-cidr-block": "172.31.0.0/16", "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc-ipv6-cidr-blocks": "2600:1f16:aeb:b200::/56" + "vpc-ipv6-cidr-blocks": "2600:1f16:aeb:b200::/56", }, "06:17:04:d7:26:08": { - "device-number": "1", # Only IPv4 local config + "device-number": "1", # Only IPv4 local config "interface-id": "eni-e44ef49f", "ipv4-associations": {"": "172.3.3.16"}, "ipv6s": "", # No IPv6 config - "local-hostname": ("ip-172-3-3-16.us-east-2." - "compute.internal"), + "local-hostname": ( + "ip-172-3-3-16.us-east-2.compute.internal" + ), "local-ipv4s": "172.3.3.16", "mac": "06:17:04:d7:26:08", "owner-id": "950047163771", - "public-hostname": ("ec2-172-3-3-16.us-east-2." 
- "compute.amazonaws.com"), + "public-hostname": ( + "ec2-172-3-3-16.us-east-2.compute.amazonaws.com" + ), "public-ipv4s": "", # No public ipv4 config "security-group-ids": "sg-5a61d333", "security-groups": "wide-open", @@ -100,8 +106,8 @@ DEFAULT_METADATA = { "vpc-id": "vpc-87e72bee", "vpc-ipv4-cidr-block": "172.31.0.0/16", "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc-ipv6-cidr-blocks": "" - } + "vpc-ipv6-cidr-blocks": "", + }, } } }, @@ -123,24 +129,17 @@ DEFAULT_METADATA = { NIC1_MD_IPV4_IPV6_MULTI_IP = { "device-number": "0", "interface-id": "eni-0d6335689899ce9cc", - "ipv4-associations": { - "18.218.219.181": "172.31.44.13" - }, + "ipv4-associations": {"18.218.219.181": "172.31.44.13"}, "ipv6s": [ "2600:1f16:292:100:c187:593c:4349:136", "2600:1f16:292:100:f153:12a3:c37c:11f9", - "2600:1f16:292:100:f152:2222:3333:4444" - ], - "local-hostname": ("ip-172-31-44-13.us-east-2." - "compute.internal"), - "local-ipv4s": [ - "172.31.44.13", - "172.31.45.70" + "2600:1f16:292:100:f152:2222:3333:4444", ], + "local-hostname": "ip-172-31-44-13.us-east-2.compute.internal", + "local-ipv4s": ["172.31.44.13", "172.31.45.70"], "mac": "0a:07:84:3d:6e:38", "owner-id": "329910648901", - "public-hostname": ("ec2-18-218-219-181.us-east-2." - "compute.amazonaws.com"), + "public-hostname": "ec2-18-218-219-181.us-east-2.compute.amazonaws.com", "public-ipv4s": "18.218.219.181", "security-group-ids": "sg-0c387755222ba8d2e", "security-groups": "launch-wizard-4", @@ -150,7 +149,7 @@ NIC1_MD_IPV4_IPV6_MULTI_IP = { "vpc-id": "vpc-a07f62c8", "vpc-ipv4-cidr-block": "172.31.0.0/16", "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc_ipv6_cidr_blocks": "2600:1f16:292:100::/56" + "vpc_ipv6_cidr_blocks": "2600:1f16:292:100::/56", } NIC2_MD = { @@ -166,30 +165,22 @@ NIC2_MD = { "subnet-ipv4-cidr-block": "172.31.32.0/20", "vpc-id": "vpc-a07f62c8", "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16" + "vpc-ipv4-cidr-blocks": "172.31.0.0/16", } SECONDARY_IP_METADATA_2018_09_24 = { "ami-id": "ami-0986c2ac728528ac2", "ami-launch-index": "0", "ami-manifest-path": "(unknown)", - "block-device-mapping": { - "ami": "/dev/sda1", - "root": "/dev/sda1" - }, - "events": { - "maintenance": { - "history": "[]", - "scheduled": "[]" - } - }, + "block-device-mapping": {"ami": "/dev/sda1", "root": "/dev/sda1"}, + "events": {"maintenance": {"history": "[]", "scheduled": "[]"}}, "hostname": "ip-172-31-44-13.us-east-2.compute.internal", "identity-credentials": { "ec2": { "info": { "AccountId": "329910648901", "Code": "Success", - "LastUpdated": "2019-07-06T14:22:56Z" + "LastUpdated": "2019-07-06T14:22:56Z", } } }, @@ -199,9 +190,7 @@ SECONDARY_IP_METADATA_2018_09_24 = { "local-hostname": "ip-172-31-44-13.us-east-2.compute.internal", "local-ipv4": "172.31.44.13", "mac": "0a:07:84:3d:6e:38", - "metrics": { - "vhostmd": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" - }, + "metrics": {"vhostmd": '<?xml version="1.0" encoding="UTF-8"?>'}, "network": { "interfaces": { "macs": { @@ -209,27 +198,17 @@ SECONDARY_IP_METADATA_2018_09_24 = { } } }, - "placement": { - "availability-zone": "us-east-2c" - }, + "placement": {"availability-zone": "us-east-2c"}, "profile": "default-hvm", - "public-hostname": ( - "ec2-18-218-219-181.us-east-2.compute.amazonaws.com"), + "public-hostname": "ec2-18-218-219-181.us-east-2.compute.amazonaws.com", "public-ipv4": "18.218.219.181", - "public-keys": { - "yourkeyname,e": [ - "ssh-rsa AAAAW...DZ yourkeyname" - ] - }, + "public-keys": {"yourkeyname,e": ["ssh-rsa AAAAW...DZ yourkeyname"]}, 
"reservation-id": "r-09b4917135cdd33be", "security-groups": "launch-wizard-4", - "services": { - "domain": "amazonaws.com", - "partition": "aws" - } + "services": {"domain": "amazonaws.com", "partition": "aws"}, } -M_PATH_NET = 'cloudinit.sources.DataSourceEc2.net.' +M_PATH_NET = "cloudinit.sources.DataSourceEc2.net." def _register_ssh_keys(rfunc, base_url, keys_data): @@ -250,9 +229,9 @@ def _register_ssh_keys(rfunc, base_url, keys_data): """ base_url = base_url.rstrip("/") - odd_index = '\n'.join( - ["{0}={1}".format(n, name) - for n, name in enumerate(sorted(keys_data))]) + odd_index = "\n".join( + ["{0}={1}".format(n, name) for n, name in enumerate(sorted(keys_data))] + ) rfunc(base_url, odd_index) rfunc(base_url + "/", odd_index) @@ -260,7 +239,7 @@ def _register_ssh_keys(rfunc, base_url, keys_data): for n, name in enumerate(sorted(keys_data)): val = keys_data[name] if isinstance(val, list): - val = '\n'.join(val) + val = "\n".join(val) burl = base_url + "/%s" % n rfunc(burl, "openssh-key") rfunc(burl + "/", "openssh-key") @@ -281,6 +260,7 @@ def register_mock_metaserver(base_url, data): base_url/mac with 00:16:3e:00:00:00 In the index, references to lists or dictionaries have a trailing /. """ + def register_helper(register, base_url, body): if not isinstance(base_url, str): register(base_url, body) @@ -289,25 +269,24 @@ def register_mock_metaserver(base_url, data): if isinstance(body, str): register(base_url, body) elif isinstance(body, list): - register(base_url, '\n'.join(body) + '\n') - register(base_url + '/', '\n'.join(body) + '\n') + register(base_url, "\n".join(body) + "\n") + register(base_url + "/", "\n".join(body) + "\n") elif isinstance(body, dict): vals = [] for k, v in body.items(): - if k == 'public-keys': - _register_ssh_keys( - register, base_url + '/public-keys/', v) + if k == "public-keys": + _register_ssh_keys(register, base_url + "/public-keys/", v) continue suffix = k.rstrip("/") if not isinstance(v, (str, list)): suffix += "/" vals.append(suffix) - url = base_url + '/' + suffix + url = base_url + "/" + suffix register_helper(register, url, v) - register(base_url, '\n'.join(vals) + '\n') - register(base_url + '/', '\n'.join(vals) + '\n') + register(base_url, "\n".join(vals) + "\n") + register(base_url + "/", "\n".join(vals) + "\n") elif body is None: - register(base_url, 'not found', status=404) + register(base_url, "not found", status=404) def myreg(*argc, **kwargs): url = argc[0] @@ -322,9 +301,9 @@ class TestEc2(test_helpers.HttprettyTestCase): maxDiff = None valid_platform_data = { - 'uuid': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412', - 'uuid_source': 'dmi', - 'serial': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412', + "uuid": "ec212f79-87d1-2f1d-588f-d86dc0fd5412", + "uuid_source": "dmi", + "serial": "ec212f79-87d1-2f1d-588f-d86dc0fd5412", } def setUp(self): @@ -333,9 +312,9 @@ class TestEc2(test_helpers.HttprettyTestCase): self.metadata_addr = self.datasource.metadata_urls[0] self.tmp = self.tmp_dir() - def data_url(self, version, data_item='meta-data'): + def data_url(self, version, data_item="meta-data"): """Return a metadata url based on the version provided.""" - return '/'.join([self.metadata_addr, version, data_item]) + return "/".join([self.metadata_addr, version, data_item]) def _patch_add_cleanup(self, mpath, *args, **kwargs): p = mock.patch(mpath, *args, **kwargs) @@ -345,7 +324,7 @@ class TestEc2(test_helpers.HttprettyTestCase): def _setup_ds(self, sys_cfg, platform_data, md, md_version=None): self.uris = [] distro = {} - paths = helpers.Paths({'run_dir': 
self.tmp}) + paths = helpers.Paths({"run_dir": self.tmp}) if sys_cfg is None: sys_cfg = {} ds = self.datasource(sys_cfg=sys_cfg, distro=distro, paths=paths) @@ -354,32 +333,39 @@ class TestEc2(test_helpers.HttprettyTestCase): if platform_data is not None: self._patch_add_cleanup( "cloudinit.sources.DataSourceEc2._collect_platform_data", - return_value=platform_data) + return_value=platform_data, + ) if md: - all_versions = ( - [ds.min_metadata_version] + ds.extended_metadata_versions) - token_url = self.data_url('latest', data_item='api/token') - register_mock_metaserver(token_url, 'API-TOKEN') + all_versions = [ + ds.min_metadata_version + ] + ds.extended_metadata_versions + token_url = self.data_url("latest", data_item="api/token") + register_mock_metaserver(token_url, "API-TOKEN") for version in all_versions: - metadata_url = self.data_url(version) + '/' + metadata_url = self.data_url(version) + "/" if version == md_version: # Register all metadata for desired version register_mock_metaserver( - metadata_url, md.get('md', DEFAULT_METADATA)) + metadata_url, md.get("md", DEFAULT_METADATA) + ) userdata_url = self.data_url( - version, data_item='user-data') - register_mock_metaserver(userdata_url, md.get('ud', '')) + version, data_item="user-data" + ) + register_mock_metaserver(userdata_url, md.get("ud", "")) identity_url = self.data_url( - version, data_item='dynamic/instance-identity') + version, data_item="dynamic/instance-identity" + ) register_mock_metaserver( - identity_url, md.get('id', DYNAMIC_METADATA)) + identity_url, md.get("id", DYNAMIC_METADATA) + ) else: - instance_id_url = metadata_url + 'instance-id' + instance_id_url = metadata_url + "instance-id" if version == ds.min_metadata_version: # Add min_metadata_version service availability check register_mock_metaserver( - instance_id_url, DEFAULT_METADATA['instance-id']) + instance_id_url, DEFAULT_METADATA["instance-id"] + ) else: # Register 404s for all unrequested extended versions register_mock_metaserver(instance_id_url, None) @@ -389,24 +375,33 @@ class TestEc2(test_helpers.HttprettyTestCase): """network_config property returns network version 2 for metadata""" ds = self._setup_ds( platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md={'md': DEFAULT_METADATA}) - find_fallback_path = M_PATH_NET + 'find_fallback_nic' + sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, + md={"md": DEFAULT_METADATA}, + ) + find_fallback_path = M_PATH_NET + "find_fallback_nic" with mock.patch(find_fallback_path) as m_find_fallback: - m_find_fallback.return_value = 'eth9' + m_find_fallback.return_value = "eth9" ds.get_data() - mac1 = '06:17:04:d7:26:09' # Defined in DEFAULT_METADATA - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': '06:17:04:d7:26:09'}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': True}}} - patch_path = M_PATH_NET + 'get_interfaces_by_mac' - get_interface_mac_path = M_PATH_NET + 'get_interface_mac' + mac1 = "06:17:04:d7:26:09" # Defined in DEFAULT_METADATA + expected = { + "version": 2, + "ethernets": { + "eth9": { + "match": {"macaddress": "06:17:04:d7:26:09"}, + "set-name": "eth9", + "dhcp4": True, + "dhcp6": True, + } + }, + } + patch_path = M_PATH_NET + "get_interfaces_by_mac" + get_interface_mac_path = M_PATH_NET + "get_interface_mac" with mock.patch(patch_path) as m_get_interfaces_by_mac: with mock.patch(find_fallback_path) as m_find_fallback: with mock.patch(get_interface_mac_path) as m_get_mac: - m_get_interfaces_by_mac.return_value = {mac1: 
'eth9'} - m_find_fallback.return_value = 'eth9' + m_get_interfaces_by_mac.return_value = {mac1: "eth9"} + m_find_fallback.return_value = "eth9" m_get_mac.return_value = mac1 self.assertEqual(expected, ds.network_config) @@ -418,24 +413,33 @@ class TestEc2(test_helpers.HttprettyTestCase): """ ds = self._setup_ds( platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md={'md': DEFAULT_METADATA}) - find_fallback_path = M_PATH_NET + 'find_fallback_nic' + sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, + md={"md": DEFAULT_METADATA}, + ) + find_fallback_path = M_PATH_NET + "find_fallback_nic" with mock.patch(find_fallback_path) as m_find_fallback: - m_find_fallback.return_value = 'eth9' + m_find_fallback.return_value = "eth9" ds.get_data() - mac1 = '06:17:04:d7:26:08' # IPv4 only in DEFAULT_METADATA - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': mac1.lower()}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': False}}} - patch_path = M_PATH_NET + 'get_interfaces_by_mac' - get_interface_mac_path = M_PATH_NET + 'get_interface_mac' + mac1 = "06:17:04:d7:26:08" # IPv4 only in DEFAULT_METADATA + expected = { + "version": 2, + "ethernets": { + "eth9": { + "match": {"macaddress": mac1.lower()}, + "set-name": "eth9", + "dhcp4": True, + "dhcp6": False, + } + }, + } + patch_path = M_PATH_NET + "get_interfaces_by_mac" + get_interface_mac_path = M_PATH_NET + "get_interface_mac" with mock.patch(patch_path) as m_get_interfaces_by_mac: with mock.patch(find_fallback_path) as m_find_fallback: with mock.patch(get_interface_mac_path) as m_get_mac: - m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} - m_find_fallback.return_value = 'eth9' + m_get_interfaces_by_mac.return_value = {mac1: "eth9"} + m_find_fallback.return_value = "eth9" m_get_mac.return_value = mac1 self.assertEqual(expected, ds.network_config) @@ -447,27 +451,38 @@ class TestEc2(test_helpers.HttprettyTestCase): """ ds = self._setup_ds( platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md={'md': SECONDARY_IP_METADATA_2018_09_24}) - find_fallback_path = M_PATH_NET + 'find_fallback_nic' + sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, + md={"md": SECONDARY_IP_METADATA_2018_09_24}, + ) + find_fallback_path = M_PATH_NET + "find_fallback_nic" with mock.patch(find_fallback_path) as m_find_fallback: - m_find_fallback.return_value = 'eth9' + m_find_fallback.return_value = "eth9" ds.get_data() - mac1 = '0a:07:84:3d:6e:38' # 1 secondary IPv4 and 2 secondary IPv6 - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': mac1}, 'set-name': 'eth9', - 'addresses': ['172.31.45.70/20', - '2600:1f16:292:100:f152:2222:3333:4444/128', - '2600:1f16:292:100:f153:12a3:c37c:11f9/128'], - 'dhcp4': True, 'dhcp6': True}}} - patch_path = M_PATH_NET + 'get_interfaces_by_mac' - get_interface_mac_path = M_PATH_NET + 'get_interface_mac' + mac1 = "0a:07:84:3d:6e:38" # 1 secondary IPv4 and 2 secondary IPv6 + expected = { + "version": 2, + "ethernets": { + "eth9": { + "match": {"macaddress": mac1}, + "set-name": "eth9", + "addresses": [ + "172.31.45.70/20", + "2600:1f16:292:100:f152:2222:3333:4444/128", + "2600:1f16:292:100:f153:12a3:c37c:11f9/128", + ], + "dhcp4": True, + "dhcp6": True, + } + }, + } + patch_path = M_PATH_NET + "get_interfaces_by_mac" + get_interface_mac_path = M_PATH_NET + "get_interface_mac" with mock.patch(patch_path) as m_get_interfaces_by_mac: with mock.patch(find_fallback_path) as m_find_fallback: with 
mock.patch(get_interface_mac_path) as m_get_mac: - m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} - m_find_fallback.return_value = 'eth9' + m_get_interfaces_by_mac.return_value = {mac1: "eth9"} + m_find_fallback.return_value = "eth9" m_get_mac.return_value = mac1 self.assertEqual(expected, ds.network_config) @@ -475,12 +490,13 @@ class TestEc2(test_helpers.HttprettyTestCase): """network_config property is cached in DataSourceEc2.""" ds = self._setup_ds( platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md={'md': DEFAULT_METADATA}) - ds._network_config = {'cached': 'data'} - self.assertEqual({'cached': 'data'}, ds.network_config) + sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, + md={"md": DEFAULT_METADATA}, + ) + ds._network_config = {"cached": "data"} + self.assertEqual({"cached": "data"}, ds.network_config) - @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp): """Refresh the network_config Ec2 cache if network key is absent. @@ -488,28 +504,39 @@ class TestEc2(test_helpers.HttprettyTestCase): which lacked newly required network key. """ old_metadata = copy.deepcopy(DEFAULT_METADATA) - old_metadata.pop('network') + old_metadata.pop("network") ds = self._setup_ds( platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md={'md': old_metadata}) + sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, + md={"md": old_metadata}, + ) self.assertTrue(ds.get_data()) # Provide new revision of metadata that contains network data register_mock_metaserver( - 'http://169.254.169.254/2009-04-04/meta-data/', DEFAULT_METADATA) - mac1 = '06:17:04:d7:26:09' # Defined in DEFAULT_METADATA - get_interface_mac_path = M_PATH_NET + 'get_interfaces_by_mac' - ds.fallback_nic = 'eth9' + "http://169.254.169.254/2009-04-04/meta-data/", DEFAULT_METADATA + ) + mac1 = "06:17:04:d7:26:09" # Defined in DEFAULT_METADATA + get_interface_mac_path = M_PATH_NET + "get_interfaces_by_mac" + ds.fallback_nic = "eth9" with mock.patch(get_interface_mac_path) as m_get_interfaces_by_mac: - m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} + m_get_interfaces_by_mac.return_value = {mac1: "eth9"} nc = ds.network_config # Will re-crawl network metadata self.assertIsNotNone(nc) self.assertIn( - 'Refreshing stale metadata from prior to upgrade', - self.logs.getvalue()) - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': True}}} + "Refreshing stale metadata from prior to upgrade", + self.logs.getvalue(), + ) + expected = { + "version": 2, + "ethernets": { + "eth9": { + "match": {"macaddress": mac1}, + "set-name": "eth9", + "dhcp4": True, + "dhcp6": True, + } + }, + } self.assertEqual(expected, ds.network_config) def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self): @@ -522,40 +549,46 @@ class TestEc2(test_helpers.HttprettyTestCase): self.datasource = ec2.DataSourceEc2Local ds = self._setup_ds( platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) + sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, + md={"md": DEFAULT_METADATA}, + ) # Mock 404s on all versions except latest - all_versions = ( - [ds.min_metadata_version] + ds.extended_metadata_versions) + all_versions = [ + ds.min_metadata_version + ] + 
ds.extended_metadata_versions for ver in all_versions[:-1]: register_mock_metaserver( - 'http://169.254.169.254/{0}/meta-data/instance-id'.format(ver), - None) - ds.metadata_address = 'http://169.254.169.254' + "http://169.254.169.254/{0}/meta-data/instance-id".format(ver), + None, + ) + ds.metadata_address = "http://169.254.169.254" register_mock_metaserver( - '{0}/{1}/meta-data/'.format(ds.metadata_address, all_versions[-1]), - DEFAULT_METADATA) + "{0}/{1}/meta-data/".format(ds.metadata_address, all_versions[-1]), + DEFAULT_METADATA, + ) # Register dynamic/instance-identity document which we now read. register_mock_metaserver( - '{0}/{1}/dynamic/'.format(ds.metadata_address, all_versions[-1]), - DYNAMIC_METADATA) + "{0}/{1}/dynamic/".format(ds.metadata_address, all_versions[-1]), + DYNAMIC_METADATA, + ) ds._cloud_name = ec2.CloudNames.AWS # Set up cached metadata on the Datasource ds.metadata = DEFAULT_METADATA - self.assertEqual('my-identity-id', ds.get_instance_id()) + self.assertEqual("my-identity-id", ds.get_instance_id()) def test_classic_instance_true(self): """If no vpc-id in metadata, is_classic_instance must return true.""" md_copy = copy.deepcopy(DEFAULT_METADATA) - ifaces_md = md_copy.get('network', {}).get('interfaces', {}) - for _mac, mac_data in ifaces_md.get('macs', {}).items(): - if 'vpc-id' in mac_data: - del mac_data['vpc-id'] + ifaces_md = md_copy.get("network", {}).get("interfaces", {}) + for _mac, mac_data in ifaces_md.get("macs", {}).items(): + if "vpc-id" in mac_data: + del mac_data["vpc-id"] ds = self._setup_ds( platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': md_copy}) + sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, + md={"md": md_copy}, + ) self.assertTrue(ds.get_data()) self.assertTrue(ds.is_classic_instance()) @@ -563,8 +596,9 @@ class TestEc2(test_helpers.HttprettyTestCase): """If vpc-id in metadata, is_classic_instance must return false.""" ds = self._setup_ds( platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) + sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, + md={"md": DEFAULT_METADATA}, + ) self.assertTrue(ds.get_data()) self.assertFalse(ds.is_classic_instance()) @@ -572,108 +606,117 @@ class TestEc2(test_helpers.HttprettyTestCase): """Requests to an unreachable http://169.254.169.254 are retried.""" ds = self._setup_ds( platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md=None) + sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, + md=None, + ) conn_error = requests.exceptions.ConnectionError( - '[Errno 113] no route to host' + "[Errno 113] no route to host" ) - mock_success = mock.MagicMock(contents=b'fakesuccess') + mock_success = mock.MagicMock(contents=b"fakesuccess") mock_success.ok.return_value = True - with mock.patch('cloudinit.url_helper.readurl') as m_readurl: + with mock.patch("cloudinit.url_helper.readurl") as m_readurl: m_readurl.side_effect = (conn_error, conn_error, mock_success) - with mock.patch('cloudinit.url_helper.time.sleep'): + with mock.patch("cloudinit.url_helper.time.sleep"): self.assertTrue(ds.wait_for_metadata_service()) # All three attempts requested /latest/api/token self.assertEqual(3, len(m_readurl.call_args_list)) for readurl_call in m_readurl.call_args_list: - self.assertIn('latest/api/token', readurl_call[0][0]) + self.assertIn("latest/api/token", readurl_call[0][0]) def test_aws_token_403_fails_without_retries(self): """Verify that 403s 
fetching AWS tokens are not retried.""" ds = self._setup_ds( platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md=None) - token_url = self.data_url('latest', data_item='api/token') + sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, + md=None, + ) + token_url = self.data_url("latest", data_item="api/token") httpretty.register_uri(httpretty.PUT, token_url, body={}, status=403) self.assertFalse(ds.get_data()) # Just one /latest/api/token request logs = self.logs.getvalue() failed_put_log = '"PUT /latest/api/token HTTP/1.1" 403 0' expected_logs = [ - 'WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is' - ' disabled. Aborting.', + "WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is" + " disabled. Aborting.", "WARNING: IMDS's HTTP endpoint is probably disabled", - failed_put_log + failed_put_log, ] for log in expected_logs: self.assertIn(log, logs) self.assertEqual( 1, - len([line for line in logs.splitlines() if failed_put_log in line]) + len( + [line for line in logs.splitlines() if failed_put_log in line] + ), ) def test_aws_token_redacted(self): """Verify that aws tokens are redacted when logged.""" ds = self._setup_ds( platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) + sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, + md={"md": DEFAULT_METADATA}, + ) self.assertTrue(ds.get_data()) all_logs = self.logs.getvalue().splitlines() REDACT_TTL = "'X-aws-ec2-metadata-token-ttl-seconds': 'REDACTED'" REDACT_TOK = "'X-aws-ec2-metadata-token': 'REDACTED'" logs_with_redacted_ttl = [log for log in all_logs if REDACT_TTL in log] logs_with_redacted = [log for log in all_logs if REDACT_TOK in log] - logs_with_token = [log for log in all_logs if 'API-TOKEN' in log] + logs_with_token = [log for log in all_logs if "API-TOKEN" in log] self.assertEqual(1, len(logs_with_redacted_ttl)) self.assertEqual(81, len(logs_with_redacted)) self.assertEqual(0, len(logs_with_token)) - @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") def test_valid_platform_with_strict_true(self, m_dhcp): """Valid platform data should return true with strict_id true.""" ds = self._setup_ds( platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md={'md': DEFAULT_METADATA}) + sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, + md={"md": DEFAULT_METADATA}, + ) ret = ds.get_data() self.assertTrue(ret) self.assertEqual(0, m_dhcp.call_count) - self.assertEqual('aws', ds.cloud_name) - self.assertEqual('ec2', ds.platform_type) - self.assertEqual('metadata (%s)' % ds.metadata_address, ds.subplatform) + self.assertEqual("aws", ds.cloud_name) + self.assertEqual("ec2", ds.platform_type) + self.assertEqual("metadata (%s)" % ds.metadata_address, ds.subplatform) def test_valid_platform_with_strict_false(self): """Valid platform data should return true with strict_id false.""" ds = self._setup_ds( platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) + sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, + md={"md": DEFAULT_METADATA}, + ) ret = ds.get_data() self.assertTrue(ret) def test_unknown_platform_with_strict_true(self): """Unknown platform data with strict_id true should return False.""" - uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a' + uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a" ds 
= self._setup_ds( - platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''}, - sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md={'md': DEFAULT_METADATA}) + platform_data={"uuid": uuid, "uuid_source": "dmi", "serial": ""}, + sys_cfg={"datasource": {"Ec2": {"strict_id": True}}}, + md={"md": DEFAULT_METADATA}, + ) ret = ds.get_data() self.assertFalse(ret) def test_unknown_platform_with_strict_false(self): """Unknown platform data with strict_id false should return True.""" - uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a' + uuid = "ab439480-72bf-11d3-91fc-b8aded755F9a" ds = self._setup_ds( - platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''}, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) + platform_data={"uuid": uuid, "uuid_source": "dmi", "serial": ""}, + sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, + md={"md": DEFAULT_METADATA}, + ) ret = ds.get_data() self.assertTrue(ret) @@ -682,24 +725,28 @@ class TestEc2(test_helpers.HttprettyTestCase): self.datasource = ec2.DataSourceEc2Local ds = self._setup_ds( platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) + sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, + md={"md": DEFAULT_METADATA}, + ) platform_attrs = [ - attr for attr in ec2.CloudNames.__dict__.keys() - if not attr.startswith('__')] + attr + for attr in ec2.CloudNames.__dict__.keys() + if not attr.startswith("__") + ] for attr_name in platform_attrs: platform_name = getattr(ec2.CloudNames, attr_name) - if platform_name != 'aws': + if platform_name != "aws": ds._cloud_name = platform_name ret = ds.get_data() - self.assertEqual('ec2', ds.platform_type) + self.assertEqual("ec2", ds.platform_type) self.assertFalse(ret) message = ( "Local Ec2 mode only supported on ('aws',)," - ' not {0}'.format(platform_name)) + " not {0}".format(platform_name) + ) self.assertIn(message, self.logs.getvalue()) - @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD') + @mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD") def test_ec2_local_returns_false_on_bsd(self, m_is_freebsd): """DataSourceEc2Local returns False on BSD. @@ -709,20 +756,23 @@ class TestEc2(test_helpers.HttprettyTestCase): self.datasource = ec2.DataSourceEc2Local ds = self._setup_ds( platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) + sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, + md={"md": DEFAULT_METADATA}, + ) ret = ds.get_data() self.assertFalse(ret) self.assertIn( "FreeBSD doesn't support running dhclient with -sf", - self.logs.getvalue()) - - @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') - @mock.patch('cloudinit.net.find_fallback_nic') - @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') - @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD') - def test_ec2_local_performs_dhcp_on_non_bsd(self, m_is_bsd, m_dhcp, - m_fallback_nic, m_net): + self.logs.getvalue(), + ) + + @mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network") + @mock.patch("cloudinit.net.find_fallback_nic") + @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") + @mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD") + def test_ec2_local_performs_dhcp_on_non_bsd( + self, m_is_bsd, m_dhcp, m_fallback_nic, m_net + ): """Ec2Local returns True for valid platform data on non-BSD with dhcp. DataSourceEc2Local will setup initial IPv4 network via dhcp discovery. 
@@ -730,31 +780,41 @@ class TestEc2(test_helpers.HttprettyTestCase): When the platform data is valid, return True. """ - m_fallback_nic.return_value = 'eth9' + m_fallback_nic.return_value = "eth9" m_is_bsd.return_value = False - m_dhcp.return_value = [{ - 'interface': 'eth9', 'fixed-address': '192.168.2.9', - 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', - 'broadcast-address': '192.168.2.255'}] + m_dhcp.return_value = [ + { + "interface": "eth9", + "fixed-address": "192.168.2.9", + "routers": "192.168.2.1", + "subnet-mask": "255.255.255.0", + "broadcast-address": "192.168.2.255", + } + ] self.datasource = ec2.DataSourceEc2Local ds = self._setup_ds( platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) + sys_cfg={"datasource": {"Ec2": {"strict_id": False}}}, + md={"md": DEFAULT_METADATA}, + ) ret = ds.get_data() self.assertTrue(ret) - m_dhcp.assert_called_once_with('eth9', None) + m_dhcp.assert_called_once_with("eth9", None) m_net.assert_called_once_with( - broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', - prefix_or_mask='255.255.255.0', router='192.168.2.1', - static_routes=None) - self.assertIn('Crawl of metadata service took', self.logs.getvalue()) + broadcast="192.168.2.255", + interface="eth9", + ip="192.168.2.9", + prefix_or_mask="255.255.255.0", + router="192.168.2.1", + static_routes=None, + ) + self.assertIn("Crawl of metadata service took", self.logs.getvalue()) class TestGetSecondaryAddresses(test_helpers.CiTestCase): - mac = '06:17:04:d7:26:ff' + mac = "06:17:04:d7:26:ff" with_logs = True def test_md_with_no_secondary_addresses(self): @@ -764,26 +824,34 @@ class TestGetSecondaryAddresses(test_helpers.CiTestCase): def test_md_with_secondary_v4_and_v6_addresses(self): """All secondary addresses are returned from nic metadata""" self.assertEqual( - ['172.31.45.70/20', '2600:1f16:292:100:f152:2222:3333:4444/128', - '2600:1f16:292:100:f153:12a3:c37c:11f9/128'], - ec2.get_secondary_addresses(NIC1_MD_IPV4_IPV6_MULTI_IP, self.mac)) + [ + "172.31.45.70/20", + "2600:1f16:292:100:f152:2222:3333:4444/128", + "2600:1f16:292:100:f153:12a3:c37c:11f9/128", + ], + ec2.get_secondary_addresses(NIC1_MD_IPV4_IPV6_MULTI_IP, self.mac), + ) def test_invalid_ipv4_ipv6_cidr_metadata_logged_with_defaults(self): """Any invalid subnet-ipv(4|6)-cidr-block values use defaults""" invalid_cidr_md = copy.deepcopy(NIC1_MD_IPV4_IPV6_MULTI_IP) - invalid_cidr_md['subnet-ipv4-cidr-block'] = "something-unexpected" - invalid_cidr_md['subnet-ipv6-cidr-block'] = "not/sure/what/this/is" + invalid_cidr_md["subnet-ipv4-cidr-block"] = "something-unexpected" + invalid_cidr_md["subnet-ipv6-cidr-block"] = "not/sure/what/this/is" self.assertEqual( - ['172.31.45.70/24', '2600:1f16:292:100:f152:2222:3333:4444/128', - '2600:1f16:292:100:f153:12a3:c37c:11f9/128'], - ec2.get_secondary_addresses(invalid_cidr_md, self.mac)) + [ + "172.31.45.70/24", + "2600:1f16:292:100:f152:2222:3333:4444/128", + "2600:1f16:292:100:f153:12a3:c37c:11f9/128", + ], + ec2.get_secondary_addresses(invalid_cidr_md, self.mac), + ) expected_logs = [ "WARNING: Could not parse subnet-ipv4-cidr-block" " something-unexpected for mac 06:17:04:d7:26:ff." " ipv4 network config prefix defaults to /24", "WARNING: Could not parse subnet-ipv6-cidr-block" " not/sure/what/this/is for mac 06:17:04:d7:26:ff." 
- " ipv6 network config prefix defaults to /128" + " ipv6 network config prefix defaults to /128", ] logs = self.logs.getvalue() for log in expected_logs: @@ -791,188 +859,267 @@ class TestGetSecondaryAddresses(test_helpers.CiTestCase): class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase): - def setUp(self): super(TestConvertEc2MetadataNetworkConfig, self).setUp() - self.mac1 = '06:17:04:d7:26:09' + self.mac1 = "06:17:04:d7:26:09" interface_dict = copy.deepcopy( - DEFAULT_METADATA['network']['interfaces']['macs'][self.mac1]) + DEFAULT_METADATA["network"]["interfaces"]["macs"][self.mac1] + ) # These tests are written assuming the base interface doesn't have IPv6 - interface_dict.pop('ipv6s') + interface_dict.pop("ipv6s") self.network_metadata = { - 'interfaces': {'macs': {self.mac1: interface_dict}}} + "interfaces": {"macs": {self.mac1: interface_dict}} + } def test_convert_ec2_metadata_network_config_skips_absent_macs(self): """Any mac absent from metadata is skipped by network config.""" - macs_to_nics = {self.mac1: 'eth9', 'DE:AD:BE:EF:FF:FF': 'vitualnic2'} + macs_to_nics = {self.mac1: "eth9", "DE:AD:BE:EF:FF:FF": "vitualnic2"} # DE:AD:BE:EF:FF:FF represented by OS but not in metadata - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': False}}} + expected = { + "version": 2, + "ethernets": { + "eth9": { + "match": {"macaddress": self.mac1}, + "set-name": "eth9", + "dhcp4": True, + "dhcp6": False, + } + }, + } self.assertEqual( expected, ec2.convert_ec2_metadata_network_config( - self.network_metadata, macs_to_nics)) + self.network_metadata, macs_to_nics + ), + ) def test_convert_ec2_metadata_network_config_handles_only_dhcp6(self): """Config dhcp6 when ipv6s is in metadata for a mac.""" - macs_to_nics = {self.mac1: 'eth9'} + macs_to_nics = {self.mac1: "eth9"} network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = ( - network_metadata_ipv6['interfaces']['macs'][self.mac1]) - nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64' - nic1_metadata.pop('public-ipv4s') - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': True}}} + nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1] + nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" + nic1_metadata.pop("public-ipv4s") + expected = { + "version": 2, + "ethernets": { + "eth9": { + "match": {"macaddress": self.mac1}, + "set-name": "eth9", + "dhcp4": True, + "dhcp6": True, + } + }, + } self.assertEqual( expected, ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, macs_to_nics)) + network_metadata_ipv6, macs_to_nics + ), + ) def test_convert_ec2_metadata_network_config_local_only_dhcp4(self): """Config dhcp4 when there are no public addresses in public-ipv4s.""" - macs_to_nics = {self.mac1: 'eth9'} + macs_to_nics = {self.mac1: "eth9"} network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = ( - network_metadata_ipv6['interfaces']['macs'][self.mac1]) - nic1_metadata['local-ipv4s'] = '172.3.3.15' - nic1_metadata.pop('public-ipv4s') - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': False}}} + nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1] + nic1_metadata["local-ipv4s"] = "172.3.3.15" + nic1_metadata.pop("public-ipv4s") + expected = { + "version": 2, + 
"ethernets": { + "eth9": { + "match": {"macaddress": self.mac1}, + "set-name": "eth9", + "dhcp4": True, + "dhcp6": False, + } + }, + } self.assertEqual( expected, ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, macs_to_nics)) + network_metadata_ipv6, macs_to_nics + ), + ) def test_convert_ec2_metadata_network_config_handles_absent_dhcp4(self): """Config dhcp4 on fallback_nic when there are no ipv4 addresses.""" - macs_to_nics = {self.mac1: 'eth9'} + macs_to_nics = {self.mac1: "eth9"} network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = ( - network_metadata_ipv6['interfaces']['macs'][self.mac1]) - nic1_metadata['public-ipv4s'] = '' + nic1_metadata = network_metadata_ipv6["interfaces"]["macs"][self.mac1] + nic1_metadata["public-ipv4s"] = "" # When no ipv4 or ipv6 content but fallback_nic set, set dhcp4 config. - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': False}}} + expected = { + "version": 2, + "ethernets": { + "eth9": { + "match": {"macaddress": self.mac1}, + "set-name": "eth9", + "dhcp4": True, + "dhcp6": False, + } + }, + } self.assertEqual( expected, ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, macs_to_nics, fallback_nic='eth9')) + network_metadata_ipv6, macs_to_nics, fallback_nic="eth9" + ), + ) def test_convert_ec2_metadata_network_config_handles_local_v4_and_v6(self): """When ipv6s and local-ipv4s are non-empty, enable dhcp6 and dhcp4.""" - macs_to_nics = {self.mac1: 'eth9'} + macs_to_nics = {self.mac1: "eth9"} network_metadata_both = copy.deepcopy(self.network_metadata) - nic1_metadata = ( - network_metadata_both['interfaces']['macs'][self.mac1]) - nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64' - nic1_metadata.pop('public-ipv4s') - nic1_metadata['local-ipv4s'] = '10.0.0.42' # Local ipv4 only on vpc - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': True}}} + nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] + nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" + nic1_metadata.pop("public-ipv4s") + nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc + expected = { + "version": 2, + "ethernets": { + "eth9": { + "match": {"macaddress": self.mac1}, + "set-name": "eth9", + "dhcp4": True, + "dhcp6": True, + } + }, + } self.assertEqual( expected, ec2.convert_ec2_metadata_network_config( - network_metadata_both, macs_to_nics)) + network_metadata_both, macs_to_nics + ), + ) def test_convert_ec2_metadata_network_config_handles_multiple_nics(self): """DHCP route-metric increases on secondary NICs for IPv4 and IPv6.""" - mac2 = '06:17:04:d7:26:08' - macs_to_nics = {self.mac1: 'eth9', mac2: 'eth10'} + mac2 = "06:17:04:d7:26:08" + macs_to_nics = {self.mac1: "eth9", mac2: "eth10"} network_metadata_both = copy.deepcopy(self.network_metadata) # Add 2nd nic info - network_metadata_both['interfaces']['macs'][mac2] = NIC2_MD - nic1_metadata = ( - network_metadata_both['interfaces']['macs'][self.mac1]) - nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64' - nic1_metadata.pop('public-ipv4s') # No public-ipv4 IPs in cfg - nic1_metadata['local-ipv4s'] = '10.0.0.42' # Local ipv4 only on vpc - expected = {'version': 2, 'ethernets': { - 'eth9': { - 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': True, 
'dhcp6-overrides': {'route-metric': 100}}, - 'eth10': { - 'match': {'macaddress': mac2}, 'set-name': 'eth10', - 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 200}, - 'dhcp6': False}}} + network_metadata_both["interfaces"]["macs"][mac2] = NIC2_MD + nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] + nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" + nic1_metadata.pop("public-ipv4s") # No public-ipv4 IPs in cfg + nic1_metadata["local-ipv4s"] = "10.0.0.42" # Local ipv4 only on vpc + expected = { + "version": 2, + "ethernets": { + "eth9": { + "match": {"macaddress": self.mac1}, + "set-name": "eth9", + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 100}, + "dhcp6": True, + "dhcp6-overrides": {"route-metric": 100}, + }, + "eth10": { + "match": {"macaddress": mac2}, + "set-name": "eth10", + "dhcp4": True, + "dhcp4-overrides": {"route-metric": 200}, + "dhcp6": False, + }, + }, + } self.assertEqual( expected, ec2.convert_ec2_metadata_network_config( - network_metadata_both, macs_to_nics)) + network_metadata_both, macs_to_nics + ), + ) def test_convert_ec2_metadata_network_config_handles_dhcp4_and_dhcp6(self): """Config both dhcp4 and dhcp6 when both vpc-ipv6 and ipv4 exists.""" - macs_to_nics = {self.mac1: 'eth9'} + macs_to_nics = {self.mac1: "eth9"} network_metadata_both = copy.deepcopy(self.network_metadata) - nic1_metadata = ( - network_metadata_both['interfaces']['macs'][self.mac1]) - nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64' - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': True}}} + nic1_metadata = network_metadata_both["interfaces"]["macs"][self.mac1] + nic1_metadata["ipv6s"] = "2620:0:1009:fd00:e442:c88d:c04d:dc85/64" + expected = { + "version": 2, + "ethernets": { + "eth9": { + "match": {"macaddress": self.mac1}, + "set-name": "eth9", + "dhcp4": True, + "dhcp6": True, + } + }, + } self.assertEqual( expected, ec2.convert_ec2_metadata_network_config( - network_metadata_both, macs_to_nics)) + network_metadata_both, macs_to_nics + ), + ) def test_convert_ec2_metadata_gets_macs_from_get_interfaces_by_mac(self): """Convert Ec2 Metadata calls get_interfaces_by_mac by default.""" - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': self.mac1}, - 'set-name': 'eth9', 'dhcp4': True, 'dhcp6': False}}} - patch_path = M_PATH_NET + 'get_interfaces_by_mac' + expected = { + "version": 2, + "ethernets": { + "eth9": { + "match": {"macaddress": self.mac1}, + "set-name": "eth9", + "dhcp4": True, + "dhcp6": False, + } + }, + } + patch_path = M_PATH_NET + "get_interfaces_by_mac" with mock.patch(patch_path) as m_get_interfaces_by_mac: - m_get_interfaces_by_mac.return_value = {self.mac1: 'eth9'} + m_get_interfaces_by_mac.return_value = {self.mac1: "eth9"} self.assertEqual( expected, - ec2.convert_ec2_metadata_network_config(self.network_metadata)) + ec2.convert_ec2_metadata_network_config(self.network_metadata), + ) class TesIdentifyPlatform(test_helpers.CiTestCase): - def collmock(self, **kwargs): """return non-special _collect_platform_data updated with changes.""" unspecial = { - 'asset_tag': '3857-0037-2746-7462-1818-3997-77', - 'serial': 'H23-C4J3JV-R6', - 'uuid': '81c7e555-6471-4833-9551-1ab366c4cfd2', - 'uuid_source': 'dmi', - 'vendor': 'tothecloud', + "asset_tag": "3857-0037-2746-7462-1818-3997-77", + "serial": "H23-C4J3JV-R6", + "uuid": "81c7e555-6471-4833-9551-1ab366c4cfd2", + "uuid_source": "dmi", + "vendor": 
"tothecloud", } unspecial.update(**kwargs) return unspecial - @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') + @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") def test_identify_zstack(self, m_collect): - """zstack should be identified if chassis-asset-tag ends in .zstack.io + """zstack should be identified if chassis-asset-tag + ends in .zstack.io """ - m_collect.return_value = self.collmock(asset_tag='123456.zstack.io') + m_collect.return_value = self.collmock(asset_tag="123456.zstack.io") self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform()) - @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') + @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") def test_identify_zstack_full_domain_only(self, m_collect): - """zstack asset-tag matching should match only on full domain boundary. + """zstack asset-tag matching should match only on + full domain boundary. """ - m_collect.return_value = self.collmock(asset_tag='123456.buzzstack.io') + m_collect.return_value = self.collmock(asset_tag="123456.buzzstack.io") self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) - @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') + @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") def test_identify_e24cloud(self, m_collect): """e24cloud identified if vendor is e24cloud""" - m_collect.return_value = self.collmock(vendor='e24cloud') + m_collect.return_value = self.collmock(vendor="e24cloud") self.assertEqual(ec2.CloudNames.E24CLOUD, ec2.identify_platform()) - @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') + @mock.patch("cloudinit.sources.DataSourceEc2._collect_platform_data") def test_identify_e24cloud_negative(self, m_collect): """e24cloud identified if vendor is e24cloud""" - m_collect.return_value = self.collmock(vendor='e24cloudyday') + m_collect.return_value = self.collmock(vendor="e24cloudyday") self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) + # vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_exoscale.py b/tests/unittests/sources/test_exoscale.py index b0ffb7a5..591256d8 100644 --- a/tests/unittests/sources/test_exoscale.py +++ b/tests/unittests/sources/test_exoscale.py @@ -2,36 +2,33 @@ # Author: Christopher Glass <christopher.glass@exoscale.com> # # This file is part of cloud-init. See LICENSE file for license information. 
-from cloudinit import helpers +import os + +import httpretty +import requests + +from cloudinit import helpers, util from cloudinit.sources.DataSourceExoscale import ( API_VERSION, - DataSourceExoscale, METADATA_URL, - get_password, PASSWORD_SERVER_PORT, - read_metadata) + DataSourceExoscale, + get_password, + read_metadata, +) from tests.unittests.helpers import HttprettyTestCase, mock -from cloudinit import util -import httpretty -import os -import requests +TEST_PASSWORD_URL = "{}:{}/{}/".format( + METADATA_URL, PASSWORD_SERVER_PORT, API_VERSION +) +TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL, API_VERSION) -TEST_PASSWORD_URL = "{}:{}/{}/".format(METADATA_URL, - PASSWORD_SERVER_PORT, - API_VERSION) - -TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL, - API_VERSION) - -TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL, - API_VERSION) +TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL, API_VERSION) @httpretty.activate class TestDatasourceExoscale(HttprettyTestCase): - def setUp(self): super(TestDatasourceExoscale, self).setUp() self.tmp = self.tmp_dir() @@ -42,37 +39,35 @@ class TestDatasourceExoscale(HttprettyTestCase): def test_password_saved(self): """The password is not set when it is not found in the metadata service.""" - httpretty.register_uri(httpretty.GET, - self.password_url, - body="saved_password") + httpretty.register_uri( + httpretty.GET, self.password_url, body="saved_password" + ) self.assertFalse(get_password()) def test_password_empty(self): """No password is set if the metadata service returns an empty string.""" - httpretty.register_uri(httpretty.GET, - self.password_url, - body="") + httpretty.register_uri(httpretty.GET, self.password_url, body="") self.assertFalse(get_password()) def test_password(self): """The password is set to what is found in the metadata service.""" expected_password = "p@ssw0rd" - httpretty.register_uri(httpretty.GET, - self.password_url, - body=expected_password) + httpretty.register_uri( + httpretty.GET, self.password_url, body=expected_password + ) password = get_password() self.assertEqual(expected_password, password) def test_activate_removes_set_passwords_semaphore(self): """Allow set_passwords to run every boot by removing the semaphore.""" - path = helpers.Paths({'cloud_dir': self.tmp}) - sem_dir = self.tmp_path('instance/sem', dir=self.tmp) + path = helpers.Paths({"cloud_dir": self.tmp}) + sem_dir = self.tmp_path("instance/sem", dir=self.tmp) util.ensure_dir(sem_dir) - sem_file = os.path.join(sem_dir, 'config_set_passwords') - with open(sem_file, 'w') as stream: - stream.write('') + sem_file = os.path.join(sem_dir, "config_set_passwords") + with open(sem_file, "w") as stream: + stream.write("") ds = DataSourceExoscale({}, None, path) ds.activate(None, None) self.assertFalse(os.path.exists(sem_file)) @@ -80,102 +75,130 @@ class TestDatasourceExoscale(HttprettyTestCase): def test_get_data(self): """The datasource conforms to expected behavior when supplied full test data.""" - path = helpers.Paths({'run_dir': self.tmp}) + path = helpers.Paths({"run_dir": self.tmp}) ds = DataSourceExoscale({}, None, path) ds._is_platform_viable = lambda: True expected_password = "p@ssw0rd" expected_id = "12345" expected_hostname = "myname" expected_userdata = "#cloud-config" - httpretty.register_uri(httpretty.GET, - self.userdata_url, - body=expected_userdata) - httpretty.register_uri(httpretty.GET, - self.password_url, - body=expected_password) - httpretty.register_uri(httpretty.GET, - self.metadata_url, - 
body="instance-id\nlocal-hostname") - httpretty.register_uri(httpretty.GET, - "{}local-hostname".format(self.metadata_url), - body=expected_hostname) - httpretty.register_uri(httpretty.GET, - "{}instance-id".format(self.metadata_url), - body=expected_id) + httpretty.register_uri( + httpretty.GET, self.userdata_url, body=expected_userdata + ) + httpretty.register_uri( + httpretty.GET, self.password_url, body=expected_password + ) + httpretty.register_uri( + httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname", + ) + httpretty.register_uri( + httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname, + ) + httpretty.register_uri( + httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id, + ) self.assertTrue(ds._get_data()) self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") - self.assertEqual(ds.metadata, {"instance-id": expected_id, - "local-hostname": expected_hostname}) - self.assertEqual(ds.get_config_obj(), - {'ssh_pwauth': True, - 'password': expected_password, - 'chpasswd': { - 'expire': False, - }}) + self.assertEqual( + ds.metadata, + {"instance-id": expected_id, "local-hostname": expected_hostname}, + ) + self.assertEqual( + ds.get_config_obj(), + { + "ssh_pwauth": True, + "password": expected_password, + "chpasswd": { + "expire": False, + }, + }, + ) def test_get_data_saved_password(self): """The datasource conforms to expected behavior when saved_password is returned by the password server.""" - path = helpers.Paths({'run_dir': self.tmp}) + path = helpers.Paths({"run_dir": self.tmp}) ds = DataSourceExoscale({}, None, path) ds._is_platform_viable = lambda: True expected_answer = "saved_password" expected_id = "12345" expected_hostname = "myname" expected_userdata = "#cloud-config" - httpretty.register_uri(httpretty.GET, - self.userdata_url, - body=expected_userdata) - httpretty.register_uri(httpretty.GET, - self.password_url, - body=expected_answer) - httpretty.register_uri(httpretty.GET, - self.metadata_url, - body="instance-id\nlocal-hostname") - httpretty.register_uri(httpretty.GET, - "{}local-hostname".format(self.metadata_url), - body=expected_hostname) - httpretty.register_uri(httpretty.GET, - "{}instance-id".format(self.metadata_url), - body=expected_id) + httpretty.register_uri( + httpretty.GET, self.userdata_url, body=expected_userdata + ) + httpretty.register_uri( + httpretty.GET, self.password_url, body=expected_answer + ) + httpretty.register_uri( + httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname", + ) + httpretty.register_uri( + httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname, + ) + httpretty.register_uri( + httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id, + ) self.assertTrue(ds._get_data()) self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") - self.assertEqual(ds.metadata, {"instance-id": expected_id, - "local-hostname": expected_hostname}) + self.assertEqual( + ds.metadata, + {"instance-id": expected_id, "local-hostname": expected_hostname}, + ) self.assertEqual(ds.get_config_obj(), {}) def test_get_data_no_password(self): """The datasource conforms to expected behavior when no password is returned by the password server.""" - path = helpers.Paths({'run_dir': self.tmp}) + path = helpers.Paths({"run_dir": self.tmp}) ds = DataSourceExoscale({}, None, path) ds._is_platform_viable = lambda: True expected_answer = "" expected_id = "12345" expected_hostname = "myname" 
expected_userdata = "#cloud-config" - httpretty.register_uri(httpretty.GET, - self.userdata_url, - body=expected_userdata) - httpretty.register_uri(httpretty.GET, - self.password_url, - body=expected_answer) - httpretty.register_uri(httpretty.GET, - self.metadata_url, - body="instance-id\nlocal-hostname") - httpretty.register_uri(httpretty.GET, - "{}local-hostname".format(self.metadata_url), - body=expected_hostname) - httpretty.register_uri(httpretty.GET, - "{}instance-id".format(self.metadata_url), - body=expected_id) + httpretty.register_uri( + httpretty.GET, self.userdata_url, body=expected_userdata + ) + httpretty.register_uri( + httpretty.GET, self.password_url, body=expected_answer + ) + httpretty.register_uri( + httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname", + ) + httpretty.register_uri( + httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname, + ) + httpretty.register_uri( + httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id, + ) self.assertTrue(ds._get_data()) self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") - self.assertEqual(ds.metadata, {"instance-id": expected_id, - "local-hostname": expected_hostname}) + self.assertEqual( + ds.metadata, + {"instance-id": expected_id, "local-hostname": expected_hostname}, + ) self.assertEqual(ds.get_config_obj(), {}) - @mock.patch('cloudinit.sources.DataSourceExoscale.get_password') + @mock.patch("cloudinit.sources.DataSourceExoscale.get_password") def test_read_metadata_when_password_server_unreachable(self, m_password): """The read_metadata function returns partial results in case the password server (only) is unreachable.""" @@ -183,29 +206,36 @@ class TestDatasourceExoscale(HttprettyTestCase): expected_hostname = "myname" expected_userdata = "#cloud-config" - m_password.side_effect = requests.Timeout('Fake Connection Timeout') - httpretty.register_uri(httpretty.GET, - self.userdata_url, - body=expected_userdata) - httpretty.register_uri(httpretty.GET, - self.metadata_url, - body="instance-id\nlocal-hostname") - httpretty.register_uri(httpretty.GET, - "{}local-hostname".format(self.metadata_url), - body=expected_hostname) - httpretty.register_uri(httpretty.GET, - "{}instance-id".format(self.metadata_url), - body=expected_id) + m_password.side_effect = requests.Timeout("Fake Connection Timeout") + httpretty.register_uri( + httpretty.GET, self.userdata_url, body=expected_userdata + ) + httpretty.register_uri( + httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname", + ) + httpretty.register_uri( + httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname, + ) + httpretty.register_uri( + httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id, + ) result = read_metadata() self.assertIsNone(result.get("password")) - self.assertEqual(result.get("user-data").decode("utf-8"), - expected_userdata) + self.assertEqual( + result.get("user-data").decode("utf-8"), expected_userdata + ) def test_non_viable_platform(self): """The datasource fails fast when the platform is not viable.""" - path = helpers.Paths({'run_dir': self.tmp}) + path = helpers.Paths({"run_dir": self.tmp}) ds = DataSourceExoscale({}, None, path) ds._is_platform_viable = lambda: False self.assertFalse(ds._get_data()) diff --git a/tests/unittests/sources/test_gce.py b/tests/unittests/sources/test_gce.py index dc768e99..e030931b 100644 --- a/tests/unittests/sources/test_gce.py +++ 
b/tests/unittests/sources/test_gce.py @@ -5,58 +5,57 @@ # This file is part of cloud-init. See LICENSE file for license information. import datetime -import httpretty import json import re +from base64 import b64decode, b64encode from unittest import mock from urllib.parse import urlparse -from base64 import b64encode, b64decode +import httpretty -from cloudinit import distros -from cloudinit import helpers -from cloudinit import settings +from cloudinit import distros, helpers, settings from cloudinit.sources import DataSourceGCE - from tests.unittests import helpers as test_helpers - GCE_META = { - 'instance/id': '123', - 'instance/zone': 'foo/bar', - 'instance/hostname': 'server.project-foo.local', + "instance/id": "123", + "instance/zone": "foo/bar", + "instance/hostname": "server.project-foo.local", } GCE_META_PARTIAL = { - 'instance/id': '1234', - 'instance/hostname': 'server.project-bar.local', - 'instance/zone': 'bar/baz', + "instance/id": "1234", + "instance/hostname": "server.project-bar.local", + "instance/zone": "bar/baz", } GCE_META_ENCODING = { - 'instance/id': '12345', - 'instance/hostname': 'server.project-baz.local', - 'instance/zone': 'baz/bang', - 'instance/attributes': { - 'user-data': b64encode(b'#!/bin/echo baz\n').decode('utf-8'), - 'user-data-encoding': 'base64', - } + "instance/id": "12345", + "instance/hostname": "server.project-baz.local", + "instance/zone": "baz/bang", + "instance/attributes": { + "user-data": b64encode(b"#!/bin/echo baz\n").decode("utf-8"), + "user-data-encoding": "base64", + }, } GCE_USER_DATA_TEXT = { - 'instance/id': '12345', - 'instance/hostname': 'server.project-baz.local', - 'instance/zone': 'baz/bang', - 'instance/attributes': { - 'user-data': '#!/bin/sh\necho hi mom\ntouch /run/up-now\n', - } + "instance/id": "12345", + "instance/hostname": "server.project-baz.local", + "instance/zone": "baz/bang", + "instance/attributes": { + "user-data": "#!/bin/sh\necho hi mom\ntouch /run/up-now\n", + }, } -HEADERS = {'Metadata-Flavor': 'Google'} +HEADERS = {"Metadata-Flavor": "Google"} MD_URL_RE = re.compile( - r'http://metadata.google.internal/computeMetadata/v1/.*') -GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/' - 'v1/instance/guest-attributes/hostkeys/') + r"http://metadata.google.internal/computeMetadata/v1/.*" +) +GUEST_ATTRIBUTES_URL = ( + "http://metadata.google.internal/computeMetadata/" + "v1/instance/guest-attributes/hostkeys/" +) def _set_mock_metadata(gce_meta=None): @@ -65,10 +64,10 @@ def _set_mock_metadata(gce_meta=None): def _request_callback(method, uri, headers): url_path = urlparse(uri).path - if url_path.startswith('/computeMetadata/v1/'): - path = url_path.split('/computeMetadata/v1/')[1:][0] - recursive = path.endswith('/') - path = path.rstrip('/') + if url_path.startswith("/computeMetadata/v1/"): + path = url_path.split("/computeMetadata/v1/")[1:][0] + recursive = path.endswith("/") + path = path.rstrip("/") else: path = None if path in gce_meta: @@ -77,7 +76,7 @@ def _set_mock_metadata(gce_meta=None): response = json.dumps(response) return (200, headers, response) else: - return (404, headers, '') + return (404, headers, "") # reset is needed. 
https://github.com/gabrielfalcao/HTTPretty/issues/316 httpretty.register_uri(httpretty.GET, MD_URL_RE, body=_request_callback) @@ -85,28 +84,28 @@ def _set_mock_metadata(gce_meta=None): @httpretty.activate class TestDataSourceGCE(test_helpers.HttprettyTestCase): - def _make_distro(self, dtype, def_user=None): cfg = dict(settings.CFG_BUILTIN) - cfg['system_info']['distro'] = dtype - paths = helpers.Paths(cfg['system_info']['paths']) + cfg["system_info"]["distro"] = dtype + paths = helpers.Paths(cfg["system_info"]["paths"]) distro_cls = distros.fetch(dtype) if def_user: - cfg['system_info']['default_user'] = def_user.copy() - distro = distro_cls(dtype, cfg['system_info'], paths) + cfg["system_info"]["default_user"] = def_user.copy() + distro = distro_cls(dtype, cfg["system_info"], paths) return distro def setUp(self): tmp = self.tmp_dir() self.ds = DataSourceGCE.DataSourceGCE( - settings.CFG_BUILTIN, None, - helpers.Paths({'run_dir': tmp})) + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": tmp}) + ) ppatch = self.m_platform_reports_gce = mock.patch( - 'cloudinit.sources.DataSourceGCE.platform_reports_gce') + "cloudinit.sources.DataSourceGCE.platform_reports_gce" + ) self.m_platform_reports_gce = ppatch.start() self.m_platform_reports_gce.return_value = True self.addCleanup(ppatch.stop) - self.add_patch('time.sleep', 'm_sleep') # just to speed up tests + self.add_patch("time.sleep", "m_sleep") # just to speed up tests super(TestDataSourceGCE, self).setUp() def test_connection(self): @@ -121,30 +120,33 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): def test_metadata(self): # UnicodeDecodeError if set to ds.userdata instead of userdata_raw meta = GCE_META.copy() - meta['instance/attributes/user-data'] = b'/bin/echo \xff\n' + meta["instance/attributes/user-data"] = b"/bin/echo \xff\n" _set_mock_metadata() self.ds.get_data() - shostname = GCE_META.get('instance/hostname').split('.')[0] - self.assertEqual(shostname, - self.ds.get_hostname()) + shostname = GCE_META.get("instance/hostname").split(".")[0] + self.assertEqual(shostname, self.ds.get_hostname()) - self.assertEqual(GCE_META.get('instance/id'), - self.ds.get_instance_id()) + self.assertEqual( + GCE_META.get("instance/id"), self.ds.get_instance_id() + ) - self.assertEqual(GCE_META.get('instance/attributes/user-data'), - self.ds.get_userdata_raw()) + self.assertEqual( + GCE_META.get("instance/attributes/user-data"), + self.ds.get_userdata_raw(), + ) # test partial metadata (missing user-data in particular) def test_metadata_partial(self): _set_mock_metadata(GCE_META_PARTIAL) self.ds.get_data() - self.assertEqual(GCE_META_PARTIAL.get('instance/id'), - self.ds.get_instance_id()) + self.assertEqual( + GCE_META_PARTIAL.get("instance/id"), self.ds.get_instance_id() + ) - shostname = GCE_META_PARTIAL.get('instance/hostname').split('.')[0] + shostname = GCE_META_PARTIAL.get("instance/hostname").split(".")[0] self.assertEqual(shostname, self.ds.get_hostname()) def test_userdata_no_encoding(self): @@ -152,21 +154,25 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): _set_mock_metadata(GCE_USER_DATA_TEXT) self.ds.get_data() self.assertEqual( - GCE_USER_DATA_TEXT['instance/attributes']['user-data'].encode(), - self.ds.get_userdata_raw()) + GCE_USER_DATA_TEXT["instance/attributes"]["user-data"].encode(), + self.ds.get_userdata_raw(), + ) def test_metadata_encoding(self): """user-data is base64 encoded if user-data-encoding is 'base64'.""" _set_mock_metadata(GCE_META_ENCODING) self.ds.get_data() - instance_data = 
GCE_META_ENCODING.get('instance/attributes') - decoded = b64decode(instance_data.get('user-data')) + instance_data = GCE_META_ENCODING.get("instance/attributes") + decoded = b64decode(instance_data.get("user-data")) self.assertEqual(decoded, self.ds.get_userdata_raw()) def test_missing_required_keys_return_false(self): - for required_key in ['instance/id', 'instance/zone', - 'instance/hostname']: + for required_key in [ + "instance/id", + "instance/zone", + "instance/hostname", + ]: meta = GCE_META_PARTIAL.copy() del meta[required_key] _set_mock_metadata(meta) @@ -179,29 +185,35 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): self.assertEqual([], self.ds.get_public_ssh_keys()) def test_cloudinit_ssh_keys(self): - valid_key = 'ssh-rsa VALID {0}' - invalid_key = 'ssh-rsa INVALID {0}' + valid_key = "ssh-rsa VALID {0}" + invalid_key = "ssh-rsa INVALID {0}" project_attributes = { - 'sshKeys': '\n'.join([ - 'cloudinit:{0}'.format(valid_key.format(0)), - 'user:{0}'.format(invalid_key.format(0)), - ]), - 'ssh-keys': '\n'.join([ - 'cloudinit:{0}'.format(valid_key.format(1)), - 'user:{0}'.format(invalid_key.format(1)), - ]), + "sshKeys": "\n".join( + [ + "cloudinit:{0}".format(valid_key.format(0)), + "user:{0}".format(invalid_key.format(0)), + ] + ), + "ssh-keys": "\n".join( + [ + "cloudinit:{0}".format(valid_key.format(1)), + "user:{0}".format(invalid_key.format(1)), + ] + ), } instance_attributes = { - 'ssh-keys': '\n'.join([ - 'cloudinit:{0}'.format(valid_key.format(2)), - 'user:{0}'.format(invalid_key.format(2)), - ]), - 'block-project-ssh-keys': 'False', + "ssh-keys": "\n".join( + [ + "cloudinit:{0}".format(valid_key.format(2)), + "user:{0}".format(invalid_key.format(2)), + ] + ), + "block-project-ssh-keys": "False", } meta = GCE_META.copy() - meta['project/attributes'] = project_attributes - meta['instance/attributes'] = instance_attributes + meta["project/attributes"] = project_attributes + meta["instance/attributes"] = instance_attributes _set_mock_metadata(meta) self.ds.get_data() @@ -212,34 +224,42 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): @mock.patch("cloudinit.sources.DataSourceGCE.ug_util") def test_default_user_ssh_keys(self, mock_ug_util): mock_ug_util.normalize_users_groups.return_value = None, None - mock_ug_util.extract_default.return_value = 'ubuntu', None + mock_ug_util.extract_default.return_value = "ubuntu", None ubuntu_ds = DataSourceGCE.DataSourceGCE( - settings.CFG_BUILTIN, self._make_distro('ubuntu'), - helpers.Paths({'run_dir': self.tmp_dir()})) + settings.CFG_BUILTIN, + self._make_distro("ubuntu"), + helpers.Paths({"run_dir": self.tmp_dir()}), + ) - valid_key = 'ssh-rsa VALID {0}' - invalid_key = 'ssh-rsa INVALID {0}' + valid_key = "ssh-rsa VALID {0}" + invalid_key = "ssh-rsa INVALID {0}" project_attributes = { - 'sshKeys': '\n'.join([ - 'ubuntu:{0}'.format(valid_key.format(0)), - 'user:{0}'.format(invalid_key.format(0)), - ]), - 'ssh-keys': '\n'.join([ - 'ubuntu:{0}'.format(valid_key.format(1)), - 'user:{0}'.format(invalid_key.format(1)), - ]), + "sshKeys": "\n".join( + [ + "ubuntu:{0}".format(valid_key.format(0)), + "user:{0}".format(invalid_key.format(0)), + ] + ), + "ssh-keys": "\n".join( + [ + "ubuntu:{0}".format(valid_key.format(1)), + "user:{0}".format(invalid_key.format(1)), + ] + ), } instance_attributes = { - 'ssh-keys': '\n'.join([ - 'ubuntu:{0}'.format(valid_key.format(2)), - 'user:{0}'.format(invalid_key.format(2)), - ]), - 'block-project-ssh-keys': 'False', + "ssh-keys": "\n".join( + [ + "ubuntu:{0}".format(valid_key.format(2)), 
+ "user:{0}".format(invalid_key.format(2)), + ] + ), + "block-project-ssh-keys": "False", } meta = GCE_META.copy() - meta['project/attributes'] = project_attributes - meta['instance/attributes'] = instance_attributes + meta["project/attributes"] = project_attributes + meta["instance/attributes"] = instance_attributes _set_mock_metadata(meta) ubuntu_ds.get_data() @@ -248,21 +268,21 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): self.assertEqual(set(expected), set(ubuntu_ds.get_public_ssh_keys())) def test_instance_ssh_keys_override(self): - valid_key = 'ssh-rsa VALID {0}' - invalid_key = 'ssh-rsa INVALID {0}' + valid_key = "ssh-rsa VALID {0}" + invalid_key = "ssh-rsa INVALID {0}" project_attributes = { - 'sshKeys': 'cloudinit:{0}'.format(invalid_key.format(0)), - 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)), + "sshKeys": "cloudinit:{0}".format(invalid_key.format(0)), + "ssh-keys": "cloudinit:{0}".format(invalid_key.format(1)), } instance_attributes = { - 'sshKeys': 'cloudinit:{0}'.format(valid_key.format(0)), - 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(1)), - 'block-project-ssh-keys': 'False', + "sshKeys": "cloudinit:{0}".format(valid_key.format(0)), + "ssh-keys": "cloudinit:{0}".format(valid_key.format(1)), + "block-project-ssh-keys": "False", } meta = GCE_META.copy() - meta['project/attributes'] = project_attributes - meta['instance/attributes'] = instance_attributes + meta["project/attributes"] = project_attributes + meta["instance/attributes"] = instance_attributes _set_mock_metadata(meta) self.ds.get_data() @@ -271,20 +291,20 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys())) def test_block_project_ssh_keys_override(self): - valid_key = 'ssh-rsa VALID {0}' - invalid_key = 'ssh-rsa INVALID {0}' + valid_key = "ssh-rsa VALID {0}" + invalid_key = "ssh-rsa INVALID {0}" project_attributes = { - 'sshKeys': 'cloudinit:{0}'.format(invalid_key.format(0)), - 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)), + "sshKeys": "cloudinit:{0}".format(invalid_key.format(0)), + "ssh-keys": "cloudinit:{0}".format(invalid_key.format(1)), } instance_attributes = { - 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(0)), - 'block-project-ssh-keys': 'True', + "ssh-keys": "cloudinit:{0}".format(valid_key.format(0)), + "block-project-ssh-keys": "True", } meta = GCE_META.copy() - meta['project/attributes'] = project_attributes - meta['instance/attributes'] = instance_attributes + meta["project/attributes"] = project_attributes + meta["instance/attributes"] = instance_attributes _set_mock_metadata(meta) self.ds.get_data() @@ -296,7 +316,7 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): _set_mock_metadata() r = self.ds.get_data() self.assertEqual(True, r) - self.assertEqual('bar', self.ds.availability_zone) + self.assertEqual("bar", self.ds.availability_zone) @mock.patch("cloudinit.sources.DataSourceGCE.GoogleMetadataFetcher") def test_get_data_returns_false_if_not_on_gce(self, m_fetcher): @@ -306,9 +326,8 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): m_fetcher.assert_not_called() def test_has_expired(self): - def _get_timestamp(days): - format_str = '%Y-%m-%dT%H:%M:%S+0000' + format_str = "%Y-%m-%dT%H:%M:%S+0000" today = datetime.datetime.now() timestamp = today + datetime.timedelta(days=days) return timestamp.strftime(format_str) @@ -317,12 +336,12 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): future = _get_timestamp(1) ssh_keys = { None: False, - '': 
False, - 'Invalid': False, - 'user:ssh-rsa key user@domain.com': False, + "": False, + "Invalid": False, + "user:ssh-rsa key user@domain.com": False, 'user:ssh-rsa key google {"expireOn":"%s"}' % past: False, - 'user:ssh-rsa key google-ssh': False, - 'user:ssh-rsa key google-ssh {invalid:json}': False, + "user:ssh-rsa key google-ssh": False, + "user:ssh-rsa key google-ssh {invalid:json}": False, 'user:ssh-rsa key google-ssh {"userName":"user"}': False, 'user:ssh-rsa key google-ssh {"expireOn":"invalid"}': False, 'user:xyz key google-ssh {"expireOn":"%s"}' % future: False, @@ -334,28 +353,36 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): def test_parse_public_keys_non_ascii(self): public_key_data = [ - 'cloudinit:rsa ssh-ke%s invalid' % chr(165), - 'use%sname:rsa ssh-key' % chr(174), - 'cloudinit:test 1', - 'default:test 2', - 'user:test 3', + "cloudinit:rsa ssh-ke%s invalid" % chr(165), + "use%sname:rsa ssh-key" % chr(174), + "cloudinit:test 1", + "default:test 2", + "user:test 3", ] - expected = ['test 1', 'test 2'] + expected = ["test 1", "test 2"] found = DataSourceGCE._parse_public_keys( - public_key_data, default_user='default') + public_key_data, default_user="default" + ) self.assertEqual(sorted(found), sorted(expected)) @mock.patch("cloudinit.url_helper.readurl") def test_publish_host_keys(self, m_readurl): - hostkeys = [('ssh-rsa', 'asdfasdf'), - ('ssh-ed25519', 'qwerqwer')] + hostkeys = [("ssh-rsa", "asdfasdf"), ("ssh-ed25519", "qwerqwer")] readurl_expected_calls = [ - mock.call(check_status=False, data=b'asdfasdf', headers=HEADERS, - request_method='PUT', - url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-rsa')), - mock.call(check_status=False, data=b'qwerqwer', headers=HEADERS, - request_method='PUT', - url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-ed25519')), + mock.call( + check_status=False, + data=b"asdfasdf", + headers=HEADERS, + request_method="PUT", + url="%s%s" % (GUEST_ATTRIBUTES_URL, "ssh-rsa"), + ), + mock.call( + check_status=False, + data=b"qwerqwer", + headers=HEADERS, + request_method="PUT", + url="%s%s" % (GUEST_ATTRIBUTES_URL, "ssh-ed25519"), + ), ] self.ds.publish_host_keys(hostkeys) m_readurl.assert_has_calls(readurl_expected_calls, any_order=True) @@ -385,4 +412,5 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): ds._get_data() assert m_dhcp.call_count == 0 + # vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_hetzner.py b/tests/unittests/sources/test_hetzner.py index 5af0f3db..9e70de34 100644 --- a/tests/unittests/sources/test_hetzner.py +++ b/tests/unittests/sources/test_hetzner.py @@ -4,16 +4,17 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-from cloudinit.sources import DataSourceHetzner -import cloudinit.sources.helpers.hetzner as hc_helper -from cloudinit import util, settings, helpers - -from tests.unittests.helpers import mock, CiTestCase - import base64 + import pytest -METADATA = util.load_yaml(""" +import cloudinit.sources.helpers.hetzner as hc_helper +from cloudinit import helpers, settings, util +from cloudinit.sources import DataSourceHetzner +from tests.unittests.helpers import CiTestCase, mock + +METADATA = util.load_yaml( + """ hostname: cloudinit-test instance-id: 123456 local-ipv4: '' @@ -52,7 +53,8 @@ public-keys: AAAAC3Nzac1lZdI1NTE5AaaAIaFrcac0yVITsmRrmueq6MD0qYNKlEvW8O1Ib4nkhmWh \ test-key@workstation vendor_data: "test" -""") +""" +) USERDATA = b"""#cloud-config runcmd: @@ -64,55 +66,59 @@ class TestDataSourceHetzner(CiTestCase): """ Test reading the meta-data """ + def setUp(self): super(TestDataSourceHetzner, self).setUp() self.tmp = self.tmp_dir() def get_ds(self): ds = DataSourceHetzner.DataSourceHetzner( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) + ) return ds - @mock.patch('cloudinit.net.EphemeralIPv4Network') - @mock.patch('cloudinit.net.find_fallback_nic') - @mock.patch('cloudinit.sources.helpers.hetzner.read_metadata') - @mock.patch('cloudinit.sources.helpers.hetzner.read_userdata') - @mock.patch('cloudinit.sources.DataSourceHetzner.get_hcloud_data') - def test_read_data(self, m_get_hcloud_data, m_usermd, m_readmd, - m_fallback_nic, m_net): - m_get_hcloud_data.return_value = (True, - str(METADATA.get('instance-id'))) + @mock.patch("cloudinit.net.EphemeralIPv4Network") + @mock.patch("cloudinit.net.find_fallback_nic") + @mock.patch("cloudinit.sources.helpers.hetzner.read_metadata") + @mock.patch("cloudinit.sources.helpers.hetzner.read_userdata") + @mock.patch("cloudinit.sources.DataSourceHetzner.get_hcloud_data") + def test_read_data( + self, m_get_hcloud_data, m_usermd, m_readmd, m_fallback_nic, m_net + ): + m_get_hcloud_data.return_value = ( + True, + str(METADATA.get("instance-id")), + ) m_readmd.return_value = METADATA.copy() m_usermd.return_value = USERDATA - m_fallback_nic.return_value = 'eth0' + m_fallback_nic.return_value = "eth0" ds = self.get_ds() ret = ds.get_data() self.assertTrue(ret) m_net.assert_called_once_with( - 'eth0', '169.254.0.1', - 16, '169.254.255.255' + "eth0", "169.254.0.1", 16, "169.254.255.255" ) self.assertTrue(m_readmd.called) - self.assertEqual(METADATA.get('hostname'), ds.get_hostname()) + self.assertEqual(METADATA.get("hostname"), ds.get_hostname()) - self.assertEqual(METADATA.get('public-keys'), - ds.get_public_ssh_keys()) + self.assertEqual(METADATA.get("public-keys"), ds.get_public_ssh_keys()) self.assertIsInstance(ds.get_public_ssh_keys(), list) self.assertEqual(ds.get_userdata_raw(), USERDATA) - self.assertEqual(ds.get_vendordata_raw(), METADATA.get('vendor_data')) - - @mock.patch('cloudinit.sources.helpers.hetzner.read_metadata') - @mock.patch('cloudinit.net.find_fallback_nic') - @mock.patch('cloudinit.sources.DataSourceHetzner.get_hcloud_data') - def test_not_on_hetzner_returns_false(self, m_get_hcloud_data, - m_find_fallback, m_read_md): + self.assertEqual(ds.get_vendordata_raw(), METADATA.get("vendor_data")) + + @mock.patch("cloudinit.sources.helpers.hetzner.read_metadata") + @mock.patch("cloudinit.net.find_fallback_nic") + @mock.patch("cloudinit.sources.DataSourceHetzner.get_hcloud_data") + def test_not_on_hetzner_returns_false( + self, m_get_hcloud_data, 
m_find_fallback, m_read_md + ): """If helper 'get_hcloud_data' returns False, - return False from get_data.""" + return False from get_data.""" m_get_hcloud_data.return_value = (False, None) ds = self.get_ds() ret = ds.get_data() @@ -132,11 +138,14 @@ class TestMaybeB64Decode: with pytest.raises(TypeError): hc_helper.maybe_b64decode(invalid_input) - @pytest.mark.parametrize("in_data,expected", [ - # If data is not b64 encoded, then return value should be the same. - (b"this is my data", b"this is my data"), - # If data is b64 encoded, then return value should be decoded. - (base64.b64encode(b"data"), b"data"), - ]) + @pytest.mark.parametrize( + "in_data,expected", + [ + # If data is not b64 encoded, then return value should be the same. + (b"this is my data", b"this is my data"), + # If data is b64 encoded, then return value should be decoded. + (base64.b64encode(b"data"), b"data"), + ], + ) def test_happy_path(self, in_data, expected): assert expected == hc_helper.maybe_b64decode(in_data) diff --git a/tests/unittests/sources/test_ibmcloud.py b/tests/unittests/sources/test_ibmcloud.py index 38e8e892..17a8be64 100644 --- a/tests/unittests/sources/test_ibmcloud.py +++ b/tests/unittests/sources/test_ibmcloud.py @@ -1,15 +1,15 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit.helpers import Paths -from cloudinit.sources import DataSourceIBMCloud as ibm -from tests.unittests import helpers as test_helpers -from cloudinit import util - import base64 import copy import json from textwrap import dedent +from cloudinit import util +from cloudinit.helpers import Paths +from cloudinit.sources import DataSourceIBMCloud as ibm +from tests.unittests import helpers as test_helpers + mock = test_helpers.mock D_PATH = "cloudinit.sources.DataSourceIBMCloud." 
@@ -23,24 +23,36 @@ class TestGetIBMPlatform(test_helpers.CiTestCase): blkid_base = { "/dev/xvda1": { - "DEVNAME": "/dev/xvda1", "LABEL": "cloudimg-bootfs", - "TYPE": "ext3"}, + "DEVNAME": "/dev/xvda1", + "LABEL": "cloudimg-bootfs", + "TYPE": "ext3", + }, "/dev/xvda2": { - "DEVNAME": "/dev/xvda2", "LABEL": "cloudimg-rootfs", - "TYPE": "ext4"}, + "DEVNAME": "/dev/xvda2", + "LABEL": "cloudimg-rootfs", + "TYPE": "ext4", + }, } blkid_metadata_disk = { "/dev/xvdh1": { - "DEVNAME": "/dev/xvdh1", "LABEL": "METADATA", "TYPE": "vfat", - "SEC_TYPE": "msdos", "UUID": "681B-8C5D", - "PARTUUID": "3d631e09-01"}, + "DEVNAME": "/dev/xvdh1", + "LABEL": "METADATA", + "TYPE": "vfat", + "SEC_TYPE": "msdos", + "UUID": "681B-8C5D", + "PARTUUID": "3d631e09-01", + }, } blkid_oscode_disk = { "/dev/xvdh": { - "DEVNAME": "/dev/xvdh", "LABEL": "config-2", "TYPE": "vfat", - "SEC_TYPE": "msdos", "UUID": ibm.IBM_CONFIG_UUID} + "DEVNAME": "/dev/xvdh", + "LABEL": "config-2", + "TYPE": "vfat", + "SEC_TYPE": "msdos", + "UUID": ibm.IBM_CONFIG_UUID, + } } def setUp(self): @@ -56,7 +68,8 @@ class TestGetIBMPlatform(test_helpers.CiTestCase): m_is_prov.return_value = False self.assertEqual( (ibm.Platforms.TEMPLATE_LIVE_METADATA, "/dev/xvdh1"), - ibm.get_ibm_platform()) + ibm.get_ibm_platform(), + ) def test_id_template_prov_metadata(self, m_blkid, m_is_prov, _m_xen): """identify TEMPLATE_PROVISIONING_METADATA.""" @@ -64,7 +77,8 @@ class TestGetIBMPlatform(test_helpers.CiTestCase): m_is_prov.return_value = True self.assertEqual( (ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh1"), - ibm.get_ibm_platform()) + ibm.get_ibm_platform(), + ) def test_id_template_prov_nodata(self, m_blkid, m_is_prov, _m_xen): """identify TEMPLATE_PROVISIONING_NODATA.""" @@ -72,14 +86,16 @@ class TestGetIBMPlatform(test_helpers.CiTestCase): m_is_prov.return_value = True self.assertEqual( (ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None), - ibm.get_ibm_platform()) + ibm.get_ibm_platform(), + ) def test_id_os_code(self, m_blkid, m_is_prov, _m_xen): """Identify OS_CODE.""" m_blkid.return_value = self.blkid_oscode m_is_prov.return_value = False - self.assertEqual((ibm.Platforms.OS_CODE, "/dev/xvdh"), - ibm.get_ibm_platform()) + self.assertEqual( + (ibm.Platforms.OS_CODE, "/dev/xvdh"), ibm.get_ibm_platform() + ) def test_id_os_code_must_match_uuid(self, m_blkid, m_is_prov, _m_xen): """Test against false positive on openstack with non-ibm UUID.""" @@ -116,7 +132,8 @@ class TestReadMD(test_helpers.CiTestCase): "public_keys": {"1091307": "ssh-rsa AAAAB3N..Hw== ci-pubkey"}, } - content_interfaces = dedent("""\ + content_interfaces = dedent( + """\ auto lo iface lo inet loopback @@ -125,71 +142,107 @@ class TestReadMD(test_helpers.CiTestCase): iface eth0 inet static address 10.82.43.5 netmask 255.255.255.192 - """) + """ + ) userdata = b"#!/bin/sh\necho hi mom\n" # meta.js file gets json encoded userdata as a list. 
meta_js = '["#!/bin/sh\necho hi mom\n"]' vendor_data = { - "cloud-init": "#!/bin/bash\necho 'root:$6$5ab01p1m1' | chpasswd -e"} + "cloud-init": "#!/bin/bash\necho 'root:$6$5ab01p1m1' | chpasswd -e" + } network_data = { "links": [ - {"id": "interface_29402281", "name": "eth0", "mtu": None, - "type": "phy", "ethernet_mac_address": "06:00:f1:bd:da:25"}, - {"id": "interface_29402279", "name": "eth1", "mtu": None, - "type": "phy", "ethernet_mac_address": "06:98:5e:d0:7f:86"} + { + "id": "interface_29402281", + "name": "eth0", + "mtu": None, + "type": "phy", + "ethernet_mac_address": "06:00:f1:bd:da:25", + }, + { + "id": "interface_29402279", + "name": "eth1", + "mtu": None, + "type": "phy", + "ethernet_mac_address": "06:98:5e:d0:7f:86", + }, ], "networks": [ - {"id": "network_109887563", "link": "interface_29402281", - "type": "ipv4", "ip_address": "10.82.43.2", - "netmask": "255.255.255.192", - "routes": [ - {"network": "10.0.0.0", "netmask": "255.0.0.0", - "gateway": "10.82.43.1"}, - {"network": "161.26.0.0", "netmask": "255.255.0.0", - "gateway": "10.82.43.1"}]}, - {"id": "network_109887551", "link": "interface_29402279", - "type": "ipv4", "ip_address": "108.168.194.252", - "netmask": "255.255.255.248", - "routes": [ - {"network": "0.0.0.0", "netmask": "0.0.0.0", - "gateway": "108.168.194.249"}]} + { + "id": "network_109887563", + "link": "interface_29402281", + "type": "ipv4", + "ip_address": "10.82.43.2", + "netmask": "255.255.255.192", + "routes": [ + { + "network": "10.0.0.0", + "netmask": "255.0.0.0", + "gateway": "10.82.43.1", + }, + { + "network": "161.26.0.0", + "netmask": "255.255.0.0", + "gateway": "10.82.43.1", + }, + ], + }, + { + "id": "network_109887551", + "link": "interface_29402279", + "type": "ipv4", + "ip_address": "108.168.194.252", + "netmask": "255.255.255.248", + "routes": [ + { + "network": "0.0.0.0", + "netmask": "0.0.0.0", + "gateway": "108.168.194.249", + } + ], + }, ], "services": [ {"type": "dns", "address": "10.0.80.11"}, - {"type": "dns", "address": "10.0.80.12"} + {"type": "dns", "address": "10.0.80.12"}, ], } - sysuuid = '7f79ebf5-d791-43c3-a723-854e8389d59f' + sysuuid = "7f79ebf5-d791-43c3-a723-854e8389d59f" def _get_expected_metadata(self, os_md): """return expected 'metadata' for data loaded from meta_data.json.""" os_md = copy.deepcopy(os_md) renames = ( - ('hostname', 'local-hostname'), - ('uuid', 'instance-id'), - ('public_keys', 'public-keys')) + ("hostname", "local-hostname"), + ("uuid", "instance-id"), + ("public_keys", "public-keys"), + ) ret = {} for osname, mdname in renames: if osname in os_md: ret[mdname] = os_md[osname] - if 'random_seed' in os_md: - ret['random_seed'] = base64.b64decode(os_md['random_seed']) + if "random_seed" in os_md: + ret["random_seed"] = base64.b64decode(os_md["random_seed"]) return ret def test_provisioning_md(self, m_platform, m_sysuuid): """Provisioning env with a metadata disk should return None.""" m_platform.return_value = ( - ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh") + ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, + "/dev/xvdh", + ) self.assertIsNone(ibm.read_md()) def test_provisioning_no_metadata(self, m_platform, m_sysuuid): """Provisioning env with no metadata disk should return None.""" m_platform.return_value = ( - ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None) + ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, + None, + ) self.assertIsNone(ibm.read_md()) def test_provisioning_not_ibm(self, m_platform, m_sysuuid): @@ -201,62 +254,83 @@ class TestReadMD(test_helpers.CiTestCase): """Template live 
environment should be identified.""" tmpdir = self.tmp_dir() m_platform.return_value = ( - ibm.Platforms.TEMPLATE_LIVE_METADATA, tmpdir) + ibm.Platforms.TEMPLATE_LIVE_METADATA, + tmpdir, + ) m_sysuuid.return_value = self.sysuuid - test_helpers.populate_dir(tmpdir, { - 'openstack/latest/meta_data.json': json.dumps(self.template_md), - 'openstack/latest/user_data': self.userdata, - 'openstack/content/interfaces': self.content_interfaces, - 'meta.js': self.meta_js}) + test_helpers.populate_dir( + tmpdir, + { + "openstack/latest/meta_data.json": json.dumps( + self.template_md + ), + "openstack/latest/user_data": self.userdata, + "openstack/content/interfaces": self.content_interfaces, + "meta.js": self.meta_js, + }, + ) ret = ibm.read_md() - self.assertEqual(ibm.Platforms.TEMPLATE_LIVE_METADATA, - ret['platform']) - self.assertEqual(tmpdir, ret['source']) - self.assertEqual(self.userdata, ret['userdata']) - self.assertEqual(self._get_expected_metadata(self.template_md), - ret['metadata']) - self.assertEqual(self.sysuuid, ret['system-uuid']) + self.assertEqual(ibm.Platforms.TEMPLATE_LIVE_METADATA, ret["platform"]) + self.assertEqual(tmpdir, ret["source"]) + self.assertEqual(self.userdata, ret["userdata"]) + self.assertEqual( + self._get_expected_metadata(self.template_md), ret["metadata"] + ) + self.assertEqual(self.sysuuid, ret["system-uuid"]) def test_os_code_live(self, m_platform, m_sysuuid): """Verify an os_code metadata path.""" tmpdir = self.tmp_dir() m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir) netdata = json.dumps(self.network_data) - test_helpers.populate_dir(tmpdir, { - 'openstack/latest/meta_data.json': json.dumps(self.oscode_md), - 'openstack/latest/user_data': self.userdata, - 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data), - 'openstack/latest/network_data.json': netdata, - }) + test_helpers.populate_dir( + tmpdir, + { + "openstack/latest/meta_data.json": json.dumps(self.oscode_md), + "openstack/latest/user_data": self.userdata, + "openstack/latest/vendor_data.json": json.dumps( + self.vendor_data + ), + "openstack/latest/network_data.json": netdata, + }, + ) ret = ibm.read_md() - self.assertEqual(ibm.Platforms.OS_CODE, ret['platform']) - self.assertEqual(tmpdir, ret['source']) - self.assertEqual(self.userdata, ret['userdata']) - self.assertEqual(self._get_expected_metadata(self.oscode_md), - ret['metadata']) + self.assertEqual(ibm.Platforms.OS_CODE, ret["platform"]) + self.assertEqual(tmpdir, ret["source"]) + self.assertEqual(self.userdata, ret["userdata"]) + self.assertEqual( + self._get_expected_metadata(self.oscode_md), ret["metadata"] + ) def test_os_code_live_no_userdata(self, m_platform, m_sysuuid): """Verify os_code without user-data.""" tmpdir = self.tmp_dir() m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir) - test_helpers.populate_dir(tmpdir, { - 'openstack/latest/meta_data.json': json.dumps(self.oscode_md), - 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data), - }) + test_helpers.populate_dir( + tmpdir, + { + "openstack/latest/meta_data.json": json.dumps(self.oscode_md), + "openstack/latest/vendor_data.json": json.dumps( + self.vendor_data + ), + }, + ) ret = ibm.read_md() - self.assertEqual(ibm.Platforms.OS_CODE, ret['platform']) - self.assertEqual(tmpdir, ret['source']) - self.assertIsNone(ret['userdata']) - self.assertEqual(self._get_expected_metadata(self.oscode_md), - ret['metadata']) + self.assertEqual(ibm.Platforms.OS_CODE, ret["platform"]) + self.assertEqual(tmpdir, ret["source"]) + 
self.assertIsNone(ret["userdata"]) + self.assertEqual( + self._get_expected_metadata(self.oscode_md), ret["metadata"] + ) class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase): """Test the _is_ibm_provisioning method.""" + inst_log = "/root/swinstall.log" prov_cfg = "/root/provisioningConfiguration.cfg" boot_ref = "/proc/1/environ" @@ -279,9 +353,11 @@ class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase): def test_config_with_old_log(self): """A config with a log from previous boot is not provisioning.""" rootd = self.tmp_dir() - data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10), - self.inst_log: ("log data\n", -30), - self.boot_ref: ("PWD=/", 0)} + data = { + self.prov_cfg: ("key=value\nkey2=val2\n", -10), + self.inst_log: ("log data\n", -30), + self.boot_ref: ("PWD=/", 0), + } test_helpers.populate_dir_with_ts(rootd, data) self.assertFalse(self._call_with_root(rootd=rootd)) self.assertIn("from previous boot", self.logs.getvalue()) @@ -289,9 +365,11 @@ class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase): def test_config_with_new_log(self): """A config with a log from this boot is provisioning.""" rootd = self.tmp_dir() - data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10), - self.inst_log: ("log data\n", 30), - self.boot_ref: ("PWD=/", 0)} + data = { + self.prov_cfg: ("key=value\nkey2=val2\n", -10), + self.inst_log: ("log data\n", 30), + self.boot_ref: ("PWD=/", 0), + } test_helpers.populate_dir_with_ts(rootd, data) self.assertTrue(self._call_with_root(rootd=rootd)) self.assertIn("from current boot", self.logs.getvalue()) @@ -300,44 +378,49 @@ class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase): """If the config and log existed, but no reference, assume not.""" rootd = self.tmp_dir() test_helpers.populate_dir( - rootd, {self.prov_cfg: "key=value", self.inst_log: "log data\n"}) + rootd, {self.prov_cfg: "key=value", self.inst_log: "log data\n"} + ) self.assertFalse(self._call_with_root(rootd=rootd)) self.assertIn("no reference file", self.logs.getvalue()) class TestDataSourceIBMCloud(test_helpers.CiTestCase): - def setUp(self): super(TestDataSourceIBMCloud, self).setUp() self.tmp = self.tmp_dir() - self.cloud_dir = self.tmp_path('cloud', dir=self.tmp) + self.cloud_dir = self.tmp_path("cloud", dir=self.tmp) util.ensure_dir(self.cloud_dir) - paths = Paths({'run_dir': self.tmp, 'cloud_dir': self.cloud_dir}) - self.ds = ibm.DataSourceIBMCloud( - sys_cfg={}, distro=None, paths=paths) + paths = Paths({"run_dir": self.tmp, "cloud_dir": self.cloud_dir}) + self.ds = ibm.DataSourceIBMCloud(sys_cfg={}, distro=None, paths=paths) def test_get_data_false(self): """When read_md returns None, get_data returns False.""" - with mock.patch(D_PATH + 'read_md', return_value=None): + with mock.patch(D_PATH + "read_md", return_value=None): self.assertFalse(self.ds.get_data()) def test_get_data_processes_read_md(self): """get_data processes and caches content returned by read_md.""" md = { - 'metadata': {}, 'networkdata': 'net', 'platform': 'plat', - 'source': 'src', 'system-uuid': 'uuid', 'userdata': 'ud', - 'vendordata': 'vd'} - with mock.patch(D_PATH + 'read_md', return_value=md): + "metadata": {}, + "networkdata": "net", + "platform": "plat", + "source": "src", + "system-uuid": "uuid", + "userdata": "ud", + "vendordata": "vd", + } + with mock.patch(D_PATH + "read_md", return_value=md): self.assertTrue(self.ds.get_data()) - self.assertEqual('src', self.ds.source) - self.assertEqual('plat', self.ds.platform) + self.assertEqual("src", 
self.ds.source) + self.assertEqual("plat", self.ds.platform) self.assertEqual({}, self.ds.metadata) - self.assertEqual('ud', self.ds.userdata_raw) - self.assertEqual('net', self.ds.network_json) - self.assertEqual('vd', self.ds.vendordata_pure) - self.assertEqual('uuid', self.ds.system_uuid) - self.assertEqual('ibmcloud', self.ds.cloud_name) - self.assertEqual('ibmcloud', self.ds.platform_type) - self.assertEqual('plat (src)', self.ds.subplatform) + self.assertEqual("ud", self.ds.userdata_raw) + self.assertEqual("net", self.ds.network_json) + self.assertEqual("vd", self.ds.vendordata_pure) + self.assertEqual("uuid", self.ds.system_uuid) + self.assertEqual("ibmcloud", self.ds.cloud_name) + self.assertEqual("ibmcloud", self.ds.platform_type) + self.assertEqual("plat (src)", self.ds.subplatform) + # vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_init.py b/tests/unittests/sources/test_init.py index a1d19518..745a7fa6 100644 --- a/tests/unittests/sources/test_init.py +++ b/tests/unittests/sources/test_init.py @@ -5,46 +5,60 @@ import inspect import os import stat +from cloudinit import importer, util from cloudinit.event import EventScope, EventType from cloudinit.helpers import Paths -from cloudinit import importer from cloudinit.sources import ( - EXPERIMENTAL_TEXT, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, - METADATA_UNKNOWN, REDACT_SENSITIVE_VALUE, UNSET, DataSource, - canonical_cloud_id, redact_sensitive_keys) -from tests.unittests.helpers import CiTestCase, mock + EXPERIMENTAL_TEXT, + INSTANCE_JSON_FILE, + INSTANCE_JSON_SENSITIVE_FILE, + METADATA_UNKNOWN, + REDACT_SENSITIVE_VALUE, + UNSET, + DataSource, + canonical_cloud_id, + redact_sensitive_keys, +) from cloudinit.user_data import UserDataProcessor -from cloudinit import util +from tests.unittests.helpers import CiTestCase, mock class DataSourceTestSubclassNet(DataSource): - dsname = 'MyTestSubclass' + dsname = "MyTestSubclass" url_max_wait = 55 - def __init__(self, sys_cfg, distro, paths, custom_metadata=None, - custom_userdata=None, get_data_retval=True): - super(DataSourceTestSubclassNet, self).__init__( - sys_cfg, distro, paths) + def __init__( + self, + sys_cfg, + distro, + paths, + custom_metadata=None, + custom_userdata=None, + get_data_retval=True, + ): + super(DataSourceTestSubclassNet, self).__init__(sys_cfg, distro, paths) self._custom_userdata = custom_userdata self._custom_metadata = custom_metadata self._get_data_retval = get_data_retval def _get_cloud_name(self): - return 'SubclassCloudName' + return "SubclassCloudName" def _get_data(self): if self._custom_metadata: self.metadata = self._custom_metadata else: - self.metadata = {'availability_zone': 'myaz', - 'local-hostname': 'test-subclass-hostname', - 'region': 'myregion'} + self.metadata = { + "availability_zone": "myaz", + "local-hostname": "test-subclass-hostname", + "region": "myregion", + } if self._custom_userdata: self.userdata_raw = self._custom_userdata else: - self.userdata_raw = 'userdata_raw' - self.vendordata_raw = 'vendordata_raw' + self.userdata_raw = "userdata_raw" + self.vendordata_raw = "vendordata_raw" return self._get_data_retval @@ -59,8 +73,8 @@ class TestDataSource(CiTestCase): def setUp(self): super(TestDataSource, self).setUp() - self.sys_cfg = {'datasource': {'_undef': {'key1': False}}} - self.distro = 'distrotest' # generally should be a Distro object + self.sys_cfg = {"datasource": {"_undef": {"key1": False}}} + self.distro = "distrotest" # generally should be a Distro object self.paths = Paths({}) self.datasource = 
DataSource(self.sys_cfg, self.distro, self.paths) @@ -74,22 +88,23 @@ class TestDataSource(CiTestCase): self.assertIsNone(self.datasource.userdata_raw) self.assertIsNone(self.datasource.vendordata) self.assertIsNone(self.datasource.vendordata_raw) - self.assertEqual({'key1': False}, self.datasource.ds_cfg) + self.assertEqual({"key1": False}, self.datasource.ds_cfg) self.assertIsInstance(self.datasource.ud_proc, UserDataProcessor) def test_datasource_init_gets_ds_cfg_using_dsname(self): """Init uses DataSource.dsname for sourcing ds_cfg.""" - sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} - distro = 'distrotest' # generally should be a Distro object + sys_cfg = {"datasource": {"MyTestSubclass": {"key2": False}}} + distro = "distrotest" # generally should be a Distro object datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths) - self.assertEqual({'key2': False}, datasource.ds_cfg) + self.assertEqual({"key2": False}, datasource.ds_cfg) def test_str_is_classname(self): """The string representation of the datasource is the classname.""" - self.assertEqual('DataSource', str(self.datasource)) + self.assertEqual("DataSource", str(self.datasource)) self.assertEqual( - 'DataSourceTestSubclassNet', - str(DataSourceTestSubclassNet('', '', self.paths))) + "DataSourceTestSubclassNet", + str(DataSourceTestSubclassNet("", "", self.paths)), + ) def test_datasource_get_url_params_defaults(self): """get_url_params default url config settings for the datasource.""" @@ -97,16 +112,21 @@ class TestDataSource(CiTestCase): self.assertEqual(params.max_wait_seconds, self.datasource.url_max_wait) self.assertEqual(params.timeout_seconds, self.datasource.url_timeout) self.assertEqual(params.num_retries, self.datasource.url_retries) - self.assertEqual(params.sec_between_retries, - self.datasource.url_sec_between_retries) + self.assertEqual( + params.sec_between_retries, self.datasource.url_sec_between_retries + ) def test_datasource_get_url_params_subclassed(self): """Subclasses can override get_url_params defaults.""" - sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} - distro = 'distrotest' # generally should be a Distro object + sys_cfg = {"datasource": {"MyTestSubclass": {"key2": False}}} + distro = "distrotest" # generally should be a Distro object datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths) - expected = (datasource.url_max_wait, datasource.url_timeout, - datasource.url_retries, datasource.url_sec_between_retries) + expected = ( + datasource.url_max_wait, + datasource.url_timeout, + datasource.url_retries, + datasource.url_sec_between_retries, + ) url_params = datasource.get_url_params() self.assertNotEqual(self.datasource.get_url_params(), url_params) self.assertEqual(expected, url_params) @@ -114,40 +134,64 @@ class TestDataSource(CiTestCase): def test_datasource_get_url_params_ds_config_override(self): """Datasource configuration options can override url param defaults.""" sys_cfg = { - 'datasource': { - 'MyTestSubclass': { - 'max_wait': '1', 'timeout': '2', - 'retries': '3', 'sec_between_retries': 4 - }}} + "datasource": { + "MyTestSubclass": { + "max_wait": "1", + "timeout": "2", + "retries": "3", + "sec_between_retries": 4, + } + } + } datasource = DataSourceTestSubclassNet( - sys_cfg, self.distro, self.paths) + sys_cfg, self.distro, self.paths + ) expected = (1, 2, 3, 4) url_params = datasource.get_url_params() self.assertNotEqual( - (datasource.url_max_wait, datasource.url_timeout, - datasource.url_retries, 
datasource.url_sec_between_retries), - url_params) + ( + datasource.url_max_wait, + datasource.url_timeout, + datasource.url_retries, + datasource.url_sec_between_retries, + ), + url_params, + ) self.assertEqual(expected, url_params) def test_datasource_get_url_params_is_zero_or_greater(self): """get_url_params ignores timeouts with a value below 0.""" # Set an override that is below 0 which gets ignored. - sys_cfg = {'datasource': {'_undef': {'timeout': '-1'}}} + sys_cfg = {"datasource": {"_undef": {"timeout": "-1"}}} datasource = DataSource(sys_cfg, self.distro, self.paths) - (_max_wait, timeout, _retries, - _sec_between_retries) = datasource.get_url_params() + ( + _max_wait, + timeout, + _retries, + _sec_between_retries, + ) = datasource.get_url_params() self.assertEqual(0, timeout) def test_datasource_get_url_uses_defaults_on_errors(self): """On invalid system config values for url_params defaults are used.""" # All invalid values should be logged - sys_cfg = {'datasource': { - '_undef': { - 'max_wait': 'nope', 'timeout': 'bug', 'retries': 'nonint'}}} + sys_cfg = { + "datasource": { + "_undef": { + "max_wait": "nope", + "timeout": "bug", + "retries": "nonint", + } + } + } datasource = DataSource(sys_cfg, self.distro, self.paths) url_params = datasource.get_url_params() - expected = (datasource.url_max_wait, datasource.url_timeout, - datasource.url_retries, datasource.url_sec_between_retries) + expected = ( + datasource.url_max_wait, + datasource.url_timeout, + datasource.url_retries, + datasource.url_sec_between_retries, + ) self.assertEqual(expected, url_params) logs = self.logs.getvalue() expected_logs = [ @@ -158,27 +202,28 @@ class TestDataSource(CiTestCase): for log in expected_logs: self.assertIn(log, logs) - @mock.patch('cloudinit.sources.net.find_fallback_nic') + @mock.patch("cloudinit.sources.net.find_fallback_nic") def test_fallback_interface_is_discovered(self, m_get_fallback_nic): """The fallback_interface is discovered via find_fallback_nic.""" - m_get_fallback_nic.return_value = 'nic9' - self.assertEqual('nic9', self.datasource.fallback_interface) + m_get_fallback_nic.return_value = "nic9" + self.assertEqual("nic9", self.datasource.fallback_interface) - @mock.patch('cloudinit.sources.net.find_fallback_nic') + @mock.patch("cloudinit.sources.net.find_fallback_nic") def test_fallback_interface_logs_undiscovered(self, m_get_fallback_nic): """Log a warning when fallback_interface can not discover the nic.""" - self.datasource._cloud_name = 'MySupahCloud' + self.datasource._cloud_name = "MySupahCloud" m_get_fallback_nic.return_value = None # Couldn't discover nic self.assertIsNone(self.datasource.fallback_interface) self.assertEqual( - 'WARNING: Did not find a fallback interface on MySupahCloud.\n', - self.logs.getvalue()) + "WARNING: Did not find a fallback interface on MySupahCloud.\n", + self.logs.getvalue(), + ) - @mock.patch('cloudinit.sources.net.find_fallback_nic') + @mock.patch("cloudinit.sources.net.find_fallback_nic") def test_wb_fallback_interface_is_cached(self, m_get_fallback_nic): """The fallback_interface is cached and won't be rediscovered.""" - self.datasource._fallback_interface = 'nic10' - self.assertEqual('nic10', self.datasource.fallback_interface) + self.datasource._fallback_interface = "nic10" + self.assertEqual("nic10", self.datasource.fallback_interface) m_get_fallback_nic.assert_not_called() def test__get_data_unimplemented(self): @@ -186,80 +231,95 @@ class TestDataSource(CiTestCase): with self.assertRaises(NotImplementedError) as context_manager: 
self.datasource.get_data() self.assertIn( - 'Subclasses of DataSource must implement _get_data', - str(context_manager.exception)) + "Subclasses of DataSource must implement _get_data", + str(context_manager.exception), + ) datasource2 = InvalidDataSourceTestSubclassNet( - self.sys_cfg, self.distro, self.paths) + self.sys_cfg, self.distro, self.paths + ) with self.assertRaises(NotImplementedError) as context_manager: datasource2.get_data() self.assertIn( - 'Subclasses of DataSource must implement _get_data', - str(context_manager.exception)) + "Subclasses of DataSource must implement _get_data", + str(context_manager.exception), + ) def test_get_data_calls_subclass__get_data(self): """Datasource.get_data uses the subclass' version of _get_data.""" tmp = self.tmp_dir() datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + self.sys_cfg, self.distro, Paths({"run_dir": tmp}) + ) self.assertTrue(datasource.get_data()) self.assertEqual( - {'availability_zone': 'myaz', - 'local-hostname': 'test-subclass-hostname', - 'region': 'myregion'}, - datasource.metadata) - self.assertEqual('userdata_raw', datasource.userdata_raw) - self.assertEqual('vendordata_raw', datasource.vendordata_raw) + { + "availability_zone": "myaz", + "local-hostname": "test-subclass-hostname", + "region": "myregion", + }, + datasource.metadata, + ) + self.assertEqual("userdata_raw", datasource.userdata_raw) + self.assertEqual("vendordata_raw", datasource.vendordata_raw) def test_get_hostname_strips_local_hostname_without_domain(self): """Datasource.get_hostname strips metadata local-hostname of domain.""" tmp = self.tmp_dir() datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + self.sys_cfg, self.distro, Paths({"run_dir": tmp}) + ) self.assertTrue(datasource.get_data()) self.assertEqual( - 'test-subclass-hostname', datasource.metadata['local-hostname']) - self.assertEqual('test-subclass-hostname', datasource.get_hostname()) - datasource.metadata['local-hostname'] = 'hostname.my.domain.com' - self.assertEqual('hostname', datasource.get_hostname()) + "test-subclass-hostname", datasource.metadata["local-hostname"] + ) + self.assertEqual("test-subclass-hostname", datasource.get_hostname()) + datasource.metadata["local-hostname"] = "hostname.my.domain.com" + self.assertEqual("hostname", datasource.get_hostname()) def test_get_hostname_with_fqdn_returns_local_hostname_with_domain(self): """Datasource.get_hostname with fqdn set gets qualified hostname.""" tmp = self.tmp_dir() datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + self.sys_cfg, self.distro, Paths({"run_dir": tmp}) + ) self.assertTrue(datasource.get_data()) - datasource.metadata['local-hostname'] = 'hostname.my.domain.com' + datasource.metadata["local-hostname"] = "hostname.my.domain.com" self.assertEqual( - 'hostname.my.domain.com', datasource.get_hostname(fqdn=True)) + "hostname.my.domain.com", datasource.get_hostname(fqdn=True) + ) def test_get_hostname_without_metadata_uses_system_hostname(self): """Datasource.gethostname runs util.get_hostname when no metadata.""" tmp = self.tmp_dir() datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + self.sys_cfg, self.distro, Paths({"run_dir": tmp}) + ) self.assertEqual({}, datasource.metadata) - mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' - with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: + mock_fqdn = 
"cloudinit.sources.util.get_fqdn_from_hosts" + with mock.patch("cloudinit.sources.util.get_hostname") as m_gethost: with mock.patch(mock_fqdn) as m_fqdn: - m_gethost.return_value = 'systemhostname.domain.com' + m_gethost.return_value = "systemhostname.domain.com" m_fqdn.return_value = None # No maching fqdn in /etc/hosts - self.assertEqual('systemhostname', datasource.get_hostname()) + self.assertEqual("systemhostname", datasource.get_hostname()) self.assertEqual( - 'systemhostname.domain.com', - datasource.get_hostname(fqdn=True)) + "systemhostname.domain.com", + datasource.get_hostname(fqdn=True), + ) def test_get_hostname_without_metadata_returns_none(self): """Datasource.gethostname returns None when metadata_only and no MD.""" tmp = self.tmp_dir() datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + self.sys_cfg, self.distro, Paths({"run_dir": tmp}) + ) self.assertEqual({}, datasource.metadata) - mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' - with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: + mock_fqdn = "cloudinit.sources.util.get_fqdn_from_hosts" + with mock.patch("cloudinit.sources.util.get_hostname") as m_gethost: with mock.patch(mock_fqdn) as m_fqdn: self.assertIsNone(datasource.get_hostname(metadata_only=True)) self.assertIsNone( - datasource.get_hostname(fqdn=True, metadata_only=True)) + datasource.get_hostname(fqdn=True, metadata_only=True) + ) self.assertEqual([], m_gethost.call_args_list) self.assertEqual([], m_fqdn.call_args_list) @@ -267,78 +327,99 @@ class TestDataSource(CiTestCase): """Datasource.gethostname prefers /etc/hosts to util.get_hostname.""" tmp = self.tmp_dir() datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + self.sys_cfg, self.distro, Paths({"run_dir": tmp}) + ) self.assertEqual({}, datasource.metadata) - mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' - with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: + mock_fqdn = "cloudinit.sources.util.get_fqdn_from_hosts" + with mock.patch("cloudinit.sources.util.get_hostname") as m_gethost: with mock.patch(mock_fqdn) as m_fqdn: - m_gethost.return_value = 'systemhostname.domain.com' - m_fqdn.return_value = 'fqdnhostname.domain.com' - self.assertEqual('fqdnhostname', datasource.get_hostname()) - self.assertEqual('fqdnhostname.domain.com', - datasource.get_hostname(fqdn=True)) + m_gethost.return_value = "systemhostname.domain.com" + m_fqdn.return_value = "fqdnhostname.domain.com" + self.assertEqual("fqdnhostname", datasource.get_hostname()) + self.assertEqual( + "fqdnhostname.domain.com", + datasource.get_hostname(fqdn=True), + ) def test_get_data_does_not_write_instance_data_on_failure(self): """get_data does not write INSTANCE_JSON_FILE on get_data False.""" tmp = self.tmp_dir() datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp}), - get_data_retval=False) + self.sys_cfg, + self.distro, + Paths({"run_dir": tmp}), + get_data_retval=False, + ) self.assertFalse(datasource.get_data()) json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) self.assertFalse( - os.path.exists(json_file), 'Found unexpected file %s' % json_file) + os.path.exists(json_file), "Found unexpected file %s" % json_file + ) def test_get_data_writes_json_instance_data_on_success(self): """get_data writes INSTANCE_JSON_FILE to run_dir as world readable.""" tmp = self.tmp_dir() datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + 
self.sys_cfg, self.distro, Paths({"run_dir": tmp}) + ) sys_info = { "python": "3.7", - "platform": - "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal", - "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah", - "x86_64"], - "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]} + "platform": ( + "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal" + ), + "uname": [ + "Linux", + "myhost", + "5.4.0-24-generic", + "SMP blah", + "x86_64", + ], + "variant": "ubuntu", + "dist": ["ubuntu", "20.04", "focal"], + } with mock.patch("cloudinit.util.system_info", return_value=sys_info): datasource.get_data() json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) content = util.load_file(json_file) expected = { - 'base64_encoded_keys': [], - 'merged_cfg': REDACT_SENSITIVE_VALUE, - 'sensitive_keys': ['merged_cfg'], - 'sys_info': sys_info, - 'v1': { - '_beta_keys': ['subplatform'], - 'availability-zone': 'myaz', - 'availability_zone': 'myaz', - 'cloud-name': 'subclasscloudname', - 'cloud_name': 'subclasscloudname', - 'distro': 'ubuntu', - 'distro_release': 'focal', - 'distro_version': '20.04', - 'instance-id': 'iid-datasource', - 'instance_id': 'iid-datasource', - 'local-hostname': 'test-subclass-hostname', - 'local_hostname': 'test-subclass-hostname', - 'kernel_release': '5.4.0-24-generic', - 'machine': 'x86_64', - 'platform': 'mytestsubclass', - 'public_ssh_keys': [], - 'python_version': '3.7', - 'region': 'myregion', - 'system_platform': - 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal', - 'subplatform': 'unknown', - 'variant': 'ubuntu'}, - 'ds': { - - '_doc': EXPERIMENTAL_TEXT, - 'meta_data': {'availability_zone': 'myaz', - 'local-hostname': 'test-subclass-hostname', - 'region': 'myregion'}}} + "base64_encoded_keys": [], + "merged_cfg": REDACT_SENSITIVE_VALUE, + "sensitive_keys": ["merged_cfg"], + "sys_info": sys_info, + "v1": { + "_beta_keys": ["subplatform"], + "availability-zone": "myaz", + "availability_zone": "myaz", + "cloud-name": "subclasscloudname", + "cloud_name": "subclasscloudname", + "distro": "ubuntu", + "distro_release": "focal", + "distro_version": "20.04", + "instance-id": "iid-datasource", + "instance_id": "iid-datasource", + "local-hostname": "test-subclass-hostname", + "local_hostname": "test-subclass-hostname", + "kernel_release": "5.4.0-24-generic", + "machine": "x86_64", + "platform": "mytestsubclass", + "public_ssh_keys": [], + "python_version": "3.7", + "region": "myregion", + "system_platform": ( + "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal" + ), + "subplatform": "unknown", + "variant": "ubuntu", + }, + "ds": { + "_doc": EXPERIMENTAL_TEXT, + "meta_data": { + "availability_zone": "myaz", + "local-hostname": "test-subclass-hostname", + "region": "myregion", + }, + }, + } self.assertEqual(expected, util.load_json(content)) file_stat = os.stat(json_file) self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode)) @@ -348,63 +429,89 @@ class TestDataSource(CiTestCase): """get_data writes redacted content to public INSTANCE_JSON_FILE.""" tmp = self.tmp_dir() datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + self.sys_cfg, + self.distro, + Paths({"run_dir": tmp}), custom_metadata={ - 'availability_zone': 'myaz', - 'local-hostname': 'test-subclass-hostname', - 'region': 'myregion', - 'some': {'security-credentials': { - 'cred1': 'sekret', 'cred2': 'othersekret'}}}) + "availability_zone": "myaz", + "local-hostname": "test-subclass-hostname", + "region": "myregion", + "some": { + "security-credentials": { + 
"cred1": "sekret", + "cred2": "othersekret", + } + }, + }, + ) self.assertCountEqual( - ('merged_cfg', 'security-credentials',), - datasource.sensitive_metadata_keys) + ( + "merged_cfg", + "security-credentials", + ), + datasource.sensitive_metadata_keys, + ) sys_info = { "python": "3.7", - "platform": - "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal", - "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah", - "x86_64"], - "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]} + "platform": ( + "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal" + ), + "uname": [ + "Linux", + "myhost", + "5.4.0-24-generic", + "SMP blah", + "x86_64", + ], + "variant": "ubuntu", + "dist": ["ubuntu", "20.04", "focal"], + } with mock.patch("cloudinit.util.system_info", return_value=sys_info): datasource.get_data() json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) redacted = util.load_json(util.load_file(json_file)) expected = { - 'base64_encoded_keys': [], - 'merged_cfg': REDACT_SENSITIVE_VALUE, - 'sensitive_keys': [ - 'ds/meta_data/some/security-credentials', 'merged_cfg'], - 'sys_info': sys_info, - 'v1': { - '_beta_keys': ['subplatform'], - 'availability-zone': 'myaz', - 'availability_zone': 'myaz', - 'cloud-name': 'subclasscloudname', - 'cloud_name': 'subclasscloudname', - 'distro': 'ubuntu', - 'distro_release': 'focal', - 'distro_version': '20.04', - 'instance-id': 'iid-datasource', - 'instance_id': 'iid-datasource', - 'local-hostname': 'test-subclass-hostname', - 'local_hostname': 'test-subclass-hostname', - 'kernel_release': '5.4.0-24-generic', - 'machine': 'x86_64', - 'platform': 'mytestsubclass', - 'public_ssh_keys': [], - 'python_version': '3.7', - 'region': 'myregion', - 'system_platform': - 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal', - 'subplatform': 'unknown', - 'variant': 'ubuntu'}, - 'ds': { - '_doc': EXPERIMENTAL_TEXT, - 'meta_data': { - 'availability_zone': 'myaz', - 'local-hostname': 'test-subclass-hostname', - 'region': 'myregion', - 'some': {'security-credentials': REDACT_SENSITIVE_VALUE}}} + "base64_encoded_keys": [], + "merged_cfg": REDACT_SENSITIVE_VALUE, + "sensitive_keys": [ + "ds/meta_data/some/security-credentials", + "merged_cfg", + ], + "sys_info": sys_info, + "v1": { + "_beta_keys": ["subplatform"], + "availability-zone": "myaz", + "availability_zone": "myaz", + "cloud-name": "subclasscloudname", + "cloud_name": "subclasscloudname", + "distro": "ubuntu", + "distro_release": "focal", + "distro_version": "20.04", + "instance-id": "iid-datasource", + "instance_id": "iid-datasource", + "local-hostname": "test-subclass-hostname", + "local_hostname": "test-subclass-hostname", + "kernel_release": "5.4.0-24-generic", + "machine": "x86_64", + "platform": "mytestsubclass", + "public_ssh_keys": [], + "python_version": "3.7", + "region": "myregion", + "system_platform": ( + "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal" + ), + "subplatform": "unknown", + "variant": "ubuntu", + }, + "ds": { + "_doc": EXPERIMENTAL_TEXT, + "meta_data": { + "availability_zone": "myaz", + "local-hostname": "test-subclass-hostname", + "region": "myregion", + "some": {"security-credentials": REDACT_SENSITIVE_VALUE}, + }, + }, } self.assertCountEqual(expected, redacted) file_stat = os.stat(json_file) @@ -416,71 +523,101 @@ class TestDataSource(CiTestCase): """ tmp = self.tmp_dir() datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + self.sys_cfg, + self.distro, + Paths({"run_dir": tmp}), custom_metadata={ - 'availability_zone': 
'myaz', - 'local-hostname': 'test-subclass-hostname', - 'region': 'myregion', - 'some': {'security-credentials': { - 'cred1': 'sekret', 'cred2': 'othersekret'}}}) + "availability_zone": "myaz", + "local-hostname": "test-subclass-hostname", + "region": "myregion", + "some": { + "security-credentials": { + "cred1": "sekret", + "cred2": "othersekret", + } + }, + }, + ) sys_info = { "python": "3.7", - "platform": - "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal", - "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah", - "x86_64"], - "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]} + "platform": ( + "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal" + ), + "uname": [ + "Linux", + "myhost", + "5.4.0-24-generic", + "SMP blah", + "x86_64", + ], + "variant": "ubuntu", + "dist": ["ubuntu", "20.04", "focal"], + } self.assertCountEqual( - ('merged_cfg', 'security-credentials',), - datasource.sensitive_metadata_keys) + ( + "merged_cfg", + "security-credentials", + ), + datasource.sensitive_metadata_keys, + ) with mock.patch("cloudinit.util.system_info", return_value=sys_info): datasource.get_data() sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp) content = util.load_file(sensitive_json_file) expected = { - 'base64_encoded_keys': [], - 'merged_cfg': { - '_doc': ( - 'Merged cloud-init system config from ' - '/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/' + "base64_encoded_keys": [], + "merged_cfg": { + "_doc": ( + "Merged cloud-init system config from " + "/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/" + ), + "datasource": {"_undef": {"key1": False}}, + }, + "sensitive_keys": [ + "ds/meta_data/some/security-credentials", + "merged_cfg", + ], + "sys_info": sys_info, + "v1": { + "_beta_keys": ["subplatform"], + "availability-zone": "myaz", + "availability_zone": "myaz", + "cloud-name": "subclasscloudname", + "cloud_name": "subclasscloudname", + "distro": "ubuntu", + "distro_release": "focal", + "distro_version": "20.04", + "instance-id": "iid-datasource", + "instance_id": "iid-datasource", + "kernel_release": "5.4.0-24-generic", + "local-hostname": "test-subclass-hostname", + "local_hostname": "test-subclass-hostname", + "machine": "x86_64", + "platform": "mytestsubclass", + "public_ssh_keys": [], + "python_version": "3.7", + "region": "myregion", + "subplatform": "unknown", + "system_platform": ( + "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal" ), - 'datasource': {'_undef': {'key1': False}}}, - 'sensitive_keys': [ - 'ds/meta_data/some/security-credentials', 'merged_cfg'], - 'sys_info': sys_info, - 'v1': { - '_beta_keys': ['subplatform'], - 'availability-zone': 'myaz', - 'availability_zone': 'myaz', - 'cloud-name': 'subclasscloudname', - 'cloud_name': 'subclasscloudname', - 'distro': 'ubuntu', - 'distro_release': 'focal', - 'distro_version': '20.04', - 'instance-id': 'iid-datasource', - 'instance_id': 'iid-datasource', - 'kernel_release': '5.4.0-24-generic', - 'local-hostname': 'test-subclass-hostname', - 'local_hostname': 'test-subclass-hostname', - 'machine': 'x86_64', - 'platform': 'mytestsubclass', - 'public_ssh_keys': [], - 'python_version': '3.7', - 'region': 'myregion', - 'subplatform': 'unknown', - 'system_platform': - 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal', - 'variant': 'ubuntu'}, - 'ds': { - '_doc': EXPERIMENTAL_TEXT, - 'meta_data': { - 'availability_zone': 'myaz', - 'local-hostname': 'test-subclass-hostname', - 'region': 'myregion', - 'some': { - 'security-credentials': - {'cred1': 'sekret', 'cred2': 
'othersekret'}}}} + "variant": "ubuntu", + }, + "ds": { + "_doc": EXPERIMENTAL_TEXT, + "meta_data": { + "availability_zone": "myaz", + "local-hostname": "test-subclass-hostname", + "region": "myregion", + "some": { + "security-credentials": { + "cred1": "sekret", + "cred2": "othersekret", + } + }, + }, + }, } self.assertCountEqual(expected, util.load_json(content)) file_stat = os.stat(sensitive_json_file) @@ -491,69 +628,81 @@ class TestDataSource(CiTestCase): """get_data warns unserializable content in INSTANCE_JSON_FILE.""" tmp = self.tmp_dir() datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp}), - custom_metadata={'key1': 'val1', 'key2': {'key2.1': self.paths}}) + self.sys_cfg, + self.distro, + Paths({"run_dir": tmp}), + custom_metadata={"key1": "val1", "key2": {"key2.1": self.paths}}, + ) datasource.get_data() json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) content = util.load_file(json_file) expected_metadata = { - 'key1': 'val1', - 'key2': { - 'key2.1': "Warning: redacted unserializable type <class" - " 'cloudinit.helpers.Paths'>"}} + "key1": "val1", + "key2": { + "key2.1": ( + "Warning: redacted unserializable type <class" + " 'cloudinit.helpers.Paths'>" + ) + }, + } instance_json = util.load_json(content) - self.assertEqual( - expected_metadata, instance_json['ds']['meta_data']) + self.assertEqual(expected_metadata, instance_json["ds"]["meta_data"]) def test_persist_instance_data_writes_ec2_metadata_when_set(self): """When ec2_metadata class attribute is set, persist to json.""" tmp = self.tmp_dir() datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + self.sys_cfg, self.distro, Paths({"run_dir": tmp}) + ) datasource.ec2_metadata = UNSET datasource.get_data() json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) instance_data = util.load_json(util.load_file(json_file)) - self.assertNotIn('ec2_metadata', instance_data['ds']) - datasource.ec2_metadata = {'ec2stuff': 'is good'} + self.assertNotIn("ec2_metadata", instance_data["ds"]) + datasource.ec2_metadata = {"ec2stuff": "is good"} datasource.persist_instance_data() instance_data = util.load_json(util.load_file(json_file)) self.assertEqual( - {'ec2stuff': 'is good'}, - instance_data['ds']['ec2_metadata']) + {"ec2stuff": "is good"}, instance_data["ds"]["ec2_metadata"] + ) def test_persist_instance_data_writes_network_json_when_set(self): """When network_data.json class attribute is set, persist to json.""" tmp = self.tmp_dir() datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + self.sys_cfg, self.distro, Paths({"run_dir": tmp}) + ) datasource.get_data() json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) instance_data = util.load_json(util.load_file(json_file)) - self.assertNotIn('network_json', instance_data['ds']) - datasource.network_json = {'network_json': 'is good'} + self.assertNotIn("network_json", instance_data["ds"]) + datasource.network_json = {"network_json": "is good"} datasource.persist_instance_data() instance_data = util.load_json(util.load_file(json_file)) self.assertEqual( - {'network_json': 'is good'}, - instance_data['ds']['network_json']) + {"network_json": "is good"}, instance_data["ds"]["network_json"] + ) def test_get_data_base64encodes_unserializable_bytes(self): """On py3, get_data base64encodes any unserializable content.""" tmp = self.tmp_dir() datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp}), - custom_metadata={'key1': 'val1', 'key2': 
{'key2.1': b'\x123'}}) + self.sys_cfg, + self.distro, + Paths({"run_dir": tmp}), + custom_metadata={"key1": "val1", "key2": {"key2.1": b"\x123"}}, + ) self.assertTrue(datasource.get_data()) json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) content = util.load_file(json_file) instance_json = util.load_json(content) self.assertCountEqual( - ['ds/meta_data/key2/key2.1'], - instance_json['base64_encoded_keys']) + ["ds/meta_data/key2/key2.1"], instance_json["base64_encoded_keys"] + ) self.assertEqual( - {'key1': 'val1', 'key2': {'key2.1': 'EjM='}}, - instance_json['ds']['meta_data']) + {"key1": "val1", "key2": {"key2.1": "EjM="}}, + instance_json["ds"]["meta_data"], + ) def test_get_hostname_subclass_support(self): """Validate get_hostname signature on all subclasses of DataSource.""" @@ -561,23 +710,24 @@ class TestDataSource(CiTestCase): # Import all DataSource subclasses so we can inspect them. modules = util.find_modules(os.path.dirname(os.path.dirname(__file__))) for _loc, name in modules.items(): - mod_locs, _ = importer.find_module(name, ['cloudinit.sources'], []) + mod_locs, _ = importer.find_module(name, ["cloudinit.sources"], []) if mod_locs: importer.import_module(mod_locs[0]) for child in DataSource.__subclasses__(): - if 'Test' in child.dsname: + if "Test" in child.dsname: continue self.assertEqual( base_args, inspect.getfullargspec(child.get_hostname), - '%s does not implement DataSource.get_hostname params' - % child) + "%s does not implement DataSource.get_hostname params" % child, + ) for grandchild in child.__subclasses__(): self.assertEqual( base_args, inspect.getfullargspec(grandchild.get_hostname), - '%s does not implement DataSource.get_hostname params' - % grandchild) + "%s does not implement DataSource.get_hostname params" + % grandchild, + ) def test_clear_cached_attrs_resets_cached_attr_class_attributes(self): """Class attributes listed in cached_attr_defaults are reset.""" @@ -598,7 +748,7 @@ class TestDataSource(CiTestCase): for attr, _ in self.datasource.cached_attr_defaults: setattr(self.datasource, attr, count) count += 1 - self.datasource._dirty_cache = False # Fake clean cache + self.datasource._dirty_cache = False # Fake clean cache self.datasource.clear_cached_attrs() count = 0 for attr, _ in self.datasource.cached_attr_defaults: @@ -609,163 +759,194 @@ class TestDataSource(CiTestCase): """Skip any cached_attr_defaults which aren't class attributes.""" self.datasource._dirty_cache = True self.datasource.clear_cached_attrs() - for attr in ('ec2_metadata', 'network_json'): + for attr in ("ec2_metadata", "network_json"): self.assertFalse(hasattr(self.datasource, attr)) def test_clear_cached_attrs_of_custom_attrs(self): """Custom attr_values can be passed to clear_cached_attrs.""" self.datasource._dirty_cache = True cached_attr_name = self.datasource.cached_attr_defaults[0][0] - setattr(self.datasource, cached_attr_name, 'himom') - self.datasource.myattr = 'orig' + setattr(self.datasource, cached_attr_name, "himom") + self.datasource.myattr = "orig" self.datasource.clear_cached_attrs( - attr_defaults=(('myattr', 'updated'),)) - self.assertEqual('himom', getattr(self.datasource, cached_attr_name)) - self.assertEqual('updated', self.datasource.myattr) - - @mock.patch.dict(DataSource.default_update_events, { - EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) - @mock.patch.dict(DataSource.supported_update_events, { - EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + attr_defaults=(("myattr", "updated"),) + ) + self.assertEqual("himom", getattr(self.datasource, 
cached_attr_name)) + self.assertEqual("updated", self.datasource.myattr) + + @mock.patch.dict( + DataSource.default_update_events, + {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}, + ) + @mock.patch.dict( + DataSource.supported_update_events, + {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}, + ) def test_update_metadata_only_acts_on_supported_update_events(self): """update_metadata_if_supported wont get_data on unsupported events.""" self.assertEqual( {EventScope.NETWORK: set([EventType.BOOT_NEW_INSTANCE])}, - self.datasource.default_update_events + self.datasource.default_update_events, ) def fake_get_data(): - raise Exception('get_data should not be called') + raise Exception("get_data should not be called") self.datasource.get_data = fake_get_data self.assertFalse( self.datasource.update_metadata_if_supported( - source_event_types=[EventType.BOOT])) + source_event_types=[EventType.BOOT] + ) + ) - @mock.patch.dict(DataSource.supported_update_events, { - EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + @mock.patch.dict( + DataSource.supported_update_events, + {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}, + ) def test_update_metadata_returns_true_on_supported_update_event(self): """update_metadata_if_supported returns get_data on supported events""" + def fake_get_data(): return True self.datasource.get_data = fake_get_data - self.datasource._network_config = 'something' + self.datasource._network_config = "something" self.datasource._dirty_cache = True self.assertTrue( self.datasource.update_metadata_if_supported( source_event_types=[ - EventType.BOOT, EventType.BOOT_NEW_INSTANCE])) + EventType.BOOT, + EventType.BOOT_NEW_INSTANCE, + ] + ) + ) self.assertEqual(UNSET, self.datasource._network_config) self.assertIn( "DEBUG: Update datasource metadata and network config due to" " events: boot-new-instance", - self.logs.getvalue() + self.logs.getvalue(), ) class TestRedactSensitiveData(CiTestCase): - def test_redact_sensitive_data_noop_when_no_sensitive_keys_present(self): """When sensitive_keys is absent or empty from metadata do nothing.""" - md = {'my': 'data'} + md = {"my": "data"} self.assertEqual( - md, redact_sensitive_keys(md, redact_value='redacted')) - md['sensitive_keys'] = [] + md, redact_sensitive_keys(md, redact_value="redacted") + ) + md["sensitive_keys"] = [] self.assertEqual( - md, redact_sensitive_keys(md, redact_value='redacted')) + md, redact_sensitive_keys(md, redact_value="redacted") + ) def test_redact_sensitive_data_redacts_exact_match_name(self): """Only exact matched sensitive_keys are redacted from metadata.""" - md = {'sensitive_keys': ['md/secure'], - 'md': {'secure': 's3kr1t', 'insecure': 'publik'}} + md = { + "sensitive_keys": ["md/secure"], + "md": {"secure": "s3kr1t", "insecure": "publik"}, + } secure_md = copy.deepcopy(md) - secure_md['md']['secure'] = 'redacted' + secure_md["md"]["secure"] = "redacted" self.assertEqual( - secure_md, - redact_sensitive_keys(md, redact_value='redacted')) + secure_md, redact_sensitive_keys(md, redact_value="redacted") + ) def test_redact_sensitive_data_does_redacts_with_default_string(self): """When redact_value is absent, REDACT_SENSITIVE_VALUE is used.""" - md = {'sensitive_keys': ['md/secure'], - 'md': {'secure': 's3kr1t', 'insecure': 'publik'}} + md = { + "sensitive_keys": ["md/secure"], + "md": {"secure": "s3kr1t", "insecure": "publik"}, + } secure_md = copy.deepcopy(md) - secure_md['md']['secure'] = 'redacted for non-root user' - self.assertEqual( - secure_md, - redact_sensitive_keys(md)) + 
secure_md["md"]["secure"] = "redacted for non-root user" + self.assertEqual(secure_md, redact_sensitive_keys(md)) class TestCanonicalCloudID(CiTestCase): - def test_cloud_id_returns_platform_on_unknowns(self): """When region and cloud_name are unknown, return platform.""" self.assertEqual( - 'platform', - canonical_cloud_id(cloud_name=METADATA_UNKNOWN, - region=METADATA_UNKNOWN, - platform='platform')) + "platform", + canonical_cloud_id( + cloud_name=METADATA_UNKNOWN, + region=METADATA_UNKNOWN, + platform="platform", + ), + ) def test_cloud_id_returns_platform_on_none(self): """When region and cloud_name are unknown, return platform.""" self.assertEqual( - 'platform', - canonical_cloud_id(cloud_name=None, - region=None, - platform='platform')) + "platform", + canonical_cloud_id( + cloud_name=None, region=None, platform="platform" + ), + ) def test_cloud_id_returns_cloud_name_on_unknown_region(self): """When region is unknown, return cloud_name.""" for region in (None, METADATA_UNKNOWN): self.assertEqual( - 'cloudname', - canonical_cloud_id(cloud_name='cloudname', - region=region, - platform='platform')) + "cloudname", + canonical_cloud_id( + cloud_name="cloudname", region=region, platform="platform" + ), + ) def test_cloud_id_returns_platform_on_unknown_cloud_name(self): """When region is set but cloud_name is unknown return cloud_name.""" self.assertEqual( - 'platform', - canonical_cloud_id(cloud_name=METADATA_UNKNOWN, - region='region', - platform='platform')) + "platform", + canonical_cloud_id( + cloud_name=METADATA_UNKNOWN, + region="region", + platform="platform", + ), + ) def test_cloud_id_aws_based_on_region_and_cloud_name(self): """When cloud_name is aws, return proper cloud-id based on region.""" self.assertEqual( - 'aws-china', - canonical_cloud_id(cloud_name='aws', - region='cn-north-1', - platform='platform')) + "aws-china", + canonical_cloud_id( + cloud_name="aws", region="cn-north-1", platform="platform" + ), + ) self.assertEqual( - 'aws', - canonical_cloud_id(cloud_name='aws', - region='us-east-1', - platform='platform')) + "aws", + canonical_cloud_id( + cloud_name="aws", region="us-east-1", platform="platform" + ), + ) self.assertEqual( - 'aws-gov', - canonical_cloud_id(cloud_name='aws', - region='us-gov-1', - platform='platform')) + "aws-gov", + canonical_cloud_id( + cloud_name="aws", region="us-gov-1", platform="platform" + ), + ) self.assertEqual( # Overrideen non-aws cloud_name is returned - '!aws', - canonical_cloud_id(cloud_name='!aws', - region='us-gov-1', - platform='platform')) + "!aws", + canonical_cloud_id( + cloud_name="!aws", region="us-gov-1", platform="platform" + ), + ) def test_cloud_id_azure_based_on_region_and_cloud_name(self): """Report cloud-id when cloud_name is azure and region is in china.""" self.assertEqual( - 'azure-china', - canonical_cloud_id(cloud_name='azure', - region='chinaeast', - platform='platform')) + "azure-china", + canonical_cloud_id( + cloud_name="azure", region="chinaeast", platform="platform" + ), + ) self.assertEqual( - 'azure', - canonical_cloud_id(cloud_name='azure', - region='!chinaeast', - platform='platform')) + "azure", + canonical_cloud_id( + cloud_name="azure", region="!chinaeast", platform="platform" + ), + ) + # vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_lxd.py b/tests/unittests/sources/test_lxd.py index a6e51f3b..ad1508a0 100644 --- a/tests/unittests/sources/test_lxd.py +++ b/tests/unittests/sources/test_lxd.py @@ -1,18 +1,19 @@ # This file is part of cloud-init. 
See LICENSE file for license information. -from collections import namedtuple -from copy import deepcopy import json import re import stat +from collections import namedtuple +from copy import deepcopy from unittest import mock -import yaml import pytest +import yaml + +from cloudinit.sources import UNSET +from cloudinit.sources import DataSourceLXD as lxd +from cloudinit.sources import InvalidMetaDataException -from cloudinit.sources import ( - DataSourceLXD as lxd, InvalidMetaDataException, UNSET -) DS_PATH = "cloudinit.sources.DataSourceLXD." @@ -23,10 +24,11 @@ NETWORK_V1 = { "version": 1, "config": [ { - "type": "physical", "name": "eth0", - "subnets": [{"type": "dhcp", "control": "auto"}] + "type": "physical", + "name": "eth0", + "subnets": [{"type": "dhcp", "control": "auto"}], } - ] + ], } @@ -43,12 +45,10 @@ LXD_V1_METADATA = { "user-data": "#cloud-config\npackages: [sl]\n", "vendor-data": "#cloud-config\nruncmd: ['echo vendor-data']\n", "config": { - "user.user-data": - "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n", - "user.vendor-data": - "#cloud-config\nruncmd: ['echo vendor-data']\n", + "user.user-data": "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n", + "user.vendor-data": "#cloud-config\nruncmd: ['echo vendor-data']\n", "user.network-config": yaml.safe_dump(NETWORK_V1), - } + }, } @@ -76,9 +76,9 @@ def lxd_ds(request, paths, lxd_metadata): class TestGenerateFallbackNetworkConfig: - @pytest.mark.parametrize( - "uname_machine,systemd_detect_virt,expected", ( + "uname_machine,systemd_detect_virt,expected", + ( # None for systemd_detect_virt returns None from which ({}, None, NETWORK_V1), ({}, None, NETWORK_V1), @@ -86,8 +86,8 @@ class TestGenerateFallbackNetworkConfig: # `uname -m` on kvm determines devname ("x86_64", "kvm\n", _add_network_v1_device("enp5s0")), ("ppc64le", "kvm\n", _add_network_v1_device("enp0s5")), - ("s390x", "kvm\n", _add_network_v1_device("enc9")) - ) + ("s390x", "kvm\n", _add_network_v1_device("enc9")), + ), ) @mock.patch(DS_PATH + "util.system_info") @mock.patch(DS_PATH + "subp.subp") @@ -145,11 +145,12 @@ class TestDataSourceLXD: class TestIsPlatformViable: @pytest.mark.parametrize( - "exists,lstat_mode,expected", ( + "exists,lstat_mode,expected", + ( (False, None, False), (True, stat.S_IFREG, False), (True, stat.S_IFSOCK, True), - ) + ), ) @mock.patch(DS_PATH + "os.lstat") @mock.patch(DS_PATH + "os.path.exists") @@ -169,7 +170,8 @@ class TestIsPlatformViable: class TestReadMetadata: @pytest.mark.parametrize( - "url_responses,expected,logs", ( + "url_responses,expected,logs", + ( ( # Assert non-JSON format from config route { "http://lxd/1.0/meta-data": "local-hostname: md\n", @@ -178,31 +180,38 @@ class TestReadMetadata: InvalidMetaDataException( "Unable to determine cloud-init config from" " http://lxd/1.0/config. 
Expected JSON but found:" - " [NOT_JSON"), - ["[GET] [HTTP:200] http://lxd/1.0/meta-data", - "[GET] [HTTP:200] http://lxd/1.0/config"], + " [NOT_JSON" + ), + [ + "[GET] [HTTP:200] http://lxd/1.0/meta-data", + "[GET] [HTTP:200] http://lxd/1.0/config", + ], ), - ( # Assert success on just meta-data + ( # Assert success on just meta-data { "http://lxd/1.0/meta-data": "local-hostname: md\n", "http://lxd/1.0/config": "[]", }, { "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, - "config": {}, "meta-data": "local-hostname: md\n" + "config": {}, + "meta-data": "local-hostname: md\n", }, - ["[GET] [HTTP:200] http://lxd/1.0/meta-data", - "[GET] [HTTP:200] http://lxd/1.0/config"], + [ + "[GET] [HTTP:200] http://lxd/1.0/meta-data", + "[GET] [HTTP:200] http://lxd/1.0/config", + ], ), - ( # Assert 404s for config routes log skipping + ( # Assert 404s for config routes log skipping { "http://lxd/1.0/meta-data": "local-hostname: md\n", - "http://lxd/1.0/config": + "http://lxd/1.0/config": ( '["/1.0/config/user.custom1",' ' "/1.0/config/user.meta-data",' ' "/1.0/config/user.network-config",' ' "/1.0/config/user.user-data",' - ' "/1.0/config/user.vendor-data"]', + ' "/1.0/config/user.vendor-data"]' + ), "http://lxd/1.0/config/user.custom1": "custom1", "http://lxd/1.0/config/user.meta-data": "", # 404 "http://lxd/1.0/config/user.network-config": "net-config", @@ -212,7 +221,7 @@ class TestReadMetadata: { "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, "config": { - "user.custom1": "custom1", # Not promoted + "user.custom1": "custom1", # Not promoted "user.network-config": "net-config", }, "meta-data": "local-hostname: md\n", @@ -231,15 +240,16 @@ class TestReadMetadata: " http://lxd/1.0/config/user.network-config", ], ), - ( # Assert all CONFIG_KEY_ALIASES promoted to top-level keys + ( # Assert all CONFIG_KEY_ALIASES promoted to top-level keys { "http://lxd/1.0/meta-data": "local-hostname: md\n", - "http://lxd/1.0/config": + "http://lxd/1.0/config": ( '["/1.0/config/user.custom1",' ' "/1.0/config/user.meta-data",' ' "/1.0/config/user.network-config",' ' "/1.0/config/user.user-data",' - ' "/1.0/config/user.vendor-data"]', + ' "/1.0/config/user.vendor-data"]' + ), "http://lxd/1.0/config/user.custom1": "custom1", "http://lxd/1.0/config/user.meta-data": "meta-data", "http://lxd/1.0/config/user.network-config": "net-config", @@ -249,7 +259,7 @@ class TestReadMetadata: { "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, "config": { - "user.custom1": "custom1", # Not promoted + "user.custom1": "custom1", # Not promoted "user.meta-data": "meta-data", "user.network-config": "net-config", "user.user-data": "user-data", @@ -271,31 +281,38 @@ class TestReadMetadata: "[GET] [HTTP:200] http://lxd/1.0/config/user.vendor-data", ], ), - ( # Assert cloud-init.* config key values prefered over user.* + ( # Assert cloud-init.* config key values prefered over user.* { "http://lxd/1.0/meta-data": "local-hostname: md\n", - "http://lxd/1.0/config": + "http://lxd/1.0/config": ( '["/1.0/config/user.meta-data",' ' "/1.0/config/user.network-config",' ' "/1.0/config/user.user-data",' ' "/1.0/config/user.vendor-data",' ' "/1.0/config/cloud-init.network-config",' ' "/1.0/config/cloud-init.user-data",' - ' "/1.0/config/cloud-init.vendor-data"]', + ' "/1.0/config/cloud-init.vendor-data"]' + ), "http://lxd/1.0/config/user.meta-data": "user.meta-data", - "http://lxd/1.0/config/user.network-config": - "user.network-config", + "http://lxd/1.0/config/user.network-config": ( + "user.network-config" + ), 
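                    # Illustrative annotation, not part of the commit: this
                    # case serves both user.* and cloud-init.* variants of
                    # each key; read_metadata keeps both under "config", but
                    # the promoted top-level user-data/vendor-data/
                    # network-config values come from the cloud-init.* keys,
                    # as the "Ignoring ... in favor of ..." log assertions
                    # below record.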
"http://lxd/1.0/config/user.user-data": "user.user-data", - "http://lxd/1.0/config/user.vendor-data": - "user.vendor-data", - "http://lxd/1.0/config/cloud-init.meta-data": - "cloud-init.meta-data", - "http://lxd/1.0/config/cloud-init.network-config": - "cloud-init.network-config", - "http://lxd/1.0/config/cloud-init.user-data": - "cloud-init.user-data", - "http://lxd/1.0/config/cloud-init.vendor-data": - "cloud-init.vendor-data", + "http://lxd/1.0/config/user.vendor-data": ( + "user.vendor-data" + ), + "http://lxd/1.0/config/cloud-init.meta-data": ( + "cloud-init.meta-data" + ), + "http://lxd/1.0/config/cloud-init.network-config": ( + "cloud-init.network-config" + ), + "http://lxd/1.0/config/cloud-init.user-data": ( + "cloud-init.user-data" + ), + "http://lxd/1.0/config/cloud-init.vendor-data": ( + "cloud-init.vendor-data" + ), }, { "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, @@ -304,11 +321,11 @@ class TestReadMetadata: "user.network-config": "user.network-config", "user.user-data": "user.user-data", "user.vendor-data": "user.vendor-data", - "cloud-init.network-config": - "cloud-init.network-config", + "cloud-init.network-config": ( + "cloud-init.network-config" + ), "cloud-init.user-data": "cloud-init.user-data", - "cloud-init.vendor-data": - "cloud-init.vendor-data", + "cloud-init.vendor-data": "cloud-init.vendor-data", }, "meta-data": "local-hostname: md\n", "network-config": "cloud-init.network-config", @@ -337,9 +354,9 @@ class TestReadMetadata: " cloud-init.vendor-data value.", ], ), - ) + ), ) - @mock.patch.object(lxd.requests.Session, 'get') + @mock.patch.object(lxd.requests.Session, "get") def test_read_metadata_handles_unexpected_content_or_http_status( self, session_get, url_responses, expected, logs, caplog ): @@ -348,7 +365,7 @@ class TestReadMetadata: def fake_get(url): """Mock Response json, ok, status_code, text from url_responses.""" m_resp = mock.MagicMock() - content = url_responses.get(url, '') + content = url_responses.get(url, "") m_resp.json.side_effect = lambda: json.loads(content) if content: mock_ok = mock.PropertyMock(return_value=True) @@ -373,4 +390,5 @@ class TestReadMetadata: for log in logs: assert log in caplogs + # vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_maas.py b/tests/unittests/sources/test_maas.py index 34b79587..e95ba374 100644 --- a/tests/unittests/sources/test_maas.py +++ b/tests/unittests/sources/test_maas.py @@ -1,19 +1,19 @@ # This file is part of cloud-init. See LICENSE file for license information. -from copy import copy import os import shutil import tempfile -import yaml +from copy import copy from unittest import mock -from cloudinit.sources import DataSourceMAAS +import yaml + from cloudinit import url_helper +from cloudinit.sources import DataSourceMAAS from tests.unittests.helpers import CiTestCase, populate_dir class TestMAASDataSource(CiTestCase): - def setUp(self): super(TestMAASDataSource, self).setUp() # Make a temp directoy for tests to use. 
@@ -23,11 +23,13 @@ class TestMAASDataSource(CiTestCase): def test_seed_dir_valid(self): """Verify a valid seeddir is read as such.""" - userdata = b'valid01-userdata' - data = {'meta-data/instance-id': 'i-valid01', - 'meta-data/local-hostname': 'valid01-hostname', - 'user-data': userdata, - 'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'} + userdata = b"valid01-userdata" + data = { + "meta-data/instance-id": "i-valid01", + "meta-data/local-hostname": "valid01-hostname", + "user-data": userdata, + "public-keys": "ssh-rsa AAAAB3Nz...aC1yc2E= keyname", + } my_d = os.path.join(self.tmp, "valid") populate_dir(my_d, data) @@ -35,20 +37,23 @@ class TestMAASDataSource(CiTestCase): ud, md, vd = DataSourceMAAS.read_maas_seed_dir(my_d) self.assertEqual(userdata, ud) - for key in ('instance-id', 'local-hostname'): + for key in ("instance-id", "local-hostname"): self.assertEqual(data["meta-data/" + key], md[key]) # verify that 'userdata' is not returned as part of the metadata - self.assertFalse(('user-data' in md)) + self.assertFalse(("user-data" in md)) self.assertIsNone(vd) def test_seed_dir_valid_extra(self): """Verify extra files do not affect seed_dir validity.""" - userdata = b'valid-extra-userdata' - data = {'meta-data/instance-id': 'i-valid-extra', - 'meta-data/local-hostname': 'valid-extra-hostname', - 'user-data': userdata, 'foo': 'bar'} + userdata = b"valid-extra-userdata" + data = { + "meta-data/instance-id": "i-valid-extra", + "meta-data/local-hostname": "valid-extra-hostname", + "user-data": userdata, + "foo": "bar", + } my_d = os.path.join(self.tmp, "valid_extra") populate_dir(my_d, data) @@ -56,62 +61,77 @@ class TestMAASDataSource(CiTestCase): ud, md, _vd = DataSourceMAAS.read_maas_seed_dir(my_d) self.assertEqual(userdata, ud) - for key in ('instance-id', 'local-hostname'): - self.assertEqual(data['meta-data/' + key], md[key]) + for key in ("instance-id", "local-hostname"): + self.assertEqual(data["meta-data/" + key], md[key]) # additional files should not just appear as keys in metadata atm - self.assertFalse(('foo' in md)) + self.assertFalse(("foo" in md)) def test_seed_dir_invalid(self): """Verify that invalid seed_dir raises MAASSeedDirMalformed.""" - valid = {'instance-id': 'i-instanceid', - 'local-hostname': 'test-hostname', 'user-data': ''} + valid = { + "instance-id": "i-instanceid", + "local-hostname": "test-hostname", + "user-data": "", + } my_based = os.path.join(self.tmp, "valid_extra") # missing 'userdata' file my_d = "%s-01" % my_based invalid_data = copy(valid) - del invalid_data['local-hostname'] + del invalid_data["local-hostname"] populate_dir(my_d, invalid_data) - self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed, - DataSourceMAAS.read_maas_seed_dir, my_d) + self.assertRaises( + DataSourceMAAS.MAASSeedDirMalformed, + DataSourceMAAS.read_maas_seed_dir, + my_d, + ) # missing 'instance-id' my_d = "%s-02" % my_based invalid_data = copy(valid) - del invalid_data['instance-id'] + del invalid_data["instance-id"] populate_dir(my_d, invalid_data) - self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed, - DataSourceMAAS.read_maas_seed_dir, my_d) + self.assertRaises( + DataSourceMAAS.MAASSeedDirMalformed, + DataSourceMAAS.read_maas_seed_dir, + my_d, + ) def test_seed_dir_none(self): """Verify that empty seed_dir raises MAASSeedDirNone.""" my_d = os.path.join(self.tmp, "valid_empty") - self.assertRaises(DataSourceMAAS.MAASSeedDirNone, - DataSourceMAAS.read_maas_seed_dir, my_d) + self.assertRaises( + DataSourceMAAS.MAASSeedDirNone, + DataSourceMAAS.read_maas_seed_dir, 
+ my_d, + ) def test_seed_dir_missing(self): """Verify that missing seed_dir raises MAASSeedDirNone.""" - self.assertRaises(DataSourceMAAS.MAASSeedDirNone, - DataSourceMAAS.read_maas_seed_dir, - os.path.join(self.tmp, "nonexistantdirectory")) + self.assertRaises( + DataSourceMAAS.MAASSeedDirNone, + DataSourceMAAS.read_maas_seed_dir, + os.path.join(self.tmp, "nonexistantdirectory"), + ) def mock_read_maas_seed_url(self, data, seed, version="19991231"): """mock up readurl to appear as a web server at seed has provided data. return what read_maas_seed_url returns.""" + def my_readurl(*args, **kwargs): if len(args): url = args[0] else: - url = kwargs['url'] + url = kwargs["url"] prefix = "%s/%s/" % (seed, version) if not url.startswith(prefix): raise ValueError("unexpected call %s" % url) - short = url[len(prefix):] + short = url[len(prefix) :] if short not in data: raise url_helper.UrlError("not found", code=404, url=url) return url_helper.StringResponse(data[short]) @@ -124,44 +144,48 @@ class TestMAASDataSource(CiTestCase): def test_seed_url_valid(self): """Verify that valid seed_url is read as such.""" valid = { - 'meta-data/instance-id': 'i-instanceid', - 'meta-data/local-hostname': 'test-hostname', - 'meta-data/public-keys': 'test-hostname', - 'meta-data/vendor-data': b'my-vendordata', - 'user-data': b'foodata', + "meta-data/instance-id": "i-instanceid", + "meta-data/local-hostname": "test-hostname", + "meta-data/public-keys": "test-hostname", + "meta-data/vendor-data": b"my-vendordata", + "user-data": b"foodata", } my_seed = "http://example.com/xmeta" my_ver = "1999-99-99" ud, md, vd = self.mock_read_maas_seed_url(valid, my_seed, my_ver) - self.assertEqual(valid['meta-data/instance-id'], md['instance-id']) + self.assertEqual(valid["meta-data/instance-id"], md["instance-id"]) self.assertEqual( - valid['meta-data/local-hostname'], md['local-hostname']) - self.assertEqual(valid['meta-data/public-keys'], md['public-keys']) - self.assertEqual(valid['user-data'], ud) + valid["meta-data/local-hostname"], md["local-hostname"] + ) + self.assertEqual(valid["meta-data/public-keys"], md["public-keys"]) + self.assertEqual(valid["user-data"], ud) # vendor-data is yaml, which decodes a string - self.assertEqual(valid['meta-data/vendor-data'].decode(), vd) + self.assertEqual(valid["meta-data/vendor-data"].decode(), vd) def test_seed_url_vendor_data_dict(self): - expected_vd = {'key1': 'value1'} + expected_vd = {"key1": "value1"} valid = { - 'meta-data/instance-id': 'i-instanceid', - 'meta-data/local-hostname': 'test-hostname', - 'meta-data/vendor-data': yaml.safe_dump(expected_vd).encode(), + "meta-data/instance-id": "i-instanceid", + "meta-data/local-hostname": "test-hostname", + "meta-data/vendor-data": yaml.safe_dump(expected_vd).encode(), } _ud, md, vd = self.mock_read_maas_seed_url( - valid, "http://example.com/foo") + valid, "http://example.com/foo" + ) - self.assertEqual(valid['meta-data/instance-id'], md['instance-id']) + self.assertEqual(valid["meta-data/instance-id"], md["instance-id"]) self.assertEqual(expected_vd, vd) @mock.patch("cloudinit.sources.DataSourceMAAS.url_helper.OauthUrlHelper") class TestGetOauthHelper(CiTestCase): - base_cfg = {'consumer_key': 'FAKE_CONSUMER_KEY', - 'token_key': 'FAKE_TOKEN_KEY', - 'token_secret': 'FAKE_TOKEN_SECRET', - 'consumer_secret': None} + base_cfg = { + "consumer_key": "FAKE_CONSUMER_KEY", + "token_key": "FAKE_TOKEN_KEY", + "token_secret": "FAKE_TOKEN_SECRET", + "consumer_secret": None, + } def test_all_required(self, m_helper): """Valid config as 
expected.""" @@ -171,17 +195,20 @@ class TestGetOauthHelper(CiTestCase): def test_other_fields_not_passed_through(self, m_helper): """Only relevant fields are passed through.""" mycfg = self.base_cfg.copy() - mycfg['unrelated_field'] = 'unrelated' + mycfg["unrelated_field"] = "unrelated" DataSourceMAAS.get_oauth_helper(mycfg) m_helper.assert_has_calls([mock.call(**self.base_cfg)]) class TestGetIdHash(CiTestCase): - v1_cfg = {'consumer_key': 'CKEY', 'token_key': 'TKEY', - 'token_secret': 'TSEC'} + v1_cfg = { + "consumer_key": "CKEY", + "token_key": "TKEY", + "token_secret": "TSEC", + } v1_id = ( - 'v1:' - '403ee5f19c956507f1d0e50814119c405902137ea4f8838bde167c5da8110392') + "v1:403ee5f19c956507f1d0e50814119c405902137ea4f8838bde167c5da8110392" + ) def test_v1_expected(self): """Test v1 id generated as expected working behavior from config.""" @@ -191,8 +218,8 @@ class TestGetIdHash(CiTestCase): def test_v1_extra_fields_are_ignored(self): """Test v1 id ignores unused entries in config.""" cfg = self.v1_cfg.copy() - cfg['consumer_secret'] = "BOO" - cfg['unrelated'] = "HI MOM" + cfg["consumer_secret"] = "BOO" + cfg["unrelated"] = "HI MOM" result = DataSourceMAAS.get_id_from_ds_cfg(cfg) self.assertEqual(self.v1_id, result) diff --git a/tests/unittests/sources/test_nocloud.py b/tests/unittests/sources/test_nocloud.py index 26f91054..1f6b722d 100644 --- a/tests/unittests/sources/test_nocloud.py +++ b/tests/unittests/sources/test_nocloud.py @@ -1,27 +1,27 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit import dmi -from cloudinit import helpers -from cloudinit.sources.DataSourceNoCloud import ( - DataSourceNoCloud as dsNoCloud, - _maybe_remove_top_network, - parse_cmdline_data) -from cloudinit import util -from tests.unittests.helpers import CiTestCase, populate_dir, mock, ExitStack - import os import textwrap + import yaml +from cloudinit import dmi, helpers, util +from cloudinit.sources.DataSourceNoCloud import DataSourceNoCloud as dsNoCloud +from cloudinit.sources.DataSourceNoCloud import ( + _maybe_remove_top_network, + parse_cmdline_data, +) +from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir + -@mock.patch('cloudinit.sources.DataSourceNoCloud.util.is_lxd') +@mock.patch("cloudinit.sources.DataSourceNoCloud.util.is_lxd") class TestNoCloudDataSource(CiTestCase): - def setUp(self): super(TestNoCloudDataSource, self).setUp() self.tmp = self.tmp_dir() self.paths = helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp}) + {"cloud_dir": self.tmp, "run_dir": self.tmp} + ) self.cmdline = "root=TESTCMDLINE" @@ -29,77 +29,77 @@ class TestNoCloudDataSource(CiTestCase): self.addCleanup(self.mocks.close) self.mocks.enter_context( - mock.patch.object(util, 'get_cmdline', return_value=self.cmdline)) + mock.patch.object(util, "get_cmdline", return_value=self.cmdline) + ) self.mocks.enter_context( - mock.patch.object(dmi, 'read_dmi_data', return_value=None)) + mock.patch.object(dmi, "read_dmi_data", return_value=None) + ) def _test_fs_config_is_read(self, fs_label, fs_label_to_search): - vfat_device = 'device-1' + vfat_device = "device-1" def m_mount_cb(device, callback, mtype): - if (device == vfat_device): - return {'meta-data': yaml.dump({'instance-id': 'IID'})} + if device == vfat_device: + return {"meta-data": yaml.dump({"instance-id": "IID"})} else: return {} - def m_find_devs_with(query='', path=''): - if 'TYPE=vfat' == query: + def m_find_devs_with(query="", path=""): + if "TYPE=vfat" == query: return [vfat_device] - elif 
'LABEL={}'.format(fs_label) == query: + elif "LABEL={}".format(fs_label) == query: return [vfat_device] else: return [] self.mocks.enter_context( - mock.patch.object(util, 'find_devs_with', - side_effect=m_find_devs_with)) + mock.patch.object( + util, "find_devs_with", side_effect=m_find_devs_with + ) + ) self.mocks.enter_context( - mock.patch.object(util, 'mount_cb', - side_effect=m_mount_cb)) - sys_cfg = {'datasource': {'NoCloud': {'fs_label': fs_label_to_search}}} + mock.patch.object(util, "mount_cb", side_effect=m_mount_cb) + ) + sys_cfg = {"datasource": {"NoCloud": {"fs_label": fs_label_to_search}}} dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() - self.assertEqual(dsrc.metadata.get('instance-id'), 'IID') + self.assertEqual(dsrc.metadata.get("instance-id"), "IID") self.assertTrue(ret) def test_nocloud_seed_dir_on_lxd(self, m_is_lxd): - md = {'instance-id': 'IID', 'dsmode': 'local'} + md = {"instance-id": "IID", "dsmode": "local"} ud = b"USER_DATA_HERE" seed_dir = os.path.join(self.paths.seed_dir, "nocloud") - populate_dir(seed_dir, - {'user-data': ud, 'meta-data': yaml.safe_dump(md)}) + populate_dir( + seed_dir, {"user-data": ud, "meta-data": yaml.safe_dump(md)} + ) - sys_cfg = { - 'datasource': {'NoCloud': {'fs_label': None}} - } + sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}} dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() self.assertEqual(dsrc.userdata_raw, ud) self.assertEqual(dsrc.metadata, md) - self.assertEqual(dsrc.platform_type, 'lxd') - self.assertEqual( - dsrc.subplatform, 'seed-dir (%s)' % seed_dir) + self.assertEqual(dsrc.platform_type, "lxd") + self.assertEqual(dsrc.subplatform, "seed-dir (%s)" % seed_dir) self.assertTrue(ret) def test_nocloud_seed_dir_non_lxd_platform_is_nocloud(self, m_is_lxd): """Non-lxd environments will list nocloud as the platform.""" m_is_lxd.return_value = False - md = {'instance-id': 'IID', 'dsmode': 'local'} + md = {"instance-id": "IID", "dsmode": "local"} seed_dir = os.path.join(self.paths.seed_dir, "nocloud") - populate_dir(seed_dir, - {'user-data': '', 'meta-data': yaml.safe_dump(md)}) + populate_dir( + seed_dir, {"user-data": "", "meta-data": yaml.safe_dump(md)} + ) - sys_cfg = { - 'datasource': {'NoCloud': {'fs_label': None}} - } + sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}} dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) self.assertTrue(dsrc.get_data()) - self.assertEqual(dsrc.platform_type, 'nocloud') - self.assertEqual( - dsrc.subplatform, 'seed-dir (%s)' % seed_dir) + self.assertEqual(dsrc.platform_type, "nocloud") + self.assertEqual(dsrc.subplatform, "seed-dir (%s)" % seed_dir) def test_fs_label(self, m_is_lxd): # find_devs_with should not be called ff fs_label is None @@ -107,65 +107,70 @@ class TestNoCloudDataSource(CiTestCase): pass self.mocks.enter_context( - mock.patch.object(util, 'find_devs_with', - side_effect=PsuedoException)) + mock.patch.object( + util, "find_devs_with", side_effect=PsuedoException + ) + ) # by default, NoCloud should search for filesystems by label - sys_cfg = {'datasource': {'NoCloud': {}}} + sys_cfg = {"datasource": {"NoCloud": {}}} dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) self.assertRaises(PsuedoException, dsrc.get_data) # but disabling searching should just end up with None found - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}} dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, 
paths=self.paths) ret = dsrc.get_data() self.assertFalse(ret) def test_fs_config_lowercase_label(self, m_is_lxd): - self._test_fs_config_is_read('cidata', 'cidata') + self._test_fs_config_is_read("cidata", "cidata") def test_fs_config_uppercase_label(self, m_is_lxd): - self._test_fs_config_is_read('CIDATA', 'cidata') + self._test_fs_config_is_read("CIDATA", "cidata") def test_fs_config_lowercase_label_search_uppercase(self, m_is_lxd): - self._test_fs_config_is_read('cidata', 'CIDATA') + self._test_fs_config_is_read("cidata", "CIDATA") def test_fs_config_uppercase_label_search_uppercase(self, m_is_lxd): - self._test_fs_config_is_read('CIDATA', 'CIDATA') + self._test_fs_config_is_read("CIDATA", "CIDATA") def test_no_datasource_expected(self, m_is_lxd): # no source should be found if no cmdline, config, and fs_label=None - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}} dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) self.assertFalse(dsrc.get_data()) def test_seed_in_config(self, m_is_lxd): data = { - 'fs_label': None, - 'meta-data': yaml.safe_dump({'instance-id': 'IID'}), - 'user-data': b"USER_DATA_RAW", + "fs_label": None, + "meta-data": yaml.safe_dump({"instance-id": "IID"}), + "user-data": b"USER_DATA_RAW", } - sys_cfg = {'datasource': {'NoCloud': data}} + sys_cfg = {"datasource": {"NoCloud": data}} dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() self.assertEqual(dsrc.userdata_raw, b"USER_DATA_RAW") - self.assertEqual(dsrc.metadata.get('instance-id'), 'IID') + self.assertEqual(dsrc.metadata.get("instance-id"), "IID") self.assertTrue(ret) def test_nocloud_seed_with_vendordata(self, m_is_lxd): - md = {'instance-id': 'IID', 'dsmode': 'local'} + md = {"instance-id": "IID", "dsmode": "local"} ud = b"USER_DATA_HERE" vd = b"THIS IS MY VENDOR_DATA" - populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), - {'user-data': ud, 'meta-data': yaml.safe_dump(md), - 'vendor-data': vd}) + populate_dir( + os.path.join(self.paths.seed_dir, "nocloud"), + { + "user-data": ud, + "meta-data": yaml.safe_dump(md), + "vendor-data": vd, + }, + ) - sys_cfg = { - 'datasource': {'NoCloud': {'fs_label': None}} - } + sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}} dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() @@ -175,10 +180,12 @@ class TestNoCloudDataSource(CiTestCase): self.assertTrue(ret) def test_nocloud_no_vendordata(self, m_is_lxd): - populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), - {'user-data': b"ud", 'meta-data': "instance-id: IID\n"}) + populate_dir( + os.path.join(self.paths.seed_dir, "nocloud"), + {"user-data": b"ud", "meta-data": "instance-id: IID\n"}, + ) - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}} dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() @@ -189,23 +196,28 @@ class TestNoCloudDataSource(CiTestCase): def test_metadata_network_interfaces(self, m_is_lxd): gateway = "103.225.10.1" md = { - 'instance-id': 'i-abcd', - 'local-hostname': 'hostname1', - 'network-interfaces': textwrap.dedent("""\ + "instance-id": "i-abcd", + "local-hostname": "hostname1", + "network-interfaces": textwrap.dedent( + """\ auto eth0 iface eth0 inet static hwaddr 00:16:3e:70:e1:04 address 103.225.10.12 netmask 255.255.255.0 - gateway """ + gateway + """ - dns-servers 8.8.8.8""")} + gateway """ + + gateway + + """ + dns-servers 
8.8.8.8""" + ), + } populate_dir( os.path.join(self.paths.seed_dir, "nocloud"), - {'user-data': b"ud", - 'meta-data': yaml.dump(md) + "\n"}) + {"user-data": b"ud", "meta-data": yaml.dump(md) + "\n"}, + ) - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}} dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() @@ -215,16 +227,26 @@ class TestNoCloudDataSource(CiTestCase): def test_metadata_network_config(self, m_is_lxd): # network-config needs to get into network_config - netconf = {'version': 1, - 'config': [{'type': 'physical', 'name': 'interface0', - 'subnets': [{'type': 'dhcp'}]}]} + netconf = { + "version": 1, + "config": [ + { + "type": "physical", + "name": "interface0", + "subnets": [{"type": "dhcp"}], + } + ], + } populate_dir( os.path.join(self.paths.seed_dir, "nocloud"), - {'user-data': b"ud", - 'meta-data': "instance-id: IID\n", - 'network-config': yaml.dump(netconf) + "\n"}) + { + "user-data": b"ud", + "meta-data": "instance-id: IID\n", + "network-config": yaml.dump(netconf) + "\n", + }, + ) - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}} dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() @@ -233,14 +255,17 @@ class TestNoCloudDataSource(CiTestCase): def test_metadata_network_config_with_toplevel_network(self, m_is_lxd): """network-config may have 'network' top level key.""" - netconf = {'config': 'disabled'} + netconf = {"config": "disabled"} populate_dir( os.path.join(self.paths.seed_dir, "nocloud"), - {'user-data': b"ud", - 'meta-data': "instance-id: IID\n", - 'network-config': yaml.dump({'network': netconf}) + "\n"}) + { + "user-data": b"ud", + "meta-data": "instance-id: IID\n", + "network-config": yaml.dump({"network": netconf}) + "\n", + }, + ) - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}} dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() @@ -251,27 +276,42 @@ class TestNoCloudDataSource(CiTestCase): # network-config should override meta-data/network-interfaces gateway = "103.225.10.1" md = { - 'instance-id': 'i-abcd', - 'local-hostname': 'hostname1', - 'network-interfaces': textwrap.dedent("""\ + "instance-id": "i-abcd", + "local-hostname": "hostname1", + "network-interfaces": textwrap.dedent( + """\ auto eth0 iface eth0 inet static hwaddr 00:16:3e:70:e1:04 address 103.225.10.12 netmask 255.255.255.0 - gateway """ + gateway + """ - dns-servers 8.8.8.8""")} + gateway """ + + gateway + + """ + dns-servers 8.8.8.8""" + ), + } - netconf = {'version': 1, - 'config': [{'type': 'physical', 'name': 'interface0', - 'subnets': [{'type': 'dhcp'}]}]} + netconf = { + "version": 1, + "config": [ + { + "type": "physical", + "name": "interface0", + "subnets": [{"type": "dhcp"}], + } + ], + } populate_dir( os.path.join(self.paths.seed_dir, "nocloud"), - {'user-data': b"ud", - 'meta-data': yaml.dump(md) + "\n", - 'network-config': yaml.dump(netconf) + "\n"}) + { + "user-data": b"ud", + "meta-data": yaml.dump(md) + "\n", + "network-config": yaml.dump(netconf) + "\n", + }, + ) - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}} dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() @@ -281,17 +321,24 @@ class TestNoCloudDataSource(CiTestCase): @mock.patch("cloudinit.util.blkid") 
def test_nocloud_get_devices_freebsd(self, m_is_lxd, fake_blkid): - populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), - {'user-data': b"ud", 'meta-data': "instance-id: IID\n"}) + populate_dir( + os.path.join(self.paths.seed_dir, "nocloud"), + {"user-data": b"ud", "meta-data": "instance-id: IID\n"}, + ) - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}} self.mocks.enter_context( - mock.patch.object(util, 'is_FreeBSD', return_value=True)) + mock.patch.object(util, "is_FreeBSD", return_value=True) + ) def _mfind_devs_with_freebsd( - criteria=None, oformat='device', - tag=None, no_cache=False, path=None): + criteria=None, + oformat="device", + tag=None, + no_cache=False, + path=None, + ): if not criteria: return ["/dev/msdosfs/foo", "/dev/iso9660/foo"] if criteria.startswith("LABEL="): @@ -304,17 +351,19 @@ class TestNoCloudDataSource(CiTestCase): self.mocks.enter_context( mock.patch.object( - util, 'find_devs_with_freebsd', - side_effect=_mfind_devs_with_freebsd)) + util, + "find_devs_with_freebsd", + side_effect=_mfind_devs_with_freebsd, + ) + ) dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - ret = dsrc._get_devices('foo') - self.assertEqual(['/dev/msdosfs/foo', '/dev/iso9660/foo'], ret) + ret = dsrc._get_devices("foo") + self.assertEqual(["/dev/msdosfs/foo", "/dev/iso9660/foo"], ret) fake_blkid.assert_not_called() class TestParseCommandLineData(CiTestCase): - def test_parse_cmdline_data_valid(self): ds_id = "ds=nocloud" pairs = ( @@ -322,18 +371,21 @@ class TestParseCommandLineData(CiTestCase): ("%(ds_id)s; root=/dev/foo", {}), ("%(ds_id)s", {}), ("%(ds_id)s;", {}), - ("%(ds_id)s;s=SEED", {'seedfrom': 'SEED'}), - ("%(ds_id)s;seedfrom=SEED;local-hostname=xhost", - {'seedfrom': 'SEED', 'local-hostname': 'xhost'}), - ("%(ds_id)s;h=xhost", - {'local-hostname': 'xhost'}), - ("%(ds_id)s;h=xhost;i=IID", - {'local-hostname': 'xhost', 'instance-id': 'IID'}), + ("%(ds_id)s;s=SEED", {"seedfrom": "SEED"}), + ( + "%(ds_id)s;seedfrom=SEED;local-hostname=xhost", + {"seedfrom": "SEED", "local-hostname": "xhost"}, + ), + ("%(ds_id)s;h=xhost", {"local-hostname": "xhost"}), + ( + "%(ds_id)s;h=xhost;i=IID", + {"local-hostname": "xhost", "instance-id": "IID"}, + ), ) for (fmt, expected) in pairs: fill = {} - cmdline = fmt % {'ds_id': ds_id} + cmdline = fmt % {"ds_id": ds_id} ret = parse_cmdline_data(ds_id=ds_id, fill=fill, cmdline=cmdline) self.assertEqual(expected, fill) self.assertTrue(ret) @@ -358,36 +410,44 @@ class TestParseCommandLineData(CiTestCase): class TestMaybeRemoveToplevelNetwork(CiTestCase): """test _maybe_remove_top_network function.""" - basecfg = [{'type': 'physical', 'name': 'interface0', - 'subnets': [{'type': 'dhcp'}]}] + + basecfg = [ + { + "type": "physical", + "name": "interface0", + "subnets": [{"type": "dhcp"}], + } + ] def test_should_remove_safely(self): - mcfg = {'config': self.basecfg, 'version': 1} - self.assertEqual(mcfg, _maybe_remove_top_network({'network': mcfg})) + mcfg = {"config": self.basecfg, "version": 1} + self.assertEqual(mcfg, _maybe_remove_top_network({"network": mcfg})) def test_no_remove_if_other_keys(self): """should not shift if other keys at top level.""" - mcfg = {'network': {'config': self.basecfg, 'version': 1}, - 'unknown_keyname': 'keyval'} + mcfg = { + "network": {"config": self.basecfg, "version": 1}, + "unknown_keyname": "keyval", + } self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) def test_no_remove_if_non_dict(self): """should not shift if not a 
dict.""" - mcfg = {'network': '"content here'} + mcfg = {"network": '"content here'} self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) def test_no_remove_if_missing_config_or_version(self): """should not shift unless network entry has config and version.""" - mcfg = {'network': {'config': self.basecfg}} + mcfg = {"network": {"config": self.basecfg}} self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) - mcfg = {'network': {'version': 1}} + mcfg = {"network": {"version": 1}} self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) def test_remove_with_config_disabled(self): """network/config=disabled should be shifted.""" - mcfg = {'config': 'disabled'} - self.assertEqual(mcfg, _maybe_remove_top_network({'network': mcfg})) + mcfg = {"config": "disabled"} + self.assertEqual(mcfg, _maybe_remove_top_network({"network": mcfg})) # vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_opennebula.py b/tests/unittests/sources/test_opennebula.py index e5963f5a..e05c4749 100644 --- a/tests/unittests/sources/test_opennebula.py +++ b/tests/unittests/sources/test_opennebula.py @@ -1,62 +1,61 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit import helpers -from cloudinit.sources import DataSourceOpenNebula as ds -from cloudinit import util -from tests.unittests.helpers import mock, populate_dir, CiTestCase - import os import pwd import unittest import pytest +from cloudinit import helpers, util +from cloudinit.sources import DataSourceOpenNebula as ds +from tests.unittests.helpers import CiTestCase, mock, populate_dir TEST_VARS = { - 'VAR1': 'single', - 'VAR2': 'double word', - 'VAR3': 'multi\nline\n', - 'VAR4': "'single'", - 'VAR5': "'double word'", - 'VAR6': "'multi\nline\n'", - 'VAR7': 'single\\t', - 'VAR8': 'double\\tword', - 'VAR9': 'multi\\t\nline\n', - 'VAR10': '\\', # expect '\' - 'VAR11': '\'', # expect ' - 'VAR12': '$', # expect $ + "VAR1": "single", + "VAR2": "double word", + "VAR3": "multi\nline\n", + "VAR4": "'single'", + "VAR5": "'double word'", + "VAR6": "'multi\nline\n'", + "VAR7": "single\\t", + "VAR8": "double\\tword", + "VAR9": "multi\\t\nline\n", + "VAR10": "\\", # expect '\' + "VAR11": "'", # expect ' + "VAR12": "$", # expect $ } -INVALID_CONTEXT = ';' -USER_DATA = '#cloud-config\napt_upgrade: true' -SSH_KEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i' -HOSTNAME = 'foo.example.com' -PUBLIC_IP = '10.0.0.3' -MACADDR = '02:00:0a:12:01:01' -IP_BY_MACADDR = '10.18.1.1' -IP4_PREFIX = '24' -IP6_GLOBAL = '2001:db8:1:0:400:c0ff:fea8:1ba' -IP6_ULA = 'fd01:dead:beaf:0:400:c0ff:fea8:1ba' -IP6_GW = '2001:db8:1::ffff' -IP6_PREFIX = '48' +INVALID_CONTEXT = ";" +USER_DATA = "#cloud-config\napt_upgrade: true" +SSH_KEY = "ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i" +HOSTNAME = "foo.example.com" +PUBLIC_IP = "10.0.0.3" +MACADDR = "02:00:0a:12:01:01" +IP_BY_MACADDR = "10.18.1.1" +IP4_PREFIX = "24" +IP6_GLOBAL = "2001:db8:1:0:400:c0ff:fea8:1ba" +IP6_ULA = "fd01:dead:beaf:0:400:c0ff:fea8:1ba" +IP6_GW = "2001:db8:1::ffff" +IP6_PREFIX = "48" DS_PATH = "cloudinit.sources.DataSourceOpenNebula" class TestOpenNebulaDataSource(CiTestCase): parsed_user = None - allowed_subp = ['bash'] + allowed_subp = ["bash"] def setUp(self): super(TestOpenNebulaDataSource, self).setUp() self.tmp = self.tmp_dir() self.paths = helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp}) + {"cloud_dir": self.tmp, "run_dir": self.tmp} + ) # defaults for few tests self.ds = ds.DataSourceOpenNebula self.seed_dir = 
os.path.join(self.paths.seed_dir, "opennebula") - self.sys_cfg = {'datasource': {'OpenNebula': {'dsmode': 'local'}}} + self.sys_cfg = {"datasource": {"OpenNebula": {"dsmode": "local"}}} # we don't want 'sudo' called in tests. so we patch switch_user_cmd def my_switch_user_cmd(user): @@ -86,7 +85,7 @@ class TestOpenNebulaDataSource(CiTestCase): try: # dont' try to lookup for CDs util.find_devs_with = lambda n: [] - populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT}) + populate_dir(self.seed_dir, {"context.sh": INVALID_CONTEXT}) dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths) self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data) finally: @@ -97,18 +96,19 @@ class TestOpenNebulaDataSource(CiTestCase): try: # generate non-existing system user name sys_cfg = self.sys_cfg - invalid_user = 'invalid' - while not sys_cfg['datasource']['OpenNebula'].get('parseuser'): + invalid_user = "invalid" + while not sys_cfg["datasource"]["OpenNebula"].get("parseuser"): try: pwd.getpwnam(invalid_user) - invalid_user += 'X' + invalid_user += "X" except KeyError: - sys_cfg['datasource']['OpenNebula']['parseuser'] = \ - invalid_user + sys_cfg["datasource"]["OpenNebula"][ + "parseuser" + ] = invalid_user # dont' try to lookup for CDs util.find_devs_with = lambda n: [] - populate_context_dir(self.seed_dir, {'KEY1': 'val1'}) + populate_context_dir(self.seed_dir, {"KEY1": "val1"}) dsrc = self.ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data) finally: @@ -119,227 +119,265 @@ class TestOpenNebulaDataSource(CiTestCase): try: # dont' try to lookup for CDs util.find_devs_with = lambda n: [] - populate_context_dir(self.seed_dir, {'KEY1': 'val1'}) + populate_context_dir(self.seed_dir, {"KEY1": "val1"}) dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() self.assertTrue(ret) finally: util.find_devs_with = orig_find_devs_with - self.assertEqual('opennebula', dsrc.cloud_name) - self.assertEqual('opennebula', dsrc.platform_type) + self.assertEqual("opennebula", dsrc.cloud_name) + self.assertEqual("opennebula", dsrc.platform_type) self.assertEqual( - 'seed-dir (%s/seed/opennebula)' % self.tmp, dsrc.subplatform) + "seed-dir (%s/seed/opennebula)" % self.tmp, dsrc.subplatform + ) def test_seed_dir_non_contextdisk(self): - self.assertRaises(ds.NonContextDiskDir, ds.read_context_disk_dir, - self.seed_dir, mock.Mock()) + self.assertRaises( + ds.NonContextDiskDir, + ds.read_context_disk_dir, + self.seed_dir, + mock.Mock(), + ) def test_seed_dir_empty1_context(self): - populate_dir(self.seed_dir, {'context.sh': ''}) + populate_dir(self.seed_dir, {"context.sh": ""}) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertIsNone(results['userdata']) - self.assertEqual(results['metadata'], {}) + self.assertIsNone(results["userdata"]) + self.assertEqual(results["metadata"], {}) def test_seed_dir_empty2_context(self): populate_context_dir(self.seed_dir, {}) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertIsNone(results['userdata']) - self.assertEqual(results['metadata'], {}) + self.assertIsNone(results["userdata"]) + self.assertEqual(results["metadata"], {}) def test_seed_dir_broken_context(self): - populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT}) + populate_dir(self.seed_dir, {"context.sh": INVALID_CONTEXT}) - self.assertRaises(ds.BrokenContextDiskDir, - ds.read_context_disk_dir, - self.seed_dir, mock.Mock()) + self.assertRaises( + 
ds.BrokenContextDiskDir, + ds.read_context_disk_dir, + self.seed_dir, + mock.Mock(), + ) def test_context_parser(self): populate_context_dir(self.seed_dir, TEST_VARS) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertTrue('metadata' in results) - self.assertEqual(TEST_VARS, results['metadata']) + self.assertTrue("metadata" in results) + self.assertEqual(TEST_VARS, results["metadata"]) def test_ssh_key(self): - public_keys = ['first key', 'second key'] + public_keys = ["first key", "second key"] for c in range(4): - for k in ('SSH_KEY', 'SSH_PUBLIC_KEY'): + for k in ("SSH_KEY", "SSH_PUBLIC_KEY"): my_d = os.path.join(self.tmp, "%s-%i" % (k, c)) - populate_context_dir(my_d, {k: '\n'.join(public_keys)}) + populate_context_dir(my_d, {k: "\n".join(public_keys)}) results = ds.read_context_disk_dir(my_d, mock.Mock()) - self.assertTrue('metadata' in results) - self.assertTrue('public-keys' in results['metadata']) - self.assertEqual(public_keys, - results['metadata']['public-keys']) + self.assertTrue("metadata" in results) + self.assertTrue("public-keys" in results["metadata"]) + self.assertEqual( + public_keys, results["metadata"]["public-keys"] + ) public_keys.append(SSH_KEY % (c + 1,)) def test_user_data_plain(self): - for k in ('USER_DATA', 'USERDATA'): + for k in ("USER_DATA", "USERDATA"): my_d = os.path.join(self.tmp, k) - populate_context_dir(my_d, {k: USER_DATA, - 'USERDATA_ENCODING': ''}) + populate_context_dir(my_d, {k: USER_DATA, "USERDATA_ENCODING": ""}) results = ds.read_context_disk_dir(my_d, mock.Mock()) - self.assertTrue('userdata' in results) - self.assertEqual(USER_DATA, results['userdata']) + self.assertTrue("userdata" in results) + self.assertEqual(USER_DATA, results["userdata"]) def test_user_data_encoding_required_for_decode(self): b64userdata = util.b64e(USER_DATA) - for k in ('USER_DATA', 'USERDATA'): + for k in ("USER_DATA", "USERDATA"): my_d = os.path.join(self.tmp, k) populate_context_dir(my_d, {k: b64userdata}) results = ds.read_context_disk_dir(my_d, mock.Mock()) - self.assertTrue('userdata' in results) - self.assertEqual(b64userdata, results['userdata']) + self.assertTrue("userdata" in results) + self.assertEqual(b64userdata, results["userdata"]) def test_user_data_base64_encoding(self): - for k in ('USER_DATA', 'USERDATA'): + for k in ("USER_DATA", "USERDATA"): my_d = os.path.join(self.tmp, k) - populate_context_dir(my_d, {k: util.b64e(USER_DATA), - 'USERDATA_ENCODING': 'base64'}) + populate_context_dir( + my_d, {k: util.b64e(USER_DATA), "USERDATA_ENCODING": "base64"} + ) results = ds.read_context_disk_dir(my_d, mock.Mock()) - self.assertTrue('userdata' in results) - self.assertEqual(USER_DATA, results['userdata']) + self.assertTrue("userdata" in results) + self.assertEqual(USER_DATA, results["userdata"]) @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_hostname(self, m_get_phys_by_mac): - for dev in ('eth0', 'ens3'): + for dev in ("eth0", "ens3"): m_get_phys_by_mac.return_value = {MACADDR: dev} - for k in ('SET_HOSTNAME', 'HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', - 'ETH0_IP'): + for k in ( + "SET_HOSTNAME", + "HOSTNAME", + "PUBLIC_IP", + "IP_PUBLIC", + "ETH0_IP", + ): my_d = os.path.join(self.tmp, k) populate_context_dir(my_d, {k: PUBLIC_IP}) results = ds.read_context_disk_dir(my_d, mock.Mock()) - self.assertTrue('metadata' in results) - self.assertTrue('local-hostname' in results['metadata']) + self.assertTrue("metadata" in results) + self.assertTrue("local-hostname" in results["metadata"]) self.assertEqual( - PUBLIC_IP, 
results['metadata']['local-hostname']) + PUBLIC_IP, results["metadata"]["local-hostname"] + ) @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_network_interfaces(self, m_get_phys_by_mac): - for dev in ('eth0', 'ens3'): + for dev in ("eth0", "ens3"): m_get_phys_by_mac.return_value = {MACADDR: dev} # without ETH0_MAC # for Older OpenNebula? - populate_context_dir(self.seed_dir, {'ETH0_IP': IP_BY_MACADDR}) + populate_context_dir(self.seed_dir, {"ETH0_IP": IP_BY_MACADDR}) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertTrue('network-interfaces' in results) + self.assertTrue("network-interfaces" in results) self.assertTrue( - IP_BY_MACADDR + '/' + IP4_PREFIX in - results['network-interfaces']['ethernets'][dev]['addresses']) + IP_BY_MACADDR + "/" + IP4_PREFIX + in results["network-interfaces"]["ethernets"][dev]["addresses"] + ) # ETH0_IP and ETH0_MAC populate_context_dir( - self.seed_dir, {'ETH0_IP': IP_BY_MACADDR, 'ETH0_MAC': MACADDR}) + self.seed_dir, {"ETH0_IP": IP_BY_MACADDR, "ETH0_MAC": MACADDR} + ) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertTrue('network-interfaces' in results) + self.assertTrue("network-interfaces" in results) self.assertTrue( - IP_BY_MACADDR + '/' + IP4_PREFIX in - results['network-interfaces']['ethernets'][dev]['addresses']) + IP_BY_MACADDR + "/" + IP4_PREFIX + in results["network-interfaces"]["ethernets"][dev]["addresses"] + ) # ETH0_IP with empty string and ETH0_MAC # in the case of using Virtual Network contains # "AR = [ TYPE = ETHER ]" populate_context_dir( - self.seed_dir, {'ETH0_IP': '', 'ETH0_MAC': MACADDR}) + self.seed_dir, {"ETH0_IP": "", "ETH0_MAC": MACADDR} + ) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertTrue('network-interfaces' in results) + self.assertTrue("network-interfaces" in results) self.assertTrue( - IP_BY_MACADDR + '/' + IP4_PREFIX in - results['network-interfaces']['ethernets'][dev]['addresses']) + IP_BY_MACADDR + "/" + IP4_PREFIX + in results["network-interfaces"]["ethernets"][dev]["addresses"] + ) # ETH0_MASK populate_context_dir( - self.seed_dir, { - 'ETH0_IP': IP_BY_MACADDR, - 'ETH0_MAC': MACADDR, - 'ETH0_MASK': '255.255.0.0' - }) + self.seed_dir, + { + "ETH0_IP": IP_BY_MACADDR, + "ETH0_MAC": MACADDR, + "ETH0_MASK": "255.255.0.0", + }, + ) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertTrue('network-interfaces' in results) + self.assertTrue("network-interfaces" in results) self.assertTrue( - IP_BY_MACADDR + '/16' in - results['network-interfaces']['ethernets'][dev]['addresses']) + IP_BY_MACADDR + "/16" + in results["network-interfaces"]["ethernets"][dev]["addresses"] + ) # ETH0_MASK with empty string populate_context_dir( - self.seed_dir, { - 'ETH0_IP': IP_BY_MACADDR, - 'ETH0_MAC': MACADDR, - 'ETH0_MASK': '' - }) + self.seed_dir, + { + "ETH0_IP": IP_BY_MACADDR, + "ETH0_MAC": MACADDR, + "ETH0_MASK": "", + }, + ) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertTrue('network-interfaces' in results) + self.assertTrue("network-interfaces" in results) self.assertTrue( - IP_BY_MACADDR + '/' + IP4_PREFIX in - results['network-interfaces']['ethernets'][dev]['addresses']) + IP_BY_MACADDR + "/" + IP4_PREFIX + in results["network-interfaces"]["ethernets"][dev]["addresses"] + ) # ETH0_IP6 populate_context_dir( - self.seed_dir, { - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_MAC': MACADDR, - }) + self.seed_dir, + { + "ETH0_IP6": IP6_GLOBAL, + "ETH0_MAC": MACADDR, + }, + ) results = 
ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertTrue('network-interfaces' in results) + self.assertTrue("network-interfaces" in results) self.assertTrue( - IP6_GLOBAL + '/64' in - results['network-interfaces']['ethernets'][dev]['addresses']) + IP6_GLOBAL + "/64" + in results["network-interfaces"]["ethernets"][dev]["addresses"] + ) # ETH0_IP6_ULA populate_context_dir( - self.seed_dir, { - 'ETH0_IP6_ULA': IP6_ULA, - 'ETH0_MAC': MACADDR, - }) + self.seed_dir, + { + "ETH0_IP6_ULA": IP6_ULA, + "ETH0_MAC": MACADDR, + }, + ) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertTrue('network-interfaces' in results) + self.assertTrue("network-interfaces" in results) self.assertTrue( - IP6_ULA + '/64' in - results['network-interfaces']['ethernets'][dev]['addresses']) + IP6_ULA + "/64" + in results["network-interfaces"]["ethernets"][dev]["addresses"] + ) # ETH0_IP6 and ETH0_IP6_PREFIX_LENGTH populate_context_dir( - self.seed_dir, { - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX, - 'ETH0_MAC': MACADDR, - }) + self.seed_dir, + { + "ETH0_IP6": IP6_GLOBAL, + "ETH0_IP6_PREFIX_LENGTH": IP6_PREFIX, + "ETH0_MAC": MACADDR, + }, + ) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertTrue('network-interfaces' in results) + self.assertTrue("network-interfaces" in results) self.assertTrue( - IP6_GLOBAL + '/' + IP6_PREFIX in - results['network-interfaces']['ethernets'][dev]['addresses']) + IP6_GLOBAL + "/" + IP6_PREFIX + in results["network-interfaces"]["ethernets"][dev]["addresses"] + ) # ETH0_IP6 and ETH0_IP6_PREFIX_LENGTH with empty string populate_context_dir( - self.seed_dir, { - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_IP6_PREFIX_LENGTH': '', - 'ETH0_MAC': MACADDR, - }) + self.seed_dir, + { + "ETH0_IP6": IP6_GLOBAL, + "ETH0_IP6_PREFIX_LENGTH": "", + "ETH0_MAC": MACADDR, + }, + ) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertTrue('network-interfaces' in results) + self.assertTrue("network-interfaces" in results) self.assertTrue( - IP6_GLOBAL + '/64' in - results['network-interfaces']['ethernets'][dev]['addresses']) + IP6_GLOBAL + "/64" + in results["network-interfaces"]["ethernets"][dev]["addresses"] + ) def test_find_candidates(self): def my_devs_with(criteria): @@ -352,25 +390,28 @@ class TestOpenNebulaDataSource(CiTestCase): orig_find_devs_with = util.find_devs_with try: util.find_devs_with = my_devs_with - self.assertEqual(["/dev/sdb", "/dev/sr0", "/dev/vdb"], - ds.find_candidate_devs()) + self.assertEqual( + ["/dev/sdb", "/dev/sr0", "/dev/vdb"], ds.find_candidate_devs() + ) finally: util.find_devs_with = orig_find_devs_with -@mock.patch(DS_PATH + '.net.get_interfaces_by_mac', mock.Mock(return_value={})) +@mock.patch(DS_PATH + ".net.get_interfaces_by_mac", mock.Mock(return_value={})) class TestOpenNebulaNetwork(unittest.TestCase): - system_nics = ('eth0', 'ens3') + system_nics = ("eth0", "ens3") def test_context_devname(self): """Verify context_devname correctly returns mac and name.""" context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH1_MAC': '02:00:0a:12:0f:0f', } + "ETH0_MAC": "02:00:0a:12:01:01", + "ETH1_MAC": "02:00:0a:12:0f:0f", + } expected = { - '02:00:0a:12:01:01': 'ETH0', - '02:00:0a:12:0f:0f': 'ETH1', } + "02:00:0a:12:01:01": "ETH0", + "02:00:0a:12:0f:0f": "ETH1", + } net = ds.OpenNebulaNetwork(context, mock.Mock()) self.assertEqual(expected, net.context_devname) @@ -380,28 +421,30 @@ class TestOpenNebulaNetwork(unittest.TestCase): and search domains. 
""" context = { - 'DNS': '1.2.3.8', - 'ETH0_DNS': '1.2.3.6 1.2.3.7', - 'ETH0_SEARCH_DOMAIN': 'example.com example.org', } + "DNS": "1.2.3.8", + "ETH0_DNS": "1.2.3.6 1.2.3.7", + "ETH0_SEARCH_DOMAIN": "example.com example.org", + } expected = { - 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], - 'search': ['example.com', 'example.org']} + "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"], + "search": ["example.com", "example.org"], + } net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_nameservers('eth0') + val = net.get_nameservers("eth0") self.assertEqual(expected, val) def test_get_mtu(self): """Verify get_mtu('device') correctly returns MTU size.""" - context = {'ETH0_MTU': '1280'} + context = {"ETH0_MTU": "1280"} net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_mtu('eth0') - self.assertEqual('1280', val) + val = net.get_mtu("eth0") + self.assertEqual("1280", val) def test_get_ip(self): """Verify get_ip('device') correctly returns IPv4 address.""" - context = {'ETH0_IP': PUBLIC_IP} + context = {"ETH0_IP": PUBLIC_IP} net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_ip('eth0', MACADDR) + val = net.get_ip("eth0", MACADDR) self.assertEqual(PUBLIC_IP, val) def test_get_ip_emptystring(self): @@ -410,9 +453,9 @@ class TestOpenNebulaNetwork(unittest.TestCase): It returns IP address created by MAC address if ETH0_IP has empty string. """ - context = {'ETH0_IP': ''} + context = {"ETH0_IP": ""} net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_ip('eth0', MACADDR) + val = net.get_ip("eth0", MACADDR) self.assertEqual(IP_BY_MACADDR, val) def test_get_ip6(self): @@ -421,11 +464,12 @@ class TestOpenNebulaNetwork(unittest.TestCase): In this case, IPv6 address is Given by ETH0_IP6. """ context = { - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_IP6_ULA': '', } + "ETH0_IP6": IP6_GLOBAL, + "ETH0_IP6_ULA": "", + } expected = [IP6_GLOBAL] net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_ip6('eth0') + val = net.get_ip6("eth0") self.assertEqual(expected, val) def test_get_ip6_ula(self): @@ -434,11 +478,12 @@ class TestOpenNebulaNetwork(unittest.TestCase): In this case, IPv6 address is Given by ETH0_IP6_ULA. """ context = { - 'ETH0_IP6': '', - 'ETH0_IP6_ULA': IP6_ULA, } + "ETH0_IP6": "", + "ETH0_IP6_ULA": IP6_ULA, + } expected = [IP6_ULA] net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_ip6('eth0') + val = net.get_ip6("eth0") self.assertEqual(expected, val) def test_get_ip6_dual(self): @@ -447,20 +492,21 @@ class TestOpenNebulaNetwork(unittest.TestCase): In this case, IPv6 addresses are Given by ETH0_IP6 and ETH0_IP6_ULA. """ context = { - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_IP6_ULA': IP6_ULA, } + "ETH0_IP6": IP6_GLOBAL, + "ETH0_IP6_ULA": IP6_ULA, + } expected = [IP6_GLOBAL, IP6_ULA] net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_ip6('eth0') + val = net.get_ip6("eth0") self.assertEqual(expected, val) def test_get_ip6_prefix(self): """ Verify get_ip6_prefix('device') correctly returns IPv6 prefix. """ - context = {'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX} + context = {"ETH0_IP6_PREFIX_LENGTH": IP6_PREFIX} net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_ip6_prefix('eth0') + val = net.get_ip6_prefix("eth0") self.assertEqual(IP6_PREFIX, val) def test_get_ip6_prefix_emptystring(self): @@ -469,59 +515,59 @@ class TestOpenNebulaNetwork(unittest.TestCase): It returns default value '64' if ETH0_IP6_PREFIX_LENGTH has empty string. 
""" - context = {'ETH0_IP6_PREFIX_LENGTH': ''} + context = {"ETH0_IP6_PREFIX_LENGTH": ""} net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_ip6_prefix('eth0') - self.assertEqual('64', val) + val = net.get_ip6_prefix("eth0") + self.assertEqual("64", val) def test_get_gateway(self): """ Verify get_gateway('device') correctly returns IPv4 default gateway address. """ - context = {'ETH0_GATEWAY': '1.2.3.5'} + context = {"ETH0_GATEWAY": "1.2.3.5"} net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_gateway('eth0') - self.assertEqual('1.2.3.5', val) + val = net.get_gateway("eth0") + self.assertEqual("1.2.3.5", val) def test_get_gateway6(self): """ Verify get_gateway6('device') correctly returns IPv6 default gateway address. """ - for k in ('GATEWAY6', 'IP6_GATEWAY'): - context = {'ETH0_' + k: IP6_GW} + for k in ("GATEWAY6", "IP6_GATEWAY"): + context = {"ETH0_" + k: IP6_GW} net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_gateway6('eth0') + val = net.get_gateway6("eth0") self.assertEqual(IP6_GW, val) def test_get_mask(self): """ Verify get_mask('device') correctly returns IPv4 subnet mask. """ - context = {'ETH0_MASK': '255.255.0.0'} + context = {"ETH0_MASK": "255.255.0.0"} net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_mask('eth0') - self.assertEqual('255.255.0.0', val) + val = net.get_mask("eth0") + self.assertEqual("255.255.0.0", val) def test_get_mask_emptystring(self): """ Verify get_mask('device') correctly returns IPv4 subnet mask. It returns default value '255.255.255.0' if ETH0_MASK has empty string. """ - context = {'ETH0_MASK': ''} + context = {"ETH0_MASK": ""} net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_mask('eth0') - self.assertEqual('255.255.255.0', val) + val = net.get_mask("eth0") + self.assertEqual("255.255.255.0", val) def test_get_network(self): """ Verify get_network('device') correctly returns IPv4 network address. """ - context = {'ETH0_NETWORK': '1.2.3.0'} + context = {"ETH0_NETWORK": "1.2.3.0"} net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_network('eth0', MACADDR) - self.assertEqual('1.2.3.0', val) + val = net.get_network("eth0", MACADDR) + self.assertEqual("1.2.3.0", val) def test_get_network_emptystring(self): """ @@ -529,48 +575,48 @@ class TestOpenNebulaNetwork(unittest.TestCase): It returns network address created by MAC address if ETH0_NETWORK has empty string. """ - context = {'ETH0_NETWORK': ''} + context = {"ETH0_NETWORK": ""} net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_network('eth0', MACADDR) - self.assertEqual('10.18.1.0', val) + val = net.get_network("eth0", MACADDR) + self.assertEqual("10.18.1.0", val) def test_get_field(self): """ Verify get_field('device', 'name') returns *context* value. """ - context = {'ETH9_DUMMY': 'DUMMY_VALUE'} + context = {"ETH9_DUMMY": "DUMMY_VALUE"} net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_field('eth9', 'dummy') - self.assertEqual('DUMMY_VALUE', val) + val = net.get_field("eth9", "dummy") + self.assertEqual("DUMMY_VALUE", val) def test_get_field_withdefaultvalue(self): """ Verify get_field('device', 'name', 'default value') returns *context* value. 
""" - context = {'ETH9_DUMMY': 'DUMMY_VALUE'} + context = {"ETH9_DUMMY": "DUMMY_VALUE"} net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE') - self.assertEqual('DUMMY_VALUE', val) + val = net.get_field("eth9", "dummy", "DEFAULT_VALUE") + self.assertEqual("DUMMY_VALUE", val) def test_get_field_withdefaultvalue_emptycontext(self): """ Verify get_field('device', 'name', 'default value') returns *default* value if context value is empty string. """ - context = {'ETH9_DUMMY': ''} + context = {"ETH9_DUMMY": ""} net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE') - self.assertEqual('DEFAULT_VALUE', val) + val = net.get_field("eth9", "dummy", "DEFAULT_VALUE") + self.assertEqual("DEFAULT_VALUE", val) def test_get_field_emptycontext(self): """ Verify get_field('device', 'name') returns None if context value is empty string. """ - context = {'ETH9_DUMMY': ''} + context = {"ETH9_DUMMY": ""} net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_field('eth9', 'dummy') + val = net.get_field("eth9", "dummy") self.assertEqual(None, val) def test_get_field_nonecontext(self): @@ -578,9 +624,9 @@ class TestOpenNebulaNetwork(unittest.TestCase): Verify get_field('device', 'name') returns None if context value is None. """ - context = {'ETH9_DUMMY': None} + context = {"ETH9_DUMMY": None} net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_field('eth9', 'dummy') + val = net.get_field("eth9", "dummy") self.assertEqual(None, val) @mock.patch(DS_PATH + ".get_physical_nics_by_mac") @@ -589,31 +635,39 @@ class TestOpenNebulaNetwork(unittest.TestCase): self.maxDiff = None # empty ETH0_GATEWAY context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_GATEWAY': '', } + "ETH0_MAC": "02:00:0a:12:01:01", + "ETH0_GATEWAY": "", + } for nic in self.system_nics: expected = { - 'version': 2, - 'ethernets': { + "version": 2, + "ethernets": { nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + "match": {"macaddress": MACADDR}, + "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX], + } + }, + } m_get_phys_by_mac.return_value = {MACADDR: nic} net = ds.OpenNebulaNetwork(context, mock.Mock()) self.assertEqual(net.gen_conf(), expected) # set ETH0_GATEWAY context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_GATEWAY': '1.2.3.5', } + "ETH0_MAC": "02:00:0a:12:01:01", + "ETH0_GATEWAY": "1.2.3.5", + } for nic in self.system_nics: expected = { - 'version': 2, - 'ethernets': { + "version": 2, + "ethernets": { nic: { - 'gateway4': '1.2.3.5', - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + "gateway4": "1.2.3.5", + "match": {"macaddress": MACADDR}, + "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX], + } + }, + } m_get_phys_by_mac.return_value = {MACADDR: nic} net = ds.OpenNebulaNetwork(context, mock.Mock()) self.assertEqual(net.gen_conf(), expected) @@ -624,31 +678,39 @@ class TestOpenNebulaNetwork(unittest.TestCase): self.maxDiff = None # empty ETH0_GATEWAY6 context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_GATEWAY6': '', } + "ETH0_MAC": "02:00:0a:12:01:01", + "ETH0_GATEWAY6": "", + } for nic in self.system_nics: expected = { - 'version': 2, - 'ethernets': { + "version": 2, + "ethernets": { nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + "match": {"macaddress": MACADDR}, + "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX], + } + }, + } m_get_phys_by_mac.return_value = {MACADDR: nic} 
net = ds.OpenNebulaNetwork(context, mock.Mock()) self.assertEqual(net.gen_conf(), expected) # set ETH0_GATEWAY6 context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_GATEWAY6': IP6_GW, } + "ETH0_MAC": "02:00:0a:12:01:01", + "ETH0_GATEWAY6": IP6_GW, + } for nic in self.system_nics: expected = { - 'version': 2, - 'ethernets': { + "version": 2, + "ethernets": { nic: { - 'gateway6': IP6_GW, - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + "gateway6": IP6_GW, + "match": {"macaddress": MACADDR}, + "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX], + } + }, + } m_get_phys_by_mac.return_value = {MACADDR: nic} net = ds.OpenNebulaNetwork(context, mock.Mock()) self.assertEqual(net.gen_conf(), expected) @@ -659,37 +721,46 @@ class TestOpenNebulaNetwork(unittest.TestCase): self.maxDiff = None # empty ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_IP6': '', - 'ETH0_IP6_ULA': '', - 'ETH0_IP6_PREFIX_LENGTH': '', } + "ETH0_MAC": "02:00:0a:12:01:01", + "ETH0_IP6": "", + "ETH0_IP6_ULA": "", + "ETH0_IP6_PREFIX_LENGTH": "", + } for nic in self.system_nics: expected = { - 'version': 2, - 'ethernets': { + "version": 2, + "ethernets": { nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + "match": {"macaddress": MACADDR}, + "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX], + } + }, + } m_get_phys_by_mac.return_value = {MACADDR: nic} net = ds.OpenNebulaNetwork(context, mock.Mock()) self.assertEqual(net.gen_conf(), expected) # set ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX, - 'ETH0_IP6_ULA': IP6_ULA, } + "ETH0_MAC": "02:00:0a:12:01:01", + "ETH0_IP6": IP6_GLOBAL, + "ETH0_IP6_PREFIX_LENGTH": IP6_PREFIX, + "ETH0_IP6_ULA": IP6_ULA, + } for nic in self.system_nics: expected = { - 'version': 2, - 'ethernets': { + "version": 2, + "ethernets": { nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [ - IP_BY_MACADDR + '/' + IP4_PREFIX, - IP6_GLOBAL + '/' + IP6_PREFIX, - IP6_ULA + '/' + IP6_PREFIX]}}} + "match": {"macaddress": MACADDR}, + "addresses": [ + IP_BY_MACADDR + "/" + IP4_PREFIX, + IP6_GLOBAL + "/" + IP6_PREFIX, + IP6_ULA + "/" + IP6_PREFIX, + ], + } + }, + } m_get_phys_by_mac.return_value = {MACADDR: nic} net = ds.OpenNebulaNetwork(context, mock.Mock()) self.assertEqual(net.gen_conf(), expected) @@ -700,37 +771,46 @@ class TestOpenNebulaNetwork(unittest.TestCase): self.maxDiff = None # empty DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'DNS': '', - 'ETH0_DNS': '', - 'ETH0_SEARCH_DOMAIN': '', } + "ETH0_MAC": "02:00:0a:12:01:01", + "DNS": "", + "ETH0_DNS": "", + "ETH0_SEARCH_DOMAIN": "", + } for nic in self.system_nics: expected = { - 'version': 2, - 'ethernets': { + "version": 2, + "ethernets": { nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + "match": {"macaddress": MACADDR}, + "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX], + } + }, + } m_get_phys_by_mac.return_value = {MACADDR: nic} net = ds.OpenNebulaNetwork(context, mock.Mock()) self.assertEqual(net.gen_conf(), expected) # set DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'DNS': '1.2.3.8', - 'ETH0_DNS': '1.2.3.6 1.2.3.7', - 'ETH0_SEARCH_DOMAIN': 'example.com example.org', } + "ETH0_MAC": "02:00:0a:12:01:01", + "DNS": "1.2.3.8", + "ETH0_DNS": "1.2.3.6 1.2.3.7", + 
"ETH0_SEARCH_DOMAIN": "example.com example.org", + } for nic in self.system_nics: expected = { - 'version': 2, - 'ethernets': { + "version": 2, + "ethernets": { nic: { - 'nameservers': { - 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], - 'search': ['example.com', 'example.org']}, - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + "nameservers": { + "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"], + "search": ["example.com", "example.org"], + }, + "match": {"macaddress": MACADDR}, + "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX], + } + }, + } m_get_phys_by_mac.return_value = {MACADDR: nic} net = ds.OpenNebulaNetwork(context, mock.Mock()) self.assertEqual(net.gen_conf(), expected) @@ -741,31 +821,39 @@ class TestOpenNebulaNetwork(unittest.TestCase): self.maxDiff = None # empty ETH0_MTU context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_MTU': '', } + "ETH0_MAC": "02:00:0a:12:01:01", + "ETH0_MTU": "", + } for nic in self.system_nics: expected = { - 'version': 2, - 'ethernets': { + "version": 2, + "ethernets": { nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + "match": {"macaddress": MACADDR}, + "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX], + } + }, + } m_get_phys_by_mac.return_value = {MACADDR: nic} net = ds.OpenNebulaNetwork(context, mock.Mock()) self.assertEqual(net.gen_conf(), expected) # set ETH0_MTU context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_MTU': '1280', } + "ETH0_MAC": "02:00:0a:12:01:01", + "ETH0_MTU": "1280", + } for nic in self.system_nics: expected = { - 'version': 2, - 'ethernets': { + "version": 2, + "ethernets": { nic: { - 'mtu': '1280', - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + "mtu": "1280", + "match": {"macaddress": MACADDR}, + "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX], + } + }, + } m_get_phys_by_mac.return_value = {MACADDR: nic} net = ds.OpenNebulaNetwork(context, mock.Mock()) self.assertEqual(net.gen_conf(), expected) @@ -776,11 +864,14 @@ class TestOpenNebulaNetwork(unittest.TestCase): m_get_phys_by_mac.return_value = {MACADDR: nic} net = ds.OpenNebulaNetwork({}, mock.Mock()) expected = { - 'version': 2, - 'ethernets': { + "version": 2, + "ethernets": { nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + "match": {"macaddress": MACADDR}, + "addresses": [IP_BY_MACADDR + "/" + IP4_PREFIX], + } + }, + } self.assertEqual(net.gen_conf(), expected) @@ -795,71 +886,82 @@ class TestOpenNebulaNetwork(unittest.TestCase): def test_eth0_override(self): self.maxDiff = None context = { - 'DNS': '1.2.3.8', - 'ETH0_DNS': '1.2.3.6 1.2.3.7', - 'ETH0_GATEWAY': '1.2.3.5', - 'ETH0_GATEWAY6': '', - 'ETH0_IP': IP_BY_MACADDR, - 'ETH0_IP6': '', - 'ETH0_IP6_PREFIX_LENGTH': '', - 'ETH0_IP6_ULA': '', - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_MASK': '255.255.0.0', - 'ETH0_MTU': '', - 'ETH0_NETWORK': '10.18.0.0', - 'ETH0_SEARCH_DOMAIN': '', + "DNS": "1.2.3.8", + "ETH0_DNS": "1.2.3.6 1.2.3.7", + "ETH0_GATEWAY": "1.2.3.5", + "ETH0_GATEWAY6": "", + "ETH0_IP": IP_BY_MACADDR, + "ETH0_IP6": "", + "ETH0_IP6_PREFIX_LENGTH": "", + "ETH0_IP6_ULA": "", + "ETH0_MAC": "02:00:0a:12:01:01", + "ETH0_MASK": "255.255.0.0", + "ETH0_MTU": "", + "ETH0_NETWORK": "10.18.0.0", + "ETH0_SEARCH_DOMAIN": "", } for nic in self.system_nics: - net = ds.OpenNebulaNetwork(context, mock.Mock(), - system_nics_by_mac={MACADDR: nic}) + net = ds.OpenNebulaNetwork( + context, mock.Mock(), system_nics_by_mac={MACADDR: nic} + ) expected 
= { - 'version': 2, - 'ethernets': { + "version": 2, + "ethernets": { nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/16'], - 'gateway4': '1.2.3.5', - 'nameservers': { - 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8']}}}} + "match": {"macaddress": MACADDR}, + "addresses": [IP_BY_MACADDR + "/16"], + "gateway4": "1.2.3.5", + "nameservers": { + "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"] + }, + } + }, + } self.assertEqual(expected, net.gen_conf()) def test_eth0_v4v6_override(self): self.maxDiff = None context = { - 'DNS': '1.2.3.8', - 'ETH0_DNS': '1.2.3.6 1.2.3.7', - 'ETH0_GATEWAY': '1.2.3.5', - 'ETH0_GATEWAY6': IP6_GW, - 'ETH0_IP': IP_BY_MACADDR, - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX, - 'ETH0_IP6_ULA': IP6_ULA, - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_MASK': '255.255.0.0', - 'ETH0_MTU': '1280', - 'ETH0_NETWORK': '10.18.0.0', - 'ETH0_SEARCH_DOMAIN': 'example.com example.org', + "DNS": "1.2.3.8", + "ETH0_DNS": "1.2.3.6 1.2.3.7", + "ETH0_GATEWAY": "1.2.3.5", + "ETH0_GATEWAY6": IP6_GW, + "ETH0_IP": IP_BY_MACADDR, + "ETH0_IP6": IP6_GLOBAL, + "ETH0_IP6_PREFIX_LENGTH": IP6_PREFIX, + "ETH0_IP6_ULA": IP6_ULA, + "ETH0_MAC": "02:00:0a:12:01:01", + "ETH0_MASK": "255.255.0.0", + "ETH0_MTU": "1280", + "ETH0_NETWORK": "10.18.0.0", + "ETH0_SEARCH_DOMAIN": "example.com example.org", } for nic in self.system_nics: - net = ds.OpenNebulaNetwork(context, mock.Mock(), - system_nics_by_mac={MACADDR: nic}) + net = ds.OpenNebulaNetwork( + context, mock.Mock(), system_nics_by_mac={MACADDR: nic} + ) expected = { - 'version': 2, - 'ethernets': { + "version": 2, + "ethernets": { nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [ - IP_BY_MACADDR + '/16', - IP6_GLOBAL + '/' + IP6_PREFIX, - IP6_ULA + '/' + IP6_PREFIX], - 'gateway4': '1.2.3.5', - 'gateway6': IP6_GW, - 'nameservers': { - 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], - 'search': ['example.com', 'example.org']}, - 'mtu': '1280'}}} + "match": {"macaddress": MACADDR}, + "addresses": [ + IP_BY_MACADDR + "/16", + IP6_GLOBAL + "/" + IP6_PREFIX, + IP6_ULA + "/" + IP6_PREFIX, + ], + "gateway4": "1.2.3.5", + "gateway6": IP6_GW, + "nameservers": { + "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"], + "search": ["example.com", "example.org"], + }, + "mtu": "1280", + } + }, + } self.assertEqual(expected, net.gen_conf()) @@ -869,62 +971,67 @@ class TestOpenNebulaNetwork(unittest.TestCase): MAC_1 = "02:00:0a:12:01:01" MAC_2 = "02:00:0a:12:01:02" context = { - 'DNS': '1.2.3.8', - 'ETH0_DNS': '1.2.3.6 1.2.3.7', - 'ETH0_GATEWAY': '1.2.3.5', - 'ETH0_GATEWAY6': IP6_GW, - 'ETH0_IP': '10.18.1.1', - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_IP6_PREFIX_LENGTH': '', - 'ETH0_IP6_ULA': IP6_ULA, - 'ETH0_MAC': MAC_2, - 'ETH0_MASK': '255.255.0.0', - 'ETH0_MTU': '1280', - 'ETH0_NETWORK': '10.18.0.0', - 'ETH0_SEARCH_DOMAIN': 'example.com', - 'ETH3_DNS': '10.3.1.2', - 'ETH3_GATEWAY': '10.3.0.1', - 'ETH3_GATEWAY6': '', - 'ETH3_IP': '10.3.1.3', - 'ETH3_IP6': '', - 'ETH3_IP6_PREFIX_LENGTH': '', - 'ETH3_IP6_ULA': '', - 'ETH3_MAC': MAC_1, - 'ETH3_MASK': '255.255.0.0', - 'ETH3_MTU': '', - 'ETH3_NETWORK': '10.3.0.0', - 'ETH3_SEARCH_DOMAIN': 'third.example.com third.example.org', + "DNS": "1.2.3.8", + "ETH0_DNS": "1.2.3.6 1.2.3.7", + "ETH0_GATEWAY": "1.2.3.5", + "ETH0_GATEWAY6": IP6_GW, + "ETH0_IP": "10.18.1.1", + "ETH0_IP6": IP6_GLOBAL, + "ETH0_IP6_PREFIX_LENGTH": "", + "ETH0_IP6_ULA": IP6_ULA, + "ETH0_MAC": MAC_2, + "ETH0_MASK": "255.255.0.0", + "ETH0_MTU": "1280", + "ETH0_NETWORK": "10.18.0.0", + "ETH0_SEARCH_DOMAIN": "example.com", + 
"ETH3_DNS": "10.3.1.2", + "ETH3_GATEWAY": "10.3.0.1", + "ETH3_GATEWAY6": "", + "ETH3_IP": "10.3.1.3", + "ETH3_IP6": "", + "ETH3_IP6_PREFIX_LENGTH": "", + "ETH3_IP6_ULA": "", + "ETH3_MAC": MAC_1, + "ETH3_MASK": "255.255.0.0", + "ETH3_MTU": "", + "ETH3_NETWORK": "10.3.0.0", + "ETH3_SEARCH_DOMAIN": "third.example.com third.example.org", } net = ds.OpenNebulaNetwork( context, mock.Mock(), - system_nics_by_mac={MAC_1: 'enp0s25', MAC_2: 'enp1s2'} + system_nics_by_mac={MAC_1: "enp0s25", MAC_2: "enp1s2"}, ) expected = { - 'version': 2, - 'ethernets': { - 'enp1s2': { - 'match': {'macaddress': MAC_2}, - 'addresses': [ - '10.18.1.1/16', - IP6_GLOBAL + '/64', - IP6_ULA + '/64'], - 'gateway4': '1.2.3.5', - 'gateway6': IP6_GW, - 'nameservers': { - 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], - 'search': ['example.com']}, - 'mtu': '1280'}, - 'enp0s25': { - 'match': {'macaddress': MAC_1}, - 'addresses': ['10.3.1.3/16'], - 'gateway4': '10.3.0.1', - 'nameservers': { - 'addresses': ['10.3.1.2', '1.2.3.8'], - 'search': [ - 'third.example.com', - 'third.example.org']}}}} + "version": 2, + "ethernets": { + "enp1s2": { + "match": {"macaddress": MAC_2}, + "addresses": [ + "10.18.1.1/16", + IP6_GLOBAL + "/64", + IP6_ULA + "/64", + ], + "gateway4": "1.2.3.5", + "gateway6": IP6_GW, + "nameservers": { + "addresses": ["1.2.3.6", "1.2.3.7", "1.2.3.8"], + "search": ["example.com"], + }, + "mtu": "1280", + }, + "enp0s25": { + "match": {"macaddress": MAC_1}, + "addresses": ["10.3.1.3/16"], + "gateway4": "10.3.0.1", + "nameservers": { + "addresses": ["10.3.1.2", "1.2.3.8"], + "search": ["third.example.com", "third.example.org"], + }, + }, + }, + } self.assertEqual(expected, net.gen_conf()) @@ -932,7 +1039,7 @@ class TestOpenNebulaNetwork(unittest.TestCase): class TestParseShellConfig: @pytest.mark.allow_subp_for("bash") def test_no_seconds(self): - cfg = '\n'.join(["foo=bar", "SECONDS=2", "xx=foo"]) + cfg = "\n".join(["foo=bar", "SECONDS=2", "xx=foo"]) # we could test 'sleep 2', but that would make the test run slower. ret = ds.parse_shell_config(cfg) assert ret == {"foo": "bar", "xx": "foo"} @@ -971,7 +1078,8 @@ class TestGetPhysicalNicsByMac: def populate_context_dir(path, variables): data = "# Context variables generated by OpenNebula\n" for k, v in variables.items(): - data += ("%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''"))) - populate_dir(path, {'context.sh': data}) + data += "%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''")) + populate_dir(path, {"context.sh": data}) + # vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_openstack.py b/tests/unittests/sources/test_openstack.py index 0d6fb04a..c111bbcd 100644 --- a/tests/unittests/sources/test_openstack.py +++ b/tests/unittests/sources/test_openstack.py @@ -5,74 +5,74 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import copy -import httpretty as hp import json import re from io import StringIO from urllib.parse import urlparse -from tests.unittests import helpers as test_helpers +import httpretty as hp -from cloudinit import helpers -from cloudinit import settings -from cloudinit.sources import BrokenMetadata, convert_vendordata, UNSET +from cloudinit import helpers, settings, util +from cloudinit.sources import UNSET, BrokenMetadata from cloudinit.sources import DataSourceOpenStack as ds +from cloudinit.sources import convert_vendordata from cloudinit.sources.helpers import openstack -from cloudinit import util +from tests.unittests import helpers as test_helpers BASE_URL = "http://169.254.169.254" -PUBKEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' +PUBKEY = "ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n" EC2_META = { - 'ami-id': 'ami-00000001', - 'ami-launch-index': '0', - 'ami-manifest-path': 'FIXME', - 'hostname': 'sm-foo-test.novalocal', - 'instance-action': 'none', - 'instance-id': 'i-00000001', - 'instance-type': 'm1.tiny', - 'local-hostname': 'sm-foo-test.novalocal', - 'local-ipv4': '0.0.0.0', - 'public-hostname': 'sm-foo-test.novalocal', - 'public-ipv4': '0.0.0.1', - 'reservation-id': 'r-iru5qm4m', + "ami-id": "ami-00000001", + "ami-launch-index": "0", + "ami-manifest-path": "FIXME", + "hostname": "sm-foo-test.novalocal", + "instance-action": "none", + "instance-id": "i-00000001", + "instance-type": "m1.tiny", + "local-hostname": "sm-foo-test.novalocal", + "local-ipv4": "0.0.0.0", + "public-hostname": "sm-foo-test.novalocal", + "public-ipv4": "0.0.0.1", + "reservation-id": "r-iru5qm4m", } -USER_DATA = b'#!/bin/sh\necho This is user data\n' +USER_DATA = b"#!/bin/sh\necho This is user data\n" VENDOR_DATA = { - 'magic': '', -} -VENDOR_DATA2 = { - 'static': {} + "magic": "", } +VENDOR_DATA2 = {"static": {}} OSTACK_META = { - 'availability_zone': 'nova', - 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'}, - {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}], - 'hostname': 'sm-foo-test.novalocal', - 'meta': {'dsmode': 'local', 'my-meta': 'my-value'}, - 'name': 'sm-foo-test', - 'public_keys': {'mykey': PUBKEY}, - 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c', + "availability_zone": "nova", + "files": [ + {"content_path": "/content/0000", "path": "/etc/foo.cfg"}, + {"content_path": "/content/0001", "path": "/etc/bar/bar.cfg"}, + ], + "hostname": "sm-foo-test.novalocal", + "meta": {"dsmode": "local", "my-meta": "my-value"}, + "name": "sm-foo-test", + "public_keys": {"mykey": PUBKEY}, + "uuid": "b0fa911b-69d4-4476-bbe2-1c92bff6535c", } -CONTENT_0 = b'This is contents of /etc/foo.cfg\n' -CONTENT_1 = b'# this is /etc/bar/bar.cfg\n' +CONTENT_0 = b"This is contents of /etc/foo.cfg\n" +CONTENT_1 = b"# this is /etc/bar/bar.cfg\n" OS_FILES = { - 'openstack/content/0000': CONTENT_0, - 'openstack/content/0001': CONTENT_1, - 'openstack/latest/meta_data.json': json.dumps(OSTACK_META), - 'openstack/latest/network_data.json': json.dumps( - {'links': [], 'networks': [], 'services': []}), - 'openstack/latest/user_data': USER_DATA, - 'openstack/latest/vendor_data.json': json.dumps(VENDOR_DATA), - 'openstack/latest/vendor_data2.json': json.dumps(VENDOR_DATA2), + "openstack/content/0000": CONTENT_0, + "openstack/content/0001": CONTENT_1, + "openstack/latest/meta_data.json": json.dumps(OSTACK_META), + "openstack/latest/network_data.json": json.dumps( + {"links": [], "networks": [], "services": []} + ), + "openstack/latest/user_data": 
USER_DATA, + "openstack/latest/vendor_data.json": json.dumps(VENDOR_DATA), + "openstack/latest/vendor_data2.json": json.dumps(VENDOR_DATA2), } EC2_FILES = { - 'latest/user-data': USER_DATA, + "latest/user-data": USER_DATA, } EC2_VERSIONS = [ - 'latest', + "latest", ] -MOCK_PATH = 'cloudinit.sources.DataSourceOpenStack.' +MOCK_PATH = "cloudinit.sources.DataSourceOpenStack." # TODO _register_uris should leverage test_ec2.register_mock_metaserver. @@ -87,7 +87,7 @@ def _register_uris(version, ec2_files, ec2_meta, os_files): path = uri.path.lstrip("/") if path in ec2_files: return (200, headers, ec2_files.get(path)) - if path == 'latest/meta-data/': + if path == "latest/meta-data/": buf = StringIO() for (k, v) in ec2_meta.items(): if isinstance(v, (list, tuple)): @@ -96,7 +96,7 @@ def _register_uris(version, ec2_files, ec2_meta, os_files): buf.write("%s" % (k)) buf.write("\n") return (200, headers, buf.getvalue()) - if path.startswith('latest/meta-data/'): + if path.startswith("latest/meta-data/"): value = None pieces = path.split("/") if path.endswith("/"): @@ -107,26 +107,29 @@ def _register_uris(version, ec2_files, ec2_meta, os_files): value = util.get_cfg_by_path(ec2_meta, pieces) if value is not None: return (200, headers, str(value)) - return (404, headers, '') + return (404, headers, "") def match_os_uri(uri, headers): path = uri.path.strip("/") - if path == 'openstack': + if path == "openstack": return (200, headers, "\n".join([openstack.OS_LATEST])) path = uri.path.lstrip("/") if path in os_files: return (200, headers, os_files.get(path)) - return (404, headers, '') + return (404, headers, "") def get_request_callback(method, uri, headers): uri = urlparse(uri) path = uri.path.lstrip("/").split("/") - if path[0] == 'openstack': + if path[0] == "openstack": return match_os_uri(uri, headers) return match_ec2_url(uri, headers) - hp.register_uri(hp.GET, re.compile(r'http://169.254.169.254/.*'), - body=get_request_callback) + hp.register_uri( + hp.GET, + re.compile(r"http://169.254.169.254/.*"), + body=get_request_callback, + ) def _read_metadata_service(): @@ -136,7 +139,7 @@ def _read_metadata_service(): class TestOpenStackDataSource(test_helpers.HttprettyTestCase): with_logs = True - VERSION = 'latest' + VERSION = "latest" def setUp(self): super(TestOpenStackDataSource, self).setUp() @@ -145,40 +148,43 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): def test_successful(self): _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) f = _read_metadata_service() - self.assertEqual(VENDOR_DATA, f.get('vendordata')) - self.assertEqual(VENDOR_DATA2, f.get('vendordata2')) - self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) - self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) - self.assertEqual(2, len(f['files'])) - self.assertEqual(USER_DATA, f.get('userdata')) - self.assertEqual(EC2_META, f.get('ec2-metadata')) - self.assertEqual(2, f.get('version')) - metadata = f['metadata'] - self.assertEqual('nova', metadata.get('availability_zone')) - self.assertEqual('sm-foo-test.novalocal', metadata.get('hostname')) - self.assertEqual('sm-foo-test.novalocal', - metadata.get('local-hostname')) - self.assertEqual('sm-foo-test', metadata.get('name')) - self.assertEqual('b0fa911b-69d4-4476-bbe2-1c92bff6535c', - metadata.get('uuid')) - self.assertEqual('b0fa911b-69d4-4476-bbe2-1c92bff6535c', - metadata.get('instance-id')) + self.assertEqual(VENDOR_DATA, f.get("vendordata")) + self.assertEqual(VENDOR_DATA2, f.get("vendordata2")) + self.assertEqual(CONTENT_0, 
f["files"]["/etc/foo.cfg"]) + self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"]) + self.assertEqual(2, len(f["files"])) + self.assertEqual(USER_DATA, f.get("userdata")) + self.assertEqual(EC2_META, f.get("ec2-metadata")) + self.assertEqual(2, f.get("version")) + metadata = f["metadata"] + self.assertEqual("nova", metadata.get("availability_zone")) + self.assertEqual("sm-foo-test.novalocal", metadata.get("hostname")) + self.assertEqual( + "sm-foo-test.novalocal", metadata.get("local-hostname") + ) + self.assertEqual("sm-foo-test", metadata.get("name")) + self.assertEqual( + "b0fa911b-69d4-4476-bbe2-1c92bff6535c", metadata.get("uuid") + ) + self.assertEqual( + "b0fa911b-69d4-4476-bbe2-1c92bff6535c", metadata.get("instance-id") + ) def test_no_ec2(self): _register_uris(self.VERSION, {}, {}, OS_FILES) f = _read_metadata_service() - self.assertEqual(VENDOR_DATA, f.get('vendordata')) - self.assertEqual(VENDOR_DATA2, f.get('vendordata2')) - self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) - self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) - self.assertEqual(USER_DATA, f.get('userdata')) - self.assertEqual({}, f.get('ec2-metadata')) - self.assertEqual(2, f.get('version')) + self.assertEqual(VENDOR_DATA, f.get("vendordata")) + self.assertEqual(VENDOR_DATA2, f.get("vendordata2")) + self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"]) + self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"]) + self.assertEqual(USER_DATA, f.get("userdata")) + self.assertEqual({}, f.get("ec2-metadata")) + self.assertEqual(2, f.get("version")) def test_bad_metadata(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): - if k.endswith('meta_data.json'): + if k.endswith("meta_data.json"): os_files.pop(k, None) _register_uris(self.VERSION, {}, {}, os_files) self.assertRaises(openstack.NonReadable, _read_metadata_service) @@ -186,9 +192,9 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): def test_bad_uuid(self): os_files = copy.deepcopy(OS_FILES) os_meta = copy.deepcopy(OSTACK_META) - os_meta.pop('uuid') + os_meta.pop("uuid") for k in list(os_files.keys()): - if k.endswith('meta_data.json'): + if k.endswith("meta_data.json"): os_files[k] = json.dumps(os_meta) _register_uris(self.VERSION, {}, {}, os_files) self.assertRaises(BrokenMetadata, _read_metadata_service) @@ -196,77 +202,78 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): def test_userdata_empty(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): - if k.endswith('user_data'): + if k.endswith("user_data"): os_files.pop(k, None) _register_uris(self.VERSION, {}, {}, os_files) f = _read_metadata_service() - self.assertEqual(VENDOR_DATA, f.get('vendordata')) - self.assertEqual(VENDOR_DATA2, f.get('vendordata2')) - self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) - self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) - self.assertFalse(f.get('userdata')) + self.assertEqual(VENDOR_DATA, f.get("vendordata")) + self.assertEqual(VENDOR_DATA2, f.get("vendordata2")) + self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"]) + self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"]) + self.assertFalse(f.get("userdata")) def test_vendordata_empty(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): - if k.endswith('vendor_data.json'): + if k.endswith("vendor_data.json"): os_files.pop(k, None) _register_uris(self.VERSION, {}, {}, os_files) f = _read_metadata_service() - self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) - 
self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) - self.assertFalse(f.get('vendordata')) + self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"]) + self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"]) + self.assertFalse(f.get("vendordata")) def test_vendordata2_empty(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): - if k.endswith('vendor_data2.json'): + if k.endswith("vendor_data2.json"): os_files.pop(k, None) _register_uris(self.VERSION, {}, {}, os_files) f = _read_metadata_service() - self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) - self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) - self.assertFalse(f.get('vendordata2')) + self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"]) + self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"]) + self.assertFalse(f.get("vendordata2")) def test_vendordata_invalid(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): - if k.endswith('vendor_data.json'): - os_files[k] = '{' # some invalid json + if k.endswith("vendor_data.json"): + os_files[k] = "{" # some invalid json _register_uris(self.VERSION, {}, {}, os_files) self.assertRaises(BrokenMetadata, _read_metadata_service) def test_vendordata2_invalid(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): - if k.endswith('vendor_data2.json'): - os_files[k] = '{' # some invalid json + if k.endswith("vendor_data2.json"): + os_files[k] = "{" # some invalid json _register_uris(self.VERSION, {}, {}, os_files) self.assertRaises(BrokenMetadata, _read_metadata_service) def test_metadata_invalid(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): - if k.endswith('meta_data.json'): - os_files[k] = '{' # some invalid json + if k.endswith("meta_data.json"): + os_files[k] = "{" # some invalid json _register_uris(self.VERSION, {}, {}, os_files) self.assertRaises(BrokenMetadata, _read_metadata_service) - @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @test_helpers.mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") def test_datasource(self, m_dhcp): _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) ds_os = ds.DataSourceOpenStack( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) + ) self.assertIsNone(ds_os.version) - mock_path = MOCK_PATH + 'detect_openstack' + mock_path = MOCK_PATH + "detect_openstack" with test_helpers.mock.patch(mock_path) as m_detect_os: m_detect_os.return_value = True found = ds_os.get_data() self.assertTrue(found) self.assertEqual(2, ds_os.version) md = dict(ds_os.metadata) - md.pop('instance-id', None) - md.pop('local-hostname', None) + md.pop("instance-id", None) + md.pop("local-hostname", None) self.assertEqual(OSTACK_META, md) self.assertEqual(EC2_META, ds_os.ec2_metadata) self.assertEqual(USER_DATA, ds_os.userdata_raw) @@ -277,29 +284,35 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): m_dhcp.assert_not_called() @hp.activate - @test_helpers.mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') - @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @test_helpers.mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network") + @test_helpers.mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") def test_local_datasource(self, m_dhcp, m_net): """OpenStackLocal calls EphemeralDHCPNetwork and gets instance data.""" _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) ds_os_local = 
ds.DataSourceOpenStackLocal( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) - ds_os_local._fallback_interface = 'eth9' # Monkey patch for dhcp - m_dhcp.return_value = [{ - 'interface': 'eth9', 'fixed-address': '192.168.2.9', - 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', - 'broadcast-address': '192.168.2.255'}] + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) + ) + ds_os_local._fallback_interface = "eth9" # Monkey patch for dhcp + m_dhcp.return_value = [ + { + "interface": "eth9", + "fixed-address": "192.168.2.9", + "routers": "192.168.2.1", + "subnet-mask": "255.255.255.0", + "broadcast-address": "192.168.2.255", + } + ] self.assertIsNone(ds_os_local.version) - mock_path = MOCK_PATH + 'detect_openstack' + mock_path = MOCK_PATH + "detect_openstack" with test_helpers.mock.patch(mock_path) as m_detect_os: m_detect_os.return_value = True found = ds_os_local.get_data() self.assertTrue(found) self.assertEqual(2, ds_os_local.version) md = dict(ds_os_local.metadata) - md.pop('instance-id', None) - md.pop('local-hostname', None) + md.pop("instance-id", None) + md.pop("local-hostname", None) self.assertEqual(OSTACK_META, md) self.assertEqual(EC2_META, ds_os_local.ec2_metadata) self.assertEqual(USER_DATA, ds_os_local.userdata_raw) @@ -307,44 +320,45 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure) self.assertEqual(VENDOR_DATA2, ds_os_local.vendordata2_pure) self.assertIsNone(ds_os_local.vendordata_raw) - m_dhcp.assert_called_with('eth9', None) + m_dhcp.assert_called_with("eth9", None) def test_bad_datasource_meta(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): - if k.endswith('meta_data.json'): - os_files[k] = '{' # some invalid json + if k.endswith("meta_data.json"): + os_files[k] = "{" # some invalid json _register_uris(self.VERSION, {}, {}, os_files) - ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, - None, - helpers.Paths({'run_dir': self.tmp})) + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) + ) self.assertIsNone(ds_os.version) - mock_path = MOCK_PATH + 'detect_openstack' + mock_path = MOCK_PATH + "detect_openstack" with test_helpers.mock.patch(mock_path) as m_detect_os: m_detect_os.return_value = True found = ds_os.get_data() self.assertFalse(found) self.assertIsNone(ds_os.version) self.assertIn( - 'InvalidMetaDataException: Broken metadata address' - ' http://169.254.169.25', - self.logs.getvalue()) + "InvalidMetaDataException: Broken metadata address" + " http://169.254.169.25", + self.logs.getvalue(), + ) def test_no_datasource(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): - if k.endswith('meta_data.json'): + if k.endswith("meta_data.json"): os_files.pop(k) _register_uris(self.VERSION, {}, {}, os_files) - ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, - None, - helpers.Paths({'run_dir': self.tmp})) + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) + ) ds_os.ds_cfg = { - 'max_wait': 0, - 'timeout': 0, + "max_wait": 0, + "timeout": 0, } self.assertIsNone(ds_os.version) - mock_path = MOCK_PATH + 'detect_openstack' + mock_path = MOCK_PATH + "detect_openstack" with test_helpers.mock.patch(mock_path) as m_detect_os: m_detect_os.return_value = True found = ds_os.get_data() @@ -353,12 +367,16 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): def 
test_network_config_disabled_by_datasource_config(self): """The network_config can be disabled from datasource config.""" - mock_path = MOCK_PATH + 'openstack.convert_net_json' + mock_path = MOCK_PATH + "openstack.convert_net_json" ds_os = ds.DataSourceOpenStack( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) - ds_os.ds_cfg = {'apply_network_config': False} - sample_json = {'links': [{'ethernet_mac_address': 'mymac'}], - 'networks': [], 'services': []} + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) + ) + ds_os.ds_cfg = {"apply_network_config": False} + sample_json = { + "links": [{"ethernet_mac_address": "mymac"}], + "networks": [], + "services": [], + } ds_os.network_json = sample_json # Ignore this content from metadata with test_helpers.mock.patch(mock_path) as m_convert_json: self.assertIsNone(ds_os.network_config) @@ -366,26 +384,32 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): def test_network_config_from_network_json(self): """The datasource gets network_config from network_data.json.""" - mock_path = MOCK_PATH + 'openstack.convert_net_json' - example_cfg = {'version': 1, 'config': []} + mock_path = MOCK_PATH + "openstack.convert_net_json" + example_cfg = {"version": 1, "config": []} ds_os = ds.DataSourceOpenStack( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) - sample_json = {'links': [{'ethernet_mac_address': 'mymac'}], - 'networks': [], 'services': []} + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) + ) + sample_json = { + "links": [{"ethernet_mac_address": "mymac"}], + "networks": [], + "services": [], + } ds_os.network_json = sample_json with test_helpers.mock.patch(mock_path) as m_convert_json: m_convert_json.return_value = example_cfg self.assertEqual(example_cfg, ds_os.network_config) self.assertIn( - 'network config provided via network_json', self.logs.getvalue()) + "network config provided via network_json", self.logs.getvalue() + ) m_convert_json.assert_called_with(sample_json, known_macs=None) def test_network_config_cached(self): """The datasource caches the network_config property.""" - mock_path = MOCK_PATH + 'openstack.convert_net_json' - example_cfg = {'version': 1, 'config': []} + mock_path = MOCK_PATH + "openstack.convert_net_json" + example_cfg = {"version": 1, "config": []} ds_os = ds.DataSourceOpenStack( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) + ) ds_os._network_config = example_cfg with test_helpers.mock.patch(mock_path) as m_convert_json: self.assertEqual(example_cfg, ds_os.network_config) @@ -394,22 +418,22 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): def test_disabled_datasource(self): os_files = copy.deepcopy(OS_FILES) os_meta = copy.deepcopy(OSTACK_META) - os_meta['meta'] = { - 'dsmode': 'disabled', + os_meta["meta"] = { + "dsmode": "disabled", } for k in list(os_files.keys()): - if k.endswith('meta_data.json'): + if k.endswith("meta_data.json"): os_files[k] = json.dumps(os_meta) _register_uris(self.VERSION, {}, {}, os_files) - ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, - None, - helpers.Paths({'run_dir': self.tmp})) + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) + ) ds_os.ds_cfg = { - 'max_wait': 0, - 'timeout': 0, + "max_wait": 0, + "timeout": 0, } self.assertIsNone(ds_os.version) - mock_path = MOCK_PATH + 'detect_openstack' + mock_path = MOCK_PATH + "detect_openstack" with 
test_helpers.mock.patch(mock_path) as m_detect_os: m_detect_os.return_value = True found = ds_os.get_data() @@ -421,30 +445,42 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): """_crawl_metadata returns current metadata and does not cache.""" _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) ds_os = ds.DataSourceOpenStack( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) + ) crawled_data = ds_os._crawl_metadata() self.assertEqual(UNSET, ds_os.ec2_metadata) self.assertIsNone(ds_os.userdata_raw) self.assertEqual(0, len(ds_os.files)) self.assertIsNone(ds_os.vendordata_raw) self.assertEqual( - ['dsmode', 'ec2-metadata', 'files', 'metadata', 'networkdata', - 'userdata', 'vendordata', 'vendordata2', 'version'], - sorted(crawled_data.keys())) - self.assertEqual('local', crawled_data['dsmode']) - self.assertEqual(EC2_META, crawled_data['ec2-metadata']) - self.assertEqual(2, len(crawled_data['files'])) - md = copy.deepcopy(crawled_data['metadata']) - md.pop('instance-id') - md.pop('local-hostname') + [ + "dsmode", + "ec2-metadata", + "files", + "metadata", + "networkdata", + "userdata", + "vendordata", + "vendordata2", + "version", + ], + sorted(crawled_data.keys()), + ) + self.assertEqual("local", crawled_data["dsmode"]) + self.assertEqual(EC2_META, crawled_data["ec2-metadata"]) + self.assertEqual(2, len(crawled_data["files"])) + md = copy.deepcopy(crawled_data["metadata"]) + md.pop("instance-id") + md.pop("local-hostname") self.assertEqual(OSTACK_META, md) self.assertEqual( - json.loads(OS_FILES['openstack/latest/network_data.json']), - crawled_data['networkdata']) - self.assertEqual(USER_DATA, crawled_data['userdata']) - self.assertEqual(VENDOR_DATA, crawled_data['vendordata']) - self.assertEqual(VENDOR_DATA2, crawled_data['vendordata2']) - self.assertEqual(2, crawled_data['version']) + json.loads(OS_FILES["openstack/latest/network_data.json"]), + crawled_data["networkdata"], + ) + self.assertEqual(USER_DATA, crawled_data["userdata"]) + self.assertEqual(VENDOR_DATA, crawled_data["vendordata"]) + self.assertEqual(VENDOR_DATA2, crawled_data["vendordata2"]) + self.assertEqual(2, crawled_data["version"]) class TestVendorDataLoading(test_helpers.TestCase): @@ -459,261 +495,289 @@ class TestVendorDataLoading(test_helpers.TestCase): self.assertEqual(self.cvj("foobar"), "foobar") def test_vd_load_list(self): - data = [{'foo': 'bar'}, 'mystring', list(['another', 'list'])] + data = [{"foo": "bar"}, "mystring", list(["another", "list"])] self.assertEqual(self.cvj(data), data) def test_vd_load_dict_no_ci(self): - self.assertIsNone(self.cvj({'foo': 'bar'})) + self.assertIsNone(self.cvj({"foo": "bar"})) def test_vd_load_dict_ci_dict(self): - self.assertRaises(ValueError, self.cvj, - {'foo': 'bar', 'cloud-init': {'x': 1}}) + self.assertRaises( + ValueError, self.cvj, {"foo": "bar", "cloud-init": {"x": 1}} + ) def test_vd_load_dict_ci_string(self): - data = {'foo': 'bar', 'cloud-init': 'VENDOR_DATA'} - self.assertEqual(self.cvj(data), data['cloud-init']) + data = {"foo": "bar", "cloud-init": "VENDOR_DATA"} + self.assertEqual(self.cvj(data), data["cloud-init"]) def test_vd_load_dict_ci_list(self): - data = {'foo': 'bar', 'cloud-init': ['VD_1', 'VD_2']} - self.assertEqual(self.cvj(data), data['cloud-init']) + data = {"foo": "bar", "cloud-init": ["VD_1", "VD_2"]} + self.assertEqual(self.cvj(data), data["cloud-init"]) -@test_helpers.mock.patch(MOCK_PATH + 'util.is_x86') 
+@test_helpers.mock.patch(MOCK_PATH + "util.is_x86") class TestDetectOpenStack(test_helpers.CiTestCase): - def test_detect_openstack_non_intel_x86(self, m_is_x86): """Return True on non-intel platforms because dmi isn't conclusive.""" m_is_x86.return_value = False self.assertTrue( - ds.detect_openstack(), 'Expected detect_openstack == True') - - @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env') - @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') - def test_not_detect_openstack_intel_x86_ec2(self, m_dmi, m_proc_env, - m_is_x86): + ds.detect_openstack(), "Expected detect_openstack == True" + ) + + @test_helpers.mock.patch(MOCK_PATH + "util.get_proc_env") + @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") + def test_not_detect_openstack_intel_x86_ec2( + self, m_dmi, m_proc_env, m_is_x86 + ): """Return False on EC2 platforms.""" m_is_x86.return_value = True # No product_name in proc/1/environ - m_proc_env.return_value = {'HOME': '/'} + m_proc_env.return_value = {"HOME": "/"} def fake_dmi_read(dmi_key): - if dmi_key == 'system-product-name': - return 'HVM domU' # Nothing 'openstackish' on EC2 - if dmi_key == 'chassis-asset-tag': - return '' # Empty string on EC2 - assert False, 'Unexpected dmi read of %s' % dmi_key + if dmi_key == "system-product-name": + return "HVM domU" # Nothing 'openstackish' on EC2 + if dmi_key == "chassis-asset-tag": + return "" # Empty string on EC2 + assert False, "Unexpected dmi read of %s" % dmi_key m_dmi.side_effect = fake_dmi_read self.assertFalse( - ds.detect_openstack(), 'Expected detect_openstack == False on EC2') + ds.detect_openstack(), "Expected detect_openstack == False on EC2" + ) m_proc_env.assert_called_with(1) - @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') - def test_detect_openstack_intel_product_name_compute(self, m_dmi, - m_is_x86): + @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") + def test_detect_openstack_intel_product_name_compute( + self, m_dmi, m_is_x86 + ): """Return True on OpenStack compute and nova instances.""" m_is_x86.return_value = True - openstack_product_names = ['OpenStack Nova', 'OpenStack Compute'] + openstack_product_names = ["OpenStack Nova", "OpenStack Compute"] for product_name in openstack_product_names: m_dmi.return_value = product_name self.assertTrue( - ds.detect_openstack(), 'Failed to detect_openstack') + ds.detect_openstack(), "Failed to detect_openstack" + ) - @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') - def test_detect_openstack_opentelekomcloud_chassis_asset_tag(self, m_dmi, - m_is_x86): + @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") + def test_detect_openstack_opentelekomcloud_chassis_asset_tag( + self, m_dmi, m_is_x86 + ): """Return True on OpenStack reporting OpenTelekomCloud asset-tag.""" m_is_x86.return_value = True def fake_dmi_read(dmi_key): - if dmi_key == 'system-product-name': - return 'HVM domU' # Nothing 'openstackish' on OpenTelekomCloud - if dmi_key == 'chassis-asset-tag': - return 'OpenTelekomCloud' - assert False, 'Unexpected dmi read of %s' % dmi_key + if dmi_key == "system-product-name": + return "HVM domU" # Nothing 'openstackish' on OpenTelekomCloud + if dmi_key == "chassis-asset-tag": + return "OpenTelekomCloud" + assert False, "Unexpected dmi read of %s" % dmi_key m_dmi.side_effect = fake_dmi_read self.assertTrue( ds.detect_openstack(), - 'Expected detect_openstack == True on OpenTelekomCloud') + "Expected detect_openstack == True on OpenTelekomCloud", + ) - @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') - def 
test_detect_openstack_sapccloud_chassis_asset_tag(self, m_dmi, - m_is_x86): + @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") + def test_detect_openstack_sapccloud_chassis_asset_tag( + self, m_dmi, m_is_x86 + ): """Return True on OpenStack reporting SAP CCloud VM asset-tag.""" m_is_x86.return_value = True def fake_dmi_read(dmi_key): - if dmi_key == 'system-product-name': - return 'VMware Virtual Platform' # SAP CCloud uses VMware - if dmi_key == 'chassis-asset-tag': - return 'SAP CCloud VM' - assert False, 'Unexpected dmi read of %s' % dmi_key + if dmi_key == "system-product-name": + return "VMware Virtual Platform" # SAP CCloud uses VMware + if dmi_key == "chassis-asset-tag": + return "SAP CCloud VM" + assert False, "Unexpected dmi read of %s" % dmi_key m_dmi.side_effect = fake_dmi_read self.assertTrue( ds.detect_openstack(), - 'Expected detect_openstack == True on SAP CCloud VM') + "Expected detect_openstack == True on SAP CCloud VM", + ) - @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') - def test_detect_openstack_oraclecloud_chassis_asset_tag(self, m_dmi, - m_is_x86): + @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") + def test_detect_openstack_oraclecloud_chassis_asset_tag( + self, m_dmi, m_is_x86 + ): """Return True on OpenStack reporting Oracle cloud asset-tag.""" m_is_x86.return_value = True def fake_dmi_read(dmi_key): - if dmi_key == 'system-product-name': - return 'Standard PC (i440FX + PIIX, 1996)' # No match - if dmi_key == 'chassis-asset-tag': - return 'OracleCloud.com' - assert False, 'Unexpected dmi read of %s' % dmi_key + if dmi_key == "system-product-name": + return "Standard PC (i440FX + PIIX, 1996)" # No match + if dmi_key == "chassis-asset-tag": + return "OracleCloud.com" + assert False, "Unexpected dmi read of %s" % dmi_key m_dmi.side_effect = fake_dmi_read self.assertTrue( ds.detect_openstack(accept_oracle=True), - 'Expected detect_openstack == True on OracleCloud.com') + "Expected detect_openstack == True on OracleCloud.com", + ) self.assertFalse( ds.detect_openstack(accept_oracle=False), - 'Expected detect_openstack == False.') + "Expected detect_openstack == False.", + ) - def _test_detect_openstack_nova_compute_chassis_asset_tag(self, m_dmi, - m_is_x86, - chassis_tag): + def _test_detect_openstack_nova_compute_chassis_asset_tag( + self, m_dmi, m_is_x86, chassis_tag + ): """Return True on OpenStack reporting generic asset-tag.""" m_is_x86.return_value = True def fake_dmi_read(dmi_key): - if dmi_key == 'system-product-name': - return 'Generic OpenStack Platform' - if dmi_key == 'chassis-asset-tag': + if dmi_key == "system-product-name": + return "Generic OpenStack Platform" + if dmi_key == "chassis-asset-tag": return chassis_tag - assert False, 'Unexpected dmi read of %s' % dmi_key + assert False, "Unexpected dmi read of %s" % dmi_key m_dmi.side_effect = fake_dmi_read self.assertTrue( ds.detect_openstack(), - 'Expected detect_openstack == True on Generic OpenStack Platform') + "Expected detect_openstack == True on Generic OpenStack Platform", + ) - @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') - def test_detect_openstack_nova_chassis_asset_tag(self, m_dmi, - m_is_x86): + @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") + def test_detect_openstack_nova_chassis_asset_tag(self, m_dmi, m_is_x86): self._test_detect_openstack_nova_compute_chassis_asset_tag( - m_dmi, m_is_x86, 'OpenStack Nova') + m_dmi, m_is_x86, "OpenStack Nova" + ) - @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') - def 
test_detect_openstack_compute_chassis_asset_tag(self, m_dmi, - m_is_x86): + @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") + def test_detect_openstack_compute_chassis_asset_tag(self, m_dmi, m_is_x86): self._test_detect_openstack_nova_compute_chassis_asset_tag( - m_dmi, m_is_x86, 'OpenStack Compute') - - @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env') - @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') - def test_detect_openstack_by_proc_1_environ(self, m_dmi, m_proc_env, - m_is_x86): + m_dmi, m_is_x86, "OpenStack Compute" + ) + + @test_helpers.mock.patch(MOCK_PATH + "util.get_proc_env") + @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") + def test_detect_openstack_by_proc_1_environ( + self, m_dmi, m_proc_env, m_is_x86 + ): """Return True when nova product_name specified in /proc/1/environ.""" m_is_x86.return_value = True # Nova product_name in proc/1/environ m_proc_env.return_value = { - 'HOME': '/', 'product_name': 'OpenStack Nova'} + "HOME": "/", + "product_name": "OpenStack Nova", + } def fake_dmi_read(dmi_key): - if dmi_key == 'system-product-name': - return 'HVM domU' # Nothing 'openstackish' - if dmi_key == 'chassis-asset-tag': - return '' # Nothing 'openstackish' - assert False, 'Unexpected dmi read of %s' % dmi_key + if dmi_key == "system-product-name": + return "HVM domU" # Nothing 'openstackish' + if dmi_key == "chassis-asset-tag": + return "" # Nothing 'openstackish' + assert False, "Unexpected dmi read of %s" % dmi_key m_dmi.side_effect = fake_dmi_read self.assertTrue( ds.detect_openstack(), - 'Expected detect_openstack == True from /proc/1/environ') + "Expected detect_openstack == True from /proc/1/environ", + ) m_proc_env.assert_called_with(1) class TestMetadataReader(test_helpers.HttprettyTestCase): """Test the MetadataReader.""" - burl = 'http://169.254.169.254/' + + burl = "http://169.254.169.254/" md_base = { - 'availability_zone': 'myaz1', - 'hostname': 'sm-foo-test.novalocal', + "availability_zone": "myaz1", + "hostname": "sm-foo-test.novalocal", "keys": [{"data": PUBKEY, "name": "brickies", "type": "ssh"}], - 'launch_index': 0, - 'name': 'sm-foo-test', - 'public_keys': {'mykey': PUBKEY}, - 'project_id': '6a103f813b774b9fb15a4fcd36e1c056', - 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'} + "launch_index": 0, + "name": "sm-foo-test", + "public_keys": {"mykey": PUBKEY}, + "project_id": "6a103f813b774b9fb15a4fcd36e1c056", + "uuid": "b0fa911b-69d4-4476-bbe2-1c92bff6535c", + } def register(self, path, body=None, status=200): - content = body if not isinstance(body, str) else body.encode('utf-8') + content = body if not isinstance(body, str) else body.encode("utf-8") hp.register_uri( - hp.GET, self.burl + "openstack" + path, status=status, - body=content) + hp.GET, self.burl + "openstack" + path, status=status, body=content ) def register_versions(self, versions): - self.register("", '\n'.join(versions)) - self.register("/", '\n'.join(versions)) + self.register("", "\n".join(versions)) + self.register("/", "\n".join(versions)) def register_version(self, version, data): - content = '\n'.join(sorted(data.keys())) + content = "\n".join(sorted(data.keys())) self.register(version, content) self.register(version + "/", content) for path, content in data.items(): self.register("/%s/%s" % (version, path), content) - if 'user_data' not in data: + if "user_data" not in data: self.register("/%s/user_data" % version, "nodata", status=404) def test__find_working_version(self): """Test a working version
ignores unsupported.""" unsup = "2016-11-09" self.register_versions( - [openstack.OS_FOLSOM, openstack.OS_LIBERTY, unsup, - openstack.OS_LATEST]) + [ + openstack.OS_FOLSOM, + openstack.OS_LIBERTY, + unsup, + openstack.OS_LATEST, + ] + ) self.assertEqual( openstack.OS_LIBERTY, - openstack.MetadataReader(self.burl)._find_working_version()) + openstack.MetadataReader(self.burl)._find_working_version(), + ) def test__find_working_version_uses_latest(self): """'latest' should be used if no supported versions.""" - unsup1, unsup2 = ("2016-11-09", '2017-06-06') + unsup1, unsup2 = ("2016-11-09", "2017-06-06") self.register_versions([unsup1, unsup2, openstack.OS_LATEST]) self.assertEqual( openstack.OS_LATEST, - openstack.MetadataReader(self.burl)._find_working_version()) + openstack.MetadataReader(self.burl)._find_working_version(), + ) def test_read_v2_os_ocata(self): """Validate return value of read_v2 for os_ocata data.""" md = copy.deepcopy(self.md_base) - md['devices'] = [] - network_data = {'links': [], 'networks': [], 'services': []} + md["devices"] = [] + network_data = {"links": [], "networks": [], "services": []} vendor_data = {} vendor_data2 = {"static": {}} data = { - 'meta_data.json': json.dumps(md), - 'network_data.json': json.dumps(network_data), - 'vendor_data.json': json.dumps(vendor_data), - 'vendor_data2.json': json.dumps(vendor_data2), + "meta_data.json": json.dumps(md), + "network_data.json": json.dumps(network_data), + "vendor_data.json": json.dumps(vendor_data), + "vendor_data2.json": json.dumps(vendor_data2), } self.register_versions([openstack.OS_OCATA, openstack.OS_LATEST]) self.register_version(openstack.OS_OCATA, data) mock_read_ec2 = test_helpers.mock.MagicMock( - return_value={'instance-id': 'unused-ec2'}) + return_value={"instance-id": "unused-ec2"} + ) expected_md = copy.deepcopy(md) expected_md.update( - {'instance-id': md['uuid'], 'local-hostname': md['hostname']}) + {"instance-id": md["uuid"], "local-hostname": md["hostname"]} + ) expected = { - 'userdata': '', # Annoying, no user-data results in empty string. - 'version': 2, - 'metadata': expected_md, - 'vendordata': vendor_data, - 'vendordata2': vendor_data2, - 'networkdata': network_data, - 'ec2-metadata': mock_read_ec2.return_value, - 'files': {}, + "userdata": "", # Annoying, no user-data results in empty string. 
+ "version": 2, + "metadata": expected_md, + "vendordata": vendor_data, + "vendordata2": vendor_data2, + "networkdata": network_data, + "ec2-metadata": mock_read_ec2.return_value, + "files": {}, } reader = openstack.MetadataReader(self.burl) reader._read_ec2_metadata = mock_read_ec2 diff --git a/tests/unittests/sources/test_oracle.py b/tests/unittests/sources/test_oracle.py index 2aab097c..e0e79c8c 100644 --- a/tests/unittests/sources/test_oracle.py +++ b/tests/unittests/sources/test_oracle.py @@ -11,8 +11,8 @@ import pytest from cloudinit.sources import DataSourceOracle as oracle from cloudinit.sources import NetworkConfigSource from cloudinit.sources.DataSourceOracle import OpcMetadata -from tests.unittests import helpers as test_helpers from cloudinit.url_helper import UrlError +from tests.unittests import helpers as test_helpers DS_PATH = "cloudinit.sources.DataSourceOracle" @@ -119,7 +119,9 @@ def oracle_ds(request, fixture_utils, paths, metadata_version): return_value=metadata, ): yield oracle.DataSourceOracle( - sys_cfg=sys_cfg, distro=mock.Mock(), paths=paths, + sys_cfg=sys_cfg, + distro=mock.Mock(), + paths=paths, ) @@ -129,18 +131,22 @@ class TestDataSourceOracle: assert "oracle" == oracle_ds.platform_type def test_subplatform_before_fetch(self, oracle_ds): - assert 'unknown' == oracle_ds.subplatform + assert "unknown" == oracle_ds.subplatform def test_platform_info_after_fetch(self, oracle_ds): oracle_ds._get_data() - assert 'metadata (http://169.254.169.254/opc/v2/)' == \ - oracle_ds.subplatform + assert ( + "metadata (http://169.254.169.254/opc/v2/)" + == oracle_ds.subplatform + ) - @pytest.mark.parametrize('metadata_version', [1]) + @pytest.mark.parametrize("metadata_version", [1]) def test_v1_platform_info_after_fetch(self, oracle_ds): oracle_ds._get_data() - assert 'metadata (http://169.254.169.254/opc/v1/)' == \ - oracle_ds.subplatform + assert ( + "metadata (http://169.254.169.254/opc/v1/)" + == oracle_ds.subplatform + ) def test_secondary_nics_disabled_by_default(self, oracle_ds): assert not oracle_ds.ds_cfg["configure_secondary_nics"] @@ -153,29 +159,30 @@ class TestDataSourceOracle: class TestIsPlatformViable(test_helpers.CiTestCase): - @mock.patch(DS_PATH + ".dmi.read_dmi_data", - return_value=oracle.CHASSIS_ASSET_TAG) + @mock.patch( + DS_PATH + ".dmi.read_dmi_data", return_value=oracle.CHASSIS_ASSET_TAG + ) def test_expected_viable(self, m_read_dmi_data): """System with known chassis tag is viable.""" self.assertTrue(oracle._is_platform_viable()) - m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')]) + m_read_dmi_data.assert_has_calls([mock.call("chassis-asset-tag")]) @mock.patch(DS_PATH + ".dmi.read_dmi_data", return_value=None) def test_expected_not_viable_dmi_data_none(self, m_read_dmi_data): """System without known chassis tag is not viable.""" self.assertFalse(oracle._is_platform_viable()) - m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')]) + m_read_dmi_data.assert_has_calls([mock.call("chassis-asset-tag")]) @mock.patch(DS_PATH + ".dmi.read_dmi_data", return_value="LetsGoCubs") def test_expected_not_viable_other(self, m_read_dmi_data): """System with unnown chassis tag is not viable.""" self.assertFalse(oracle._is_platform_viable()) - m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')]) + m_read_dmi_data.assert_has_calls([mock.call("chassis-asset-tag")]) @mock.patch( "cloudinit.net.is_openvswitch_internal_interface", - mock.Mock(return_value=False) + mock.Mock(return_value=False), ) class TestNetworkConfigFromOpcImds: 
def test_no_secondary_nics_does_not_mutate_input(self, oracle_ds): @@ -192,222 +199,317 @@ class TestNetworkConfigFromOpcImds: # operations are used oracle_ds._network_config = object() oracle_ds._add_network_config_from_opc_imds() - assert 'bare metal machine' in caplog.text + assert "bare metal machine" in caplog.text def test_missing_mac_skipped(self, oracle_ds, caplog): oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE) oracle_ds._network_config = { - 'version': 1, 'config': [{'primary': 'nic'}] + "version": 1, + "config": [{"primary": "nic"}], } with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}): oracle_ds._add_network_config_from_opc_imds() - assert 1 == len(oracle_ds.network_config['config']) - assert 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping' in \ - caplog.text + assert 1 == len(oracle_ds.network_config["config"]) + assert ( + "Interface with MAC 00:00:17:02:2b:b1 not found; skipping" + in caplog.text + ) def test_missing_mac_skipped_v2(self, oracle_ds, caplog): oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE) oracle_ds._network_config = { - 'version': 2, 'ethernets': {'primary': {'nic': {}}} + "version": 2, + "ethernets": {"primary": {"nic": {}}}, } with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}): oracle_ds._add_network_config_from_opc_imds() - assert 1 == len(oracle_ds.network_config['ethernets']) - assert 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping' in \ - caplog.text + assert 1 == len(oracle_ds.network_config["ethernets"]) + assert ( + "Interface with MAC 00:00:17:02:2b:b1 not found; skipping" + in caplog.text + ) def test_secondary_nic(self, oracle_ds): oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE) oracle_ds._network_config = { - 'version': 1, 'config': [{'primary': 'nic'}] + "version": 1, + "config": [{"primary": "nic"}], } - mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' - with mock.patch(DS_PATH + ".get_interfaces_by_mac", - return_value={mac_addr: nic_name}): + mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3" + with mock.patch( + DS_PATH + ".get_interfaces_by_mac", + return_value={mac_addr: nic_name}, + ): oracle_ds._add_network_config_from_opc_imds() # The input is mutated - assert 2 == len(oracle_ds.network_config['config']) + assert 2 == len(oracle_ds.network_config["config"]) - secondary_nic_cfg = oracle_ds.network_config['config'][1] - assert nic_name == secondary_nic_cfg['name'] - assert 'physical' == secondary_nic_cfg['type'] - assert mac_addr == secondary_nic_cfg['mac_address'] - assert 9000 == secondary_nic_cfg['mtu'] + secondary_nic_cfg = oracle_ds.network_config["config"][1] + assert nic_name == secondary_nic_cfg["name"] + assert "physical" == secondary_nic_cfg["type"] + assert mac_addr == secondary_nic_cfg["mac_address"] + assert 9000 == secondary_nic_cfg["mtu"] - assert 1 == len(secondary_nic_cfg['subnets']) - subnet_cfg = secondary_nic_cfg['subnets'][0] + assert 1 == len(secondary_nic_cfg["subnets"]) + subnet_cfg = secondary_nic_cfg["subnets"][0] # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE - assert '10.0.0.231' == subnet_cfg['address'] + assert "10.0.0.231" == subnet_cfg["address"] def test_secondary_nic_v2(self, oracle_ds): oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE) oracle_ds._network_config = { - 'version': 2, 'ethernets': {'primary': {'nic': {}}} + "version": 2, + "ethernets": {"primary": {"nic": {}}}, } - mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' - with mock.patch(DS_PATH + 
".get_interfaces_by_mac", - return_value={mac_addr: nic_name}): + mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3" + with mock.patch( + DS_PATH + ".get_interfaces_by_mac", + return_value={mac_addr: nic_name}, + ): oracle_ds._add_network_config_from_opc_imds() # The input is mutated - assert 2 == len(oracle_ds.network_config['ethernets']) + assert 2 == len(oracle_ds.network_config["ethernets"]) - secondary_nic_cfg = oracle_ds.network_config['ethernets']['ens3'] - assert secondary_nic_cfg['dhcp4'] is False - assert secondary_nic_cfg['dhcp6'] is False - assert mac_addr == secondary_nic_cfg['match']['macaddress'] - assert 9000 == secondary_nic_cfg['mtu'] + secondary_nic_cfg = oracle_ds.network_config["ethernets"]["ens3"] + assert secondary_nic_cfg["dhcp4"] is False + assert secondary_nic_cfg["dhcp6"] is False + assert mac_addr == secondary_nic_cfg["match"]["macaddress"] + assert 9000 == secondary_nic_cfg["mtu"] - assert 1 == len(secondary_nic_cfg['addresses']) + assert 1 == len(secondary_nic_cfg["addresses"]) # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE - assert '10.0.0.231' == secondary_nic_cfg['addresses'][0] + assert "10.0.0.231" == secondary_nic_cfg["addresses"][0] class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase): - def setUp(self): super(TestNetworkConfigFiltersNetFailover, self).setUp() - self.add_patch(DS_PATH + '.get_interfaces_by_mac', - 'm_get_interfaces_by_mac') - self.add_patch(DS_PATH + '.is_netfail_master', 'm_netfail_master') + self.add_patch( + DS_PATH + ".get_interfaces_by_mac", "m_get_interfaces_by_mac" + ) + self.add_patch(DS_PATH + ".is_netfail_master", "m_netfail_master") def test_ignore_bogus_network_config(self): - netcfg = {'something': 'here'} + netcfg = {"something": "here"} passed_netcfg = copy.copy(netcfg) oracle._ensure_netfailover_safe(passed_netcfg) self.assertEqual(netcfg, passed_netcfg) def test_ignore_network_config_unknown_versions(self): - netcfg = {'something': 'here', 'version': 3} + netcfg = {"something": "here", "version": 3} passed_netcfg = copy.copy(netcfg) oracle._ensure_netfailover_safe(passed_netcfg) self.assertEqual(netcfg, passed_netcfg) def test_checks_v1_type_physical_interfaces(self): - mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' + mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3" self.m_get_interfaces_by_mac.return_value = { mac_addr: nic_name, } - netcfg = {'version': 1, 'config': [ - {'type': 'physical', 'name': nic_name, 'mac_address': mac_addr, - 'subnets': [{'type': 'dhcp4'}]}]} + netcfg = { + "version": 1, + "config": [ + { + "type": "physical", + "name": nic_name, + "mac_address": mac_addr, + "subnets": [{"type": "dhcp4"}], + } + ], + } passed_netcfg = copy.copy(netcfg) self.m_netfail_master.return_value = False oracle._ensure_netfailover_safe(passed_netcfg) self.assertEqual(netcfg, passed_netcfg) - self.assertEqual([mock.call(nic_name)], - self.m_netfail_master.call_args_list) + self.assertEqual( + [mock.call(nic_name)], self.m_netfail_master.call_args_list + ) def test_checks_v1_skips_non_phys_interfaces(self): - mac_addr, nic_name = '00:00:17:02:2b:b1', 'bond0' + mac_addr, nic_name = "00:00:17:02:2b:b1", "bond0" self.m_get_interfaces_by_mac.return_value = { mac_addr: nic_name, } - netcfg = {'version': 1, 'config': [ - {'type': 'bond', 'name': nic_name, 'mac_address': mac_addr, - 'subnets': [{'type': 'dhcp4'}]}]} + netcfg = { + "version": 1, + "config": [ + { + "type": "bond", + "name": nic_name, + "mac_address": mac_addr, + "subnets": [{"type": "dhcp4"}], + } + ], + } passed_netcfg = 
copy.copy(netcfg) oracle._ensure_netfailover_safe(passed_netcfg) self.assertEqual(netcfg, passed_netcfg) self.assertEqual(0, self.m_netfail_master.call_count) def test_removes_master_mac_property_v1(self): - nic_master, mac_master = 'ens3', self.random_string() - nic_other, mac_other = 'ens7', self.random_string() - nic_extra, mac_extra = 'enp0s1f2', self.random_string() + nic_master, mac_master = "ens3", self.random_string() + nic_other, mac_other = "ens7", self.random_string() + nic_extra, mac_extra = "enp0s1f2", self.random_string() self.m_get_interfaces_by_mac.return_value = { mac_master: nic_master, mac_other: nic_other, mac_extra: nic_extra, } - netcfg = {'version': 1, 'config': [ - {'type': 'physical', 'name': nic_master, - 'mac_address': mac_master}, - {'type': 'physical', 'name': nic_other, 'mac_address': mac_other}, - {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra}, - ]} + netcfg = { + "version": 1, + "config": [ + { + "type": "physical", + "name": nic_master, + "mac_address": mac_master, + }, + { + "type": "physical", + "name": nic_other, + "mac_address": mac_other, + }, + { + "type": "physical", + "name": nic_extra, + "mac_address": mac_extra, + }, + ], + } def _is_netfail_master(iface): - if iface == 'ens3': + if iface == "ens3": return True return False + self.m_netfail_master.side_effect = _is_netfail_master - expected_cfg = {'version': 1, 'config': [ - {'type': 'physical', 'name': nic_master}, - {'type': 'physical', 'name': nic_other, 'mac_address': mac_other}, - {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra}, - ]} + expected_cfg = { + "version": 1, + "config": [ + {"type": "physical", "name": nic_master}, + { + "type": "physical", + "name": nic_other, + "mac_address": mac_other, + }, + { + "type": "physical", + "name": nic_extra, + "mac_address": mac_extra, + }, + ], + } oracle._ensure_netfailover_safe(netcfg) self.assertEqual(expected_cfg, netcfg) def test_checks_v2_type_ethernet_interfaces(self): - mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' + mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3" self.m_get_interfaces_by_mac.return_value = { mac_addr: nic_name, } - netcfg = {'version': 2, 'ethernets': { - nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name, - 'match': {'macaddress': mac_addr}}}} + netcfg = { + "version": 2, + "ethernets": { + nic_name: { + "dhcp4": True, + "critical": True, + "set-name": nic_name, + "match": {"macaddress": mac_addr}, + } + }, + } passed_netcfg = copy.copy(netcfg) self.m_netfail_master.return_value = False oracle._ensure_netfailover_safe(passed_netcfg) self.assertEqual(netcfg, passed_netcfg) - self.assertEqual([mock.call(nic_name)], - self.m_netfail_master.call_args_list) + self.assertEqual( + [mock.call(nic_name)], self.m_netfail_master.call_args_list + ) def test_skips_v2_non_ethernet_interfaces(self): - mac_addr, nic_name = '00:00:17:02:2b:b1', 'wlps0' + mac_addr, nic_name = "00:00:17:02:2b:b1", "wlps0" self.m_get_interfaces_by_mac.return_value = { mac_addr: nic_name, } - netcfg = {'version': 2, 'wifis': { - nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name, - 'match': {'macaddress': mac_addr}}}} + netcfg = { + "version": 2, + "wifis": { + nic_name: { + "dhcp4": True, + "critical": True, + "set-name": nic_name, + "match": {"macaddress": mac_addr}, + } + }, + } passed_netcfg = copy.copy(netcfg) oracle._ensure_netfailover_safe(passed_netcfg) self.assertEqual(netcfg, passed_netcfg) self.assertEqual(0, self.m_netfail_master.call_count) def 
test_removes_master_mac_property_v2(self): - nic_master, mac_master = 'ens3', self.random_string() - nic_other, mac_other = 'ens7', self.random_string() - nic_extra, mac_extra = 'enp0s1f2', self.random_string() + nic_master, mac_master = "ens3", self.random_string() + nic_other, mac_other = "ens7", self.random_string() + nic_extra, mac_extra = "enp0s1f2", self.random_string() self.m_get_interfaces_by_mac.return_value = { mac_master: nic_master, mac_other: nic_other, mac_extra: nic_extra, } - netcfg = {'version': 2, 'ethernets': { - nic_extra: {'dhcp4': True, 'set-name': nic_extra, - 'match': {'macaddress': mac_extra}}, - nic_other: {'dhcp4': True, 'set-name': nic_other, - 'match': {'macaddress': mac_other}}, - nic_master: {'dhcp4': True, 'set-name': nic_master, - 'match': {'macaddress': mac_master}}, - }} + netcfg = { + "version": 2, + "ethernets": { + nic_extra: { + "dhcp4": True, + "set-name": nic_extra, + "match": {"macaddress": mac_extra}, + }, + nic_other: { + "dhcp4": True, + "set-name": nic_other, + "match": {"macaddress": mac_other}, + }, + nic_master: { + "dhcp4": True, + "set-name": nic_master, + "match": {"macaddress": mac_master}, + }, + }, + } def _is_netfail_master(iface): - if iface == 'ens3': + if iface == "ens3": return True return False + self.m_netfail_master.side_effect = _is_netfail_master - expected_cfg = {'version': 2, 'ethernets': { - nic_master: {'dhcp4': True, 'match': {'name': nic_master}}, - nic_extra: {'dhcp4': True, 'set-name': nic_extra, - 'match': {'macaddress': mac_extra}}, - nic_other: {'dhcp4': True, 'set-name': nic_other, - 'match': {'macaddress': mac_other}}, - }} + expected_cfg = { + "version": 2, + "ethernets": { + nic_master: {"dhcp4": True, "match": {"name": nic_master}}, + nic_extra: { + "dhcp4": True, + "set-name": nic_extra, + "match": {"macaddress": mac_extra}, + }, + nic_other: { + "dhcp4": True, + "set-name": nic_other, + "match": {"macaddress": mac_other}, + }, + }, + } oracle._ensure_netfailover_safe(netcfg) import pprint + pprint.pprint(netcfg) - print('---- ^^ modified ^^ ---- vv original vv ----') + print("---- ^^ modified ^^ ---- vv original vv ----") pprint.pprint(expected_cfg) self.assertEqual(expected_cfg, netcfg) @@ -425,12 +527,12 @@ def _mock_v2_urls(httpretty): httpretty.register_uri( httpretty.GET, "http://169.254.169.254/opc/v2/instance/", - body=instance_callback + body=instance_callback, ) httpretty.register_uri( httpretty.GET, "http://169.254.169.254/opc/v2/vnics/", - body=vnics_callback + body=vnics_callback, ) @@ -443,12 +545,12 @@ def _mock_no_v2_urls(httpretty): httpretty.register_uri( httpretty.GET, "http://169.254.169.254/opc/v1/instance/", - body=OPC_V1_METADATA + body=OPC_V1_METADATA, ) httpretty.register_uri( httpretty.GET, "http://169.254.169.254/opc/v1/vnics/", - body=OPC_BM_SECONDARY_VNIC_RESPONSE + body=OPC_BM_SECONDARY_VNIC_RESPONSE, ) @@ -459,18 +561,34 @@ class TestReadOpcMetadata: @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None) @pytest.mark.parametrize( - 'version,setup_urls,instance_data,fetch_vnics,vnics_data', [ - (2, _mock_v2_urls, json.loads(OPC_V2_METADATA), True, - json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)), + "version,setup_urls,instance_data,fetch_vnics,vnics_data", + [ + ( + 2, + _mock_v2_urls, + json.loads(OPC_V2_METADATA), + True, + json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE), + ), (2, _mock_v2_urls, json.loads(OPC_V2_METADATA), False, None), - (1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), True, - json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)), + ( + 1, + _mock_no_v2_urls, + 
json.loads(OPC_V1_METADATA), + True, + json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE), + ), (1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), False, None), - ] + ], ) def test_metadata_returned( - self, version, setup_urls, instance_data, - fetch_vnics, vnics_data, httpretty + self, + version, + setup_urls, + instance_data, + fetch_vnics, + vnics_data, + httpretty, ): setup_urls(httpretty) metadata = oracle.read_opc_metadata(fetch_vnics_data=fetch_vnics) @@ -490,10 +608,16 @@ class TestReadOpcMetadata: (3, 1, json.loads(OPC_V1_METADATA), does_not_raise()), (3, 2, json.loads(OPC_V1_METADATA), does_not_raise()), (3, 3, None, pytest.raises(UrlError)), - ] + ], ) - def test_retries(self, v2_failure_count, v1_failure_count, - expected_body, expectation, httpretty): + def test_retries( + self, + v2_failure_count, + v1_failure_count, + expected_body, + expectation, + httpretty, + ): v2_responses = [httpretty.Response("", status=404)] * v2_failure_count v2_responses.append(httpretty.Response(OPC_V2_METADATA)) v1_responses = [httpretty.Response("", status=404)] * v1_failure_count @@ -548,7 +672,8 @@ class TestCommon_GetDataBehaviour: DS_PATH + "._is_platform_viable", mock.Mock(return_value=False) ) def test_false_if_platform_not_viable( - self, parameterized_oracle_ds, + self, + parameterized_oracle_ds, ): assert not parameterized_oracle_ds._get_data() @@ -571,7 +696,10 @@ class TestCommon_GetDataBehaviour: ), ) def test_metadata_keys_set_correctly( - self, keyname, expected_value, parameterized_oracle_ds, + self, + keyname, + expected_value, + parameterized_oracle_ds, ): assert parameterized_oracle_ds._get_data() assert expected_value == parameterized_oracle_ds.metadata[keyname] @@ -591,7 +719,10 @@ class TestCommon_GetDataBehaviour: DS_PATH + "._read_system_uuid", mock.Mock(return_value="my-test-uuid") ) def test_attributes_set_correctly( - self, attribute_name, expected_value, parameterized_oracle_ds, + self, + attribute_name, + expected_value, + parameterized_oracle_ds, ): assert parameterized_oracle_ds._get_data() assert expected_value == getattr( @@ -624,7 +755,8 @@ class TestCommon_GetDataBehaviour: instance_data["metadata"]["ssh_authorized_keys"] = ssh_keys metadata = OpcMetadata(None, instance_data, None) with mock.patch( - DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata), + DS_PATH + ".read_opc_metadata", + mock.Mock(return_value=metadata), ): assert parameterized_oracle_ds._get_data() assert ( @@ -638,7 +770,8 @@ class TestCommon_GetDataBehaviour: del instance_data["metadata"]["user_data"] metadata = OpcMetadata(None, instance_data, None) with mock.patch( - DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata), + DS_PATH + ".read_opc_metadata", + mock.Mock(return_value=metadata), ): assert parameterized_oracle_ds._get_data() @@ -651,7 +784,8 @@ class TestCommon_GetDataBehaviour: del instance_data["metadata"] metadata = OpcMetadata(None, instance_data, None) with mock.patch( - DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata), + DS_PATH + ".read_opc_metadata", + mock.Mock(return_value=metadata), ): assert parameterized_oracle_ds._get_data() @@ -697,11 +831,9 @@ class TestNonIscsiRoot_GetDataBehaviour: mock.call( iface=m_find_fallback_nic.return_value, connectivity_url_data={ - 'headers': { - 'Authorization': 'Bearer Oracle' - }, - 'url': 'http://169.254.169.254/opc/v2/instance/' - } + "headers": {"Authorization": "Bearer Oracle"}, + "url": "http://169.254.169.254/opc/v2/instance/", + }, ) ] == m_EphemeralDHCPv4.call_args_list @@ -761,9 +893,10 @@ 
class TestNetworkConfig: def side_effect(self): self._network_config["secondary_added"] = mock.sentinel.needle - oracle_ds._vnics_data = 'DummyData' + oracle_ds._vnics_data = "DummyData" with mock.patch.object( - oracle.DataSourceOracle, "_add_network_config_from_opc_imds", + oracle.DataSourceOracle, + "_add_network_config_from_opc_imds", new=side_effect, ): was_secondary_added = "secondary_added" in oracle_ds.network_config @@ -779,8 +912,9 @@ class TestNetworkConfig: oracle_ds._vnics_data = "DummyData" with mock.patch.object( - oracle.DataSourceOracle, "_add_network_config_from_opc_imds", - side_effect=Exception() + oracle.DataSourceOracle, + "_add_network_config_from_opc_imds", + side_effect=Exception(), ): network_config = oracle_ds.network_config assert network_config == m_read_initramfs_config.return_value diff --git a/tests/unittests/sources/test_ovf.py b/tests/unittests/sources/test_ovf.py index da516731..c2c87f12 100644 --- a/tests/unittests/sources/test_ovf.py +++ b/tests/unittests/sources/test_ovf.py @@ -6,20 +6,19 @@ import base64 import os - from collections import OrderedDict from textwrap import dedent -from cloudinit import subp -from cloudinit import util -from tests.unittests.helpers import CiTestCase, mock, wrap_and_call +from cloudinit import subp, util from cloudinit.helpers import Paths +from cloudinit.safeyaml import YAMLError from cloudinit.sources import DataSourceOVF as dsovf from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( - CustomScriptNotFound) -from cloudinit.safeyaml import YAMLError + CustomScriptNotFound, +) +from tests.unittests.helpers import CiTestCase, mock, wrap_and_call -MPATH = 'cloudinit.sources.DataSourceOVF.' +MPATH = "cloudinit.sources.DataSourceOVF." NOT_FOUND = None @@ -50,7 +49,7 @@ def fill_properties(props, template=OVF_ENV_CONTENT): for key, val in props.items(): lines.append(prop_tmpl.format(key=key, val=val)) indent = " " - properties = ''.join([indent + line + "\n" for line in lines]) + properties = "".join([indent + line + "\n" for line in lines]) return template.format(properties=properties) @@ -58,13 +57,16 @@ class TestReadOvfEnv(CiTestCase): def test_with_b64_userdata(self): user_data = "#!/bin/sh\necho hello world\n" user_data_b64 = base64.b64encode(user_data.encode()).decode() - props = {"user-data": user_data_b64, "password": "passw0rd", - "instance-id": "inst-001"} + props = { + "user-data": user_data_b64, + "password": "passw0rd", + "instance-id": "inst-001", + } env = fill_properties(props) md, ud, cfg = dsovf.read_ovf_environment(env) self.assertEqual({"instance-id": "inst-001"}, md) self.assertEqual(user_data.encode(), ud) - self.assertEqual({'password': "passw0rd"}, cfg) + self.assertEqual({"password": "passw0rd"}, cfg) def test_with_non_b64_userdata(self): user_data = "my-user-data" @@ -80,11 +82,12 @@ class TestReadOvfEnv(CiTestCase): env = fill_properties(props) md, ud, cfg = dsovf.read_ovf_environment(env) self.assertEqual({"instance-id": "inst-001"}, md) - self.assertEqual({'password': "passw0rd"}, cfg) + self.assertEqual({"password": "passw0rd"}, cfg) self.assertIsNone(ud) def test_with_b64_network_config_enable_read_network(self): - network_config = dedent("""\ + network_config = dedent( + """\ network: version: 2 ethernets: @@ -101,30 +104,41 @@ class TestReadOvfEnv(CiTestCase): dhcp4: false addresses: - 10.10.10.1/24 - """) + """ + ) network_config_b64 = base64.b64encode(network_config.encode()).decode() - props = {"network-config": network_config_b64, - "password": "passw0rd", - 
"instance-id": "inst-001"} + props = { + "network-config": network_config_b64, + "password": "passw0rd", + "instance-id": "inst-001", + } env = fill_properties(props) md, ud, cfg = dsovf.read_ovf_environment(env, True) self.assertEqual("inst-001", md["instance-id"]) - self.assertEqual({'password': "passw0rd"}, cfg) + self.assertEqual({"password": "passw0rd"}, cfg) self.assertEqual( - {'version': 2, 'ethernets': - {'nics': - {'nameservers': - {'addresses': ['127.0.0.53'], - 'search': ['eng.vmware.com', 'vmware.com']}, - 'match': {'name': 'eth*'}, - 'gateway4': '10.10.10.253', - 'dhcp4': False, - 'addresses': ['10.10.10.1/24']}}}, - md["network-config"]) + { + "version": 2, + "ethernets": { + "nics": { + "nameservers": { + "addresses": ["127.0.0.53"], + "search": ["eng.vmware.com", "vmware.com"], + }, + "match": {"name": "eth*"}, + "gateway4": "10.10.10.253", + "dhcp4": False, + "addresses": ["10.10.10.1/24"], + } + }, + }, + md["network-config"], + ) self.assertIsNone(ud) def test_with_non_b64_network_config_enable_read_network(self): - network_config = dedent("""\ + network_config = dedent( + """\ network: version: 2 ethernets: @@ -141,18 +155,22 @@ class TestReadOvfEnv(CiTestCase): dhcp4: false addresses: - 10.10.10.1/24 - """) - props = {"network-config": network_config, - "password": "passw0rd", - "instance-id": "inst-001"} + """ + ) + props = { + "network-config": network_config, + "password": "passw0rd", + "instance-id": "inst-001", + } env = fill_properties(props) md, ud, cfg = dsovf.read_ovf_environment(env, True) self.assertEqual({"instance-id": "inst-001"}, md) - self.assertEqual({'password': "passw0rd"}, cfg) + self.assertEqual({"password": "passw0rd"}, cfg) self.assertIsNone(ud) def test_with_b64_network_config_disable_read_network(self): - network_config = dedent("""\ + network_config = dedent( + """\ network: version: 2 ethernets: @@ -169,20 +187,22 @@ class TestReadOvfEnv(CiTestCase): dhcp4: false addresses: - 10.10.10.1/24 - """) + """ + ) network_config_b64 = base64.b64encode(network_config.encode()).decode() - props = {"network-config": network_config_b64, - "password": "passw0rd", - "instance-id": "inst-001"} + props = { + "network-config": network_config_b64, + "password": "passw0rd", + "instance-id": "inst-001", + } env = fill_properties(props) md, ud, cfg = dsovf.read_ovf_environment(env) self.assertEqual({"instance-id": "inst-001"}, md) - self.assertEqual({'password': "passw0rd"}, cfg) + self.assertEqual({"password": "passw0rd"}, cfg) self.assertIsNone(ud) class TestMarkerFiles(CiTestCase): - def setUp(self): super(TestMarkerFiles, self).setUp() self.tdir = self.tmp_dir() @@ -190,25 +210,23 @@ class TestMarkerFiles(CiTestCase): def test_false_when_markerid_none(self): """Return False when markerid provided is None.""" self.assertFalse( - dsovf.check_marker_exists(markerid=None, marker_dir=self.tdir)) + dsovf.check_marker_exists(markerid=None, marker_dir=self.tdir) + ) def test_markerid_file_exist(self): """Return False when markerid file path does not exist, True otherwise.""" - self.assertFalse( - dsovf.check_marker_exists('123', self.tdir)) + self.assertFalse(dsovf.check_marker_exists("123", self.tdir)) - marker_file = self.tmp_path('.markerfile-123.txt', self.tdir) - util.write_file(marker_file, '') - self.assertTrue( - dsovf.check_marker_exists('123', self.tdir) - ) + marker_file = self.tmp_path(".markerfile-123.txt", self.tdir) + util.write_file(marker_file, "") + self.assertTrue(dsovf.check_marker_exists("123", self.tdir)) def test_marker_file_setup(self): 
"""Test creation of marker files.""" - markerfilepath = self.tmp_path('.markerfile-hi.txt', self.tdir) + markerfilepath = self.tmp_path(".markerfile-hi.txt", self.tdir) self.assertFalse(os.path.exists(markerfilepath)) - dsovf.setup_marker_files(markerid='hi', marker_dir=self.tdir) + dsovf.setup_marker_files(markerid="hi", marker_dir=self.tdir) self.assertTrue(os.path.exists(markerfilepath)) @@ -223,233 +241,298 @@ class TestDatasourceOVF(CiTestCase): def test_get_data_false_on_none_dmi_data(self): """When dmi for system-product-name is None, get_data returns False.""" - paths = Paths({'cloud_dir': self.tdir}) + paths = Paths({"cloud_dir": self.tdir}) ds = self.datasource(sys_cfg={}, distro={}, paths=paths) retcode = wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': None, - 'transport_iso9660': NOT_FOUND, - 'transport_vmware_guestinfo': NOT_FOUND}, - ds.get_data) - self.assertFalse(retcode, 'Expected False return from ds.get_data') + "cloudinit.sources.DataSourceOVF", + { + "dmi.read_dmi_data": None, + "transport_iso9660": NOT_FOUND, + "transport_vmware_guestinfo": NOT_FOUND, + }, + ds.get_data, + ) + self.assertFalse(retcode, "Expected False return from ds.get_data") self.assertIn( - 'DEBUG: No system-product-name found', self.logs.getvalue()) + "DEBUG: No system-product-name found", self.logs.getvalue() + ) def test_get_data_vmware_customization_disabled(self): """When vmware customization is disabled via sys_cfg and allow_raw_data is disabled via ds_cfg, log a message. """ - paths = Paths({'cloud_dir': self.tdir}) + paths = Paths({"cloud_dir": self.tdir}) ds = self.datasource( - sys_cfg={'disable_vmware_customization': True, - 'datasource': {'OVF': {'allow_raw_data': False}}}, - distro={}, paths=paths) - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ + sys_cfg={ + "disable_vmware_customization": True, + "datasource": {"OVF": {"allow_raw_data": False}}, + }, + distro={}, + paths=paths, + ) + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ [MISC] MARKER-ID = 12345345 - """) + """ + ) util.write_file(conf_file, conf_content) retcode = wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'transport_iso9660': NOT_FOUND, - 'transport_vmware_guestinfo': NOT_FOUND, - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file}, - ds.get_data) - self.assertFalse(retcode, 'Expected False return from ds.get_data') + "cloudinit.sources.DataSourceOVF", + { + "dmi.read_dmi_data": "vmware", + "transport_iso9660": NOT_FOUND, + "transport_vmware_guestinfo": NOT_FOUND, + "util.del_dir": True, + "search_file": self.tdir, + "wait_for_imc_cfg_file": conf_file, + }, + ds.get_data, + ) + self.assertFalse(retcode, "Expected False return from ds.get_data") self.assertIn( - 'DEBUG: Customization for VMware platform is disabled.', - self.logs.getvalue()) + "DEBUG: Customization for VMware platform is disabled.", + self.logs.getvalue(), + ) def test_get_data_vmware_customization_sys_cfg_disabled(self): """When vmware customization is disabled via sys_cfg and no meta data is found, log a message. 
""" - paths = Paths({'cloud_dir': self.tdir}) + paths = Paths({"cloud_dir": self.tdir}) ds = self.datasource( - sys_cfg={'disable_vmware_customization': True, - 'datasource': {'OVF': {'allow_raw_data': True}}}, - distro={}, paths=paths) - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ + sys_cfg={ + "disable_vmware_customization": True, + "datasource": {"OVF": {"allow_raw_data": True}}, + }, + distro={}, + paths=paths, + ) + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ [MISC] MARKER-ID = 12345345 - """) + """ + ) util.write_file(conf_file, conf_content) retcode = wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'transport_iso9660': NOT_FOUND, - 'transport_vmware_guestinfo': NOT_FOUND, - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file}, - ds.get_data) - self.assertFalse(retcode, 'Expected False return from ds.get_data') + "cloudinit.sources.DataSourceOVF", + { + "dmi.read_dmi_data": "vmware", + "transport_iso9660": NOT_FOUND, + "transport_vmware_guestinfo": NOT_FOUND, + "util.del_dir": True, + "search_file": self.tdir, + "wait_for_imc_cfg_file": conf_file, + }, + ds.get_data, + ) + self.assertFalse(retcode, "Expected False return from ds.get_data") self.assertIn( - 'DEBUG: Customization using VMware config is disabled.', - self.logs.getvalue()) + "DEBUG: Customization using VMware config is disabled.", + self.logs.getvalue(), + ) def test_get_data_allow_raw_data_disabled(self): """When allow_raw_data is disabled via ds_cfg and meta data is found, log a message. """ - paths = Paths({'cloud_dir': self.tdir}) + paths = Paths({"cloud_dir": self.tdir}) ds = self.datasource( - sys_cfg={'disable_vmware_customization': False, - 'datasource': {'OVF': {'allow_raw_data': False}}}, - distro={}, paths=paths) + sys_cfg={ + "disable_vmware_customization": False, + "datasource": {"OVF": {"allow_raw_data": False}}, + }, + distro={}, + paths=paths, + ) # Prepare the conf file - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ [CLOUDINIT] METADATA = test-meta - """) + """ + ) util.write_file(conf_file, conf_content) # Prepare the meta data file - metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_file = self.tmp_path("test-meta", self.tdir) util.write_file(metadata_file, "This is meta data") retcode = wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'transport_iso9660': NOT_FOUND, - 'transport_vmware_guestinfo': NOT_FOUND, - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'collect_imc_file_paths': [self.tdir + '/test-meta', '', '']}, - ds.get_data) - self.assertFalse(retcode, 'Expected False return from ds.get_data') + "cloudinit.sources.DataSourceOVF", + { + "dmi.read_dmi_data": "vmware", + "transport_iso9660": NOT_FOUND, + "transport_vmware_guestinfo": NOT_FOUND, + "util.del_dir": True, + "search_file": self.tdir, + "wait_for_imc_cfg_file": conf_file, + "collect_imc_file_paths": [self.tdir + "/test-meta", "", ""], + }, + ds.get_data, + ) + self.assertFalse(retcode, "Expected False return from ds.get_data") self.assertIn( - 'DEBUG: Customization using raw data is disabled.', - self.logs.getvalue()) + "DEBUG: Customization using raw data is disabled.", + self.logs.getvalue(), + ) def test_get_data_vmware_customization_enabled(self): """When cloud-init workflow 
for vmware is enabled via sys_cfg, log a message. """ - paths = Paths({'cloud_dir': self.tdir}) + paths = Paths({"cloud_dir": self.tdir}) ds = self.datasource( - sys_cfg={'disable_vmware_customization': False}, distro={}, - paths=paths) - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ + sys_cfg={"disable_vmware_customization": False}, + distro={}, + paths=paths, + ) + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ [CUSTOM-SCRIPT] SCRIPT-NAME = test-script [MISC] MARKER-ID = 12345345 - """) + """ + ) util.write_file(conf_file, conf_content) - with mock.patch(MPATH + 'get_tools_config', return_value='true'): + with mock.patch(MPATH + "get_tools_config", return_value="true"): with self.assertRaises(CustomScriptNotFound) as context: wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'get_nics_to_enable': ''}, - ds.get_data) - customscript = self.tmp_path('test-script', self.tdir) - self.assertIn('Script %s not found!!' % customscript, - str(context.exception)) + "cloudinit.sources.DataSourceOVF", + { + "dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "search_file": self.tdir, + "wait_for_imc_cfg_file": conf_file, + "get_nics_to_enable": "", + }, + ds.get_data, + ) + customscript = self.tmp_path("test-script", self.tdir) + self.assertIn( + "Script %s not found!!" % customscript, str(context.exception) + ) def test_get_data_cust_script_disabled(self): """If custom script is disabled by VMware tools configuration, raise a RuntimeError. """ - paths = Paths({'cloud_dir': self.tdir}) + paths = Paths({"cloud_dir": self.tdir}) ds = self.datasource( - sys_cfg={'disable_vmware_customization': False}, distro={}, - paths=paths) + sys_cfg={"disable_vmware_customization": False}, + distro={}, + paths=paths, + ) # Prepare the conf file - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ [CUSTOM-SCRIPT] SCRIPT-NAME = test-script [MISC] MARKER-ID = 12345346 - """) + """ + ) util.write_file(conf_file, conf_content) # Prepare the custom script - customscript = self.tmp_path('test-script', self.tdir) + customscript = self.tmp_path("test-script", self.tdir) util.write_file(customscript, "This is the post cust script") - with mock.patch(MPATH + 'get_tools_config', return_value='invalid'): - with mock.patch(MPATH + 'set_customization_status', - return_value=('msg', b'')): + with mock.patch(MPATH + "get_tools_config", return_value="invalid"): + with mock.patch( + MPATH + "set_customization_status", return_value=("msg", b"") + ): with self.assertRaises(RuntimeError) as context: wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'get_nics_to_enable': ''}, - ds.get_data) - self.assertIn('Custom script is disabled by VM Administrator', - str(context.exception)) + "cloudinit.sources.DataSourceOVF", + { + "dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "search_file": self.tdir, + "wait_for_imc_cfg_file": conf_file, + "get_nics_to_enable": "", + }, + ds.get_data, + ) + self.assertIn( + "Custom script is disabled by VM Administrator", + str(context.exception), + ) def test_get_data_cust_script_enabled(self): """If custom script is enabled by VMware tools configuration, execute the
script. """ - paths = Paths({'cloud_dir': self.tdir}) + paths = Paths({"cloud_dir": self.tdir}) ds = self.datasource( - sys_cfg={'disable_vmware_customization': False}, distro={}, - paths=paths) + sys_cfg={"disable_vmware_customization": False}, + distro={}, + paths=paths, + ) # Prepare the conf file - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ [CUSTOM-SCRIPT] SCRIPT-NAME = test-script [MISC] MARKER-ID = 12345346 - """) + """ + ) util.write_file(conf_file, conf_content) # Mock custom script is enabled by return true when calling # get_tools_config - with mock.patch(MPATH + 'get_tools_config', return_value="true"): - with mock.patch(MPATH + 'set_customization_status', - return_value=('msg', b'')): + with mock.patch(MPATH + "get_tools_config", return_value="true"): + with mock.patch( + MPATH + "set_customization_status", return_value=("msg", b"") + ): with self.assertRaises(CustomScriptNotFound) as context: wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'get_nics_to_enable': ''}, - ds.get_data) + "cloudinit.sources.DataSourceOVF", + { + "dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "search_file": self.tdir, + "wait_for_imc_cfg_file": conf_file, + "get_nics_to_enable": "", + }, + ds.get_data, + ) # Verify custom script is trying to be executed - customscript = self.tmp_path('test-script', self.tdir) - self.assertIn('Script %s not found!!' % customscript, - str(context.exception)) + customscript = self.tmp_path("test-script", self.tdir) + self.assertIn( + "Script %s not found!!" % customscript, str(context.exception) + ) def test_get_data_force_run_post_script_is_yes(self): """If DEFAULT-RUN-POST-CUST-SCRIPT is yes, custom script could run if enable-custom-scripts is not defined in VM Tools configuration """ - paths = Paths({'cloud_dir': self.tdir}) + paths = Paths({"cloud_dir": self.tdir}) ds = self.datasource( - sys_cfg={'disable_vmware_customization': False}, distro={}, - paths=paths) + sys_cfg={"disable_vmware_customization": False}, + distro={}, + paths=paths, + ) # Prepare the conf file - conf_file = self.tmp_path('test-cust', self.tdir) + conf_file = self.tmp_path("test-cust", self.tdir) # set DEFAULT-RUN-POST-CUST-SCRIPT = yes so that enable-custom-scripts # default value is TRUE - conf_content = dedent("""\ + conf_content = dedent( + """\ [CUSTOM-SCRIPT] SCRIPT-NAME = test-script [MISC] MARKER-ID = 12345346 DEFAULT-RUN-POST-CUST-SCRIPT = yes - """) + """ + ) util.write_file(conf_file, conf_content) # Mock get_tools_config(section, key, defaultVal) to return @@ -457,81 +540,89 @@ class TestDatasourceOVF(CiTestCase): def my_get_tools_config(*args, **kwargs): return args[2] - with mock.patch(MPATH + 'get_tools_config', - side_effect=my_get_tools_config): - with mock.patch(MPATH + 'set_customization_status', - return_value=('msg', b'')): + with mock.patch( + MPATH + "get_tools_config", side_effect=my_get_tools_config + ): + with mock.patch( + MPATH + "set_customization_status", return_value=("msg", b"") + ): with self.assertRaises(CustomScriptNotFound) as context: wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'get_nics_to_enable': ''}, - ds.get_data) + "cloudinit.sources.DataSourceOVF", + { + 
"dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "search_file": self.tdir, + "wait_for_imc_cfg_file": conf_file, + "get_nics_to_enable": "", + }, + ds.get_data, + ) # Verify custom script still runs although it is # disabled by VMware Tools - customscript = self.tmp_path('test-script', self.tdir) - self.assertIn('Script %s not found!!' % customscript, - str(context.exception)) + customscript = self.tmp_path("test-script", self.tdir) + self.assertIn( + "Script %s not found!!" % customscript, str(context.exception) + ) def test_get_data_non_vmware_seed_platform_info(self): """Platform info properly reports when on non-vmware platforms.""" - paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir}) + paths = Paths({"cloud_dir": self.tdir, "run_dir": self.tdir}) # Write ovf-env.xml seed file - seed_dir = self.tmp_path('seed', dir=self.tdir) - ovf_env = self.tmp_path('ovf-env.xml', dir=seed_dir) + seed_dir = self.tmp_path("seed", dir=self.tdir) + ovf_env = self.tmp_path("ovf-env.xml", dir=seed_dir) util.write_file(ovf_env, OVF_ENV_CONTENT) ds = self.datasource(sys_cfg={}, distro={}, paths=paths) - self.assertEqual('ovf', ds.cloud_name) - self.assertEqual('ovf', ds.platform_type) - with mock.patch(MPATH + 'dmi.read_dmi_data', return_value='!VMware'): - with mock.patch(MPATH + 'transport_vmware_guestinfo') as m_guestd: - with mock.patch(MPATH + 'transport_iso9660') as m_iso9660: + self.assertEqual("ovf", ds.cloud_name) + self.assertEqual("ovf", ds.platform_type) + with mock.patch(MPATH + "dmi.read_dmi_data", return_value="!VMware"): + with mock.patch(MPATH + "transport_vmware_guestinfo") as m_guestd: + with mock.patch(MPATH + "transport_iso9660") as m_iso9660: m_iso9660.return_value = NOT_FOUND m_guestd.return_value = NOT_FOUND self.assertTrue(ds.get_data()) self.assertEqual( - 'ovf (%s/seed/ovf-env.xml)' % self.tdir, - ds.subplatform) + "ovf (%s/seed/ovf-env.xml)" % self.tdir, ds.subplatform + ) def test_get_data_vmware_seed_platform_info(self): """Platform info properly reports when on VMware platform.""" - paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir}) + paths = Paths({"cloud_dir": self.tdir, "run_dir": self.tdir}) # Write ovf-env.xml seed file - seed_dir = self.tmp_path('seed', dir=self.tdir) - ovf_env = self.tmp_path('ovf-env.xml', dir=seed_dir) + seed_dir = self.tmp_path("seed", dir=self.tdir) + ovf_env = self.tmp_path("ovf-env.xml", dir=seed_dir) util.write_file(ovf_env, OVF_ENV_CONTENT) ds = self.datasource(sys_cfg={}, distro={}, paths=paths) - self.assertEqual('ovf', ds.cloud_name) - self.assertEqual('ovf', ds.platform_type) - with mock.patch(MPATH + 'dmi.read_dmi_data', return_value='VMWare'): - with mock.patch(MPATH + 'transport_vmware_guestinfo') as m_guestd: - with mock.patch(MPATH + 'transport_iso9660') as m_iso9660: + self.assertEqual("ovf", ds.cloud_name) + self.assertEqual("ovf", ds.platform_type) + with mock.patch(MPATH + "dmi.read_dmi_data", return_value="VMWare"): + with mock.patch(MPATH + "transport_vmware_guestinfo") as m_guestd: + with mock.patch(MPATH + "transport_iso9660") as m_iso9660: m_iso9660.return_value = NOT_FOUND m_guestd.return_value = NOT_FOUND self.assertTrue(ds.get_data()) self.assertEqual( - 'vmware (%s/seed/ovf-env.xml)' % self.tdir, - ds.subplatform) + "vmware (%s/seed/ovf-env.xml)" % self.tdir, + ds.subplatform, + ) - @mock.patch('cloudinit.subp.subp') - @mock.patch('cloudinit.sources.DataSource.persist_instance_data') + @mock.patch("cloudinit.subp.subp") + @mock.patch("cloudinit.sources.DataSource.persist_instance_data") def 
test_get_data_vmware_guestinfo_with_network_config( self, m_persist, m_subp ): self._test_get_data_with_network_config(guestinfo=False, iso=True) - @mock.patch('cloudinit.subp.subp') - @mock.patch('cloudinit.sources.DataSource.persist_instance_data') + @mock.patch("cloudinit.subp.subp") + @mock.patch("cloudinit.sources.DataSource.persist_instance_data") def test_get_data_iso9660_with_network_config(self, m_persist, m_subp): self._test_get_data_with_network_config(guestinfo=True, iso=False) def _test_get_data_with_network_config(self, guestinfo, iso): - network_config = dedent("""\ + network_config = dedent( + """\ network: version: 2 ethernets: @@ -547,50 +638,69 @@ class TestDatasourceOVF(CiTestCase): dhcp4: false addresses: - 10.10.10.1/24 - """) + """ + ) network_config_b64 = base64.b64encode(network_config.encode()).decode() - props = {"network-config": network_config_b64, - "password": "passw0rd", - "instance-id": "inst-001"} + props = { + "network-config": network_config_b64, + "password": "passw0rd", + "instance-id": "inst-001", + } env = fill_properties(props) - paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir}) + paths = Paths({"cloud_dir": self.tdir, "run_dir": self.tdir}) ds = self.datasource(sys_cfg={}, distro={}, paths=paths) - with mock.patch(MPATH + 'transport_vmware_guestinfo', - return_value=env if guestinfo else NOT_FOUND): - with mock.patch(MPATH + 'transport_iso9660', - return_value=env if iso else NOT_FOUND): + with mock.patch( + MPATH + "transport_vmware_guestinfo", + return_value=env if guestinfo else NOT_FOUND, + ): + with mock.patch( + MPATH + "transport_iso9660", + return_value=env if iso else NOT_FOUND, + ): self.assertTrue(ds.get_data()) - self.assertEqual('inst-001', ds.metadata['instance-id']) + self.assertEqual("inst-001", ds.metadata["instance-id"]) self.assertEqual( - {'version': 2, 'ethernets': - {'nics': - {'nameservers': - {'addresses': ['127.0.0.53'], - 'search': ['vmware.com']}, - 'match': {'name': 'eth*'}, - 'gateway4': '10.10.10.253', - 'dhcp4': False, - 'addresses': ['10.10.10.1/24']}}}, - ds.network_config) + { + "version": 2, + "ethernets": { + "nics": { + "nameservers": { + "addresses": ["127.0.0.53"], + "search": ["vmware.com"], + }, + "match": {"name": "eth*"}, + "gateway4": "10.10.10.253", + "dhcp4": False, + "addresses": ["10.10.10.1/24"], + } + }, + }, + ds.network_config, + ) def test_get_data_cloudinit_metadata_json(self): """Test metadata can be loaded to cloud-init metadata and network. The metadata format is json. 
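A quick aside on the wrap_and_call helper that appears in hunk after hunk here: it patches a batch of attributes on one module, invokes the function under test, and unwinds everything afterwards, which is what lets these OVF tests stub dmi.read_dmi_data, search_file and friends in a single call. The real helper lives in cloud-init's test helpers and also accepts richer mock specs; the sketch below is a simplified reconstruction in which every mapped value becomes a plain return_value:

    import contextlib
    from unittest import mock

    def wrap_and_call(prefix, mocks, func, *args, **kwargs):
        # Patch each "<prefix>.<key>" so it returns the mapped value,
        # call func, then restore all originals (simplified sketch).
        with contextlib.ExitStack() as stack:
            for key, value in mocks.items():
                stack.enter_context(
                    mock.patch(prefix + "." + key, return_value=value)
                )
            return func(*args, **kwargs)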
""" - paths = Paths({'cloud_dir': self.tdir}) + paths = Paths({"cloud_dir": self.tdir}) ds = self.datasource( - sys_cfg={'disable_vmware_customization': True}, distro={}, - paths=paths) + sys_cfg={"disable_vmware_customization": True}, + distro={}, + paths=paths, + ) # Prepare the conf file - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ [CLOUDINIT] METADATA = test-meta - """) + """ + ) util.write_file(conf_file, conf_content) # Prepare the meta data file - metadata_file = self.tmp_path('test-meta', self.tdir) - metadata_content = dedent("""\ + metadata_file = self.tmp_path("test-meta", self.tdir) + metadata_content = dedent( + """\ { "instance-id": "cloud-vm", "local-hostname": "my-host.domain.com", @@ -606,45 +716,59 @@ class TestDatasourceOVF(CiTestCase): } } } - """) + """ + ) util.write_file(metadata_file, metadata_content) - with mock.patch(MPATH + 'set_customization_status', - return_value=('msg', b'')): + with mock.patch( + MPATH + "set_customization_status", return_value=("msg", b"") + ): result = wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'collect_imc_file_paths': [self.tdir + '/test-meta', '', ''], - 'get_nics_to_enable': ''}, - ds._get_data) + "cloudinit.sources.DataSourceOVF", + { + "dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "search_file": self.tdir, + "wait_for_imc_cfg_file": conf_file, + "collect_imc_file_paths": [ + self.tdir + "/test-meta", + "", + "", + ], + "get_nics_to_enable": "", + }, + ds._get_data, + ) self.assertTrue(result) - self.assertEqual("cloud-vm", ds.metadata['instance-id']) - self.assertEqual("my-host.domain.com", ds.metadata['local-hostname']) - self.assertEqual(2, ds.network_config['version']) - self.assertTrue(ds.network_config['ethernets']['eths']['dhcp4']) + self.assertEqual("cloud-vm", ds.metadata["instance-id"]) + self.assertEqual("my-host.domain.com", ds.metadata["local-hostname"]) + self.assertEqual(2, ds.network_config["version"]) + self.assertTrue(ds.network_config["ethernets"]["eths"]["dhcp4"]) def test_get_data_cloudinit_metadata_yaml(self): """Test metadata can be loaded to cloud-init metadata and network. The metadata format is yaml. 
""" - paths = Paths({'cloud_dir': self.tdir}) + paths = Paths({"cloud_dir": self.tdir}) ds = self.datasource( - sys_cfg={'disable_vmware_customization': True}, distro={}, - paths=paths) + sys_cfg={"disable_vmware_customization": True}, + distro={}, + paths=paths, + ) # Prepare the conf file - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ [CLOUDINIT] METADATA = test-meta - """) + """ + ) util.write_file(conf_file, conf_content) # Prepare the meta data file - metadata_file = self.tmp_path('test-meta', self.tdir) - metadata_content = dedent("""\ + metadata_file = self.tmp_path("test-meta", self.tdir) + metadata_content = dedent( + """\ instance-id: cloud-vm local-hostname: my-host.domain.com network: @@ -654,116 +778,147 @@ class TestDatasourceOVF(CiTestCase): match: name: ens* dhcp4: yes - """) + """ + ) util.write_file(metadata_file, metadata_content) - with mock.patch(MPATH + 'set_customization_status', - return_value=('msg', b'')): + with mock.patch( + MPATH + "set_customization_status", return_value=("msg", b"") + ): result = wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'collect_imc_file_paths': [self.tdir + '/test-meta', '', ''], - 'get_nics_to_enable': ''}, - ds._get_data) + "cloudinit.sources.DataSourceOVF", + { + "dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "search_file": self.tdir, + "wait_for_imc_cfg_file": conf_file, + "collect_imc_file_paths": [ + self.tdir + "/test-meta", + "", + "", + ], + "get_nics_to_enable": "", + }, + ds._get_data, + ) self.assertTrue(result) - self.assertEqual("cloud-vm", ds.metadata['instance-id']) - self.assertEqual("my-host.domain.com", ds.metadata['local-hostname']) - self.assertEqual(2, ds.network_config['version']) - self.assertTrue(ds.network_config['ethernets']['nics']['dhcp4']) + self.assertEqual("cloud-vm", ds.metadata["instance-id"]) + self.assertEqual("my-host.domain.com", ds.metadata["local-hostname"]) + self.assertEqual(2, ds.network_config["version"]) + self.assertTrue(ds.network_config["ethernets"]["nics"]["dhcp4"]) def test_get_data_cloudinit_metadata_not_valid(self): - """Test metadata is not JSON or YAML format. 
- """ - paths = Paths({'cloud_dir': self.tdir}) + """Test metadata is not JSON or YAML format.""" + paths = Paths({"cloud_dir": self.tdir}) ds = self.datasource( - sys_cfg={'disable_vmware_customization': True}, distro={}, - paths=paths) + sys_cfg={"disable_vmware_customization": True}, + distro={}, + paths=paths, + ) # Prepare the conf file - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ [CLOUDINIT] METADATA = test-meta - """) + """ + ) util.write_file(conf_file, conf_content) # Prepare the meta data file - metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_file = self.tmp_path("test-meta", self.tdir) metadata_content = "[This is not json or yaml format]a=b" util.write_file(metadata_file, metadata_content) - with mock.patch(MPATH + 'set_customization_status', - return_value=('msg', b'')): + with mock.patch( + MPATH + "set_customization_status", return_value=("msg", b"") + ): with self.assertRaises(YAMLError) as context: wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'collect_imc_file_paths': [ - self.tdir + '/test-meta', '', '' - ], - 'get_nics_to_enable': ''}, - ds.get_data) - - self.assertIn("expected '<document start>', but found '<scalar>'", - str(context.exception)) + "cloudinit.sources.DataSourceOVF", + { + "dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "search_file": self.tdir, + "wait_for_imc_cfg_file": conf_file, + "collect_imc_file_paths": [ + self.tdir + "/test-meta", + "", + "", + ], + "get_nics_to_enable": "", + }, + ds.get_data, + ) + + self.assertIn( + "expected '<document start>', but found '<scalar>'", + str(context.exception), + ) def test_get_data_cloudinit_metadata_not_found(self): - """Test metadata file can't be found. - """ - paths = Paths({'cloud_dir': self.tdir}) + """Test metadata file can't be found.""" + paths = Paths({"cloud_dir": self.tdir}) ds = self.datasource( - sys_cfg={'disable_vmware_customization': True}, distro={}, - paths=paths) + sys_cfg={"disable_vmware_customization": True}, + distro={}, + paths=paths, + ) # Prepare the conf file - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ [CLOUDINIT] METADATA = test-meta - """) + """ + ) util.write_file(conf_file, conf_content) # Don't prepare the meta data file - with mock.patch(MPATH + 'set_customization_status', - return_value=('msg', b'')): + with mock.patch( + MPATH + "set_customization_status", return_value=("msg", b"") + ): with self.assertRaises(FileNotFoundError) as context: wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'get_nics_to_enable': ''}, - ds.get_data) + "cloudinit.sources.DataSourceOVF", + { + "dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "search_file": self.tdir, + "wait_for_imc_cfg_file": conf_file, + "get_nics_to_enable": "", + }, + ds.get_data, + ) - self.assertIn('is not found', str(context.exception)) + self.assertIn("is not found", str(context.exception)) def test_get_data_cloudinit_userdata(self): - """Test user data can be loaded to cloud-init user data. 
- """ - paths = Paths({'cloud_dir': self.tdir}) + """Test user data can be loaded to cloud-init user data.""" + paths = Paths({"cloud_dir": self.tdir}) ds = self.datasource( - sys_cfg={'disable_vmware_customization': False}, distro={}, - paths=paths) + sys_cfg={"disable_vmware_customization": False}, + distro={}, + paths=paths, + ) # Prepare the conf file - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ [CLOUDINIT] METADATA = test-meta USERDATA = test-user - """) + """ + ) util.write_file(conf_file, conf_content) # Prepare the meta data file - metadata_file = self.tmp_path('test-meta', self.tdir) - metadata_content = dedent("""\ + metadata_file = self.tmp_path("test-meta", self.tdir) + metadata_content = dedent( + """\ instance-id: cloud-vm local-hostname: my-host.domain.com network: @@ -773,51 +928,63 @@ class TestDatasourceOVF(CiTestCase): match: name: ens* dhcp4: yes - """) + """ + ) util.write_file(metadata_file, metadata_content) # Prepare the user data file - userdata_file = self.tmp_path('test-user', self.tdir) + userdata_file = self.tmp_path("test-user", self.tdir) userdata_content = "This is the user data" util.write_file(userdata_file, userdata_content) - with mock.patch(MPATH + 'set_customization_status', - return_value=('msg', b'')): + with mock.patch( + MPATH + "set_customization_status", return_value=("msg", b"") + ): result = wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'collect_imc_file_paths': [self.tdir + '/test-meta', - self.tdir + '/test-user', ''], - 'get_nics_to_enable': ''}, - ds._get_data) + "cloudinit.sources.DataSourceOVF", + { + "dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "search_file": self.tdir, + "wait_for_imc_cfg_file": conf_file, + "collect_imc_file_paths": [ + self.tdir + "/test-meta", + self.tdir + "/test-user", + "", + ], + "get_nics_to_enable": "", + }, + ds._get_data, + ) self.assertTrue(result) - self.assertEqual("cloud-vm", ds.metadata['instance-id']) + self.assertEqual("cloud-vm", ds.metadata["instance-id"]) self.assertEqual(userdata_content, ds.userdata_raw) def test_get_data_cloudinit_userdata_not_found(self): - """Test userdata file can't be found. 
- """ - paths = Paths({'cloud_dir': self.tdir}) + """Test userdata file can't be found.""" + paths = Paths({"cloud_dir": self.tdir}) ds = self.datasource( - sys_cfg={'disable_vmware_customization': True}, distro={}, - paths=paths) + sys_cfg={"disable_vmware_customization": True}, + distro={}, + paths=paths, + ) # Prepare the conf file - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ + conf_file = self.tmp_path("test-cust", self.tdir) + conf_content = dedent( + """\ [CLOUDINIT] METADATA = test-meta USERDATA = test-user - """) + """ + ) util.write_file(conf_file, conf_content) # Prepare the meta data file - metadata_file = self.tmp_path('test-meta', self.tdir) - metadata_content = dedent("""\ + metadata_file = self.tmp_path("test-meta", self.tdir) + metadata_content = dedent( + """\ instance-id: cloud-vm local-hostname: my-host.domain.com network: @@ -827,45 +994,49 @@ class TestDatasourceOVF(CiTestCase): match: name: ens* dhcp4: yes - """) + """ + ) util.write_file(metadata_file, metadata_content) # Don't prepare the user data file - with mock.patch(MPATH + 'set_customization_status', - return_value=('msg', b'')): + with mock.patch( + MPATH + "set_customization_status", return_value=("msg", b"") + ): with self.assertRaises(FileNotFoundError) as context: wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'get_nics_to_enable': ''}, - ds.get_data) + "cloudinit.sources.DataSourceOVF", + { + "dmi.read_dmi_data": "vmware", + "util.del_dir": True, + "search_file": self.tdir, + "wait_for_imc_cfg_file": conf_file, + "get_nics_to_enable": "", + }, + ds.get_data, + ) - self.assertIn('is not found', str(context.exception)) + self.assertIn("is not found", str(context.exception)) class TestTransportIso9660(CiTestCase): - def setUp(self): super(TestTransportIso9660, self).setUp() - self.add_patch('cloudinit.util.find_devs_with', - 'm_find_devs_with') - self.add_patch('cloudinit.util.mounts', 'm_mounts') - self.add_patch('cloudinit.util.mount_cb', 'm_mount_cb') - self.add_patch('cloudinit.sources.DataSourceOVF.get_ovf_env', - 'm_get_ovf_env') - self.m_get_ovf_env.return_value = ('myfile', 'mycontent') + self.add_patch("cloudinit.util.find_devs_with", "m_find_devs_with") + self.add_patch("cloudinit.util.mounts", "m_mounts") + self.add_patch("cloudinit.util.mount_cb", "m_mount_cb") + self.add_patch( + "cloudinit.sources.DataSourceOVF.get_ovf_env", "m_get_ovf_env" + ) + self.m_get_ovf_env.return_value = ("myfile", "mycontent") def test_find_already_mounted(self): """Check we call get_ovf_env from on matching mounted devices""" mounts = { - '/dev/sr9': { - 'fstype': 'iso9660', - 'mountpoint': 'wark/media/sr9', - 'opts': 'ro', + "/dev/sr9": { + "fstype": "iso9660", + "mountpoint": "wark/media/sr9", + "opts": "ro", } } self.m_mounts.return_value = mounts @@ -875,33 +1046,34 @@ class TestTransportIso9660(CiTestCase): def test_find_already_mounted_skips_non_iso9660(self): """Check we call get_ovf_env ignoring non iso9660""" mounts = { - '/dev/xvdb': { - 'fstype': 'vfat', - 'mountpoint': 'wark/foobar', - 'opts': 'defaults,noatime', + "/dev/xvdb": { + "fstype": "vfat", + "mountpoint": "wark/foobar", + "opts": "defaults,noatime", + }, + "/dev/xvdc": { + "fstype": "iso9660", + "mountpoint": "wark/media/sr9", + "opts": "ro", }, - '/dev/xvdc': { - 'fstype': 'iso9660', - 'mountpoint': 'wark/media/sr9', - 'opts': 'ro', - } } # We use an OrderedDict here to ensure we 
check xvdb before xvdc # as we're not mocking the regex matching, however, if we place # an entry in the results then we can be reasonably sure that # we're skipping an entry which fails to match. - self.m_mounts.return_value = ( - OrderedDict(sorted(mounts.items(), key=lambda t: t[0]))) + self.m_mounts.return_value = OrderedDict( + sorted(mounts.items(), key=lambda t: t[0]) + ) self.assertEqual("mycontent", dsovf.transport_iso9660()) def test_find_already_mounted_matches_kname(self): """Check we dont regex match on basename of the device""" mounts = { - '/dev/foo/bar/xvdc': { - 'fstype': 'iso9660', - 'mountpoint': 'wark/media/sr9', - 'opts': 'ro', + "/dev/foo/bar/xvdc": { + "fstype": "iso9660", + "mountpoint": "wark/media/sr9", + "opts": "ro", } } # we're skipping an entry which fails to match. @@ -912,28 +1084,33 @@ class TestTransportIso9660(CiTestCase): def test_mount_cb_called_on_blkdevs_with_iso9660(self): """Check we call mount_cb on blockdevs with iso9660 only""" self.m_mounts.return_value = {} - self.m_find_devs_with.return_value = ['/dev/sr0'] + self.m_find_devs_with.return_value = ["/dev/sr0"] self.m_mount_cb.return_value = ("myfile", "mycontent") self.assertEqual("mycontent", dsovf.transport_iso9660()) self.m_mount_cb.assert_called_with( - "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660") + "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660" + ) def test_mount_cb_called_on_blkdevs_with_iso9660_check_regex(self): """Check we call mount_cb on blockdevs with iso9660 and match regex""" self.m_mounts.return_value = {} self.m_find_devs_with.return_value = [ - '/dev/abc', '/dev/my-cdrom', '/dev/sr0'] + "/dev/abc", + "/dev/my-cdrom", + "/dev/sr0", + ] self.m_mount_cb.return_value = ("myfile", "mycontent") self.assertEqual("mycontent", dsovf.transport_iso9660()) self.m_mount_cb.assert_called_with( - "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660") + "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660" + ) def test_mount_cb_not_called_no_matches(self): """Check we don't call mount_cb if nothing matches""" self.m_mounts.return_value = {} - self.m_find_devs_with.return_value = ['/dev/vg/myovf'] + self.m_find_devs_with.return_value = ["/dev/vg/myovf"] self.assertEqual(NOT_FOUND, dsovf.transport_iso9660()) self.assertEqual(0, self.m_mount_cb.call_count) @@ -941,76 +1118,85 @@ class TestTransportIso9660(CiTestCase): def test_mount_cb_called_require_iso_false(self): """Check we call mount_cb on blockdevs with require_iso=False""" self.m_mounts.return_value = {} - self.m_find_devs_with.return_value = ['/dev/xvdz'] + self.m_find_devs_with.return_value = ["/dev/xvdz"] self.m_mount_cb.return_value = ("myfile", "mycontent") self.assertEqual( - "mycontent", dsovf.transport_iso9660(require_iso=False)) + "mycontent", dsovf.transport_iso9660(require_iso=False) + ) self.m_mount_cb.assert_called_with( - "/dev/xvdz", dsovf.get_ovf_env, mtype=None) + "/dev/xvdz", dsovf.get_ovf_env, mtype=None + ) def test_maybe_cdrom_device_none(self): """Test maybe_cdrom_device returns False for none/empty input""" self.assertFalse(dsovf.maybe_cdrom_device(None)) - self.assertFalse(dsovf.maybe_cdrom_device('')) + self.assertFalse(dsovf.maybe_cdrom_device("")) def test_maybe_cdrom_device_non_string_exception(self): """Test maybe_cdrom_device raises ValueError on non-string types""" with self.assertRaises(ValueError): - dsovf.maybe_cdrom_device({'a': 'eleven'}) + dsovf.maybe_cdrom_device({"a": "eleven"}) def test_maybe_cdrom_device_false_on_multi_dir_paths(self): """Test maybe_cdrom_device is false on /dev[/.*]/* paths""" - 
self.assertFalse(dsovf.maybe_cdrom_device('/dev/foo/sr0')) - self.assertFalse(dsovf.maybe_cdrom_device('foo/sr0')) - self.assertFalse(dsovf.maybe_cdrom_device('../foo/sr0')) - self.assertFalse(dsovf.maybe_cdrom_device('../foo/sr0')) + self.assertFalse(dsovf.maybe_cdrom_device("/dev/foo/sr0")) + self.assertFalse(dsovf.maybe_cdrom_device("foo/sr0")) + self.assertFalse(dsovf.maybe_cdrom_device("../foo/sr0")) + self.assertFalse(dsovf.maybe_cdrom_device("../foo/sr0")) def test_maybe_cdrom_device_true_on_hd_partitions(self): """Test maybe_cdrom_device is false on /dev/hd[a-z][0-9]+ paths""" - self.assertTrue(dsovf.maybe_cdrom_device('/dev/hda1')) - self.assertTrue(dsovf.maybe_cdrom_device('hdz9')) + self.assertTrue(dsovf.maybe_cdrom_device("/dev/hda1")) + self.assertTrue(dsovf.maybe_cdrom_device("hdz9")) def test_maybe_cdrom_device_true_on_valid_relative_paths(self): """Test maybe_cdrom_device normalizes paths""" - self.assertTrue(dsovf.maybe_cdrom_device('/dev/wark/../sr9')) - self.assertTrue(dsovf.maybe_cdrom_device('///sr0')) - self.assertTrue(dsovf.maybe_cdrom_device('/sr0')) - self.assertTrue(dsovf.maybe_cdrom_device('//dev//hda')) + self.assertTrue(dsovf.maybe_cdrom_device("/dev/wark/../sr9")) + self.assertTrue(dsovf.maybe_cdrom_device("///sr0")) + self.assertTrue(dsovf.maybe_cdrom_device("/sr0")) + self.assertTrue(dsovf.maybe_cdrom_device("//dev//hda")) def test_maybe_cdrom_device_true_on_xvd_partitions(self): """Test maybe_cdrom_device returns true on xvd*""" - self.assertTrue(dsovf.maybe_cdrom_device('/dev/xvda')) - self.assertTrue(dsovf.maybe_cdrom_device('/dev/xvda1')) - self.assertTrue(dsovf.maybe_cdrom_device('xvdza1')) + self.assertTrue(dsovf.maybe_cdrom_device("/dev/xvda")) + self.assertTrue(dsovf.maybe_cdrom_device("/dev/xvda1")) + self.assertTrue(dsovf.maybe_cdrom_device("xvdza1")) @mock.patch(MPATH + "subp.which") @mock.patch(MPATH + "subp.subp") class TestTransportVmwareGuestinfo(CiTestCase): """Test the com.vmware.guestInfo transport implemented in - transport_vmware_guestinfo.""" + transport_vmware_guestinfo.""" - rpctool = 'vmware-rpctool' + rpctool = "vmware-rpctool" with_logs = True - rpctool_path = '/not/important/vmware-rpctool' + rpctool_path = "/not/important/vmware-rpctool" def test_without_vmware_rpctool_returns_notfound(self, m_subp, m_which): m_which.return_value = None self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) - self.assertEqual(0, m_subp.call_count, - "subp should not be called if no rpctool in path.") + self.assertEqual( + 0, + m_subp.call_count, + "subp should not be called if no rpctool in path.", + ) def test_notfound_on_exit_code_1(self, m_subp, m_which): """If vmware-rpctool exits 1, then must return not found.""" m_which.return_value = self.rpctool_path m_subp.side_effect = subp.ProcessExecutionError( - stdout="", stderr="No value found", exit_code=1, cmd=["unused"]) + stdout="", stderr="No value found", exit_code=1, cmd=["unused"] + ) self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) self.assertEqual(1, m_subp.call_count) - self.assertNotIn("WARNING", self.logs.getvalue(), - "exit code of 1 by rpctool should not cause warning.") + self.assertNotIn( + "WARNING", + self.logs.getvalue(), + "exit code of 1 by rpctool should not cause warning.", + ) def test_notfound_if_no_content_but_exit_zero(self, m_subp, m_which): """If vmware-rpctool exited 0 with no stdout is normal not-found. @@ -1020,7 +1206,7 @@ class TestTransportVmwareGuestinfo(CiTestCase): the case where it exited 0 and just wrote nothing to stdout. 
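Taken together, the rpctool tests above and below pin down a small contract: a missing vmware-rpctool binary or an exit code of exactly 1 ("No value found") is an ordinary not-found, any other failure deserves a logged warning, and an exit of 0 with empty stdout still counts as not-found. A sketch of a transport honouring that contract (the exact rpctool argument string is an assumption here; NOT_FOUND stands in for the module's own sentinel):

    import logging

    from cloudinit import subp

    LOG = logging.getLogger(__name__)
    NOT_FOUND = None  # stand-in for the module's sentinel

    def guestinfo_transport():
        if not subp.which("vmware-rpctool"):
            return NOT_FOUND  # tool absent: never shell out
        try:
            stdout, _ = subp.subp(
                ["vmware-rpctool", "info-get guestinfo.ovfEnv"]
            )
        except subp.ProcessExecutionError as e:
            if e.exit_code == 1:
                return NOT_FOUND  # normal "no value" case, no warning
            LOG.warning("Unexpected vmware-rpctool failure: %s", e)
            return NOT_FOUND
        return stdout if stdout else NOT_FOUND  # empty stdout: not found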
""" m_which.return_value = self.rpctool_path - m_subp.return_value = ('', '') + m_subp.return_value = ("", "") self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) self.assertEqual(1, m_subp.call_count) @@ -1028,19 +1214,24 @@ class TestTransportVmwareGuestinfo(CiTestCase): """If vmware-rpctool exits non zero or 1, warnings should be logged.""" m_which.return_value = self.rpctool_path m_subp.side_effect = subp.ProcessExecutionError( - stdout=None, stderr="No value found", exit_code=2, cmd=["unused"]) + stdout=None, stderr="No value found", exit_code=2, cmd=["unused"] + ) self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) self.assertEqual(1, m_subp.call_count) - self.assertIn("WARNING", self.logs.getvalue(), - "exit code of 2 by rpctool should log WARNING.") + self.assertIn( + "WARNING", + self.logs.getvalue(), + "exit code of 2 by rpctool should log WARNING.", + ) def test_found_when_guestinfo_present(self, m_subp, m_which): """When there is a ovf info, transport should return it.""" m_which.return_value = self.rpctool_path content = fill_properties({}) - m_subp.return_value = (content, '') + m_subp.return_value = (content, "") self.assertEqual(content, dsovf.transport_vmware_guestinfo()) self.assertEqual(1, m_subp.call_count) + # # vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_rbx.py b/tests/unittests/sources/test_rbx.py index c1294c92..475bf498 100644 --- a/tests/unittests/sources/test_rbx.py +++ b/tests/unittests/sources/test_rbx.py @@ -1,38 +1,42 @@ import json -from cloudinit import helpers -from cloudinit import distros +from cloudinit import distros, helpers, subp from cloudinit.sources import DataSourceRbxCloud as ds -from tests.unittests.helpers import mock, CiTestCase, populate_dir -from cloudinit import subp +from tests.unittests.helpers import CiTestCase, mock, populate_dir DS_PATH = "cloudinit.sources.DataSourceRbxCloud" -CRYPTO_PASS = "$6$uktth46t$FvpDzFD2iL9YNZIG1Epz7957hJqbH0f" \ - "QKhnzcfBcUhEodGAWRqTy7tYG4nEW7SUOYBjxOSFIQW5" \ - "tToyGP41.s1" +CRYPTO_PASS = ( + "$6$uktth46t$FvpDzFD2iL9YNZIG1Epz7957hJqbH0f" + "QKhnzcfBcUhEodGAWRqTy7tYG4nEW7SUOYBjxOSFIQW5" + "tToyGP41.s1" +) CLOUD_METADATA = { "vm": { "memory": 4, "cpu": 2, "name": "vm-image-builder", - "_id": "5beab44f680cffd11f0e60fc" + "_id": "5beab44f680cffd11f0e60fc", }, "additionalMetadata": { "username": "guru", "sshKeys": ["ssh-rsa ..."], - "password": { - "sha512": CRYPTO_PASS - } + "password": {"sha512": CRYPTO_PASS}, }, "disk": [ - {"size": 10, "type": "ssd", - "name": "vm-image-builder-os", - "_id": "5beab450680cffd11f0e60fe"}, - {"size": 2, "type": "ssd", - "name": "ubuntu-1804-bionic", - "_id": "5bef002c680cffd11f107590"} + { + "size": 10, + "type": "ssd", + "name": "vm-image-builder-os", + "_id": "5beab450680cffd11f0e60fe", + }, + { + "size": 2, + "type": "ssd", + "name": "ubuntu-1804-bionic", + "_id": "5bef002c680cffd11f107590", + }, ], "netadp": [ { @@ -44,12 +48,12 @@ CLOUD_METADATA = { "netmask": "255.255.248.0", "name": "public", "type": "public", - "_id": "5784e97be2627505227b578c" + "_id": "5784e97be2627505227b578c", }, "speed": 1000, "type": "hv", "macaddress": "00:15:5D:FF:0F:03", - "_id": "5beab450680cffd11f0e6102" + "_id": "5beab450680cffd11f0e6102", }, { "ip": [{"address": "10.209.78.11"}], @@ -60,21 +64,21 @@ CLOUD_METADATA = { "netmask": "255.255.255.0", "name": "network-determined-bardeen", "type": "private", - "_id": "5beaec64680cffd11f0e7c31" + "_id": "5beaec64680cffd11f0e7c31", }, "speed": 1000, "type": "hv", "macaddress": "00:15:5D:FF:0F:24", - "_id": 
"5bec18c6680cffd11f0f0d8b" - } + "_id": "5bec18c6680cffd11f0f0d8b", + }, ], - "dvddrive": [{"iso": {}}] + "dvddrive": [{"iso": {}}], } class TestRbxDataSource(CiTestCase): parsed_user = None - allowed_subp = ['bash'] + allowed_subp = ["bash"] def _fetch_distro(self, kind): cls = distros.fetch(kind) @@ -85,30 +89,30 @@ class TestRbxDataSource(CiTestCase): super(TestRbxDataSource, self).setUp() self.tmp = self.tmp_dir() self.paths = helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp} + {"cloud_dir": self.tmp, "run_dir": self.tmp} ) # defaults for few tests self.ds = ds.DataSourceRbxCloud self.seed_dir = self.paths.seed_dir - self.sys_cfg = {'datasource': {'RbxCloud': {'dsmode': 'local'}}} + self.sys_cfg = {"datasource": {"RbxCloud": {"dsmode": "local"}}} def test_seed_read_user_data_callback_empty_file(self): - populate_user_metadata(self.seed_dir, '') + populate_user_metadata(self.seed_dir, "") populate_cloud_metadata(self.seed_dir, {}) results = ds.read_user_data_callback(self.seed_dir) self.assertIsNone(results) def test_seed_read_user_data_callback_valid_disk(self): - populate_user_metadata(self.seed_dir, '') + populate_user_metadata(self.seed_dir, "") populate_cloud_metadata(self.seed_dir, CLOUD_METADATA) results = ds.read_user_data_callback(self.seed_dir) self.assertNotEqual(results, None) - self.assertTrue('userdata' in results) - self.assertTrue('metadata' in results) - self.assertTrue('cfg' in results) + self.assertTrue("userdata" in results) + self.assertTrue("metadata" in results) + self.assertTrue("cfg" in results) def test_seed_read_user_data_callback_userdata(self): userdata = "#!/bin/sh\nexit 1" @@ -118,121 +122,120 @@ class TestRbxDataSource(CiTestCase): results = ds.read_user_data_callback(self.seed_dir) self.assertNotEqual(results, None) - self.assertTrue('userdata' in results) - self.assertEqual(results['userdata'], userdata) + self.assertTrue("userdata" in results) + self.assertEqual(results["userdata"], userdata) def test_generate_network_config(self): expected = { - 'version': 1, - 'config': [ + "version": 1, + "config": [ { - 'subnets': [ - {'control': 'auto', - 'dns_nameservers': ['8.8.8.8', '8.8.4.4'], - 'netmask': '255.255.248.0', - 'address': '62.181.8.174', - 'type': 'static', 'gateway': '62.181.8.1'} + "subnets": [ + { + "control": "auto", + "dns_nameservers": ["8.8.8.8", "8.8.4.4"], + "netmask": "255.255.248.0", + "address": "62.181.8.174", + "type": "static", + "gateway": "62.181.8.1", + } ], - 'type': 'physical', - 'name': 'eth0', - 'mac_address': '00:15:5d:ff:0f:03' + "type": "physical", + "name": "eth0", + "mac_address": "00:15:5d:ff:0f:03", }, { - 'subnets': [ - {'control': 'auto', - 'dns_nameservers': ['9.9.9.9', '8.8.8.8'], - 'netmask': '255.255.255.0', - 'address': '10.209.78.11', - 'type': 'static', - 'gateway': '10.209.78.1'} + "subnets": [ + { + "control": "auto", + "dns_nameservers": ["9.9.9.9", "8.8.8.8"], + "netmask": "255.255.255.0", + "address": "10.209.78.11", + "type": "static", + "gateway": "10.209.78.1", + } ], - 'type': 'physical', - 'name': 'eth1', - 'mac_address': '00:15:5d:ff:0f:24' - } - ] + "type": "physical", + "name": "eth1", + "mac_address": "00:15:5d:ff:0f:24", + }, + ], } self.assertTrue( - ds.generate_network_config(CLOUD_METADATA['netadp']), - expected + ds.generate_network_config(CLOUD_METADATA["netadp"]), expected ) - @mock.patch(DS_PATH + '.subp.subp') + @mock.patch(DS_PATH + ".subp.subp") def test_gratuitous_arp_run_standard_arping(self, m_subp): """Test handle run arping & parameters.""" items = [ + {"destination": 
"172.17.0.2", "source": "172.16.6.104"}, { - 'destination': '172.17.0.2', - 'source': '172.16.6.104' - }, - { - 'destination': '172.17.0.2', - 'source': '172.16.6.104', + "destination": "172.17.0.2", + "source": "172.16.6.104", }, ] - ds.gratuitous_arp(items, self._fetch_distro('ubuntu')) - self.assertEqual([ - mock.call([ - 'arping', '-c', '2', '-S', - '172.16.6.104', '172.17.0.2' - ]), - mock.call([ - 'arping', '-c', '2', '-S', - '172.16.6.104', '172.17.0.2' - ]) - ], m_subp.call_args_list + ds.gratuitous_arp(items, self._fetch_distro("ubuntu")) + self.assertEqual( + [ + mock.call( + ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"] + ), + mock.call( + ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"] + ), + ], + m_subp.call_args_list, ) - @mock.patch(DS_PATH + '.subp.subp') + @mock.patch(DS_PATH + ".subp.subp") def test_handle_rhel_like_arping(self, m_subp): """Test handle on RHEL-like distros.""" items = [ { - 'source': '172.16.6.104', - 'destination': '172.17.0.2', + "source": "172.16.6.104", + "destination": "172.17.0.2", } ] - ds.gratuitous_arp(items, self._fetch_distro('fedora')) - self.assertEqual([ - mock.call( - ['arping', '-c', '2', '-s', '172.16.6.104', '172.17.0.2'] - )], - m_subp.call_args_list + ds.gratuitous_arp(items, self._fetch_distro("fedora")) + self.assertEqual( + [ + mock.call( + ["arping", "-c", "2", "-s", "172.16.6.104", "172.17.0.2"] + ) + ], + m_subp.call_args_list, ) @mock.patch( - DS_PATH + '.subp.subp', - side_effect=subp.ProcessExecutionError() + DS_PATH + ".subp.subp", side_effect=subp.ProcessExecutionError() ) def test_continue_on_arping_error(self, m_subp): """Continue when command error""" items = [ + {"destination": "172.17.0.2", "source": "172.16.6.104"}, { - 'destination': '172.17.0.2', - 'source': '172.16.6.104' - }, - { - 'destination': '172.17.0.2', - 'source': '172.16.6.104', + "destination": "172.17.0.2", + "source": "172.16.6.104", }, ] - ds.gratuitous_arp(items, self._fetch_distro('ubuntu')) - self.assertEqual([ - mock.call([ - 'arping', '-c', '2', '-S', - '172.16.6.104', '172.17.0.2' - ]), - mock.call([ - 'arping', '-c', '2', '-S', - '172.16.6.104', '172.17.0.2' - ]) - ], m_subp.call_args_list + ds.gratuitous_arp(items, self._fetch_distro("ubuntu")) + self.assertEqual( + [ + mock.call( + ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"] + ), + mock.call( + ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"] + ), + ], + m_subp.call_args_list, ) def populate_cloud_metadata(path, data): - populate_dir(path, {'cloud.json': json.dumps(data)}) + populate_dir(path, {"cloud.json": json.dumps(data)}) def populate_user_metadata(path, data): - populate_dir(path, {'user.data': data}) + populate_dir(path, {"user.data": data}) diff --git a/tests/unittests/sources/test_scaleway.py b/tests/unittests/sources/test_scaleway.py index 33ae26b8..d7e8b969 100644 --- a/tests/unittests/sources/test_scaleway.py +++ b/tests/unittests/sources/test_scaleway.py @@ -5,12 +5,9 @@ import json import httpretty import requests -from cloudinit import helpers -from cloudinit import settings -from cloudinit import sources +from cloudinit import helpers, settings, sources from cloudinit.sources import DataSourceScaleway - -from tests.unittests.helpers import mock, HttprettyTestCase, CiTestCase +from tests.unittests.helpers import CiTestCase, HttprettyTestCase, mock class DataResponses(object): @@ -24,11 +21,11 @@ class DataResponses(object): @staticmethod def rate_limited(method, uri, headers): - return 429, headers, '' + return 429, headers, "" 
@staticmethod def api_error(method, uri, headers): - return 500, headers, '' + return 500, headers, "" @classmethod def get_ok(cls, method, uri, headers): @@ -39,7 +36,7 @@ class DataResponses(object): """ No user data for this server. """ - return 404, headers, '' + return 404, headers, "" class MetadataResponses(object): @@ -48,18 +45,21 @@ class MetadataResponses(object): """ FAKE_METADATA = { - 'id': '00000000-0000-0000-0000-000000000000', - 'hostname': 'scaleway.host', - 'tags': [ + "id": "00000000-0000-0000-0000-000000000000", + "hostname": "scaleway.host", + "tags": [ "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", ], - 'ssh_public_keys': [{ - 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', - 'fingerprint': '2048 06:ae:... login (RSA)' - }, { - 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - 'fingerprint': '2048 06:ff:... login2 (RSA)' - }] + "ssh_public_keys": [ + { + "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA", + "fingerprint": "2048 06:ae:... login (RSA)", + }, + { + "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC", + "fingerprint": "2048 06:ff:... login2 (RSA)", + }, + ], } @classmethod @@ -68,46 +68,49 @@ class MetadataResponses(object): class TestOnScaleway(CiTestCase): - def setUp(self): super(TestOnScaleway, self).setUp() self.tmp = self.tmp_dir() def install_mocks(self, fake_dmi, fake_file_exists, fake_cmdline): mock, faked = fake_dmi - mock.return_value = 'Scaleway' if faked else 'Whatever' + mock.return_value = "Scaleway" if faked else "Whatever" mock, faked = fake_file_exists mock.return_value = faked mock, faked = fake_cmdline - mock.return_value = \ - 'initrd=initrd showopts scaleway nousb' if faked \ - else 'BOOT_IMAGE=/vmlinuz-3.11.0-26-generic' - - @mock.patch('cloudinit.util.get_cmdline') - @mock.patch('os.path.exists') - @mock.patch('cloudinit.dmi.read_dmi_data') - def test_not_on_scaleway(self, m_read_dmi_data, m_file_exists, - m_get_cmdline): + mock.return_value = ( + "initrd=initrd showopts scaleway nousb" + if faked + else "BOOT_IMAGE=/vmlinuz-3.11.0-26-generic" + ) + + @mock.patch("cloudinit.util.get_cmdline") + @mock.patch("os.path.exists") + @mock.patch("cloudinit.dmi.read_dmi_data") + def test_not_on_scaleway( + self, m_read_dmi_data, m_file_exists, m_get_cmdline + ): self.install_mocks( fake_dmi=(m_read_dmi_data, False), fake_file_exists=(m_file_exists, False), - fake_cmdline=(m_get_cmdline, False) + fake_cmdline=(m_get_cmdline, False), ) self.assertFalse(DataSourceScaleway.on_scaleway()) # When not on Scaleway, get_data() returns False. datasource = DataSourceScaleway.DataSourceScaleway( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}) + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) ) self.assertFalse(datasource.get_data()) - @mock.patch('cloudinit.util.get_cmdline') - @mock.patch('os.path.exists') - @mock.patch('cloudinit.dmi.read_dmi_data') - def test_on_scaleway_dmi(self, m_read_dmi_data, m_file_exists, - m_get_cmdline): + @mock.patch("cloudinit.util.get_cmdline") + @mock.patch("os.path.exists") + @mock.patch("cloudinit.dmi.read_dmi_data") + def test_on_scaleway_dmi( + self, m_read_dmi_data, m_file_exists, m_get_cmdline + ): """ dmidecode returns "Scaleway". 
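For readers new to httpretty: the response classes above work because register_uri accepts a callable body. httpretty invokes it once per request and expects a (status, headers, body) tuple back, which is how the 404, 429 and 500 behaviours get simulated without a live server. A self-contained sketch of the same pattern (the URL is a placeholder):

    import httpretty
    import requests

    @httpretty.activate
    def demo():
        def rate_limited(request, uri, response_headers):
            # Same tuple shape the response classes above return.
            return 429, response_headers, ""

        httpretty.register_uri(
            httpretty.GET, "http://metadata.example/conf", body=rate_limited
        )
        assert requests.get("http://metadata.example/conf").status_code == 429

    demo()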
""" @@ -115,37 +118,39 @@ class TestOnScaleway(CiTestCase): self.install_mocks( fake_dmi=(m_read_dmi_data, True), fake_file_exists=(m_file_exists, False), - fake_cmdline=(m_get_cmdline, False) + fake_cmdline=(m_get_cmdline, False), ) self.assertTrue(DataSourceScaleway.on_scaleway()) - @mock.patch('cloudinit.util.get_cmdline') - @mock.patch('os.path.exists') - @mock.patch('cloudinit.dmi.read_dmi_data') - def test_on_scaleway_var_run_scaleway(self, m_read_dmi_data, m_file_exists, - m_get_cmdline): + @mock.patch("cloudinit.util.get_cmdline") + @mock.patch("os.path.exists") + @mock.patch("cloudinit.dmi.read_dmi_data") + def test_on_scaleway_var_run_scaleway( + self, m_read_dmi_data, m_file_exists, m_get_cmdline + ): """ /var/run/scaleway exists. """ self.install_mocks( fake_dmi=(m_read_dmi_data, False), fake_file_exists=(m_file_exists, True), - fake_cmdline=(m_get_cmdline, False) + fake_cmdline=(m_get_cmdline, False), ) self.assertTrue(DataSourceScaleway.on_scaleway()) - @mock.patch('cloudinit.util.get_cmdline') - @mock.patch('os.path.exists') - @mock.patch('cloudinit.dmi.read_dmi_data') - def test_on_scaleway_cmdline(self, m_read_dmi_data, m_file_exists, - m_get_cmdline): + @mock.patch("cloudinit.util.get_cmdline") + @mock.patch("os.path.exists") + @mock.patch("cloudinit.dmi.read_dmi_data") + def test_on_scaleway_cmdline( + self, m_read_dmi_data, m_file_exists, m_get_cmdline + ): """ "scaleway" in /proc/cmdline. """ self.install_mocks( fake_dmi=(m_read_dmi_data, False), fake_file_exists=(m_file_exists, False), - fake_cmdline=(m_get_cmdline, True) + fake_cmdline=(m_get_cmdline, True), ) self.assertTrue(DataSourceScaleway.on_scaleway()) @@ -160,65 +165,86 @@ def get_source_address_adapter(*args, **kwargs): This function removes the bind on a privileged address, since anyway the HTTP call is mocked by httpretty. 
""" - kwargs.pop('source_address') + kwargs.pop("source_address") return requests.adapters.HTTPAdapter(*args, **kwargs) class TestDataSourceScaleway(HttprettyTestCase): - def setUp(self): tmp = self.tmp_dir() self.datasource = DataSourceScaleway.DataSourceScaleway( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': tmp}) + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": tmp}) ) super(TestDataSourceScaleway, self).setUp() - self.metadata_url = \ - DataSourceScaleway.BUILTIN_DS_CONFIG['metadata_url'] - self.userdata_url = \ - DataSourceScaleway.BUILTIN_DS_CONFIG['userdata_url'] - self.vendordata_url = \ - DataSourceScaleway.BUILTIN_DS_CONFIG['vendordata_url'] + self.metadata_url = DataSourceScaleway.BUILTIN_DS_CONFIG[ + "metadata_url" + ] + self.userdata_url = DataSourceScaleway.BUILTIN_DS_CONFIG[ + "userdata_url" + ] + self.vendordata_url = DataSourceScaleway.BUILTIN_DS_CONFIG[ + "vendordata_url" + ] - self.add_patch('cloudinit.sources.DataSourceScaleway.on_scaleway', - '_m_on_scaleway', return_value=True) self.add_patch( - 'cloudinit.sources.DataSourceScaleway.net.find_fallback_nic', - '_m_find_fallback_nic', return_value='scalewaynic0') - - @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') - @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', - get_source_address_adapter) - @mock.patch('cloudinit.util.get_cmdline') - @mock.patch('time.sleep', return_value=None) + "cloudinit.sources.DataSourceScaleway.on_scaleway", + "_m_on_scaleway", + return_value=True, + ) + self.add_patch( + "cloudinit.sources.DataSourceScaleway.net.find_fallback_nic", + "_m_find_fallback_nic", + return_value="scalewaynic0", + ) + + @mock.patch("cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4") + @mock.patch( + "cloudinit.sources.DataSourceScaleway.SourceAddressAdapter", + get_source_address_adapter, + ) + @mock.patch("cloudinit.util.get_cmdline") + @mock.patch("time.sleep", return_value=None) def test_metadata_ok(self, sleep, m_get_cmdline, dhcpv4): """ get_data() returns metadata, user data and vendor data. 
""" - m_get_cmdline.return_value = 'scaleway' + m_get_cmdline.return_value = "scaleway" # Make user data API return a valid response - httpretty.register_uri(httpretty.GET, self.metadata_url, - body=MetadataResponses.get_ok) - httpretty.register_uri(httpretty.GET, self.userdata_url, - body=DataResponses.get_ok) - httpretty.register_uri(httpretty.GET, self.vendordata_url, - body=DataResponses.get_ok) + httpretty.register_uri( + httpretty.GET, self.metadata_url, body=MetadataResponses.get_ok + ) + httpretty.register_uri( + httpretty.GET, self.userdata_url, body=DataResponses.get_ok + ) + httpretty.register_uri( + httpretty.GET, self.vendordata_url, body=DataResponses.get_ok + ) self.datasource.get_data() - self.assertEqual(self.datasource.get_instance_id(), - MetadataResponses.FAKE_METADATA['id']) - self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', - ].sort()) - self.assertEqual(self.datasource.get_hostname(), - MetadataResponses.FAKE_METADATA['hostname']) - self.assertEqual(self.datasource.get_userdata_raw(), - DataResponses.FAKE_USER_DATA) - self.assertEqual(self.datasource.get_vendordata_raw(), - DataResponses.FAKE_USER_DATA) + self.assertEqual( + self.datasource.get_instance_id(), + MetadataResponses.FAKE_METADATA["id"], + ) + self.assertEqual( + self.datasource.get_public_ssh_keys().sort(), + [ + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC", + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD", + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA", + ].sort(), + ) + self.assertEqual( + self.datasource.get_hostname(), + MetadataResponses.FAKE_METADATA["hostname"], + ) + self.assertEqual( + self.datasource.get_userdata_raw(), DataResponses.FAKE_USER_DATA + ) + self.assertEqual( + self.datasource.get_vendordata_raw(), DataResponses.FAKE_USER_DATA + ) self.assertIsNone(self.datasource.availability_zone) self.assertIsNone(self.datasource.region) self.assertEqual(sleep.call_count, 0) @@ -228,246 +254,273 @@ class TestDataSourceScaleway(HttprettyTestCase): get_public_ssh_keys() should return empty list if no ssh key are available """ - self.datasource.metadata['tags'] = [] - self.datasource.metadata['ssh_public_keys'] = [] + self.datasource.metadata["tags"] = [] + self.datasource.metadata["ssh_public_keys"] = [] self.assertEqual(self.datasource.get_public_ssh_keys(), []) def test_ssh_keys_only_tags(self): """ get_public_ssh_keys() should return list of keys available in tags """ - self.datasource.metadata['tags'] = [ + self.datasource.metadata["tags"] = [ "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABCCCCC", ] - self.datasource.metadata['ssh_public_keys'] = [] - self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - ].sort()) + self.datasource.metadata["ssh_public_keys"] = [] + self.assertEqual( + self.datasource.get_public_ssh_keys().sort(), + [ + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD", + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC", + ].sort(), + ) def test_ssh_keys_only_conf(self): """ get_public_ssh_keys() should return list of keys available in ssh_public_keys field """ - self.datasource.metadata['tags'] = [] - self.datasource.metadata['ssh_public_keys'] = [{ - 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', - 'fingerprint': '2048 06:ae:... 
login (RSA)' - }, { - 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - 'fingerprint': '2048 06:ff:... login2 (RSA)' - }] - self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', - ].sort()) + self.datasource.metadata["tags"] = [] + self.datasource.metadata["ssh_public_keys"] = [ + { + "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA", + "fingerprint": "2048 06:ae:... login (RSA)", + }, + { + "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC", + "fingerprint": "2048 06:ff:... login2 (RSA)", + }, + ] + self.assertEqual( + self.datasource.get_public_ssh_keys().sort(), + [ + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC", + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD", + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA", + ].sort(), + ) def test_ssh_keys_both(self): """ get_public_ssh_keys() should return a merge of keys available in ssh_public_keys and tags """ - self.datasource.metadata['tags'] = [ + self.datasource.metadata["tags"] = [ "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", ] - self.datasource.metadata['ssh_public_keys'] = [{ - 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', - 'fingerprint': '2048 06:ae:... login (RSA)' - }, { - 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - 'fingerprint': '2048 06:ff:... login2 (RSA)' - }] - self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', - ].sort()) - - @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') - @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', - get_source_address_adapter) - @mock.patch('cloudinit.util.get_cmdline') - @mock.patch('time.sleep', return_value=None) + self.datasource.metadata["ssh_public_keys"] = [ + { + "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA", + "fingerprint": "2048 06:ae:... login (RSA)", + }, + { + "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC", + "fingerprint": "2048 06:ff:... login2 (RSA)", + }, + ] + self.assertEqual( + self.datasource.get_public_ssh_keys().sort(), + [ + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC", + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD", + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA", + ].sort(), + ) + + @mock.patch("cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4") + @mock.patch( + "cloudinit.sources.DataSourceScaleway.SourceAddressAdapter", + get_source_address_adapter, + ) + @mock.patch("cloudinit.util.get_cmdline") + @mock.patch("time.sleep", return_value=None) def test_metadata_404(self, sleep, m_get_cmdline, dhcpv4): """ get_data() returns metadata, but no user data nor vendor data. """ - m_get_cmdline.return_value = 'scaleway' + m_get_cmdline.return_value = "scaleway" # Make user and vendor data APIs return HTTP/404, which means there is # no user / vendor data for the server. 
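One subtlety the tag-based key tests above depend on: tags carry keys as AUTHORIZED_KEY=<key> with spaces encoded as underscores (compare the fixture's ssh-rsa_AAAA... against the expected ssh-rsa AAAA...), so the datasource has to strip the prefix, restore the spaces, and merge the result with the structured ssh_public_keys entries. A sketch consistent with those assertions (helper name invented):

    def keys_from_metadata(metadata):
        # Tags: "AUTHORIZED_KEY=<key>" with "_" standing in for " ".
        keys = [
            tag.split("=", 1)[1].replace("_", " ")
            for tag in metadata.get("tags", [])
            if tag.startswith("AUTHORIZED_KEY=")
        ]
        # Plus the structured ssh_public_keys entries.
        keys += [k["key"] for k in metadata.get("ssh_public_keys", [])]
        return keys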
- httpretty.register_uri(httpretty.GET, self.metadata_url, - body=MetadataResponses.get_ok) - httpretty.register_uri(httpretty.GET, self.userdata_url, - body=DataResponses.empty) - httpretty.register_uri(httpretty.GET, self.vendordata_url, - body=DataResponses.empty) + httpretty.register_uri( + httpretty.GET, self.metadata_url, body=MetadataResponses.get_ok + ) + httpretty.register_uri( + httpretty.GET, self.userdata_url, body=DataResponses.empty + ) + httpretty.register_uri( + httpretty.GET, self.vendordata_url, body=DataResponses.empty + ) self.datasource.get_data() self.assertIsNone(self.datasource.get_userdata_raw()) self.assertIsNone(self.datasource.get_vendordata_raw()) self.assertEqual(sleep.call_count, 0) - @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') - @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', - get_source_address_adapter) - @mock.patch('cloudinit.util.get_cmdline') - @mock.patch('time.sleep', return_value=None) + @mock.patch("cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4") + @mock.patch( + "cloudinit.sources.DataSourceScaleway.SourceAddressAdapter", + get_source_address_adapter, + ) + @mock.patch("cloudinit.util.get_cmdline") + @mock.patch("time.sleep", return_value=None) def test_metadata_rate_limit(self, sleep, m_get_cmdline, dhcpv4): """ get_data() is rate limited two times by the metadata API when fetching user data. """ - m_get_cmdline.return_value = 'scaleway' + m_get_cmdline.return_value = "scaleway" - httpretty.register_uri(httpretty.GET, self.metadata_url, - body=MetadataResponses.get_ok) - httpretty.register_uri(httpretty.GET, self.vendordata_url, - body=DataResponses.empty) + httpretty.register_uri( + httpretty.GET, self.metadata_url, body=MetadataResponses.get_ok + ) + httpretty.register_uri( + httpretty.GET, self.vendordata_url, body=DataResponses.empty + ) httpretty.register_uri( - httpretty.GET, self.userdata_url, + httpretty.GET, + self.userdata_url, responses=[ httpretty.Response(body=DataResponses.rate_limited), httpretty.Response(body=DataResponses.rate_limited), httpretty.Response(body=DataResponses.get_ok), - ] + ], ) self.datasource.get_data() - self.assertEqual(self.datasource.get_userdata_raw(), - DataResponses.FAKE_USER_DATA) + self.assertEqual( + self.datasource.get_userdata_raw(), DataResponses.FAKE_USER_DATA + ) self.assertEqual(sleep.call_count, 2) - @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') - @mock.patch('cloudinit.util.get_cmdline') + @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic") + @mock.patch("cloudinit.util.get_cmdline") def test_network_config_ok(self, m_get_cmdline, fallback_nic): """ network_config will only generate IPv4 config if no ipv6 data is available in the metadata """ - m_get_cmdline.return_value = 'scaleway' - fallback_nic.return_value = 'ens2' - self.datasource.metadata['ipv6'] = None + m_get_cmdline.return_value = "scaleway" + fallback_nic.return_value = "ens2" + self.datasource.metadata["ipv6"] = None netcfg = self.datasource.network_config resp = { - 'version': 1, - 'config': [ + "version": 1, + "config": [ { - 'type': 'physical', - 'name': 'ens2', - 'subnets': [{'type': 'dhcp4'}] + "type": "physical", + "name": "ens2", + "subnets": [{"type": "dhcp4"}], } - ] + ], } self.assertEqual(netcfg, resp) - @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') - @mock.patch('cloudinit.util.get_cmdline') + @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic") + 
@mock.patch("cloudinit.util.get_cmdline") def test_network_config_ipv6_ok(self, m_get_cmdline, fallback_nic): """ network_config will only generate IPv4/v6 configs if ipv6 data is available in the metadata """ - m_get_cmdline.return_value = 'scaleway' - fallback_nic.return_value = 'ens2' - self.datasource.metadata['ipv6'] = { - 'address': '2000:abc:4444:9876::42:999', - 'gateway': '2000:abc:4444:9876::42:000', - 'netmask': '127', + m_get_cmdline.return_value = "scaleway" + fallback_nic.return_value = "ens2" + self.datasource.metadata["ipv6"] = { + "address": "2000:abc:4444:9876::42:999", + "gateway": "2000:abc:4444:9876::42:000", + "netmask": "127", } netcfg = self.datasource.network_config resp = { - 'version': 1, - 'config': [ + "version": 1, + "config": [ { - 'type': 'physical', - 'name': 'ens2', - 'subnets': [ + "type": "physical", + "name": "ens2", + "subnets": [ + {"type": "dhcp4"}, { - 'type': 'dhcp4' + "type": "static", + "address": "2000:abc:4444:9876::42:999", + "gateway": "2000:abc:4444:9876::42:000", + "netmask": "127", }, - { - 'type': 'static', - 'address': '2000:abc:4444:9876::42:999', - 'gateway': '2000:abc:4444:9876::42:000', - 'netmask': '127', - } - ] + ], } - ] + ], } self.assertEqual(netcfg, resp) - @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') - @mock.patch('cloudinit.util.get_cmdline') + @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic") + @mock.patch("cloudinit.util.get_cmdline") def test_network_config_existing(self, m_get_cmdline, fallback_nic): """ network_config() should return the same data if a network config already exists """ - m_get_cmdline.return_value = 'scaleway' - self.datasource._network_config = '0xdeadbeef' + m_get_cmdline.return_value = "scaleway" + self.datasource._network_config = "0xdeadbeef" netcfg = self.datasource.network_config - self.assertEqual(netcfg, '0xdeadbeef') + self.assertEqual(netcfg, "0xdeadbeef") - @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') - @mock.patch('cloudinit.util.get_cmdline') + @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic") + @mock.patch("cloudinit.util.get_cmdline") def test_network_config_unset(self, m_get_cmdline, fallback_nic): """ _network_config will be set to sources.UNSET after the first boot. Make sure it behave correctly. 
""" - m_get_cmdline.return_value = 'scaleway' - fallback_nic.return_value = 'ens2' - self.datasource.metadata['ipv6'] = None + m_get_cmdline.return_value = "scaleway" + fallback_nic.return_value = "ens2" + self.datasource.metadata["ipv6"] = None self.datasource._network_config = sources.UNSET resp = { - 'version': 1, - 'config': [ + "version": 1, + "config": [ { - 'type': 'physical', - 'name': 'ens2', - 'subnets': [{'type': 'dhcp4'}] + "type": "physical", + "name": "ens2", + "subnets": [{"type": "dhcp4"}], } - ] + ], } netcfg = self.datasource.network_config self.assertEqual(netcfg, resp) - @mock.patch('cloudinit.sources.DataSourceScaleway.LOG.warning') - @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') - @mock.patch('cloudinit.util.get_cmdline') - def test_network_config_cached_none(self, m_get_cmdline, fallback_nic, - logwarning): + @mock.patch("cloudinit.sources.DataSourceScaleway.LOG.warning") + @mock.patch("cloudinit.sources.DataSourceScaleway.net.find_fallback_nic") + @mock.patch("cloudinit.util.get_cmdline") + def test_network_config_cached_none( + self, m_get_cmdline, fallback_nic, logwarning + ): """ network_config() should return config data if cached data is None rather than sources.UNSET """ - m_get_cmdline.return_value = 'scaleway' - fallback_nic.return_value = 'ens2' - self.datasource.metadata['ipv6'] = None + m_get_cmdline.return_value = "scaleway" + fallback_nic.return_value = "ens2" + self.datasource.metadata["ipv6"] = None self.datasource._network_config = None resp = { - 'version': 1, - 'config': [ + "version": 1, + "config": [ { - 'type': 'physical', - 'name': 'ens2', - 'subnets': [{'type': 'dhcp4'}] + "type": "physical", + "name": "ens2", + "subnets": [{"type": "dhcp4"}], } - ] + ], } netcfg = self.datasource.network_config self.assertEqual(netcfg, resp) - logwarning.assert_called_with('Found None as cached _network_config. ' - 'Resetting to %s', sources.UNSET) + logwarning.assert_called_with( + "Found None as cached _network_config. Resetting to %s", + sources.UNSET, + ) diff --git a/tests/unittests/sources/test_smartos.py b/tests/unittests/sources/test_smartos.py index e306eded..55239c4e 100644 --- a/tests/unittests/sources/test_smartos.py +++ b/tests/unittests/sources/test_smartos.py @@ -5,14 +5,13 @@ # # This file is part of cloud-init. See LICENSE file for license information. -'''This is a testcase for the SmartOS datasource. +"""This is a testcase for the SmartOS datasource. It replicates a serial console and acts like the SmartOS console does in order to validate return responses. 
-''' +""" -from binascii import crc32 import json import multiprocessing import os @@ -22,32 +21,40 @@ import signal import stat import unittest import uuid +from binascii import crc32 +from cloudinit import helpers as c_helpers from cloudinit import serial +from cloudinit.event import EventScope, EventType from cloudinit.sources import DataSourceSmartOS +from cloudinit.sources.DataSourceSmartOS import SERIAL_DEVICE, SMARTOS_ENV_KVM from cloudinit.sources.DataSourceSmartOS import ( convert_smartos_network_data as convert_net, - SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ, - identify_file) -from cloudinit.event import EventScope, EventType - -from cloudinit import helpers as c_helpers -from cloudinit.util import (b64e, write_file) -from cloudinit.subp import (subp, ProcessExecutionError, which) - +) +from cloudinit.sources.DataSourceSmartOS import ( + get_smartos_environ, + identify_file, +) +from cloudinit.subp import ProcessExecutionError, subp, which +from cloudinit.util import b64e, write_file from tests.unittests.helpers import ( - CiTestCase, mock, FilesystemMockingTestCase, skipIf) - + CiTestCase, + FilesystemMockingTestCase, + mock, + skipIf, +) try: import serial as _pyserial + assert _pyserial # avoid pyflakes error F401: import unused HAS_PYSERIAL = True except ImportError: HAS_PYSERIAL = False -DSMOS = 'cloudinit.sources.DataSourceSmartOS' -SDC_NICS = json.loads(""" +DSMOS = "cloudinit.sources.DataSourceSmartOS" +SDC_NICS = json.loads( + """ [ { "nic_tag": "external", @@ -87,10 +94,12 @@ SDC_NICS = json.loads(""" ] } ] -""") +""" +) -SDC_NICS_ALT = json.loads(""" +SDC_NICS_ALT = json.loads( + """ [ { "interface": "net0", @@ -126,9 +135,11 @@ SDC_NICS_ALT = json.loads(""" "mtu": 1500 } ] -""") +""" +) -SDC_NICS_DHCP = json.loads(""" +SDC_NICS_DHCP = json.loads( + """ [ { "interface": "net0", @@ -164,9 +175,11 @@ SDC_NICS_DHCP = json.loads(""" "mtu": 1500 } ] -""") +""" +) -SDC_NICS_MIP = json.loads(""" +SDC_NICS_MIP = json.loads( + """ [ { "interface": "net0", @@ -204,9 +217,11 @@ SDC_NICS_MIP = json.loads(""" "mtu": 1500 } ] -""") +""" +) -SDC_NICS_MIP_IPV6 = json.loads(""" +SDC_NICS_MIP_IPV6 = json.loads( + """ [ { "interface": "net0", @@ -243,9 +258,11 @@ SDC_NICS_MIP_IPV6 = json.loads(""" "mtu": 1500 } ] -""") +""" +) -SDC_NICS_IPV4_IPV6 = json.loads(""" +SDC_NICS_IPV4_IPV6 = json.loads( + """ [ { "interface": "net0", @@ -277,9 +294,11 @@ SDC_NICS_IPV4_IPV6 = json.loads(""" "mtu": 1500 } ] -""") +""" +) -SDC_NICS_SINGLE_GATEWAY = json.loads(""" +SDC_NICS_SINGLE_GATEWAY = json.loads( + """ [ { "interface":"net0", @@ -309,32 +328,33 @@ SDC_NICS_SINGLE_GATEWAY = json.loads(""" "mtu":1500 } ] -""") +""" +) MOCK_RETURNS = { - 'hostname': 'test-host', - 'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname', - 'disable_iptables_flag': None, - 'enable_motd_sys_info': None, - 'test-var1': 'some data', - 'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']), - 'sdc:datacenter_name': 'somewhere2', - 'sdc:operator-script': '\n'.join(['bin/true', '']), - 'sdc:uuid': str(uuid.uuid4()), - 'sdc:vendor-data': '\n'.join(['VENDOR_DATA', '']), - 'user-data': '\n'.join(['something', '']), - 'user-script': '\n'.join(['/bin/true', '']), - 'sdc:nics': json.dumps(SDC_NICS), + "hostname": "test-host", + "root_authorized_keys": "ssh-rsa AAAAB3Nz...aC1yc2E= keyname", + "disable_iptables_flag": None, + "enable_motd_sys_info": None, + "test-var1": "some data", + "cloud-init:user-data": "\n".join(["#!/bin/sh", "/bin/true", ""]), + "sdc:datacenter_name": "somewhere2", + 
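# Per SmartOS metadata conventions, keys in the "sdc:" namespace (as in
# the MOCK_RETURNS entries around this point) are supplied by the
# platform/operator, while unprefixed keys are customer metadata. A
# sketch of that split, assuming MOCK_RETURNS is in scope:
operator_md = {
    k: v for k, v in MOCK_RETURNS.items() if k.startswith("sdc:")
}
customer_md = {
    k: v for k, v in MOCK_RETURNS.items() if not k.startswith("sdc:")
}
assert "sdc:uuid" in operator_md and "hostname" in customer_md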
"sdc:operator-script": "\n".join(["bin/true", ""]), + "sdc:uuid": str(uuid.uuid4()), + "sdc:vendor-data": "\n".join(["VENDOR_DATA", ""]), + "user-data": "\n".join(["something", ""]), + "user-script": "\n".join(["/bin/true", ""]), + "sdc:nics": json.dumps(SDC_NICS), } -DMI_DATA_RETURN = 'smartdc' +DMI_DATA_RETURN = "smartdc" # Useful for calculating the length of a frame body. A SUCCESS body will be # followed by more characters or be one character less if SUCCESS with no # payload. See Section 4.3 of https://eng.joyent.com/mdata/protocol.html. -SUCCESS_LEN = len('0123abcd SUCCESS ') -NOTFOUND_LEN = len('0123abcd NOTFOUND') +SUCCESS_LEN = len("0123abcd SUCCESS ") +NOTFOUND_LEN = len("0123abcd NOTFOUND") class PsuedoJoyentClient(object): @@ -364,11 +384,11 @@ class PsuedoJoyentClient(object): return True def open_transport(self): - assert(not self._is_open) + assert not self._is_open self._is_open = True def close_transport(self): - assert(self._is_open) + assert self._is_open self._is_open = False @@ -381,21 +401,35 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): self.add_patch(DSMOS + ".get_smartos_environ", "get_smartos_environ") self.add_patch(DSMOS + ".jmc_client_factory", "jmc_cfact") - self.legacy_user_d = self.tmp_path('legacy_user_tmp') + self.legacy_user_d = self.tmp_path("legacy_user_tmp") os.mkdir(self.legacy_user_d) - self.add_patch(DSMOS + ".LEGACY_USER_D", "m_legacy_user_d", - autospec=False, new=self.legacy_user_d) - self.add_patch(DSMOS + ".identify_file", "m_identify_file", - return_value="text/plain") + self.add_patch( + DSMOS + ".LEGACY_USER_D", + "m_legacy_user_d", + autospec=False, + new=self.legacy_user_d, + ) + self.add_patch( + DSMOS + ".identify_file", + "m_identify_file", + return_value="text/plain", + ) - def _get_ds(self, mockdata=None, mode=DataSourceSmartOS.SMARTOS_ENV_KVM, - sys_cfg=None, ds_cfg=None): + def _get_ds( + self, + mockdata=None, + mode=DataSourceSmartOS.SMARTOS_ENV_KVM, + sys_cfg=None, + ds_cfg=None, + ): self.jmc_cfact.return_value = PsuedoJoyentClient(mockdata) self.get_smartos_environ.return_value = mode tmpd = self.tmp_dir() - dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd), - 'run_dir': self.tmp_path('run_dir')} + dirs = { + "cloud_dir": self.tmp_path("cloud_dir", tmpd), + "run_dir": self.tmp_path("run_dir"), + } for d in dirs.values(): os.mkdir(d) paths = c_helpers.Paths(dirs) @@ -404,14 +438,15 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): sys_cfg = {} if ds_cfg is not None: - sys_cfg['datasource'] = sys_cfg.get('datasource', {}) - sys_cfg['datasource']['SmartOS'] = ds_cfg + sys_cfg["datasource"] = sys_cfg.get("datasource", {}) + sys_cfg["datasource"]["SmartOS"] = ds_cfg return DataSourceSmartOS.DataSourceSmartOS( - sys_cfg, distro=None, paths=paths) + sys_cfg, distro=None, paths=paths + ) def test_no_base64(self): - ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True} + ds_cfg = {"no_base64_decode": ["test_var1"], "all_base": True} dsrc = self._get_ds(ds_cfg=ds_cfg) ret = dsrc.get_data() self.assertTrue(ret) @@ -420,166 +455,180 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['sdc:uuid'], - dsrc.metadata['instance-id']) + self.assertEqual( + MOCK_RETURNS["sdc:uuid"], dsrc.metadata["instance-id"] + ) def test_platform_info(self): """All platform-related attributes are properly set.""" dsrc = self._get_ds(mockdata=MOCK_RETURNS) - self.assertEqual('joyent', dsrc.cloud_name) - 
self.assertEqual('joyent', dsrc.platform_type) - self.assertEqual('serial (/dev/ttyS1)', dsrc.subplatform) + self.assertEqual("joyent", dsrc.cloud_name) + self.assertEqual("joyent", dsrc.platform_type) + self.assertEqual("serial (/dev/ttyS1)", dsrc.subplatform) def test_root_keys(self): dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['root_authorized_keys'], - dsrc.metadata['public-keys']) + self.assertEqual( + MOCK_RETURNS["root_authorized_keys"], dsrc.metadata["public-keys"] + ) def test_hostname_b64(self): dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['hostname'], - dsrc.metadata['local-hostname']) + self.assertEqual( + MOCK_RETURNS["hostname"], dsrc.metadata["local-hostname"] + ) def test_hostname(self): dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['hostname'], - dsrc.metadata['local-hostname']) + self.assertEqual( + MOCK_RETURNS["hostname"], dsrc.metadata["local-hostname"] + ) def test_hostname_if_no_sdc_hostname(self): my_returns = MOCK_RETURNS.copy() - my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname'] + my_returns["sdc:hostname"] = "sdc-" + my_returns["hostname"] dsrc = self._get_ds(mockdata=my_returns) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEqual(my_returns['hostname'], - dsrc.metadata['local-hostname']) + self.assertEqual( + my_returns["hostname"], dsrc.metadata["local-hostname"] + ) def test_sdc_hostname_if_no_hostname(self): my_returns = MOCK_RETURNS.copy() - my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname'] - del my_returns['hostname'] + my_returns["sdc:hostname"] = "sdc-" + my_returns["hostname"] + del my_returns["hostname"] dsrc = self._get_ds(mockdata=my_returns) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEqual(my_returns['sdc:hostname'], - dsrc.metadata['local-hostname']) + self.assertEqual( + my_returns["sdc:hostname"], dsrc.metadata["local-hostname"] + ) def test_sdc_uuid_if_no_hostname_or_sdc_hostname(self): my_returns = MOCK_RETURNS.copy() - del my_returns['hostname'] + del my_returns["hostname"] dsrc = self._get_ds(mockdata=my_returns) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEqual(my_returns['sdc:uuid'], - dsrc.metadata['local-hostname']) + self.assertEqual( + my_returns["sdc:uuid"], dsrc.metadata["local-hostname"] + ) def test_userdata(self): dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['user-data'], - dsrc.metadata['legacy-user-data']) - self.assertEqual(MOCK_RETURNS['cloud-init:user-data'], - dsrc.userdata_raw) + self.assertEqual( + MOCK_RETURNS["user-data"], dsrc.metadata["legacy-user-data"] + ) + self.assertEqual( + MOCK_RETURNS["cloud-init:user-data"], dsrc.userdata_raw + ) def test_sdc_nics(self): dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEqual(json.loads(MOCK_RETURNS['sdc:nics']), - dsrc.metadata['network-data']) + self.assertEqual( + json.loads(MOCK_RETURNS["sdc:nics"]), dsrc.metadata["network-data"] + ) def test_sdc_scripts(self): dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['user-script'], - dsrc.metadata['user-script']) + self.assertEqual( + MOCK_RETURNS["user-script"], dsrc.metadata["user-script"] + ) legacy_script_f = "%s/user-script" % self.legacy_user_d 
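# The hostname tests above fix the fallback order for local-hostname:
# "hostname" wins, then "sdc:hostname", then "sdc:uuid". A simplified
# sketch of that precedence (not the datasource's actual code):
def pick_local_hostname(md):
    for key in ("hostname", "sdc:hostname", "sdc:uuid"):
        if md.get(key):
            return md[key]
    return None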
print("legacy_script_f=%s" % legacy_script_f) self.assertTrue(os.path.exists(legacy_script_f)) self.assertTrue(os.path.islink(legacy_script_f)) user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:] - self.assertEqual(user_script_perm, '700') + self.assertEqual(user_script_perm, "700") def test_scripts_shebanged(self): dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['user-script'], - dsrc.metadata['user-script']) + self.assertEqual( + MOCK_RETURNS["user-script"], dsrc.metadata["user-script"] + ) legacy_script_f = "%s/user-script" % self.legacy_user_d self.assertTrue(os.path.exists(legacy_script_f)) self.assertTrue(os.path.islink(legacy_script_f)) shebang = None - with open(legacy_script_f, 'r') as f: + with open(legacy_script_f, "r") as f: shebang = f.readlines()[0].strip() self.assertEqual(shebang, "#!/bin/bash") user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:] - self.assertEqual(user_script_perm, '700') + self.assertEqual(user_script_perm, "700") def test_scripts_shebang_not_added(self): """ - Test that the SmartOS requirement that plain text scripts - are executable. This test makes sure that plain texts scripts - with out file magic have it added appropriately by cloud-init. + Test that the SmartOS requirement that plain text scripts + are executable. This test makes sure that plain texts scripts + with out file magic have it added appropriately by cloud-init. """ my_returns = MOCK_RETURNS.copy() - my_returns['user-script'] = '\n'.join(['#!/usr/bin/perl', - 'print("hi")', '']) + my_returns["user-script"] = "\n".join( + ["#!/usr/bin/perl", 'print("hi")', ""] + ) dsrc = self._get_ds(mockdata=my_returns) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEqual(my_returns['user-script'], - dsrc.metadata['user-script']) + self.assertEqual( + my_returns["user-script"], dsrc.metadata["user-script"] + ) legacy_script_f = "%s/user-script" % self.legacy_user_d self.assertTrue(os.path.exists(legacy_script_f)) self.assertTrue(os.path.islink(legacy_script_f)) shebang = None - with open(legacy_script_f, 'r') as f: + with open(legacy_script_f, "r") as f: shebang = f.readlines()[0].strip() self.assertEqual(shebang, "#!/usr/bin/perl") def test_userdata_removed(self): """ - User-data in the SmartOS world is supposed to be written to a file - each and every boot. This tests to make sure that in the event the - legacy user-data is removed, the existing user-data is backed-up - and there is no /var/db/user-data left. + User-data in the SmartOS world is supposed to be written to a file + each and every boot. This tests to make sure that in the event the + legacy user-data is removed, the existing user-data is backed-up + and there is no /var/db/user-data left. 
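# test_scripts_shebanged and test_scripts_shebang_not_added together
# pin down one rule: a plain-text user-script without an interpreter
# line gets "#!/bin/bash" prepended, while an existing shebang is kept
# as-is. A simplified sketch of that rule:
def ensure_shebang(script, default="#!/bin/bash"):
    if script.startswith("#!"):
        return script  # existing interpreter line is preserved
    return default + "\n" + script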
""" user_data_f = "%s/mdata-user-data" % self.legacy_user_d - with open(user_data_f, 'w') as f: + with open(user_data_f, "w") as f: f.write("PREVIOUS") my_returns = MOCK_RETURNS.copy() - del my_returns['user-data'] + del my_returns["user-data"] dsrc = self._get_ds(mockdata=my_returns) ret = dsrc.get_data() self.assertTrue(ret) - self.assertFalse(dsrc.metadata.get('legacy-user-data')) + self.assertFalse(dsrc.metadata.get("legacy-user-data")) found_new = False for root, _dirs, files in os.walk(self.legacy_user_d): for name in files: name_f = os.path.join(root, name) permissions = oct(os.stat(name_f)[stat.ST_MODE])[-3:] - if re.match(r'.*\/mdata-user-data$', name_f): + if re.match(r".*\/mdata-user-data$", name_f): found_new = True print(name_f) - self.assertEqual(permissions, '400') + self.assertEqual(permissions, "400") self.assertFalse(found_new) @@ -587,17 +636,18 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['sdc:vendor-data'], - dsrc.metadata['vendor-data']) + self.assertEqual( + MOCK_RETURNS["sdc:vendor-data"], dsrc.metadata["vendor-data"] + ) def test_default_vendor_data(self): my_returns = MOCK_RETURNS.copy() - def_op_script = my_returns['sdc:vendor-data'] - del my_returns['sdc:vendor-data'] + def_op_script = my_returns["sdc:vendor-data"] + del my_returns["sdc:vendor-data"] dsrc = self._get_ds(mockdata=my_returns) ret = dsrc.get_data() self.assertTrue(ret) - self.assertNotEqual(def_op_script, dsrc.metadata['vendor-data']) + self.assertNotEqual(def_op_script, dsrc.metadata["vendor-data"]) # we expect default vendor-data is a boothook self.assertTrue(dsrc.vendordata_raw.startswith("#cloud-boothook")) @@ -606,15 +656,19 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['disable_iptables_flag'], - dsrc.metadata['iptables_disable']) + self.assertEqual( + MOCK_RETURNS["disable_iptables_flag"], + dsrc.metadata["iptables_disable"], + ) def test_motd_sys_info(self): dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['enable_motd_sys_info'], - dsrc.metadata['motd_sys_info']) + self.assertEqual( + MOCK_RETURNS["enable_motd_sys_info"], + dsrc.metadata["motd_sys_info"], + ) def test_default_ephemeral(self): # Test to make sure that the builtin config has the ephemeral @@ -625,16 +679,16 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): ret = dsrc.get_data() self.assertTrue(ret) - assert 'disk_setup' in cfg - assert 'fs_setup' in cfg - self.assertIsInstance(cfg['disk_setup'], dict) - self.assertIsInstance(cfg['fs_setup'], list) + assert "disk_setup" in cfg + assert "fs_setup" in cfg + self.assertIsInstance(cfg["disk_setup"], dict) + self.assertIsInstance(cfg["fs_setup"], list) def test_override_disk_aliases(self): # Test to make sure that the built-in DS is overriden builtin = DataSourceSmartOS.BUILTIN_DS_CONFIG - mydscfg = {'disk_aliases': {'FOO': '/dev/bar'}} + mydscfg = {"disk_aliases": {"FOO": "/dev/bar"}} # expect that these values are in builtin, or this is pointless for k in mydscfg: @@ -644,25 +698,30 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): ret = dsrc.get_data() self.assertTrue(ret) - self.assertEqual(mydscfg['disk_aliases']['FOO'], - dsrc.ds_cfg['disk_aliases']['FOO']) + self.assertEqual( + mydscfg["disk_aliases"]["FOO"], 
dsrc.ds_cfg["disk_aliases"]["FOO"] + ) - self.assertEqual(dsrc.device_name_to_device('FOO'), - mydscfg['disk_aliases']['FOO']) + self.assertEqual( + dsrc.device_name_to_device("FOO"), mydscfg["disk_aliases"]["FOO"] + ) def test_reconfig_network_on_boot(self): # Test to ensure that network is configured from metadata on each boot dsrc = self._get_ds(mockdata=MOCK_RETURNS) self.assertSetEqual( - {EventType.BOOT_NEW_INSTANCE, - EventType.BOOT, - EventType.BOOT_LEGACY}, - dsrc.default_update_events[EventScope.NETWORK] + { + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT, + EventType.BOOT_LEGACY, + }, + dsrc.default_update_events[EventScope.NETWORK], ) class TestIdentifyFile(CiTestCase): """Test the 'identify_file' utility.""" + @skipIf(not which("file"), "command 'file' not available.") def test_file_happy_path(self): """Test file is available and functional on plain text.""" @@ -680,14 +739,16 @@ class TestIdentifyFile(CiTestCase): self.assertEqual(None, identify_file(fname)) self.assertEqual( [mock.call(["file", "--brief", "--mime-type", fname])], - m_subp.call_args_list) + m_subp.call_args_list, + ) class ShortReader(object): """Implements a 'read' interface for bytes provided. much like io.BytesIO but the 'endbyte' acts as if EOF. When it is reached a short will be returned.""" - def __init__(self, initial_bytes, endbyte=b'\0'): + + def __init__(self, initial_bytes, endbyte=b"\0"): self.data = initial_bytes self.index = 0 self.len = len(self.data) @@ -700,7 +761,7 @@ class ShortReader(object): def read(self, size=-1): """Read size bytes but not past a null.""" if size == 0 or self.index >= self.len: - return b'' + return b"" rsize = size if size < 0 or size + self.index > self.len: @@ -711,7 +772,7 @@ class ShortReader(object): rsize = next_null - self.index + 1 i = self.index self.index += rsize - ret = self.data[i:i + rsize] + ret = self.data[i : i + rsize] if len(ret) and ret[-1:] == self.endbyte: ret = ret[:-1] return ret @@ -719,32 +780,34 @@ class ShortReader(object): class TestJoyentMetadataClient(FilesystemMockingTestCase): - invalid = b'invalid command\n' - failure = b'FAILURE\n' - v2_ok = b'V2_OK\n' + invalid = b"invalid command\n" + failure = b"FAILURE\n" + v2_ok = b"V2_OK\n" def setUp(self): super(TestJoyentMetadataClient, self).setUp() self.serial = mock.MagicMock(spec=serial.Serial) - self.request_id = 0xabcdef12 - self.metadata_value = 'value' + self.request_id = 0xABCDEF12 + self.metadata_value = "value" self.response_parts = { - 'command': 'SUCCESS', - 'crc': 'b5a9ff00', - 'length': SUCCESS_LEN + len(b64e(self.metadata_value)), - 'payload': b64e(self.metadata_value), - 'request_id': '{0:08x}'.format(self.request_id), + "command": "SUCCESS", + "crc": "b5a9ff00", + "length": SUCCESS_LEN + len(b64e(self.metadata_value)), + "payload": b64e(self.metadata_value), + "request_id": "{0:08x}".format(self.request_id), } def make_response(): - payloadstr = '' - if 'payload' in self.response_parts: - payloadstr = ' {0}'.format(self.response_parts['payload']) - return ('V2 {length} {crc} {request_id} ' - '{command}{payloadstr}\n'.format( - payloadstr=payloadstr, - **self.response_parts).encode('ascii')) + payloadstr = "" + if "payload" in self.response_parts: + payloadstr = " {0}".format(self.response_parts["payload"]) + return ( + "V2 {length} {crc} {request_id} " + "{command}{payloadstr}\n".format( + payloadstr=payloadstr, **self.response_parts + ).encode("ascii") + ) self.metasource_data = None @@ -758,41 +821,49 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): 
self.serial.read.side_effect = read_response self.patched_funcs.enter_context( - mock.patch('cloudinit.sources.DataSourceSmartOS.random.randint', - mock.Mock(return_value=self.request_id))) + mock.patch( + "cloudinit.sources.DataSourceSmartOS.random.randint", + mock.Mock(return_value=self.request_id), + ) + ) def _get_client(self): return DataSourceSmartOS.JoyentMetadataClient( - fp=self.serial, smartos_type=DataSourceSmartOS.SMARTOS_ENV_KVM) + fp=self.serial, smartos_type=DataSourceSmartOS.SMARTOS_ENV_KVM + ) def _get_serial_client(self): self.serial.timeout = 1 - return DataSourceSmartOS.JoyentMetadataSerialClient(None, - fp=self.serial) + return DataSourceSmartOS.JoyentMetadataSerialClient( + None, fp=self.serial + ) def assertEndsWith(self, haystack, prefix): - self.assertTrue(haystack.endswith(prefix), - "{0} does not end with '{1}'".format( - repr(haystack), prefix)) + self.assertTrue( + haystack.endswith(prefix), + "{0} does not end with '{1}'".format(repr(haystack), prefix), + ) def assertStartsWith(self, haystack, prefix): - self.assertTrue(haystack.startswith(prefix), - "{0} does not start with '{1}'".format( - repr(haystack), prefix)) + self.assertTrue( + haystack.startswith(prefix), + "{0} does not start with '{1}'".format(repr(haystack), prefix), + ) def assertNoMoreSideEffects(self, obj): self.assertRaises(StopIteration, obj) def test_get_metadata_writes_a_single_line(self): client = self._get_client() - client.get('some_key') + client.get("some_key") self.assertEqual(1, self.serial.write.call_count) written_line = self.serial.write.call_args[0][0] - self.assertEndsWith(written_line.decode('ascii'), - b'\n'.decode('ascii')) - self.assertEqual(1, written_line.count(b'\n')) + self.assertEndsWith( + written_line.decode("ascii"), b"\n".decode("ascii") + ) + self.assertEqual(1, written_line.count(b"\n")) - def _get_written_line(self, key='some_key'): + def _get_written_line(self, key="some_key"): client = self._get_client() client.get(key) return self.serial.write.call_args[0][0] @@ -802,76 +873,86 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): def test_get_metadata_line_starts_with_v2(self): foo = self._get_written_line() - self.assertStartsWith(foo.decode('ascii'), b'V2'.decode('ascii')) + self.assertStartsWith(foo.decode("ascii"), b"V2".decode("ascii")) def test_get_metadata_uses_get_command(self): - parts = self._get_written_line().decode('ascii').strip().split(' ') - self.assertEqual('GET', parts[4]) + parts = self._get_written_line().decode("ascii").strip().split(" ") + self.assertEqual("GET", parts[4]) def test_get_metadata_base64_encodes_argument(self): - key = 'my_key' - parts = self._get_written_line(key).decode('ascii').strip().split(' ') + key = "my_key" + parts = self._get_written_line(key).decode("ascii").strip().split(" ") self.assertEqual(b64e(key), parts[5]) def test_get_metadata_calculates_length_correctly(self): - parts = self._get_written_line().decode('ascii').strip().split(' ') - expected_length = len(' '.join(parts[3:])) + parts = self._get_written_line().decode("ascii").strip().split(" ") + expected_length = len(" ".join(parts[3:])) self.assertEqual(expected_length, int(parts[1])) def test_get_metadata_uses_appropriate_request_id(self): - parts = self._get_written_line().decode('ascii').strip().split(' ') + parts = self._get_written_line().decode("ascii").strip().split(" ") request_id = parts[3] self.assertEqual(8, len(request_id)) self.assertEqual(request_id, request_id.lower()) def test_get_metadata_uses_random_number_for_request_id(self): 
line = self._get_written_line() - request_id = line.decode('ascii').strip().split(' ')[3] - self.assertEqual('{0:08x}'.format(self.request_id), request_id) + request_id = line.decode("ascii").strip().split(" ")[3] + self.assertEqual("{0:08x}".format(self.request_id), request_id) def test_get_metadata_checksums_correctly(self): - parts = self._get_written_line().decode('ascii').strip().split(' ') - expected_checksum = '{0:08x}'.format( - crc32(' '.join(parts[3:]).encode('utf-8')) & 0xffffffff) + parts = self._get_written_line().decode("ascii").strip().split(" ") + expected_checksum = "{0:08x}".format( + crc32(" ".join(parts[3:]).encode("utf-8")) & 0xFFFFFFFF + ) checksum = parts[2] self.assertEqual(expected_checksum, checksum) def test_get_metadata_reads_a_line(self): client = self._get_client() - client.get('some_key') + client.get("some_key") self.assertEqual(self.metasource_data_len, self.serial.read.call_count) def test_get_metadata_returns_valid_value(self): client = self._get_client() - value = client.get('some_key') + value = client.get("some_key") self.assertEqual(self.metadata_value, value) def test_get_metadata_throws_exception_for_incorrect_length(self): - self.response_parts['length'] = 0 + self.response_parts["length"] = 0 client = self._get_client() - self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, - client.get, 'some_key') + self.assertRaises( + DataSourceSmartOS.JoyentMetadataFetchException, + client.get, + "some_key", + ) def test_get_metadata_throws_exception_for_incorrect_crc(self): - self.response_parts['crc'] = 'deadbeef' + self.response_parts["crc"] = "deadbeef" client = self._get_client() - self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, - client.get, 'some_key') + self.assertRaises( + DataSourceSmartOS.JoyentMetadataFetchException, + client.get, + "some_key", + ) def test_get_metadata_throws_exception_for_request_id_mismatch(self): - self.response_parts['request_id'] = 'deadbeef' + self.response_parts["request_id"] = "deadbeef" client = self._get_client() - client._checksum = lambda _: self.response_parts['crc'] - self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, - client.get, 'some_key') + client._checksum = lambda _: self.response_parts["crc"] + self.assertRaises( + DataSourceSmartOS.JoyentMetadataFetchException, + client.get, + "some_key", + ) def test_get_metadata_returns_None_if_value_not_found(self): - self.response_parts['payload'] = '' - self.response_parts['command'] = 'NOTFOUND' - self.response_parts['length'] = NOTFOUND_LEN + self.response_parts["payload"] = "" + self.response_parts["command"] = "NOTFOUND" + self.response_parts["length"] = NOTFOUND_LEN client = self._get_client() - client._checksum = lambda _: self.response_parts['crc'] - self.assertIsNone(client.get('some_key')) + client._checksum = lambda _: self.response_parts["crc"] + self.assertIsNone(client.get("some_key")) def test_negotiate(self): client = self._get_client() @@ -883,55 +964,58 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): def test_negotiate_short_response(self): client = self._get_client() # chopped '\n' from v2_ok. 
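# The framing tests above (GET command, base64 argument, declared
# length, request id, crc32 checksum) fully describe the V2 request
# line. A standalone sketch that satisfies them, mirroring the mdata
# protocol rather than reusing JoyentMetadataClient:
from base64 import b64encode
from binascii import crc32

def v2_frame(request_id, command, arg):
    body = "{0:08x} {1} {2}".format(
        request_id, command, b64encode(arg.encode()).decode()
    )
    crc = "{0:08x}".format(crc32(body.encode("utf-8")) & 0xFFFFFFFF)
    # the declared length and checksum cover only the body fields
    return "V2 {0} {1} {2}\n".format(len(body), crc, body).encode("ascii")

# e.g. v2_frame(0xABCDEF12, "GET", "some_key")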
- reader = ShortReader(self.v2_ok[:-1] + b'\0') + reader = ShortReader(self.v2_ok[:-1] + b"\0") client.fp.read.side_effect = reader.read - self.assertRaises(DataSourceSmartOS.JoyentMetadataTimeoutException, - client._negotiate) + self.assertRaises( + DataSourceSmartOS.JoyentMetadataTimeoutException, client._negotiate + ) self.assertTrue(reader.emptied) def test_negotiate_bad_response(self): client = self._get_client() - reader = ShortReader(b'garbage\n' + self.v2_ok) + reader = ShortReader(b"garbage\n" + self.v2_ok) client.fp.read.side_effect = reader.read - self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, - client._negotiate) + self.assertRaises( + DataSourceSmartOS.JoyentMetadataFetchException, client._negotiate + ) self.assertEqual(self.v2_ok, client.fp.read()) def test_serial_open_transport(self): client = self._get_serial_client() - reader = ShortReader(b'garbage\0' + self.invalid + self.v2_ok) + reader = ShortReader(b"garbage\0" + self.invalid + self.v2_ok) client.fp.read.side_effect = reader.read client.open_transport() self.assertTrue(reader.emptied) def test_flush_failure(self): client = self._get_serial_client() - reader = ShortReader(b'garbage' + b'\0' + self.failure + - self.invalid + self.v2_ok) + reader = ShortReader( + b"garbage" + b"\0" + self.failure + self.invalid + self.v2_ok + ) client.fp.read.side_effect = reader.read client.open_transport() self.assertTrue(reader.emptied) def test_flush_many_timeouts(self): client = self._get_serial_client() - reader = ShortReader(b'\0' * 100 + self.invalid + self.v2_ok) + reader = ShortReader(b"\0" * 100 + self.invalid + self.v2_ok) client.fp.read.side_effect = reader.read client.open_transport() self.assertTrue(reader.emptied) def test_list_metadata_returns_list(self): - parts = ['foo', 'bar'] - value = b64e('\n'.join(parts)) - self.response_parts['payload'] = value - self.response_parts['crc'] = '40873553' - self.response_parts['length'] = SUCCESS_LEN + len(value) + parts = ["foo", "bar"] + value = b64e("\n".join(parts)) + self.response_parts["payload"] = value + self.response_parts["crc"] = "40873553" + self.response_parts["length"] = SUCCESS_LEN + len(value) client = self._get_client() self.assertEqual(client.list(), parts) def test_list_metadata_returns_empty_list_if_no_customer_metadata(self): - del self.response_parts['payload'] - self.response_parts['length'] = SUCCESS_LEN - 1 - self.response_parts['crc'] = '14e563ba' + del self.response_parts["payload"] + self.response_parts["length"] = SUCCESS_LEN - 1 + self.response_parts["crc"] = "14e563ba" client = self._get_client() self.assertEqual(client.list(), []) @@ -939,181 +1023,354 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): class TestNetworkConversion(CiTestCase): def test_convert_simple(self): expected = { - 'version': 1, - 'config': [ - {'name': 'net0', 'type': 'physical', - 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', - 'address': '8.12.42.102/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:f5:e4:f5'}, - {'name': 'net1', 'type': 'physical', - 'subnets': [{'type': 'static', - 'address': '192.168.128.93/22'}], - 'mtu': 8500, 'mac_address': '90:b8:d0:a5:ff:cd'}]} + "version": 1, + "config": [ + { + "name": "net0", + "type": "physical", + "subnets": [ + { + "type": "static", + "gateway": "8.12.42.1", + "address": "8.12.42.102/24", + } + ], + "mtu": 1500, + "mac_address": "90:b8:d0:f5:e4:f5", + }, + { + "name": "net1", + "type": "physical", + "subnets": [ + {"type": "static", "address": "192.168.128.93/22"} + ], + "mtu": 8500, + 
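# test_list_metadata_returns_list depends on one encoding detail: a
# KEYS reply's payload is base64 of the newline-joined key names. A
# tiny round-trip sketch:
from base64 import b64decode, b64encode

payload = b64encode(b"foo\nbar").decode()  # "Zm9vCmJhcg=="
assert b64decode(payload).decode().split("\n") == ["foo", "bar"]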
"mac_address": "90:b8:d0:a5:ff:cd", + }, + ], + } found = convert_net(SDC_NICS) self.assertEqual(expected, found) def test_convert_simple_alt(self): expected = { - 'version': 1, - 'config': [ - {'name': 'net0', 'type': 'physical', - 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', - 'address': '8.12.42.51/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, - {'name': 'net1', 'type': 'physical', - 'subnets': [{'type': 'static', - 'address': '10.210.1.217/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + "version": 1, + "config": [ + { + "name": "net0", + "type": "physical", + "subnets": [ + { + "type": "static", + "gateway": "8.12.42.1", + "address": "8.12.42.51/24", + } + ], + "mtu": 1500, + "mac_address": "90:b8:d0:ae:64:51", + }, + { + "name": "net1", + "type": "physical", + "subnets": [ + {"type": "static", "address": "10.210.1.217/24"} + ], + "mtu": 1500, + "mac_address": "90:b8:d0:bd:4f:9c", + }, + ], + } found = convert_net(SDC_NICS_ALT) self.assertEqual(expected, found) def test_convert_simple_dhcp(self): expected = { - 'version': 1, - 'config': [ - {'name': 'net0', 'type': 'physical', - 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', - 'address': '8.12.42.51/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, - {'name': 'net1', 'type': 'physical', - 'subnets': [{'type': 'dhcp4'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + "version": 1, + "config": [ + { + "name": "net0", + "type": "physical", + "subnets": [ + { + "type": "static", + "gateway": "8.12.42.1", + "address": "8.12.42.51/24", + } + ], + "mtu": 1500, + "mac_address": "90:b8:d0:ae:64:51", + }, + { + "name": "net1", + "type": "physical", + "subnets": [{"type": "dhcp4"}], + "mtu": 1500, + "mac_address": "90:b8:d0:bd:4f:9c", + }, + ], + } found = convert_net(SDC_NICS_DHCP) self.assertEqual(expected, found) def test_convert_simple_multi_ip(self): expected = { - 'version': 1, - 'config': [ - {'name': 'net0', 'type': 'physical', - 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', - 'address': '8.12.42.51/24'}, - {'type': 'static', - 'address': '8.12.42.52/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, - {'name': 'net1', 'type': 'physical', - 'subnets': [{'type': 'static', - 'address': '10.210.1.217/24'}, - {'type': 'static', - 'address': '10.210.1.151/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + "version": 1, + "config": [ + { + "name": "net0", + "type": "physical", + "subnets": [ + { + "type": "static", + "gateway": "8.12.42.1", + "address": "8.12.42.51/24", + }, + {"type": "static", "address": "8.12.42.52/24"}, + ], + "mtu": 1500, + "mac_address": "90:b8:d0:ae:64:51", + }, + { + "name": "net1", + "type": "physical", + "subnets": [ + {"type": "static", "address": "10.210.1.217/24"}, + {"type": "static", "address": "10.210.1.151/24"}, + ], + "mtu": 1500, + "mac_address": "90:b8:d0:bd:4f:9c", + }, + ], + } found = convert_net(SDC_NICS_MIP) self.assertEqual(expected, found) def test_convert_with_dns(self): expected = { - 'version': 1, - 'config': [ - {'name': 'net0', 'type': 'physical', - 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', - 'address': '8.12.42.51/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, - {'name': 'net1', 'type': 'physical', - 'subnets': [{'type': 'dhcp4'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}, - {'type': 'nameserver', - 'address': ['8.8.8.8', '8.8.8.1'], 'search': ["local"]}]} + "version": 1, + "config": [ + { + "name": "net0", + "type": "physical", + "subnets": [ + { + "type": "static", + 
"gateway": "8.12.42.1", + "address": "8.12.42.51/24", + } + ], + "mtu": 1500, + "mac_address": "90:b8:d0:ae:64:51", + }, + { + "name": "net1", + "type": "physical", + "subnets": [{"type": "dhcp4"}], + "mtu": 1500, + "mac_address": "90:b8:d0:bd:4f:9c", + }, + { + "type": "nameserver", + "address": ["8.8.8.8", "8.8.8.1"], + "search": ["local"], + }, + ], + } found = convert_net( - network_data=SDC_NICS_DHCP, dns_servers=['8.8.8.8', '8.8.8.1'], - dns_domain="local") + network_data=SDC_NICS_DHCP, + dns_servers=["8.8.8.8", "8.8.8.1"], + dns_domain="local", + ) self.assertEqual(expected, found) def test_convert_simple_multi_ipv6(self): expected = { - 'version': 1, - 'config': [ - {'name': 'net0', 'type': 'physical', - 'subnets': [{'type': 'static', 'address': - '2001:4800:78ff:1b:be76:4eff:fe06:96b3/64'}, - {'type': 'static', 'gateway': '8.12.42.1', - 'address': '8.12.42.51/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, - {'name': 'net1', 'type': 'physical', - 'subnets': [{'type': 'static', - 'address': '10.210.1.217/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + "version": 1, + "config": [ + { + "name": "net0", + "type": "physical", + "subnets": [ + { + "type": "static", + "address": ( + "2001:4800:78ff:1b:be76:4eff:fe06:96b3/64" + ), + }, + { + "type": "static", + "gateway": "8.12.42.1", + "address": "8.12.42.51/24", + }, + ], + "mtu": 1500, + "mac_address": "90:b8:d0:ae:64:51", + }, + { + "name": "net1", + "type": "physical", + "subnets": [ + {"type": "static", "address": "10.210.1.217/24"} + ], + "mtu": 1500, + "mac_address": "90:b8:d0:bd:4f:9c", + }, + ], + } found = convert_net(SDC_NICS_MIP_IPV6) self.assertEqual(expected, found) def test_convert_simple_both_ipv4_ipv6(self): expected = { - 'version': 1, - 'config': [ - {'mac_address': '90:b8:d0:ae:64:51', 'mtu': 1500, - 'name': 'net0', 'type': 'physical', - 'subnets': [{'address': '2001::10/64', 'gateway': '2001::1', - 'type': 'static'}, - {'address': '8.12.42.51/24', - 'gateway': '8.12.42.1', - 'type': 'static'}, - {'address': '2001::11/64', 'type': 'static'}, - {'address': '8.12.42.52/32', 'type': 'static'}]}, - {'mac_address': '90:b8:d0:bd:4f:9c', 'mtu': 1500, - 'name': 'net1', 'type': 'physical', - 'subnets': [{'address': '10.210.1.217/24', - 'type': 'static'}]}]} + "version": 1, + "config": [ + { + "mac_address": "90:b8:d0:ae:64:51", + "mtu": 1500, + "name": "net0", + "type": "physical", + "subnets": [ + { + "address": "2001::10/64", + "gateway": "2001::1", + "type": "static", + }, + { + "address": "8.12.42.51/24", + "gateway": "8.12.42.1", + "type": "static", + }, + {"address": "2001::11/64", "type": "static"}, + {"address": "8.12.42.52/32", "type": "static"}, + ], + }, + { + "mac_address": "90:b8:d0:bd:4f:9c", + "mtu": 1500, + "name": "net1", + "type": "physical", + "subnets": [ + {"address": "10.210.1.217/24", "type": "static"} + ], + }, + ], + } found = convert_net(SDC_NICS_IPV4_IPV6) self.assertEqual(expected, found) def test_gateways_not_on_all_nics(self): expected = { - 'version': 1, - 'config': [ - {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500, - 'name': 'net0', 'type': 'physical', - 'subnets': [{'address': '8.12.42.26/24', - 'gateway': '8.12.42.1', 'type': 'static'}]}, - {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500, - 'name': 'net1', 'type': 'physical', - 'subnets': [{'address': '10.210.1.27/24', - 'type': 'static'}]}]} + "version": 1, + "config": [ + { + "mac_address": "90:b8:d0:d8:82:b4", + "mtu": 1500, + "name": "net0", + "type": "physical", + "subnets": [ + { + "address": "8.12.42.26/24", 
+ "gateway": "8.12.42.1", + "type": "static", + } + ], + }, + { + "mac_address": "90:b8:d0:0a:51:31", + "mtu": 1500, + "name": "net1", + "type": "physical", + "subnets": [ + {"address": "10.210.1.27/24", "type": "static"} + ], + }, + ], + } found = convert_net(SDC_NICS_SINGLE_GATEWAY) self.assertEqual(expected, found) def test_routes_on_all_nics(self): routes = [ - {'linklocal': False, 'dst': '3.0.0.0/8', 'gateway': '8.12.42.3'}, - {'linklocal': False, 'dst': '4.0.0.0/8', 'gateway': '10.210.1.4'}] + {"linklocal": False, "dst": "3.0.0.0/8", "gateway": "8.12.42.3"}, + {"linklocal": False, "dst": "4.0.0.0/8", "gateway": "10.210.1.4"}, + ] expected = { - 'version': 1, - 'config': [ - {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500, - 'name': 'net0', 'type': 'physical', - 'subnets': [{'address': '8.12.42.26/24', - 'gateway': '8.12.42.1', 'type': 'static', - 'routes': [{'network': '3.0.0.0/8', - 'gateway': '8.12.42.3'}, - {'network': '4.0.0.0/8', - 'gateway': '10.210.1.4'}]}]}, - {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500, - 'name': 'net1', 'type': 'physical', - 'subnets': [{'address': '10.210.1.27/24', 'type': 'static', - 'routes': [{'network': '3.0.0.0/8', - 'gateway': '8.12.42.3'}, - {'network': '4.0.0.0/8', - 'gateway': '10.210.1.4'}]}]}]} + "version": 1, + "config": [ + { + "mac_address": "90:b8:d0:d8:82:b4", + "mtu": 1500, + "name": "net0", + "type": "physical", + "subnets": [ + { + "address": "8.12.42.26/24", + "gateway": "8.12.42.1", + "type": "static", + "routes": [ + { + "network": "3.0.0.0/8", + "gateway": "8.12.42.3", + }, + { + "network": "4.0.0.0/8", + "gateway": "10.210.1.4", + }, + ], + } + ], + }, + { + "mac_address": "90:b8:d0:0a:51:31", + "mtu": 1500, + "name": "net1", + "type": "physical", + "subnets": [ + { + "address": "10.210.1.27/24", + "type": "static", + "routes": [ + { + "network": "3.0.0.0/8", + "gateway": "8.12.42.3", + }, + { + "network": "4.0.0.0/8", + "gateway": "10.210.1.4", + }, + ], + } + ], + }, + ], + } found = convert_net(SDC_NICS_SINGLE_GATEWAY, routes=routes) self.maxDiff = None self.assertEqual(expected, found) -@unittest.skipUnless(get_smartos_environ() == SMARTOS_ENV_KVM, - "Only supported on KVM and bhyve guests under SmartOS") -@unittest.skipUnless(os.access(SERIAL_DEVICE, os.W_OK), - "Requires write access to " + SERIAL_DEVICE) +@unittest.skipUnless( + get_smartos_environ() == SMARTOS_ENV_KVM, + "Only supported on KVM and bhyve guests under SmartOS", +) +@unittest.skipUnless( + os.access(SERIAL_DEVICE, os.W_OK), + "Requires write access to " + SERIAL_DEVICE, +) @unittest.skipUnless(HAS_PYSERIAL is True, "pyserial not available") class TestSerialConcurrency(CiTestCase): """ - This class tests locking on an actual serial port, and as such can only - be run in a kvm or bhyve guest running on a SmartOS host. A test run on - a metadata socket will not be valid because a metadata socket ensures - there is only one session over a connection. In contrast, in the - absence of proper locking multiple processes opening the same serial - port can corrupt each others' exchanges with the metadata server. - - This takes on the order of 2 to 3 minutes to run. + This class tests locking on an actual serial port, and as such can only + be run in a kvm or bhyve guest running on a SmartOS host. A test run on + a metadata socket will not be valid because a metadata socket ensures + there is only one session over a connection. 
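# Across the TestNetworkConversion cases above, each SmartOS nic entry
# becomes one v1 "physical" item: "dhcp" in its ips list turns into a
# dhcp4 subnet and CIDR strings become static subnets. A simplified
# sketch of that mapping (gateway, route, and DNS handling omitted):
def nic_to_v1(nic):
    subnets = []
    for ip in nic.get("ips", []):
        if ip == "dhcp":
            subnets.append({"type": "dhcp4"})
        else:
            subnets.append({"type": "static", "address": ip})
    return {
        "type": "physical",
        "name": nic["interface"],
        "mac_address": nic["mac"],
        "mtu": nic.get("mtu", 1500),
        "subnets": subnets,
    }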
In contrast, in the + absence of proper locking multiple processes opening the same serial + port can corrupt each others' exchanges with the metadata server. + + This takes on the order of 2 to 3 minutes to run. """ - allowed_subp = ['mdata-get'] + + allowed_subp = ["mdata-get"] def setUp(self): self.mdata_proc = multiprocessing.Process(target=self.start_mdata_loop) @@ -1128,16 +1385,16 @@ class TestSerialConcurrency(CiTestCase): def start_mdata_loop(self): """ - The mdata-get command is repeatedly run in a separate process so - that it may try to race with metadata operations performed in the - main test process. Use of mdata-get is better than two processes - using the protocol implementation in DataSourceSmartOS because we - are testing to be sure that cloud-init and mdata-get respect each - others locks. + The mdata-get command is repeatedly run in a separate process so + that it may try to race with metadata operations performed in the + main test process. Use of mdata-get is better than two processes + using the protocol implementation in DataSourceSmartOS because we + are testing to be sure that cloud-init and mdata-get respect each + others locks. """ rcs = list(range(0, 256)) while True: - subp(['mdata-get', 'sdc:routes'], rcs=rcs) + subp(["mdata-get", "sdc:routes"], rcs=rcs) def test_all_keys(self): self.assertIsNotNone(self.mdata_proc.pid) @@ -1160,4 +1417,5 @@ class TestSerialConcurrency(CiTestCase): self.assertIsNone(self.mdata_proc.exitcode) + # vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_upcloud.py b/tests/unittests/sources/test_upcloud.py index 1d792066..e1125b65 100644 --- a/tests/unittests/sources/test_upcloud.py +++ b/tests/unittests/sources/test_upcloud.py @@ -4,15 +4,15 @@ import json -from cloudinit import helpers -from cloudinit import settings -from cloudinit import sources -from cloudinit.sources.DataSourceUpCloud import DataSourceUpCloud, \ - DataSourceUpCloudLocal - -from tests.unittests.helpers import mock, CiTestCase - -UC_METADATA = json.loads(""" +from cloudinit import helpers, settings, sources +from cloudinit.sources.DataSourceUpCloud import ( + DataSourceUpCloud, + DataSourceUpCloudLocal, +) +from tests.unittests.helpers import CiTestCase, mock + +UC_METADATA = json.loads( + """ { "cloud_name": "upcloud", "instance_id": "00322b68-0096-4042-9406-faad61922128", @@ -130,14 +130,17 @@ UC_METADATA = json.loads(""" "user_data": "", "vendor_data": "" } -""") +""" +) -UC_METADATA["user_data"] = b"""#cloud-config +UC_METADATA[ + "user_data" +] = b"""#cloud-config runcmd: - [touch, /root/cloud-init-worked ] """ -MD_URL = 'http://169.254.169.254/metadata/v1.json' +MD_URL = "http://169.254.169.254/metadata/v1.json" def _mock_dmi(): @@ -148,25 +151,27 @@ class TestUpCloudMetadata(CiTestCase): """ Test reading the meta-data """ + def setUp(self): super(TestUpCloudMetadata, self).setUp() self.tmp = self.tmp_dir() def get_ds(self, get_sysinfo=_mock_dmi): ds = DataSourceUpCloud( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) + ) if get_sysinfo: ds._get_sysinfo = get_sysinfo return ds - @mock.patch('cloudinit.sources.helpers.upcloud.read_sysinfo') + @mock.patch("cloudinit.sources.helpers.upcloud.read_sysinfo") def test_returns_false_not_on_upcloud(self, m_read_sysinfo): m_read_sysinfo.return_value = (False, None) ds = self.get_ds(get_sysinfo=None) self.assertEqual(False, ds.get_data()) self.assertTrue(m_read_sysinfo.called) - 
@mock.patch('cloudinit.sources.helpers.upcloud.read_metadata') + @mock.patch("cloudinit.sources.helpers.upcloud.read_metadata") def test_metadata(self, mock_readmd): mock_readmd.return_value = UC_METADATA.copy() @@ -178,15 +183,17 @@ class TestUpCloudMetadata(CiTestCase): self.assertTrue(mock_readmd.called) - self.assertEqual(UC_METADATA.get('user_data'), ds.get_userdata_raw()) - self.assertEqual(UC_METADATA.get('vendor_data'), - ds.get_vendordata_raw()) - self.assertEqual(UC_METADATA.get('region'), ds.availability_zone) - self.assertEqual(UC_METADATA.get('instance_id'), ds.get_instance_id()) - self.assertEqual(UC_METADATA.get('cloud_name'), ds.cloud_name) + self.assertEqual(UC_METADATA.get("user_data"), ds.get_userdata_raw()) + self.assertEqual( + UC_METADATA.get("vendor_data"), ds.get_vendordata_raw() + ) + self.assertEqual(UC_METADATA.get("region"), ds.availability_zone) + self.assertEqual(UC_METADATA.get("instance_id"), ds.get_instance_id()) + self.assertEqual(UC_METADATA.get("cloud_name"), ds.cloud_name) - self.assertEqual(UC_METADATA.get('public_keys'), - ds.get_public_ssh_keys()) + self.assertEqual( + UC_METADATA.get("public_keys"), ds.get_public_ssh_keys() + ) self.assertIsInstance(ds.get_public_ssh_keys(), list) @@ -201,24 +208,30 @@ class TestUpCloudNetworkSetup(CiTestCase): def get_ds(self, get_sysinfo=_mock_dmi): ds = DataSourceUpCloudLocal( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) + ) if get_sysinfo: ds._get_sysinfo = get_sysinfo return ds - @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata') - @mock.patch('cloudinit.net.find_fallback_nic') - @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') - @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') - def test_network_configured_metadata(self, m_net, m_dhcp, - m_fallback_nic, mock_readmd): + @mock.patch("cloudinit.sources.helpers.upcloud.read_metadata") + @mock.patch("cloudinit.net.find_fallback_nic") + @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") + @mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network") + def test_network_configured_metadata( + self, m_net, m_dhcp, m_fallback_nic, mock_readmd + ): mock_readmd.return_value = UC_METADATA.copy() - m_fallback_nic.return_value = 'eth1' - m_dhcp.return_value = [{ - 'interface': 'eth1', 'fixed-address': '10.6.3.27', - 'routers': '10.6.0.1', 'subnet-mask': '22', - 'broadcast-address': '10.6.3.255'} + m_fallback_nic.return_value = "eth1" + m_dhcp.return_value = [ + { + "interface": "eth1", + "fixed-address": "10.6.3.27", + "routers": "10.6.0.1", + "subnet-mask": "22", + "broadcast-address": "10.6.3.255", + } ] ds = self.get_ds() @@ -227,33 +240,36 @@ class TestUpCloudNetworkSetup(CiTestCase): self.assertTrue(ret) self.assertTrue(m_dhcp.called) - m_dhcp.assert_called_with('eth1', None) + m_dhcp.assert_called_with("eth1", None) m_net.assert_called_once_with( - broadcast='10.6.3.255', interface='eth1', - ip='10.6.3.27', prefix_or_mask='22', - router='10.6.0.1', static_routes=None + broadcast="10.6.3.255", + interface="eth1", + ip="10.6.3.27", + prefix_or_mask="22", + router="10.6.0.1", + static_routes=None, ) self.assertTrue(mock_readmd.called) - self.assertEqual(UC_METADATA.get('region'), ds.availability_zone) - self.assertEqual(UC_METADATA.get('instance_id'), ds.get_instance_id()) - self.assertEqual(UC_METADATA.get('cloud_name'), ds.cloud_name) + self.assertEqual(UC_METADATA.get("region"), ds.availability_zone) + 
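# test_network_configured_metadata above asserts a field-for-field
# mapping from the mocked DHCP lease onto the EphemeralIPv4Network
# call. A sketch of that mapping (the keyword names mirror the mocked
# call in the test):
lease = {
    "interface": "eth1",
    "fixed-address": "10.6.3.27",
    "routers": "10.6.0.1",
    "subnet-mask": "22",
    "broadcast-address": "10.6.3.255",
}
ephemeral_kwargs = dict(
    interface=lease["interface"],
    ip=lease["fixed-address"],
    prefix_or_mask=lease["subnet-mask"],
    broadcast=lease["broadcast-address"],
    router=lease["routers"],
    static_routes=None,
)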
self.assertEqual(UC_METADATA.get("instance_id"), ds.get_instance_id()) + self.assertEqual(UC_METADATA.get("cloud_name"), ds.cloud_name) - @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata') - @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch("cloudinit.sources.helpers.upcloud.read_metadata") + @mock.patch("cloudinit.net.get_interfaces_by_mac") def test_network_configuration(self, m_get_by_mac, mock_readmd): mock_readmd.return_value = UC_METADATA.copy() - raw_ifaces = UC_METADATA.get('network').get('interfaces') + raw_ifaces = UC_METADATA.get("network").get("interfaces") self.assertEqual(4, len(raw_ifaces)) m_get_by_mac.return_value = { - raw_ifaces[0].get('mac'): 'eth0', - raw_ifaces[1].get('mac'): 'eth1', - raw_ifaces[2].get('mac'): 'eth2', - raw_ifaces[3].get('mac'): 'eth3', + raw_ifaces[0].get("mac"): "eth0", + raw_ifaces[1].get("mac"): "eth1", + raw_ifaces[2].get("mac"): "eth2", + raw_ifaces[3].get("mac"): "eth3", } ds = self.get_ds() @@ -266,49 +282,50 @@ class TestUpCloudNetworkSetup(CiTestCase): netcfg = ds.network_config - self.assertEqual(1, netcfg.get('version')) + self.assertEqual(1, netcfg.get("version")) - config = netcfg.get('config') + config = netcfg.get("config") self.assertIsInstance(config, list) self.assertEqual(5, len(config)) - self.assertEqual('physical', config[3].get('type')) + self.assertEqual("physical", config[3].get("type")) - self.assertEqual(raw_ifaces[2].get('mac'), config[2] - .get('mac_address')) - self.assertEqual(1, len(config[2].get('subnets'))) - self.assertEqual('ipv6_dhcpv6-stateless', config[2].get('subnets')[0] - .get('type')) + self.assertEqual( + raw_ifaces[2].get("mac"), config[2].get("mac_address") + ) + self.assertEqual(1, len(config[2].get("subnets"))) + self.assertEqual( + "ipv6_dhcpv6-stateless", config[2].get("subnets")[0].get("type") + ) - self.assertEqual(2, len(config[0].get('subnets'))) - self.assertEqual('static', config[0].get('subnets')[1].get('type')) + self.assertEqual(2, len(config[0].get("subnets"))) + self.assertEqual("static", config[0].get("subnets")[1].get("type")) dns = config[4] - self.assertEqual('nameserver', dns.get('type')) - self.assertEqual(2, len(dns.get('address'))) + self.assertEqual("nameserver", dns.get("type")) + self.assertEqual(2, len(dns.get("address"))) self.assertEqual( - UC_METADATA.get('network').get('dns')[1], - dns.get('address')[1] + UC_METADATA.get("network").get("dns")[1], dns.get("address")[1] ) class TestUpCloudDatasourceLoading(CiTestCase): def test_get_datasource_list_returns_in_local(self): - deps = (sources.DEP_FILESYSTEM, ) + deps = (sources.DEP_FILESYSTEM,) ds_list = sources.DataSourceUpCloud.get_datasource_list(deps) - self.assertEqual(ds_list, - [DataSourceUpCloudLocal]) + self.assertEqual(ds_list, [DataSourceUpCloudLocal]) def test_get_datasource_list_returns_in_normal(self): deps = (sources.DEP_FILESYSTEM, sources.DEP_NETWORK) ds_list = sources.DataSourceUpCloud.get_datasource_list(deps) - self.assertEqual(ds_list, - [DataSourceUpCloud]) + self.assertEqual(ds_list, [DataSourceUpCloud]) def test_list_sources_finds_ds(self): found = sources.list_sources( - ['UpCloud'], (sources.DEP_FILESYSTEM, sources.DEP_NETWORK), - ['cloudinit.sources']) - self.assertEqual([DataSourceUpCloud], - found) + ["UpCloud"], + (sources.DEP_FILESYSTEM, sources.DEP_NETWORK), + ["cloudinit.sources"], + ) + self.assertEqual([DataSourceUpCloud], found) + # vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_vmware.py b/tests/unittests/sources/test_vmware.py index d34d7782..dcdbda89 
100644 --- a/tests/unittests/sources/test_vmware.py +++ b/tests/unittests/sources/test_vmware.py @@ -10,17 +10,15 @@ import os import pytest -from cloudinit import dmi, helpers, safeyaml -from cloudinit import settings +from cloudinit import dmi, helpers, safeyaml, settings from cloudinit.sources import DataSourceVMware from tests.unittests.helpers import ( - mock, CiTestCase, FilesystemMockingTestCase, + mock, populate_dir, ) - PRODUCT_NAME_FILE_PATH = "/sys/class/dmi/id/product_name" PRODUCT_NAME = "VMware7,1" PRODUCT_UUID = "82343CED-E4C7-423B-8F6B-0D34D19067AB" @@ -61,11 +59,11 @@ runcmd: @pytest.yield_fixture(autouse=True) def common_patches(): - with mock.patch('cloudinit.util.platform.platform', return_value='Linux'): + with mock.patch("cloudinit.util.platform.platform", return_value="Linux"): with mock.patch.multiple( - 'cloudinit.dmi', + "cloudinit.dmi", is_container=mock.Mock(return_value=False), - is_FreeBSD=mock.Mock(return_value=False) + is_FreeBSD=mock.Mock(return_value=False), ): yield diff --git a/tests/unittests/sources/test_vultr.py b/tests/unittests/sources/test_vultr.py index 40594b95..21d5bc17 100644 --- a/tests/unittests/sources/test_vultr.py +++ b/tests/unittests/sources/test_vultr.py @@ -7,255 +7,204 @@ import json -from cloudinit import helpers -from cloudinit import settings +from cloudinit import helpers, settings from cloudinit.sources import DataSourceVultr from cloudinit.sources.helpers import vultr - -from tests.unittests.helpers import mock, CiTestCase +from tests.unittests.helpers import CiTestCase, mock # Vultr metadata test data VULTR_V1_1 = { - 'bgp': { - 'ipv4': { - 'my-address': '', - 'my-asn': '', - 'peer-address': '', - 'peer-asn': '' + "bgp": { + "ipv4": { + "my-address": "", + "my-asn": "", + "peer-address": "", + "peer-asn": "", + }, + "ipv6": { + "my-address": "", + "my-asn": "", + "peer-address": "", + "peer-asn": "", }, - 'ipv6': { - 'my-address': '', - 'my-asn': '', - 'peer-address': '', - 'peer-asn': '' - } }, - 'hostname': 'CLOUDINIT_1', - 'instanceid': '42506325', - 'interfaces': [ + "hostname": "CLOUDINIT_1", + "instanceid": "42506325", + "interfaces": [ { - 'ipv4': { - 'additional': [ - ], - 'address': '108.61.89.242', - 'gateway': '108.61.89.1', - 'netmask': '255.255.255.0' + "ipv4": { + "additional": [], + "address": "108.61.89.242", + "gateway": "108.61.89.1", + "netmask": "255.255.255.0", }, - 'ipv6': { - 'additional': [ - ], - 'address': '2001:19f0:5:56c2:5400:03ff:fe15:c465', - 'network': '2001:19f0:5:56c2::', - 'prefix': '64' + "ipv6": { + "additional": [], + "address": "2001:19f0:5:56c2:5400:03ff:fe15:c465", + "network": "2001:19f0:5:56c2::", + "prefix": "64", }, - 'mac': '56:00:03:15:c4:65', - 'network-type': 'public' + "mac": "56:00:03:15:c4:65", + "network-type": "public", } ], - 'public-keys': [ - 'ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key' - ], - 'region': { - 'regioncode': 'EWR' - }, - 'user-defined': [ - ], - 'startup-script': 'echo No configured startup script', - 'raid1-script': '', - 'user-data': [ - ], - 'vendor-data': [ + "public-keys": ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"], + "region": {"regioncode": "EWR"}, + "user-defined": [], + "startup-script": "echo No configured startup script", + "raid1-script": "", + "user-data": [], + "vendor-data": [ { - 'package_upgrade': 'true', - 'disable_root': 0, - 'ssh_pwauth': 1, - 'chpasswd': { - 'expire': False, - 'list': [ - 'root:$6$S2Smuj.../VqxmIR9Urw0jPZ88i4yvB/' - ] + "package_upgrade": "true", + "disable_root": 0, + "ssh_pwauth": 1, + 
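# The common_patches fixture above still uses pytest.yield_fixture,
# which pytest deprecated in favor of a plain fixture that yields. An
# equivalent modern spelling, simplified here to the outer patch only
# (the name common_patches_modern is hypothetical):
import pytest
from unittest import mock

@pytest.fixture(autouse=True)
def common_patches_modern():
    with mock.patch(
        "cloudinit.util.platform.platform", return_value="Linux"
    ):
        yield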
"chpasswd": { + "expire": False, + "list": ["root:$6$S2Smuj.../VqxmIR9Urw0jPZ88i4yvB/"], }, - 'system_info': { - 'default_user': { - 'name': 'root' - } - } + "system_info": {"default_user": {"name": "root"}}, } - ] + ], } VULTR_V1_2 = { - 'bgp': { - 'ipv4': { - 'my-address': '', - 'my-asn': '', - 'peer-address': '', - 'peer-asn': '' + "bgp": { + "ipv4": { + "my-address": "", + "my-asn": "", + "peer-address": "", + "peer-asn": "", + }, + "ipv6": { + "my-address": "", + "my-asn": "", + "peer-address": "", + "peer-asn": "", }, - 'ipv6': { - 'my-address': '', - 'my-asn': '', - 'peer-address': '', - 'peer-asn': '' - } }, - 'hostname': 'CLOUDINIT_2', - 'instance-v2-id': '29bea708-2e6e-480a-90ad-0e6b5d5ad62f', - 'instanceid': '42872224', - 'interfaces': [ + "hostname": "CLOUDINIT_2", + "instance-v2-id": "29bea708-2e6e-480a-90ad-0e6b5d5ad62f", + "instanceid": "42872224", + "interfaces": [ { - 'ipv4': { - 'additional': [ - ], - 'address':'45.76.7.171', - 'gateway':'45.76.6.1', - 'netmask':'255.255.254.0' + "ipv4": { + "additional": [], + "address": "45.76.7.171", + "gateway": "45.76.6.1", + "netmask": "255.255.254.0", }, - 'ipv6':{ - 'additional': [ - ], - 'address':'2001:19f0:5:28a7:5400:03ff:fe1b:4eca', - 'network':'2001:19f0:5:28a7::', - 'prefix':'64' + "ipv6": { + "additional": [], + "address": "2001:19f0:5:28a7:5400:03ff:fe1b:4eca", + "network": "2001:19f0:5:28a7::", + "prefix": "64", }, - 'mac':'56:00:03:1b:4e:ca', - 'network-type':'public' + "mac": "56:00:03:1b:4e:ca", + "network-type": "public", }, { - 'ipv4': { - 'additional': [ - ], - 'address':'10.1.112.3', - 'gateway':'', - 'netmask':'255.255.240.0' - }, - 'ipv6':{ - 'additional': [ - ], - 'network':'', - 'prefix':'' + "ipv4": { + "additional": [], + "address": "10.1.112.3", + "gateway": "", + "netmask": "255.255.240.0", }, - 'mac':'5a:00:03:1b:4e:ca', - 'network-type':'private', - 'network-v2-id':'fbbe2b5b-b986-4396-87f5-7246660ccb64', - 'networkid':'net5e7155329d730' - } - ], - 'public-keys': [ - 'ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key' - ], - 'region': { - 'regioncode': 'EWR' - }, - 'user-defined': [ - ], - 'startup-script': 'echo No configured startup script', - 'user-data': [ + "ipv6": {"additional": [], "network": "", "prefix": ""}, + "mac": "5a:00:03:1b:4e:ca", + "network-type": "private", + "network-v2-id": "fbbe2b5b-b986-4396-87f5-7246660ccb64", + "networkid": "net5e7155329d730", + }, ], - - 'vendor-data': [ + "public-keys": ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"], + "region": {"regioncode": "EWR"}, + "user-defined": [], + "startup-script": "echo No configured startup script", + "user-data": [], + "vendor-data": [ { - 'package_upgrade': 'true', - 'disable_root': 0, - 'ssh_pwauth': 1, - 'chpasswd': { - 'expire': False, - 'list': [ - 'root:$6$SxXx...k2mJNIzZB5vMCDBlYT1' - ] + "package_upgrade": "true", + "disable_root": 0, + "ssh_pwauth": 1, + "chpasswd": { + "expire": False, + "list": ["root:$6$SxXx...k2mJNIzZB5vMCDBlYT1"], }, - 'system_info': { - 'default_user': { - 'name': 'root' - } - } + "system_info": {"default_user": {"name": "root"}}, } - ] + ], } -SSH_KEYS_1 = [ - "ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key" -] +SSH_KEYS_1 = ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"] # Expected generated objects # Expected config EXPECTED_VULTR_CONFIG = { - 'package_upgrade': 'true', - 'disable_root': 0, - 'ssh_pwauth': 1, - 'chpasswd': { - 'expire': False, - 'list': [ - 'root:$6$SxXx...k2mJNIzZB5vMCDBlYT1' - ] + "package_upgrade": "true", + "disable_root": 0, + "ssh_pwauth": 1, + 
"chpasswd": { + "expire": False, + "list": ["root:$6$SxXx...k2mJNIzZB5vMCDBlYT1"], }, - 'system_info': { - 'default_user': { - 'name': 'root' - } - } + "system_info": {"default_user": {"name": "root"}}, } # Expected network config object from generator EXPECTED_VULTR_NETWORK_1 = { - 'version': 1, - 'config': [ + "version": 1, + "config": [ + {"type": "nameserver", "address": ["108.61.10.10"]}, { - 'type': 'nameserver', - 'address': ['108.61.10.10'] - }, - { - 'name': 'eth0', - 'type': 'physical', - 'mac_address': '56:00:03:15:c4:65', - 'accept-ra': 1, - 'subnets': [ - {'type': 'dhcp', 'control': 'auto'}, - {'type': 'ipv6_slaac', 'control': 'auto'} + "name": "eth0", + "type": "physical", + "mac_address": "56:00:03:15:c4:65", + "accept-ra": 1, + "subnets": [ + {"type": "dhcp", "control": "auto"}, + {"type": "ipv6_slaac", "control": "auto"}, ], - } - ] + }, + ], } EXPECTED_VULTR_NETWORK_2 = { - 'version': 1, - 'config': [ + "version": 1, + "config": [ + {"type": "nameserver", "address": ["108.61.10.10"]}, { - 'type': 'nameserver', - 'address': ['108.61.10.10'] - }, - { - 'name': 'eth0', - 'type': 'physical', - 'mac_address': '56:00:03:1b:4e:ca', - 'accept-ra': 1, - 'subnets': [ - {'type': 'dhcp', 'control': 'auto'}, - {'type': 'ipv6_slaac', 'control': 'auto'} + "name": "eth0", + "type": "physical", + "mac_address": "56:00:03:1b:4e:ca", + "accept-ra": 1, + "subnets": [ + {"type": "dhcp", "control": "auto"}, + {"type": "ipv6_slaac", "control": "auto"}, ], }, { - 'name': 'eth1', - 'type': 'physical', - 'mac_address': '5a:00:03:1b:4e:ca', - 'subnets': [ + "name": "eth1", + "type": "physical", + "mac_address": "5a:00:03:1b:4e:ca", + "subnets": [ { "type": "static", "control": "auto", "address": "10.1.112.3", - "netmask": "255.255.240.0" + "netmask": "255.255.240.0", } ], - } - ] + }, + ], } INTERFACE_MAP = { - '56:00:03:15:c4:65': 'eth0', - '56:00:03:1b:4e:ca': 'eth0', - '5a:00:03:1b:4e:ca': 'eth1' + "56:00:03:15:c4:65": "eth0", + "56:00:03:1b:4e:ca": "eth0", + "5a:00:03:1b:4e:ca": "eth1", } @@ -264,41 +213,39 @@ class TestDataSourceVultr(CiTestCase): super(TestDataSourceVultr, self).setUp() # Stored as a dict to make it easier to maintain - raw1 = json.dumps(VULTR_V1_1['vendor-data'][0]) - raw2 = json.dumps(VULTR_V1_2['vendor-data'][0]) + raw1 = json.dumps(VULTR_V1_1["vendor-data"][0]) + raw2 = json.dumps(VULTR_V1_2["vendor-data"][0]) # Make expected format - VULTR_V1_1['vendor-data'] = [raw1] - VULTR_V1_2['vendor-data'] = [raw2] + VULTR_V1_1["vendor-data"] = [raw1] + VULTR_V1_2["vendor-data"] = [raw2] self.tmp = self.tmp_dir() # Test the datasource itself - @mock.patch('cloudinit.net.get_interfaces_by_mac') - @mock.patch('cloudinit.sources.helpers.vultr.is_vultr') - @mock.patch('cloudinit.sources.helpers.vultr.get_metadata') - def test_datasource(self, - mock_getmeta, - mock_isvultr, - mock_netmap): + @mock.patch("cloudinit.net.get_interfaces_by_mac") + @mock.patch("cloudinit.sources.helpers.vultr.is_vultr") + @mock.patch("cloudinit.sources.helpers.vultr.get_metadata") + def test_datasource(self, mock_getmeta, mock_isvultr, mock_netmap): mock_getmeta.return_value = VULTR_V1_2 mock_isvultr.return_value = True mock_netmap.return_value = INTERFACE_MAP source = DataSourceVultr.DataSourceVultr( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) + ) # Test for failure self.assertEqual(True, source._get_data()) # Test instance id - self.assertEqual("42872224", source.metadata['instanceid']) + 
self.assertEqual("42872224", source.metadata["instanceid"]) # Test hostname - self.assertEqual("CLOUDINIT_2", source.metadata['local-hostname']) + self.assertEqual("CLOUDINIT_2", source.metadata["local-hostname"]) # Test ssh keys - self.assertEqual(SSH_KEYS_1, source.metadata['public-keys']) + self.assertEqual(SSH_KEYS_1, source.metadata["public-keys"]) # Test vendor data generation orig_val = self.maxDiff @@ -309,7 +256,8 @@ class TestDataSourceVultr(CiTestCase): # Test vendor config self.assertEqual( EXPECTED_VULTR_CONFIG, - json.loads(vendordata[0].replace("#cloud-config", ""))) + json.loads(vendordata[0].replace("#cloud-config", "")), + ) self.maxDiff = orig_val @@ -317,21 +265,24 @@ class TestDataSourceVultr(CiTestCase): self.assertEqual(EXPECTED_VULTR_NETWORK_2, source.network_config) # Test network config generation - @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch("cloudinit.net.get_interfaces_by_mac") def test_network_config(self, mock_netmap): mock_netmap.return_value = INTERFACE_MAP - interf = VULTR_V1_1['interfaces'] + interf = VULTR_V1_1["interfaces"] - self.assertEqual(EXPECTED_VULTR_NETWORK_1, - vultr.generate_network_config(interf)) + self.assertEqual( + EXPECTED_VULTR_NETWORK_1, vultr.generate_network_config(interf) + ) # Test Private Networking config generation - @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch("cloudinit.net.get_interfaces_by_mac") def test_private_network_config(self, mock_netmap): mock_netmap.return_value = INTERFACE_MAP - interf = VULTR_V1_2['interfaces'] + interf = VULTR_V1_2["interfaces"] + + self.assertEqual( + EXPECTED_VULTR_NETWORK_2, vultr.generate_network_config(interf) + ) - self.assertEqual(EXPECTED_VULTR_NETWORK_2, - vultr.generate_network_config(interf)) # vi: ts=4 expandtab diff --git a/tests/unittests/sources/vmware/test_custom_script.py b/tests/unittests/sources/vmware/test_custom_script.py index fcbb9cd5..9b3e079f 100644 --- a/tests/unittests/sources/vmware/test_custom_script.py +++ b/tests/unittests/sources/vmware/test_custom_script.py @@ -7,12 +7,13 @@ import os import stat + from cloudinit import util from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( CustomScriptConstant, CustomScriptNotFound, - PreCustomScript, PostCustomScript, + PreCustomScript, ) from tests.unittests.helpers import CiTestCase, mock @@ -22,8 +23,7 @@ class TestVmwareCustomScript(CiTestCase): self.tmpDir = self.tmp_dir() # Mock the tmpDir as the root dir in VM. self.execDir = os.path.join(self.tmpDir, ".customization") - self.execScript = os.path.join(self.execDir, - ".customize.sh") + self.execScript = os.path.join(self.execDir, ".customize.sh") def test_prepare_custom_script(self): """ @@ -36,23 +36,24 @@ class TestVmwareCustomScript(CiTestCase): preCust = PreCustomScript("random-vmw-test", self.tmpDir) self.assertEqual("random-vmw-test", preCust.scriptname) self.assertEqual(self.tmpDir, preCust.directory) - self.assertEqual(self.tmp_path("random-vmw-test", self.tmpDir), - preCust.scriptpath) + self.assertEqual( + self.tmp_path("random-vmw-test", self.tmpDir), preCust.scriptpath + ) with self.assertRaises(CustomScriptNotFound): preCust.prepare_script() # Custom script exists. 
custScript = self.tmp_path("test-cust", self.tmpDir) util.write_file(custScript, "test-CR-strip\r\r") - with mock.patch.object(CustomScriptConstant, - "CUSTOM_TMP_DIR", - self.execDir): - with mock.patch.object(CustomScriptConstant, - "CUSTOM_SCRIPT", - self.execScript): - postCust = PostCustomScript("test-cust", - self.tmpDir, - self.tmpDir) + with mock.patch.object( + CustomScriptConstant, "CUSTOM_TMP_DIR", self.execDir + ): + with mock.patch.object( + CustomScriptConstant, "CUSTOM_SCRIPT", self.execScript + ): + postCust = PostCustomScript( + "test-cust", self.tmpDir, self.tmpDir + ) self.assertEqual("test-cust", postCust.scriptname) self.assertEqual(self.tmpDir, postCust.directory) self.assertEqual(custScript, postCust.scriptpath) @@ -84,26 +85,30 @@ class TestVmwareCustomScript(CiTestCase): ccScriptDir = self.tmp_dir() ccScript = os.path.join(ccScriptDir, "post-customize-guest.sh") markerFile = os.path.join(self.tmpDir, ".markerFile") - with mock.patch.object(CustomScriptConstant, - "CUSTOM_TMP_DIR", - self.execDir): - with mock.patch.object(CustomScriptConstant, - "CUSTOM_SCRIPT", - self.execScript): - with mock.patch.object(CustomScriptConstant, - "POST_CUSTOM_PENDING_MARKER", - markerFile): - postCust = PostCustomScript("test-cust", - self.tmpDir, - ccScriptDir) + with mock.patch.object( + CustomScriptConstant, "CUSTOM_TMP_DIR", self.execDir + ): + with mock.patch.object( + CustomScriptConstant, "CUSTOM_SCRIPT", self.execScript + ): + with mock.patch.object( + CustomScriptConstant, + "POST_CUSTOM_PENDING_MARKER", + markerFile, + ): + postCust = PostCustomScript( + "test-cust", self.tmpDir, ccScriptDir + ) postCust.execute() # Check cc_scripts_per_instance and marker file # are created. self.assertTrue(os.path.exists(ccScript)) with open(ccScript, "r") as f: content = f.read() - self.assertEqual(content, - "This is the script to run post cust") + self.assertEqual( + content, "This is the script to run post cust" + ) self.assertTrue(os.path.exists(markerFile)) + # vi: ts=4 expandtab diff --git a/tests/unittests/sources/vmware/test_guestcust_util.py b/tests/unittests/sources/vmware/test_guestcust_util.py index 9114f0b9..fc63bcae 100644 --- a/tests/unittests/sources/vmware/test_guestcust_util.py +++ b/tests/unittests/sources/vmware/test_guestcust_util.py @@ -21,78 +21,89 @@ class TestGuestCustUtil(CiTestCase): This test is designed to verify the behavior if vmware-toolbox-cmd is not installed. """ - with mock.patch.object(subp, 'which', return_value=None): + with mock.patch.object(subp, "which", return_value=None): self.assertEqual( - get_tools_config('section', 'key', 'defaultVal'), 'defaultVal') + get_tools_config("section", "key", "defaultVal"), "defaultVal" + ) def test_get_tools_config_internal_exception(self): """ This test is designed to verify the behavior if internal exception is raised. """ - with mock.patch.object(subp, 'which', return_value='/dummy/path'): - with mock.patch.object(subp, 'subp', - return_value=('key=value', b''), - side_effect=subp.ProcessExecutionError( - "subp failed", exit_code=99)): + with mock.patch.object(subp, "which", return_value="/dummy/path"): + with mock.patch.object( + subp, + "subp", + return_value=("key=value", b""), + side_effect=subp.ProcessExecutionError( + "subp failed", exit_code=99 + ), + ): # verify return value is 'defaultVal', not 'value'. 
self.assertEqual( - get_tools_config('section', 'key', 'defaultVal'), - 'defaultVal') + get_tools_config("section", "key", "defaultVal"), + "defaultVal", + ) def test_get_tools_config_normal(self): """ This test is designed to verify the value could be parsed from key = value of the given [section] """ - with mock.patch.object(subp, 'which', return_value='/dummy/path'): + with mock.patch.object(subp, "which", return_value="/dummy/path"): # value is not blank - with mock.patch.object(subp, 'subp', - return_value=('key = value ', b'')): + with mock.patch.object( + subp, "subp", return_value=("key = value ", b"") + ): self.assertEqual( - get_tools_config('section', 'key', 'defaultVal'), - 'value') + get_tools_config("section", "key", "defaultVal"), "value" + ) # value is blank - with mock.patch.object(subp, 'subp', - return_value=('key = ', b'')): + with mock.patch.object(subp, "subp", return_value=("key = ", b"")): self.assertEqual( - get_tools_config('section', 'key', 'defaultVal'), - '') + get_tools_config("section", "key", "defaultVal"), "" + ) # value contains = - with mock.patch.object(subp, 'subp', - return_value=('key=Bar=Wark', b'')): + with mock.patch.object( + subp, "subp", return_value=("key=Bar=Wark", b"") + ): self.assertEqual( - get_tools_config('section', 'key', 'defaultVal'), - 'Bar=Wark') + get_tools_config("section", "key", "defaultVal"), + "Bar=Wark", + ) # value contains specific characters - with mock.patch.object(subp, 'subp', - return_value=('[a] b.c_d=e-f', b'')): + with mock.patch.object( + subp, "subp", return_value=("[a] b.c_d=e-f", b"") + ): self.assertEqual( - get_tools_config('section', 'key', 'defaultVal'), - 'e-f') + get_tools_config("section", "key", "defaultVal"), "e-f" + ) def test_set_gc_status(self): """ This test is designed to verify the behavior of set_gc_status """ # config is None, return None - self.assertEqual(set_gc_status(None, 'Successful'), None) + self.assertEqual(set_gc_status(None, "Successful"), None) # post gc status is NO, return None cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") conf = Config(cf) - self.assertEqual(set_gc_status(conf, 'Successful'), None) + self.assertEqual(set_gc_status(conf, "Successful"), None) # post gc status is YES, subp is called to execute command cf._insertKey("MISC|POST-GC-STATUS", "YES") conf = Config(cf) - with mock.patch.object(subp, 'subp', - return_value=('ok', b'')) as mockobj: - self.assertEqual( - set_gc_status(conf, 'Successful'), ('ok', b'')) + with mock.patch.object( + subp, "subp", return_value=("ok", b"") + ) as mockobj: + self.assertEqual(set_gc_status(conf, "Successful"), ("ok", b"")) mockobj.assert_called_once_with( - ['vmware-rpctool', 'info-set guestinfo.gc.status Successful'], - rcs=[0]) + ["vmware-rpctool", "info-set guestinfo.gc.status Successful"], + rcs=[0], + ) + # vi: ts=4 expandtab diff --git a/tests/unittests/sources/vmware/test_vmware_config_file.py b/tests/unittests/sources/vmware/test_vmware_config_file.py index 1d66ab4a..38d45d0e 100644 --- a/tests/unittests/sources/vmware/test_vmware_config_file.py +++ b/tests/unittests/sources/vmware/test_vmware_config_file.py @@ -12,15 +12,19 @@ import sys import tempfile import textwrap -from cloudinit.sources.DataSourceOVF import get_network_config_from_conf -from cloudinit.sources.DataSourceOVF import read_vmware_imc +from cloudinit.sources.DataSourceOVF import ( + get_network_config_from_conf, + read_vmware_imc, +) from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProtoEnum from 
cloudinit.sources.helpers.vmware.imc.config import Config from cloudinit.sources.helpers.vmware.imc.config_file import ( ConfigFile as WrappedConfigFile, ) -from cloudinit.sources.helpers.vmware.imc.config_nic import gen_subnet -from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator +from cloudinit.sources.helpers.vmware.imc.config_nic import ( + NicConfigurator, + gen_subnet, +) from tests.unittests.helpers import CiTestCase, cloud_init_project_dir logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) @@ -32,7 +36,6 @@ def ConfigFile(path: str): class TestVmwareConfigFile(CiTestCase): - def test_utility_methods(self): """Tests basic utility methods of ConfigFile class""" cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") @@ -45,12 +48,14 @@ class TestVmwareConfigFile(CiTestCase): cf._insertKey("BAR", " ") self.assertEqual(2, len(cf), "insert size") - self.assertEqual('foo', cf["PASSWORD|-PASS"], "password") + self.assertEqual("foo", cf["PASSWORD|-PASS"], "password") self.assertTrue("PASSWORD|-PASS" in cf, "hasPassword") - self.assertFalse(cf.should_keep_current_value("PASSWORD|-PASS"), - "keepPassword") - self.assertFalse(cf.should_remove_current_value("PASSWORD|-PASS"), - "removePassword") + self.assertFalse( + cf.should_keep_current_value("PASSWORD|-PASS"), "keepPassword" + ) + self.assertFalse( + cf.should_remove_current_value("PASSWORD|-PASS"), "removePassword" + ) self.assertFalse("FOO" in cf, "hasFoo") self.assertTrue(cf.should_keep_current_value("FOO"), "keepFoo") self.assertFalse(cf.should_remove_current_value("FOO"), "removeFoo") @@ -62,17 +67,17 @@ class TestVmwareConfigFile(CiTestCase): """Tests instance id for the DatasourceOVF""" cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") - instance_id_prefix = 'iid-vmware-' + instance_id_prefix = "iid-vmware-" conf = Config(cf) (md1, _, _) = read_vmware_imc(conf) self.assertIn(instance_id_prefix, md1["instance-id"]) - self.assertEqual(md1["instance-id"], 'iid-vmware-imc') + self.assertEqual(md1["instance-id"], "iid-vmware-imc") (md2, _, _) = read_vmware_imc(conf) self.assertIn(instance_id_prefix, md2["instance-id"]) - self.assertEqual(md2["instance-id"], 'iid-vmware-imc') + self.assertEqual(md2["instance-id"], "iid-vmware-imc") self.assertEqual(md2["instance-id"], md1["instance-id"]) @@ -82,36 +87,38 @@ class TestVmwareConfigFile(CiTestCase): conf = Config(cf) - self.assertEqual('myhost1', conf.host_name, "hostName") - self.assertEqual('Africa/Abidjan', conf.timezone, "tz") + self.assertEqual("myhost1", conf.host_name, "hostName") + self.assertEqual("Africa/Abidjan", conf.timezone, "tz") self.assertTrue(conf.utc, "utc") - self.assertEqual(['10.20.145.1', '10.20.145.2'], - conf.name_servers, - "dns") - self.assertEqual(['eng.vmware.com', 'proxy.vmware.com'], - conf.dns_suffixes, - "suffixes") + self.assertEqual( + ["10.20.145.1", "10.20.145.2"], conf.name_servers, "dns" + ) + self.assertEqual( + ["eng.vmware.com", "proxy.vmware.com"], + conf.dns_suffixes, + "suffixes", + ) nics = conf.nics ipv40 = nics[0].staticIpv4 self.assertEqual(2, len(nics), "nics") - self.assertEqual('NIC1', nics[0].name, "nic0") - self.assertEqual('00:50:56:a6:8c:08', nics[0].mac, "mac0") + self.assertEqual("NIC1", nics[0].name, "nic0") + self.assertEqual("00:50:56:a6:8c:08", nics[0].mac, "mac0") self.assertEqual(BootProtoEnum.STATIC, nics[0].bootProto, "bootproto0") - self.assertEqual('10.20.87.154', ipv40[0].ip, "ipv4Addr0") - self.assertEqual('255.255.252.0', ipv40[0].netmask, "ipv4Mask0") + 
self.assertEqual("10.20.87.154", ipv40[0].ip, "ipv4Addr0") + self.assertEqual("255.255.252.0", ipv40[0].netmask, "ipv4Mask0") self.assertEqual(2, len(ipv40[0].gateways), "ipv4Gw0") - self.assertEqual('10.20.87.253', ipv40[0].gateways[0], "ipv4Gw0_0") - self.assertEqual('10.20.87.105', ipv40[0].gateways[1], "ipv4Gw0_1") + self.assertEqual("10.20.87.253", ipv40[0].gateways[0], "ipv4Gw0_0") + self.assertEqual("10.20.87.105", ipv40[0].gateways[1], "ipv4Gw0_1") self.assertEqual(1, len(nics[0].staticIpv6), "ipv6Cnt0") - self.assertEqual('fc00:10:20:87::154', - nics[0].staticIpv6[0].ip, - "ipv6Addr0") + self.assertEqual( + "fc00:10:20:87::154", nics[0].staticIpv6[0].ip, "ipv6Addr0" + ) - self.assertEqual('NIC2', nics[1].name, "nic1") + self.assertEqual("NIC2", nics[1].name, "nic1") self.assertTrue(not nics[1].staticIpv6, "ipv61 dhcp") def test_config_file_dhcp_2nics(self): @@ -121,8 +128,8 @@ class TestVmwareConfigFile(CiTestCase): conf = Config(cf) nics = conf.nics self.assertEqual(2, len(nics), "nics") - self.assertEqual('NIC1', nics[0].name, "nic0") - self.assertEqual('00:50:56:a6:8c:08', nics[0].mac, "mac0") + self.assertEqual("NIC1", nics[0].name, "nic0") + self.assertEqual("00:50:56:a6:8c:08", nics[0].mac, "mac0") self.assertEqual(BootProtoEnum.DHCP, nics[0].bootProto, "bootproto0") def test_config_password(self): @@ -132,7 +139,7 @@ class TestVmwareConfigFile(CiTestCase): cf._insertKey("PASSWORD|RESET", "no") conf = Config(cf) - self.assertEqual('test-password', conf.admin_password, "password") + self.assertEqual("test-password", conf.admin_password, "password") self.assertFalse(conf.reset_password, "do not reset password") def test_config_reset_passwd(self): @@ -161,67 +168,66 @@ class TestVmwareConfigFile(CiTestCase): network_config = get_network_config_from_conf(config, False) - self.assertEqual(1, network_config.get('version')) + self.assertEqual(1, network_config.get("version")) - config_types = network_config.get('config') + config_types = network_config.get("config") name_servers = None dns_suffixes = None for type in config_types: - if type.get('type') == 'nameserver': - name_servers = type.get('address') - dns_suffixes = type.get('search') + if type.get("type") == "nameserver": + name_servers = type.get("address") + dns_suffixes = type.get("search") break - self.assertEqual(['10.20.145.1', '10.20.145.2'], - name_servers, - "dns") - self.assertEqual(['eng.vmware.com', 'proxy.vmware.com'], - dns_suffixes, - "suffixes") + self.assertEqual(["10.20.145.1", "10.20.145.2"], name_servers, "dns") + self.assertEqual( + ["eng.vmware.com", "proxy.vmware.com"], dns_suffixes, "suffixes" + ) def test_gen_subnet(self): """Tests if gen_subnet properly calculates network subnet from - IPv4 address and netmask""" - ip_subnet_list = [['10.20.87.253', '255.255.252.0', '10.20.84.0'], - ['10.20.92.105', '255.255.252.0', '10.20.92.0'], - ['192.168.0.10', '255.255.0.0', '192.168.0.0']] + IPv4 address and netmask""" + ip_subnet_list = [ + ["10.20.87.253", "255.255.252.0", "10.20.84.0"], + ["10.20.92.105", "255.255.252.0", "10.20.92.0"], + ["192.168.0.10", "255.255.0.0", "192.168.0.0"], + ] for entry in ip_subnet_list: - self.assertEqual(entry[2], gen_subnet(entry[0], entry[1]), - "Subnet for a specified ip and netmask") + self.assertEqual( + entry[2], + gen_subnet(entry[0], entry[1]), + "Subnet for a specified ip and netmask", + ) def test_get_config_dns_suffixes(self): """Tests if get_network_config_from_conf properly - generates nameservers and dns settings from a - specified configuration""" + generates 
nameservers and dns settings from a + specified configuration""" cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") config = Config(cf) network_config = get_network_config_from_conf(config, False) - self.assertEqual(1, network_config.get('version')) + self.assertEqual(1, network_config.get("version")) - config_types = network_config.get('config') + config_types = network_config.get("config") name_servers = None dns_suffixes = None for type in config_types: - if type.get('type') == 'nameserver': - name_servers = type.get('address') - dns_suffixes = type.get('search') + if type.get("type") == "nameserver": + name_servers = type.get("address") + dns_suffixes = type.get("search") break - self.assertEqual([], - name_servers, - "dns") - self.assertEqual(['eng.vmware.com'], - dns_suffixes, - "suffixes") + self.assertEqual([], name_servers, "dns") + self.assertEqual(["eng.vmware.com"], dns_suffixes, "suffixes") def test_get_nics_list_dhcp(self): """Tests if NicConfigurator properly calculates network subnets - for a configuration with a list of DHCP NICs""" + for a configuration with a list of DHCP NICs""" cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") config = Config(cf) @@ -231,37 +237,39 @@ class TestVmwareConfigFile(CiTestCase): self.assertEqual(2, len(nics_cfg_list), "number of config elements") - nic1 = {'name': 'NIC1'} - nic2 = {'name': 'NIC2'} + nic1 = {"name": "NIC1"} + nic2 = {"name": "NIC2"} for cfg in nics_cfg_list: - if cfg.get('name') == nic1.get('name'): + if cfg.get("name") == nic1.get("name"): nic1.update(cfg) - elif cfg.get('name') == nic2.get('name'): + elif cfg.get("name") == nic2.get("name"): nic2.update(cfg) - self.assertEqual('physical', nic1.get('type'), 'type of NIC1') - self.assertEqual('NIC1', nic1.get('name'), 'name of NIC1') - self.assertEqual('00:50:56:a6:8c:08', nic1.get('mac_address'), - 'mac address of NIC1') - subnets = nic1.get('subnets') - self.assertEqual(1, len(subnets), 'number of subnets for NIC1') + self.assertEqual("physical", nic1.get("type"), "type of NIC1") + self.assertEqual("NIC1", nic1.get("name"), "name of NIC1") + self.assertEqual( + "00:50:56:a6:8c:08", nic1.get("mac_address"), "mac address of NIC1" + ) + subnets = nic1.get("subnets") + self.assertEqual(1, len(subnets), "number of subnets for NIC1") subnet = subnets[0] - self.assertEqual('dhcp', subnet.get('type'), 'DHCP type for NIC1') - self.assertEqual('auto', subnet.get('control'), 'NIC1 Control type') - - self.assertEqual('physical', nic2.get('type'), 'type of NIC2') - self.assertEqual('NIC2', nic2.get('name'), 'name of NIC2') - self.assertEqual('00:50:56:a6:5a:de', nic2.get('mac_address'), - 'mac address of NIC2') - subnets = nic2.get('subnets') - self.assertEqual(1, len(subnets), 'number of subnets for NIC2') + self.assertEqual("dhcp", subnet.get("type"), "DHCP type for NIC1") + self.assertEqual("auto", subnet.get("control"), "NIC1 Control type") + + self.assertEqual("physical", nic2.get("type"), "type of NIC2") + self.assertEqual("NIC2", nic2.get("name"), "name of NIC2") + self.assertEqual( + "00:50:56:a6:5a:de", nic2.get("mac_address"), "mac address of NIC2" + ) + subnets = nic2.get("subnets") + self.assertEqual(1, len(subnets), "number of subnets for NIC2") subnet = subnets[0] - self.assertEqual('dhcp', subnet.get('type'), 'DHCP type for NIC2') - self.assertEqual('auto', subnet.get('control'), 'NIC2 Control type') + self.assertEqual("dhcp", subnet.get("type"), "DHCP type for NIC2") + self.assertEqual("auto", subnet.get("control"), "NIC2 Control type") def 
test_get_nics_list_static(self): """Tests if NicConfigurator properly calculates network subnets - for a configuration with 2 static NICs""" + for a configuration with 2 static NICs""" cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg") config = Config(cf) @@ -271,80 +279,93 @@ class TestVmwareConfigFile(CiTestCase): self.assertEqual(2, len(nics_cfg_list), "number of elements") - nic1 = {'name': 'NIC1'} - nic2 = {'name': 'NIC2'} + nic1 = {"name": "NIC1"} + nic2 = {"name": "NIC2"} route_list = [] for cfg in nics_cfg_list: - cfg_type = cfg.get('type') - if cfg_type == 'physical': - if cfg.get('name') == nic1.get('name'): + cfg_type = cfg.get("type") + if cfg_type == "physical": + if cfg.get("name") == nic1.get("name"): nic1.update(cfg) - elif cfg.get('name') == nic2.get('name'): + elif cfg.get("name") == nic2.get("name"): nic2.update(cfg) - self.assertEqual('physical', nic1.get('type'), 'type of NIC1') - self.assertEqual('NIC1', nic1.get('name'), 'name of NIC1') - self.assertEqual('00:50:56:a6:8c:08', nic1.get('mac_address'), - 'mac address of NIC1') + self.assertEqual("physical", nic1.get("type"), "type of NIC1") + self.assertEqual("NIC1", nic1.get("name"), "name of NIC1") + self.assertEqual( + "00:50:56:a6:8c:08", nic1.get("mac_address"), "mac address of NIC1" + ) - subnets = nic1.get('subnets') - self.assertEqual(2, len(subnets), 'Number of subnets') + subnets = nic1.get("subnets") + self.assertEqual(2, len(subnets), "Number of subnets") static_subnet = [] static6_subnet = [] for subnet in subnets: - subnet_type = subnet.get('type') - if subnet_type == 'static': + subnet_type = subnet.get("type") + if subnet_type == "static": static_subnet.append(subnet) - elif subnet_type == 'static6': + elif subnet_type == "static6": static6_subnet.append(subnet) else: - self.assertEqual(True, False, 'Unknown type') - if 'route' in subnet: - for route in subnet.get('routes'): + self.assertEqual(True, False, "Unknown type") + if "route" in subnet: + for route in subnet.get("routes"): route_list.append(route) - self.assertEqual(1, len(static_subnet), 'Number of static subnet') - self.assertEqual(1, len(static6_subnet), 'Number of static6 subnet') + self.assertEqual(1, len(static_subnet), "Number of static subnet") + self.assertEqual(1, len(static6_subnet), "Number of static6 subnet") subnet = static_subnet[0] - self.assertEqual('10.20.87.154', subnet.get('address'), - 'IPv4 address of static subnet') - self.assertEqual('255.255.252.0', subnet.get('netmask'), - 'NetMask of static subnet') - self.assertEqual('auto', subnet.get('control'), - 'control for static subnet') + self.assertEqual( + "10.20.87.154", + subnet.get("address"), + "IPv4 address of static subnet", + ) + self.assertEqual( + "255.255.252.0", subnet.get("netmask"), "NetMask of static subnet" + ) + self.assertEqual( + "auto", subnet.get("control"), "control for static subnet" + ) subnet = static6_subnet[0] - self.assertEqual('fc00:10:20:87::154', subnet.get('address'), - 'IPv6 address of static subnet') - self.assertEqual('64', subnet.get('netmask'), - 'NetMask of static6 subnet') + self.assertEqual( + "fc00:10:20:87::154", + subnet.get("address"), + "IPv6 address of static subnet", + ) + self.assertEqual( + "64", subnet.get("netmask"), "NetMask of static6 subnet" + ) - route_set = set(['10.20.87.253', '10.20.87.105', '192.168.0.10']) + route_set = set(["10.20.87.253", "10.20.87.105", "192.168.0.10"]) for route in route_list: - self.assertEqual(10000, route.get('metric'), 'metric of route') - gateway = route.get('gateway') + 
self.assertEqual(10000, route.get("metric"), "metric of route") + gateway = route.get("gateway") if gateway in route_set: route_set.discard(gateway) else: - self.assertEqual(True, False, 'invalid gateway %s' % (gateway)) + self.assertEqual(True, False, "invalid gateway %s" % (gateway)) - self.assertEqual('physical', nic2.get('type'), 'type of NIC2') - self.assertEqual('NIC2', nic2.get('name'), 'name of NIC2') - self.assertEqual('00:50:56:a6:ef:7d', nic2.get('mac_address'), - 'mac address of NIC2') + self.assertEqual("physical", nic2.get("type"), "type of NIC2") + self.assertEqual("NIC2", nic2.get("name"), "name of NIC2") + self.assertEqual( + "00:50:56:a6:ef:7d", nic2.get("mac_address"), "mac address of NIC2" + ) - subnets = nic2.get('subnets') - self.assertEqual(1, len(subnets), 'Number of subnets for NIC2') + subnets = nic2.get("subnets") + self.assertEqual(1, len(subnets), "Number of subnets for NIC2") subnet = subnets[0] - self.assertEqual('static', subnet.get('type'), 'Subnet type') - self.assertEqual('192.168.6.102', subnet.get('address'), - 'Subnet address') - self.assertEqual('255.255.0.0', subnet.get('netmask'), - 'Subnet netmask') + self.assertEqual("static", subnet.get("type"), "Subnet type") + self.assertEqual( + "192.168.6.102", subnet.get("address"), "Subnet address" + ) + self.assertEqual( + "255.255.0.0", subnet.get("netmask"), "Subnet netmask" + ) def test_custom_script(self): cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") @@ -385,8 +406,9 @@ class TestVmwareNetConfig(CiTestCase): def _get_NicConfigurator(self, text): fp = None try: - with tempfile.NamedTemporaryFile(mode="w", dir=self.tmp_dir(), - delete=False) as fp: + with tempfile.NamedTemporaryFile( + mode="w", dir=self.tmp_dir(), delete=False + ) as fp: fp.write(text) fp.close() cfg = Config(ConfigFile(fp.name)) @@ -397,7 +419,8 @@ class TestVmwareNetConfig(CiTestCase): def test_non_primary_nic_without_gateway(self): """A non primary nic set is not required to have a gateway.""" - config = textwrap.dedent("""\ + config = textwrap.dedent( + """\ [NETWORK] NETWORKING = yes BOOTPROTO = dhcp @@ -414,19 +437,32 @@ class TestVmwareNetConfig(CiTestCase): BOOTPROTO = static IPADDR = 10.20.87.154 NETMASK = 255.255.252.0 - """) + """ + ) nc = self._get_NicConfigurator(config) self.assertEqual( - [{'type': 'physical', 'name': 'NIC1', - 'mac_address': '00:50:56:a6:8c:08', - 'subnets': [ - {'control': 'auto', 'type': 'static', - 'address': '10.20.87.154', 'netmask': '255.255.252.0'}]}], - nc.generate()) + [ + { + "type": "physical", + "name": "NIC1", + "mac_address": "00:50:56:a6:8c:08", + "subnets": [ + { + "control": "auto", + "type": "static", + "address": "10.20.87.154", + "netmask": "255.255.252.0", + } + ], + } + ], + nc.generate(), + ) def test_non_primary_nic_with_gateway(self): """A non primary nic set can have a gateway.""" - config = textwrap.dedent("""\ + config = textwrap.dedent( + """\ [NETWORK] NETWORKING = yes BOOTPROTO = dhcp @@ -444,22 +480,40 @@ class TestVmwareNetConfig(CiTestCase): IPADDR = 10.20.87.154 NETMASK = 255.255.252.0 GATEWAY = 10.20.87.253 - """) + """ + ) nc = self._get_NicConfigurator(config) self.assertEqual( - [{'type': 'physical', 'name': 'NIC1', - 'mac_address': '00:50:56:a6:8c:08', - 'subnets': [ - {'control': 'auto', 'type': 'static', - 'address': '10.20.87.154', 'netmask': '255.255.252.0', - 'routes': - [{'type': 'route', 'destination': '10.20.84.0/22', - 'gateway': '10.20.87.253', 'metric': 10000}]}]}], - nc.generate()) + [ + { + "type": "physical", + "name": "NIC1", + 
"mac_address": "00:50:56:a6:8c:08", + "subnets": [ + { + "control": "auto", + "type": "static", + "address": "10.20.87.154", + "netmask": "255.255.252.0", + "routes": [ + { + "type": "route", + "destination": "10.20.84.0/22", + "gateway": "10.20.87.253", + "metric": 10000, + } + ], + } + ], + } + ], + nc.generate(), + ) def test_cust_non_primary_nic_with_gateway_(self): """A customer non primary nic set can have a gateway.""" - config = textwrap.dedent("""\ + config = textwrap.dedent( + """\ [NETWORK] NETWORKING = yes BOOTPROTO = dhcp @@ -486,22 +540,40 @@ class TestVmwareNetConfig(CiTestCase): [DATETIME] UTC = yes - """) + """ + ) nc = self._get_NicConfigurator(config) self.assertEqual( - [{'type': 'physical', 'name': 'NIC1', - 'mac_address': '00:50:56:ac:d1:8a', - 'subnets': [ - {'control': 'auto', 'type': 'static', - 'address': '100.115.223.75', 'netmask': '255.255.255.0', - 'routes': - [{'type': 'route', 'destination': '100.115.223.0/24', - 'gateway': '100.115.223.254', 'metric': 10000}]}]}], - nc.generate()) + [ + { + "type": "physical", + "name": "NIC1", + "mac_address": "00:50:56:ac:d1:8a", + "subnets": [ + { + "control": "auto", + "type": "static", + "address": "100.115.223.75", + "netmask": "255.255.255.0", + "routes": [ + { + "type": "route", + "destination": "100.115.223.0/24", + "gateway": "100.115.223.254", + "metric": 10000, + } + ], + } + ], + } + ], + nc.generate(), + ) def test_a_primary_nic_with_gateway(self): """A primary nic set can have a gateway.""" - config = textwrap.dedent("""\ + config = textwrap.dedent( + """\ [NETWORK] NETWORKING = yes BOOTPROTO = dhcp @@ -520,16 +592,28 @@ class TestVmwareNetConfig(CiTestCase): NETMASK = 255.255.252.0 PRIMARY = true GATEWAY = 10.20.87.253 - """) + """ + ) nc = self._get_NicConfigurator(config) self.assertEqual( - [{'type': 'physical', 'name': 'NIC1', - 'mac_address': '00:50:56:a6:8c:08', - 'subnets': [ - {'control': 'auto', 'type': 'static', - 'address': '10.20.87.154', 'netmask': '255.255.252.0', - 'gateway': '10.20.87.253'}]}], - nc.generate()) + [ + { + "type": "physical", + "name": "NIC1", + "mac_address": "00:50:56:a6:8c:08", + "subnets": [ + { + "control": "auto", + "type": "static", + "address": "10.20.87.154", + "netmask": "255.255.252.0", + "gateway": "10.20.87.253", + } + ], + } + ], + nc.generate(), + ) def test_meta_data(self): cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index 4382a078..0ed8a120 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -5,14 +5,9 @@ import os import shutil import tempfile +from cloudinit import handlers, helpers, settings, url_helper, util from cloudinit.cmd import main -from cloudinit import handlers -from cloudinit import helpers -from cloudinit import settings -from cloudinit import url_helper -from cloudinit import util - -from tests.unittests.helpers import TestCase, CiTestCase, ExitStack, mock +from tests.unittests.helpers import CiTestCase, ExitStack, TestCase, mock class FakeModule(handlers.Handler): @@ -28,7 +23,6 @@ class FakeModule(handlers.Handler): class TestWalkerHandleHandler(TestCase): - def setUp(self): super(TestWalkerHandleHandler, self).setUp() tmpdir = tempfile.mkdtemp() @@ -39,13 +33,16 @@ class TestWalkerHandleHandler(TestCase): "frequency": "", "handlerdir": tmpdir, "handlers": helpers.ContentHandlers(), - "data": None} + "data": None, + } self.expected_module_name = "part-handler-%03d" % ( - self.data["handlercount"],) + 
self.data["handlercount"], + ) expected_file_name = "%s.py" % self.expected_module_name self.expected_file_fullname = os.path.join( - self.data["handlerdir"], expected_file_name) + self.data["handlerdir"], expected_file_name + ) self.module_fake = FakeModule() self.ctype = None self.filename = None @@ -56,45 +53,55 @@ class TestWalkerHandleHandler(TestCase): resources = ExitStack() self.addCleanup(resources.close) self.write_file_mock = resources.enter_context( - mock.patch('cloudinit.util.write_file')) + mock.patch("cloudinit.util.write_file") + ) def test_no_errors(self): """Payload gets written to file and added to C{pdata}.""" - with mock.patch('cloudinit.importer.import_module', - return_value=self.module_fake) as mockobj: - handlers.walker_handle_handler(self.data, self.ctype, - self.filename, self.payload) + with mock.patch( + "cloudinit.importer.import_module", return_value=self.module_fake + ) as mockobj: + handlers.walker_handle_handler( + self.data, self.ctype, self.filename, self.payload + ) mockobj.assert_called_once_with(self.expected_module_name) self.write_file_mock.assert_called_once_with( - self.expected_file_fullname, self.payload, 0o600) - self.assertEqual(self.data['handlercount'], 1) + self.expected_file_fullname, self.payload, 0o600 + ) + self.assertEqual(self.data["handlercount"], 1) def test_import_error(self): """Module import errors are logged. No handler added to C{pdata}.""" - with mock.patch('cloudinit.importer.import_module', - side_effect=ImportError) as mockobj: - handlers.walker_handle_handler(self.data, self.ctype, - self.filename, self.payload) + with mock.patch( + "cloudinit.importer.import_module", side_effect=ImportError + ) as mockobj: + handlers.walker_handle_handler( + self.data, self.ctype, self.filename, self.payload + ) mockobj.assert_called_once_with(self.expected_module_name) self.write_file_mock.assert_called_once_with( - self.expected_file_fullname, self.payload, 0o600) - self.assertEqual(self.data['handlercount'], 0) + self.expected_file_fullname, self.payload, 0o600 + ) + self.assertEqual(self.data["handlercount"], 0) def test_attribute_error(self): """Attribute errors are logged. No handler added to C{pdata}.""" - with mock.patch('cloudinit.importer.import_module', - side_effect=AttributeError, - return_value=self.module_fake) as mockobj: - handlers.walker_handle_handler(self.data, self.ctype, - self.filename, self.payload) + with mock.patch( + "cloudinit.importer.import_module", + side_effect=AttributeError, + return_value=self.module_fake, + ) as mockobj: + handlers.walker_handle_handler( + self.data, self.ctype, self.filename, self.payload + ) mockobj.assert_called_once_with(self.expected_module_name) self.write_file_mock.assert_called_once_with( - self.expected_file_fullname, self.payload, 0o600) - self.assertEqual(self.data['handlercount'], 0) + self.expected_file_fullname, self.payload, 0o600 + ) + self.assertEqual(self.data["handlercount"], 0) class TestHandlerHandlePart(TestCase): - def setUp(self): super(TestHandlerHandlePart, self).setUp() self.data = "fake data" @@ -103,7 +110,7 @@ class TestHandlerHandlePart(TestCase): self.payload = "fake payload" self.frequency = settings.PER_INSTANCE self.headers = { - 'Content-Type': self.ctype, + "Content-Type": self.ctype, } def test_normal_version_1(self): @@ -111,126 +118,172 @@ class TestHandlerHandlePart(TestCase): C{handle_part} is called without C{frequency} for C{handler_version} == 1. 
""" - mod_mock = mock.Mock(frequency=settings.PER_INSTANCE, - handler_version=1) - handlers.run_part(mod_mock, self.data, self.filename, self.payload, - self.frequency, self.headers) + mod_mock = mock.Mock( + frequency=settings.PER_INSTANCE, handler_version=1 + ) + handlers.run_part( + mod_mock, + self.data, + self.filename, + self.payload, + self.frequency, + self.headers, + ) # Assert that the handle_part() method of the mock object got # called with the expected arguments. mod_mock.handle_part.assert_called_once_with( - self.data, self.ctype, self.filename, self.payload) + self.data, self.ctype, self.filename, self.payload + ) def test_normal_version_2(self): """ C{handle_part} is called with C{frequency} for C{handler_version} == 2. """ - mod_mock = mock.Mock(frequency=settings.PER_INSTANCE, - handler_version=2) - handlers.run_part(mod_mock, self.data, self.filename, self.payload, - self.frequency, self.headers) + mod_mock = mock.Mock( + frequency=settings.PER_INSTANCE, handler_version=2 + ) + handlers.run_part( + mod_mock, + self.data, + self.filename, + self.payload, + self.frequency, + self.headers, + ) # Assert that the handle_part() method of the mock object got # called with the expected arguments. mod_mock.handle_part.assert_called_once_with( - self.data, self.ctype, self.filename, self.payload, - settings.PER_INSTANCE) + self.data, + self.ctype, + self.filename, + self.payload, + settings.PER_INSTANCE, + ) def test_modfreq_per_always(self): """ C{handle_part} is called regardless of frequency if nofreq is always. """ self.frequency = "once" - mod_mock = mock.Mock(frequency=settings.PER_ALWAYS, - handler_version=1) - handlers.run_part(mod_mock, self.data, self.filename, self.payload, - self.frequency, self.headers) + mod_mock = mock.Mock(frequency=settings.PER_ALWAYS, handler_version=1) + handlers.run_part( + mod_mock, + self.data, + self.filename, + self.payload, + self.frequency, + self.headers, + ) # Assert that the handle_part() method of the mock object got # called with the expected arguments. 
mod_mock.handle_part.assert_called_once_with( - self.data, self.ctype, self.filename, self.payload) + self.data, self.ctype, self.filename, self.payload + ) def test_no_handle_when_modfreq_once(self): """C{handle_part} is not called if frequency is once.""" self.frequency = "once" mod_mock = mock.Mock(frequency=settings.PER_ONCE) - handlers.run_part(mod_mock, self.data, self.filename, self.payload, - self.frequency, self.headers) + handlers.run_part( + mod_mock, + self.data, + self.filename, + self.payload, + self.frequency, + self.headers, + ) self.assertEqual(0, mod_mock.handle_part.call_count) def test_exception_is_caught(self): """Exceptions within C{handle_part} are caught and logged.""" - mod_mock = mock.Mock(frequency=settings.PER_INSTANCE, - handler_version=1) + mod_mock = mock.Mock( + frequency=settings.PER_INSTANCE, handler_version=1 + ) mod_mock.handle_part.side_effect = Exception try: - handlers.run_part(mod_mock, self.data, self.filename, - self.payload, self.frequency, self.headers) + handlers.run_part( + mod_mock, + self.data, + self.filename, + self.payload, + self.frequency, + self.headers, + ) except Exception: self.fail("Exception was not caught in handle_part") mod_mock.handle_part.assert_called_once_with( - self.data, self.ctype, self.filename, self.payload) + self.data, self.ctype, self.filename, self.payload + ) class TestCmdlineUrl(CiTestCase): def test_parse_cmdline_url_nokey_raises_keyerror(self): self.assertRaises( - KeyError, main.parse_cmdline_url, 'root=foo bar single') + KeyError, main.parse_cmdline_url, "root=foo bar single" + ) def test_parse_cmdline_url_found(self): - cmdline = 'root=foo bar single url=http://example.com arg1 -v' + cmdline = "root=foo bar single url=http://example.com arg1 -v" self.assertEqual( - ('url', 'http://example.com'), main.parse_cmdline_url(cmdline)) + ("url", "http://example.com"), main.parse_cmdline_url(cmdline) + ) - @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url') + @mock.patch("cloudinit.cmd.main.url_helper.read_file_or_url") def test_invalid_content(self, m_read): key = "cloud-config-url" - url = 'http://example.com/foo' + url = "http://example.com/foo" cmdline = "ro %s=%s bar=1" % (key, url) m_read.return_value = url_helper.StringResponse(b"unexpected blob") fpath = self.tmp_path("ccfile") lvl, msg = main.attempt_cmdline_url( - fpath, network=True, cmdline=cmdline) + fpath, network=True, cmdline=cmdline + ) self.assertEqual(logging.WARN, lvl) self.assertIn(url, msg) self.assertFalse(os.path.exists(fpath)) - @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url') + @mock.patch("cloudinit.cmd.main.url_helper.read_file_or_url") def test_valid_content(self, m_read): url = "http://example.com/foo" payload = b"#cloud-config\nmydata: foo\nbar: wark\n" - cmdline = "ro %s=%s bar=1" % ('cloud-config-url', url) + cmdline = "ro %s=%s bar=1" % ("cloud-config-url", url) m_read.return_value = url_helper.StringResponse(payload) fpath = self.tmp_path("ccfile") lvl, msg = main.attempt_cmdline_url( - fpath, network=True, cmdline=cmdline) + fpath, network=True, cmdline=cmdline + ) self.assertEqual(util.load_file(fpath, decode=False), payload) self.assertEqual(logging.INFO, lvl) self.assertIn(url, msg) - @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url') + @mock.patch("cloudinit.cmd.main.url_helper.read_file_or_url") def test_no_key_found(self, m_read): cmdline = "ro mykey=http://example.com/foo root=foo" fpath = self.tmp_path("ccpath") lvl, _msg = main.attempt_cmdline_url( - fpath, network=True, cmdline=cmdline) + 
fpath, network=True, cmdline=cmdline + ) m_read.assert_not_called() self.assertFalse(os.path.exists(fpath)) self.assertEqual(logging.DEBUG, lvl) - @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url') + @mock.patch("cloudinit.cmd.main.url_helper.read_file_or_url") def test_exception_warns(self, m_read): url = "http://example.com/foo" cmdline = "ro cloud-config-url=%s root=LABEL=bar" % url fpath = self.tmp_path("ccfile") m_read.side_effect = url_helper.UrlError( - cause="Unexpected Error", url="http://example.com/foo") + cause="Unexpected Error", url="http://example.com/foo" + ) lvl, msg = main.attempt_cmdline_url( - fpath, network=True, cmdline=cmdline) + fpath, network=True, cmdline=cmdline + ) self.assertEqual(logging.WARN, lvl) self.assertIn(url, msg) self.assertFalse(os.path.exists(fpath)) diff --git a/tests/unittests/test_atomic_helper.py b/tests/unittests/test_atomic_helper.py index 0c8b8e53..684a9ae5 100644 --- a/tests/unittests/test_atomic_helper.py +++ b/tests/unittests/test_atomic_helper.py @@ -5,7 +5,6 @@ import os import stat from cloudinit import atomic_helper - from tests.unittests.helpers import CiTestCase @@ -34,7 +33,7 @@ class TestAtomicHelper(CiTestCase): def test_write_json(self): """write_json output is readable json.""" path = self.tmp_path("test_write_json") - data = {'key1': 'value1', 'key2': ['i1', 'i2']} + data = {"key1": "value1", "key2": ["i1", "i2"]} atomic_helper.write_json(path, data) with open(path, "r") as fp: found = json.load(fp) @@ -55,4 +54,5 @@ class TestAtomicHelper(CiTestCase): file_stat = os.stat(path) self.assertEqual(perms, stat.S_IMODE(file_stat.st_mode)) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py index cf2c0a4d..a057be2a 100644 --- a/tests/unittests/test_builtin_handlers.py +++ b/tests/unittests/test_builtin_handlers.py @@ -5,54 +5,59 @@ import copy import errno import os -import pytest import shutil import tempfile from textwrap import dedent +import pytest -from tests.unittests.helpers import ( - FilesystemMockingTestCase, CiTestCase, mock, skipUnlessJinja) - -from cloudinit import handlers -from cloudinit import helpers -from cloudinit import subp -from cloudinit import util - +from cloudinit import handlers, helpers, subp, util from cloudinit.handlers.cloud_config import CloudConfigPartHandler from cloudinit.handlers.jinja_template import ( - JinjaTemplatePartHandler, convert_jinja_instance_data, - render_jinja_payload) + JinjaTemplatePartHandler, + convert_jinja_instance_data, + render_jinja_payload, +) from cloudinit.handlers.shell_script import ShellScriptPartHandler from cloudinit.handlers.upstart_job import UpstartJobPartHandler +from cloudinit.settings import PER_ALWAYS, PER_INSTANCE +from tests.unittests.helpers import ( + CiTestCase, + FilesystemMockingTestCase, + mock, + skipUnlessJinja, +) -from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE) - -INSTANCE_DATA_FILE = 'instance-data-sensitive.json' +INSTANCE_DATA_FILE = "instance-data-sensitive.json" class TestUpstartJobPartHandler(FilesystemMockingTestCase): - mpath = 'cloudinit.handlers.upstart_job.' + mpath = "cloudinit.handlers.upstart_job." 
def test_upstart_frequency_no_out(self): c_root = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, c_root) up_root = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, up_root) - paths = helpers.Paths({ - 'cloud_dir': c_root, - 'upstart_dir': up_root, - }) + paths = helpers.Paths( + { + "cloud_dir": c_root, + "upstart_dir": up_root, + } + ) h = UpstartJobPartHandler(paths) # No files should be written out when # the frequency is ! per-instance - h.handle_part('', handlers.CONTENT_START, - None, None, None) - h.handle_part('blah', 'text/upstart-job', - 'test.conf', 'blah', frequency=PER_ALWAYS) - h.handle_part('', handlers.CONTENT_END, - None, None, None) + h.handle_part("", handlers.CONTENT_START, None, None, None) + h.handle_part( + "blah", + "text/upstart-job", + "test.conf", + "blah", + frequency=PER_ALWAYS, + ) + h.handle_part("", handlers.CONTENT_END, None, None, None) self.assertEqual(0, len(os.listdir(up_root))) def test_upstart_frequency_single(self): @@ -62,47 +67,54 @@ class TestUpstartJobPartHandler(FilesystemMockingTestCase): self.patchOS(new_root) self.patchUtils(new_root) - paths = helpers.Paths({ - 'upstart_dir': "/etc/upstart", - }) + paths = helpers.Paths( + { + "upstart_dir": "/etc/upstart", + } + ) util.ensure_dir("/run") util.ensure_dir("/etc/upstart") - with mock.patch(self.mpath + 'SUITABLE_UPSTART', return_value=True): - with mock.patch.object(subp, 'subp') as m_subp: + with mock.patch(self.mpath + "SUITABLE_UPSTART", return_value=True): + with mock.patch.object(subp, "subp") as m_subp: h = UpstartJobPartHandler(paths) - h.handle_part('', handlers.CONTENT_START, - None, None, None) - h.handle_part('blah', 'text/upstart-job', - 'test.conf', 'blah', frequency=PER_INSTANCE) - h.handle_part('', handlers.CONTENT_END, - None, None, None) + h.handle_part("", handlers.CONTENT_START, None, None, None) + h.handle_part( + "blah", + "text/upstart-job", + "test.conf", + "blah", + frequency=PER_INSTANCE, + ) + h.handle_part("", handlers.CONTENT_END, None, None, None) - self.assertEqual(len(os.listdir('/etc/upstart')), 1) + self.assertEqual(len(os.listdir("/etc/upstart")), 1) m_subp.assert_called_once_with( - ['initctl', 'reload-configuration'], capture=False) + ["initctl", "reload-configuration"], capture=False + ) class TestJinjaTemplatePartHandler(CiTestCase): with_logs = True - mpath = 'cloudinit.handlers.jinja_template.' + mpath = "cloudinit.handlers.jinja_template." 
def setUp(self): super(TestJinjaTemplatePartHandler, self).setUp() self.tmp = self.tmp_dir() - self.run_dir = os.path.join(self.tmp, 'run_dir') + self.run_dir = os.path.join(self.tmp, "run_dir") util.ensure_dir(self.run_dir) - self.paths = helpers.Paths({ - 'cloud_dir': self.tmp, 'run_dir': self.run_dir}) + self.paths = helpers.Paths( + {"cloud_dir": self.tmp, "run_dir": self.run_dir} + ) def test_jinja_template_part_handler_defaults(self): """On init, paths are saved and subhandler types are empty.""" h = JinjaTemplatePartHandler(self.paths) - self.assertEqual(['## template: jinja'], h.prefixes) + self.assertEqual(["## template: jinja"], h.prefixes) self.assertEqual(3, h.handler_version) self.assertEqual(self.paths, h.paths) self.assertEqual({}, h.sub_handlers) @@ -112,34 +124,47 @@ class TestJinjaTemplatePartHandler(CiTestCase): script_handler = ShellScriptPartHandler(self.paths) cloudconfig_handler = CloudConfigPartHandler(self.paths) h = JinjaTemplatePartHandler( - self.paths, sub_handlers=[script_handler, cloudconfig_handler]) + self.paths, sub_handlers=[script_handler, cloudconfig_handler] + ) self.assertCountEqual( - ['text/cloud-config', 'text/cloud-config-jsonp', - 'text/x-shellscript'], - h.sub_handlers) + [ + "text/cloud-config", + "text/cloud-config-jsonp", + "text/x-shellscript", + ], + h.sub_handlers, + ) def test_jinja_template_part_handler_looks_up_subhandler_types(self): """When sub_handlers are passed, init lists types of subhandlers.""" script_handler = ShellScriptPartHandler(self.paths) cloudconfig_handler = CloudConfigPartHandler(self.paths) h = JinjaTemplatePartHandler( - self.paths, sub_handlers=[script_handler, cloudconfig_handler]) + self.paths, sub_handlers=[script_handler, cloudconfig_handler] + ) self.assertCountEqual( - ['text/cloud-config', 'text/cloud-config-jsonp', - 'text/x-shellscript'], - h.sub_handlers) + [ + "text/cloud-config", + "text/cloud-config-jsonp", + "text/x-shellscript", + ], + h.sub_handlers, + ) def test_jinja_template_handle_noop_on_content_signals(self): """Perform no part handling when content type is CONTENT_SIGNALS.""" script_handler = ShellScriptPartHandler(self.paths) - h = JinjaTemplatePartHandler( - self.paths, sub_handlers=[script_handler]) - with mock.patch.object(script_handler, 'handle_part') as m_handle_part: + h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler]) + with mock.patch.object(script_handler, "handle_part") as m_handle_part: h.handle_part( - data='data', ctype=handlers.CONTENT_START, filename='part-1', - payload='## template: jinja\n#!/bin/bash\necho himom', - frequency='freq', headers='headers') + data="data", + ctype=handlers.CONTENT_START, + filename="part-1", + payload="## template: jinja\n#!/bin/bash\necho himom", + frequency="freq", + headers="headers", + ) m_handle_part.assert_not_called() @skipUnlessJinja() @@ -150,19 +175,22 @@ class TestJinjaTemplatePartHandler(CiTestCase): # Create required instance data json file instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE) - instance_data = {'topkey': 'echo himom'} + instance_data = {"topkey": "echo himom"} util.write_file(instance_json, util.json_dumps(instance_data)) - h = JinjaTemplatePartHandler( - self.paths, sub_handlers=[script_handler]) - with mock.patch.object(script_handler, 'handle_part') as m_part: + h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler]) + with mock.patch.object(script_handler, "handle_part") as m_part: # ctype with leading '!' 
not in handlers.CONTENT_SIGNALS h.handle_part( - data='data', ctype="!" + handlers.CONTENT_START, - filename='part01', - payload='## template: jinja \t \n#!/bin/bash\n{{ topkey }}', - frequency='freq', headers='headers') + data="data", + ctype="!" + handlers.CONTENT_START, + filename="part01", + payload="## template: jinja \t \n#!/bin/bash\n{{ topkey }}", + frequency="freq", + headers="headers", + ) m_part.assert_called_once_with( - 'data', '!__begin__', 'part01', '#!/bin/bash\necho himom', 'freq') + "data", "!__begin__", "part01", "#!/bin/bash\necho himom", "freq" + ) @skipUnlessJinja() def test_jinja_template_handle_subhandler_v3_with_clean_payload(self): @@ -172,146 +200,163 @@ class TestJinjaTemplatePartHandler(CiTestCase): # Create required instance-data.json file instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE) - instance_data = {'topkey': {'sub': 'runcmd: [echo hi]'}} + instance_data = {"topkey": {"sub": "runcmd: [echo hi]"}} util.write_file(instance_json, util.json_dumps(instance_data)) h = JinjaTemplatePartHandler( - self.paths, sub_handlers=[cloudcfg_handler]) - with mock.patch.object(cloudcfg_handler, 'handle_part') as m_part: + self.paths, sub_handlers=[cloudcfg_handler] + ) + with mock.patch.object(cloudcfg_handler, "handle_part") as m_part: # ctype with leading '!' not in handlers.CONTENT_SIGNALS h.handle_part( - data='data', ctype="!" + handlers.CONTENT_END, - filename='part01', - payload='## template: jinja\n#cloud-config\n{{ topkey.sub }}', - frequency='freq', headers='headers') + data="data", + ctype="!" + handlers.CONTENT_END, + filename="part01", + payload="## template: jinja\n#cloud-config\n{{ topkey.sub }}", + frequency="freq", + headers="headers", + ) m_part.assert_called_once_with( - 'data', '!__end__', 'part01', '#cloud-config\nruncmd: [echo hi]', - 'freq', 'headers') + "data", + "!__end__", + "part01", + "#cloud-config\nruncmd: [echo hi]", + "freq", + "headers", + ) def test_jinja_template_handle_errors_on_missing_instance_data_json(self): """If instance-data is absent, raise an error from handle_part.""" script_handler = ShellScriptPartHandler(self.paths) - h = JinjaTemplatePartHandler( - self.paths, sub_handlers=[script_handler]) + h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler]) with self.assertRaises(RuntimeError) as context_manager: h.handle_part( - data='data', ctype="!" + handlers.CONTENT_START, - filename='part01', - payload='## template: jinja \n#!/bin/bash\necho himom', - frequency='freq', headers='headers') - script_file = os.path.join(script_handler.script_dir, 'part01') + data="data", + ctype="!" + handlers.CONTENT_START, + filename="part01", + payload="## template: jinja \n#!/bin/bash\necho himom", + frequency="freq", + headers="headers", + ) + script_file = os.path.join(script_handler.script_dir, "part01") self.assertEqual( - 'Cannot render jinja template vars. Instance data not yet present' - ' at {}/{}'.format(self.run_dir, INSTANCE_DATA_FILE), - str(context_manager.exception) + "Cannot render jinja template vars. 
Instance data not yet present" + " at {}/{}".format(self.run_dir, INSTANCE_DATA_FILE), + str(context_manager.exception), ) self.assertFalse( os.path.exists(script_file), - 'Unexpected file created %s' % script_file) + "Unexpected file created %s" % script_file, + ) def test_jinja_template_handle_errors_on_unreadable_instance_data(self): """If instance-data is unreadable, raise an error from handle_part.""" script_handler = ShellScriptPartHandler(self.paths) - instance_json = os.path.join( - self.run_dir, INSTANCE_DATA_FILE) + instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE) util.write_file(instance_json, util.json_dumps({})) - h = JinjaTemplatePartHandler( - self.paths, sub_handlers=[script_handler]) - with mock.patch(self.mpath + 'load_file') as m_load: + h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler]) + with mock.patch(self.mpath + "load_file") as m_load: with self.assertRaises(RuntimeError) as context_manager: - m_load.side_effect = OSError(errno.EACCES, 'Not allowed') + m_load.side_effect = OSError(errno.EACCES, "Not allowed") h.handle_part( - data='data', ctype="!" + handlers.CONTENT_START, - filename='part01', - payload='## template: jinja \n#!/bin/bash\necho himom', - frequency='freq', headers='headers') - script_file = os.path.join(script_handler.script_dir, 'part01') + data="data", + ctype="!" + handlers.CONTENT_START, + filename="part01", + payload="## template: jinja \n#!/bin/bash\necho himom", + frequency="freq", + headers="headers", + ) + script_file = os.path.join(script_handler.script_dir, "part01") self.assertEqual( "Cannot render jinja template vars. No read permission on " "'{}/{}'. Try sudo".format(self.run_dir, INSTANCE_DATA_FILE), - str(context_manager.exception)) + str(context_manager.exception), + ) self.assertFalse( os.path.exists(script_file), - 'Unexpected file created %s' % script_file) + "Unexpected file created %s" % script_file, + ) @skipUnlessJinja() def test_jinja_template_handle_renders_jinja_content(self): """When present, render jinja variables from instance data""" script_handler = ShellScriptPartHandler(self.paths) instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE) - instance_data = {'topkey': {'subkey': 'echo himom'}} + instance_data = {"topkey": {"subkey": "echo himom"}} util.write_file(instance_json, util.json_dumps(instance_data)) - h = JinjaTemplatePartHandler( - self.paths, sub_handlers=[script_handler]) + h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler]) h.handle_part( - data='data', ctype="!" + handlers.CONTENT_START, - filename='part01', + data="data", + ctype="!" 
+ handlers.CONTENT_START, + filename="part01", payload=( - '## template: jinja \n' - '#!/bin/bash\n' - '{{ topkey.subkey|default("nosubkey") }}'), - frequency='freq', headers='headers') - script_file = os.path.join(script_handler.script_dir, 'part01') + "## template: jinja \n" + "#!/bin/bash\n" + '{{ topkey.subkey|default("nosubkey") }}' + ), + frequency="freq", + headers="headers", + ) + script_file = os.path.join(script_handler.script_dir, "part01") self.assertNotIn( - 'Instance data not yet present at {}/{}'.format( - self.run_dir, INSTANCE_DATA_FILE), - self.logs.getvalue()) + "Instance data not yet present at {}/{}".format( + self.run_dir, INSTANCE_DATA_FILE + ), + self.logs.getvalue(), + ) self.assertEqual( - '#!/bin/bash\necho himom', util.load_file(script_file)) + "#!/bin/bash\necho himom", util.load_file(script_file) + ) @skipUnlessJinja() def test_jinja_template_handle_renders_jinja_content_missing_keys(self): """When specified jinja variable is undefined, log a warning.""" script_handler = ShellScriptPartHandler(self.paths) instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE) - instance_data = {'topkey': {'subkey': 'echo himom'}} + instance_data = {"topkey": {"subkey": "echo himom"}} util.write_file(instance_json, util.json_dumps(instance_data)) - h = JinjaTemplatePartHandler( - self.paths, sub_handlers=[script_handler]) + h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler]) h.handle_part( - data='data', ctype="!" + handlers.CONTENT_START, - filename='part01', - payload='## template: jinja \n#!/bin/bash\n{{ goodtry }}', - frequency='freq', headers='headers') - script_file = os.path.join(script_handler.script_dir, 'part01') + data="data", + ctype="!" + handlers.CONTENT_START, + filename="part01", + payload="## template: jinja \n#!/bin/bash\n{{ goodtry }}", + frequency="freq", + headers="headers", + ) + script_file = os.path.join(script_handler.script_dir, "part01") self.assertTrue( os.path.exists(script_file), - 'Missing expected file %s' % script_file) + "Missing expected file %s" % script_file, + ) self.assertIn( "WARNING: Could not render jinja template variables in file" " 'part01': 'goodtry'\n", - self.logs.getvalue()) + self.logs.getvalue(), + ) class TestConvertJinjaInstanceData: - @pytest.mark.parametrize( - "include_key_aliases,data,expected", ( - ( - False, - {'my-key': 'my-val'}, - {'my-key': 'my-val'} - ), + "include_key_aliases,data,expected", + ( + (False, {"my-key": "my-val"}, {"my-key": "my-val"}), ( True, - {'my-key': 'my-val'}, - {'my-key': 'my-val', 'my_key': 'my-val'} - ), - ( - False, - {'my.key': 'my.val'}, - {'my.key': 'my.val'} + {"my-key": "my-val"}, + {"my-key": "my-val", "my_key": "my-val"}, ), + (False, {"my.key": "my.val"}, {"my.key": "my.val"}), ( True, - {'my.key': 'my.val'}, - {'my.key': 'my.val', 'my_key': 'my.val'} + {"my.key": "my.val"}, + {"my.key": "my.val", "my_key": "my.val"}, ), ( True, - {'my/key': 'my/val'}, - {'my/key': 'my/val', 'my_key': 'my/val'} + {"my/key": "my/val"}, + {"my/key": "my/val", "my_key": "my/val"}, ), - ) + ), ) def test_convert_instance_data_operators_to_underscores( self, include_key_aliases, data, expected @@ -328,39 +373,48 @@ class TestConvertJinjaInstanceData: allow ease of reference for users. Instead of v1.availability_zone, the name availability_zone can be used in templates. """
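A minimal illustrative sketch of the promotion behavior that docstring describes, assuming convert_jinja_instance_data is imported from cloudinit.handlers.jinja_template (as this test module does) and using a made-up availability_zone value:

    from cloudinit.handlers.jinja_template import convert_jinja_instance_data

    # Versioned keys ("v1", "v2", ...) are copied to the top level, so a
    # template may write {{ availability_zone }} instead of
    # {{ v1.availability_zone }}; the versioned form remains available.
    converted = convert_jinja_instance_data(
        data={"v1": {"availability_zone": "us-east-2b"}}
    )
    assert converted["availability_zone"] == "us-east-2b"
    assert converted["v1"]["availability_zone"] == "us-east-2b"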
""" - data = {'ds': {'dskey1': 1, 'dskey2': 2}, - 'v1': {'v1key1': 'v1.1'}, - 'v2': {'v2key1': 'v2.1'}} + data = { + "ds": {"dskey1": 1, "dskey2": 2}, + "v1": {"v1key1": "v1.1"}, + "v2": {"v2key1": "v2.1"}, + } expected_data = copy.deepcopy(data) - expected_data.update({'v1key1': 'v1.1', 'v2key1': 'v2.1'}) + expected_data.update({"v1key1": "v1.1", "v2key1": "v2.1"}) converted_data = convert_jinja_instance_data(data=data) - assert sorted(['ds', 'v1', 'v2', 'v1key1', 'v2key1']) == sorted( + assert sorted(["ds", "v1", "v2", "v1key1", "v2key1"]) == sorted( converted_data.keys() ) assert expected_data == converted_data def test_convert_instance_data_most_recent_version_of_promoted_keys(self): """The most-recent versioned key value is promoted to top-level.""" - data = {'v1': {'key1': 'old v1 key1', 'key2': 'old v1 key2'}, - 'v2': {'key1': 'newer v2 key1', 'key3': 'newer v2 key3'}, - 'v3': {'key1': 'newest v3 key1'}} + data = { + "v1": {"key1": "old v1 key1", "key2": "old v1 key2"}, + "v2": {"key1": "newer v2 key1", "key3": "newer v2 key3"}, + "v3": {"key1": "newest v3 key1"}, + } expected_data = copy.deepcopy(data) expected_data.update( - {'key1': 'newest v3 key1', 'key2': 'old v1 key2', - 'key3': 'newer v2 key3'}) + { + "key1": "newest v3 key1", + "key2": "old v1 key2", + "key3": "newer v2 key3", + } + ) converted_data = convert_jinja_instance_data(data=data) assert expected_data == converted_data def test_convert_instance_data_decodes_decode_paths(self): """Any decode_paths provided are decoded by convert_instance_data.""" - data = {'key1': {'subkey1': 'aGkgbW9t'}, 'key2': 'aGkgZGFk'} + data = {"key1": {"subkey1": "aGkgbW9t"}, "key2": "aGkgZGFk"} expected_data = copy.deepcopy(data) - expected_data['key1']['subkey1'] = 'hi mom' + expected_data["key1"]["subkey1"] = "hi mom" converted_data = convert_jinja_instance_data( - data=data, decode_paths=('key1/subkey1',)) + data=data, decode_paths=("key1/subkey1",) + ) assert expected_data == converted_data @@ -372,9 +426,11 @@ class TestRenderJinjaPayload(CiTestCase): def test_render_jinja_payload_logs_jinja_vars_on_debug(self): """When debug is True, log jinja varables available.""" payload = ( - '## template: jinja\n#!/bin/sh\necho hi from {{ v1.hostname }}') - instance_data = {'v1': {'hostname': 'foo'}, 'instance-id': 'iid'} - expected_log = dedent("""\ + "## template: jinja\n#!/bin/sh\necho hi from {{ v1.hostname }}" + ) + instance_data = {"v1": {"hostname": "foo"}, "instance-id": "iid"} + expected_log = dedent( + """\ DEBUG: Converted jinja variables { "hostname": "foo", @@ -384,28 +440,37 @@ class TestRenderJinjaPayload(CiTestCase): "hostname": "foo" } } - """) + """ + ) self.assertEqual( render_jinja_payload( - payload=payload, payload_fn='myfile', - instance_data=instance_data, debug=True), - '#!/bin/sh\necho hi from foo') + payload=payload, + payload_fn="myfile", + instance_data=instance_data, + debug=True, + ), + "#!/bin/sh\necho hi from foo", + ) self.assertEqual(expected_log, self.logs.getvalue()) @skipUnlessJinja() def test_render_jinja_payload_replaces_missing_variables_and_warns(self): """Warn on missing jinja variables and replace the absent variable.""" - payload = ( - '## template: jinja\n#!/bin/sh\necho hi from {{ NOTHERE }}') - instance_data = {'v1': {'hostname': 'foo'}, 'instance-id': 'iid'} + payload = "## template: jinja\n#!/bin/sh\necho hi from {{ NOTHERE }}" + instance_data = {"v1": {"hostname": "foo"}, "instance-id": "iid"} self.assertEqual( render_jinja_payload( - payload=payload, payload_fn='myfile', - 
instance_data=instance_data), - '#!/bin/sh\necho hi from CI_MISSING_JINJA_VAR/NOTHERE') + payload=payload, + payload_fn="myfile", + instance_data=instance_data, + ), + "#!/bin/sh\necho hi from CI_MISSING_JINJA_VAR/NOTHERE", + ) expected_log = ( - 'WARNING: Could not render jinja template variables in file' - " 'myfile': 'NOTHERE'") + "WARNING: Could not render jinja template variables in file" + " 'myfile': 'NOTHERE'" + ) self.assertIn(expected_log, self.logs.getvalue()) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index e30e89a7..bed73a93 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -1,14 +1,13 @@ # This file is part of cloud-init. See LICENSE file for license information. -import os import contextlib import io +import os from collections import namedtuple from cloudinit.cmd import main as cli -from tests.unittests import helpers as test_helpers from cloudinit.util import load_file, load_json - +from tests.unittests import helpers as test_helpers mock = test_helpers.mock @@ -24,7 +23,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): def _call_main(self, sysv_args=None): if not sysv_args: - sysv_args = ['cloud-init'] + sysv_args = ["cloud-init"] try: return cli.main(sysv_args=sysv_args) except SystemExit as e: @@ -36,36 +35,37 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): Valid name values are only init and modules. """ tmpd = self.tmp_dir() - data_d = self.tmp_path('data', tmpd) - link_d = self.tmp_path('link', tmpd) - FakeArgs = namedtuple('FakeArgs', ['action', 'local', 'mode']) + data_d = self.tmp_path("data", tmpd) + link_d = self.tmp_path("link", tmpd) + FakeArgs = namedtuple("FakeArgs", ["action", "local", "mode"]) def myaction(): - raise Exception('Should not call myaction') + raise Exception("Should not call myaction") - myargs = FakeArgs(('doesnotmatter', myaction), False, 'bogusmode') + myargs = FakeArgs(("doesnotmatter", myaction), False, "bogusmode") with self.assertRaises(ValueError) as cm: - cli.status_wrapper('init1', myargs, data_d, link_d) - self.assertEqual('unknown name: init1', str(cm.exception)) - self.assertNotIn('Should not call myaction', self.logs.getvalue()) + cli.status_wrapper("init1", myargs, data_d, link_d) + self.assertEqual("unknown name: init1", str(cm.exception)) + self.assertNotIn("Should not call myaction", self.logs.getvalue()) def test_status_wrapper_errors_on_invalid_modes(self): """status_wrapper will error if a parameter combination is invalid.""" tmpd = self.tmp_dir() - data_d = self.tmp_path('data', tmpd) - link_d = self.tmp_path('link', tmpd) - FakeArgs = namedtuple('FakeArgs', ['action', 'local', 'mode']) + data_d = self.tmp_path("data", tmpd) + link_d = self.tmp_path("link", tmpd) + FakeArgs = namedtuple("FakeArgs", ["action", "local", "mode"]) def myaction(): - raise Exception('Should not call myaction') + raise Exception("Should not call myaction") - myargs = FakeArgs(('modules_name', myaction), False, 'bogusmode') + myargs = FakeArgs(("modules_name", myaction), False, "bogusmode") with self.assertRaises(ValueError) as cm: - cli.status_wrapper('modules', myargs, data_d, link_d) + cli.status_wrapper("modules", myargs, data_d, link_d) self.assertEqual( "Invalid cloud init mode specified 'modules-bogusmode'", - str(cm.exception)) - self.assertNotIn('Should not call myaction', self.logs.getvalue()) + str(cm.exception), + ) + self.assertNotIn("Should not call myaction", self.logs.getvalue()) def 
test_status_wrapper_init_local_writes_fresh_status_info(self): """When running in init-local mode, status_wrapper writes status.json. @@ -73,78 +73,90 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): Old status and results artifacts are also removed. """ tmpd = self.tmp_dir() - data_d = self.tmp_path('data', tmpd) - link_d = self.tmp_path('link', tmpd) - status_link = self.tmp_path('status.json', link_d) + data_d = self.tmp_path("data", tmpd) + link_d = self.tmp_path("link", tmpd) + status_link = self.tmp_path("status.json", link_d) # Write old artifacts which will be removed or updated. for _dir in data_d, link_d: test_helpers.populate_dir( - _dir, {'status.json': 'old', 'result.json': 'old'}) + _dir, {"status.json": "old", "result.json": "old"} + ) - FakeArgs = namedtuple('FakeArgs', ['action', 'local', 'mode']) + FakeArgs = namedtuple("FakeArgs", ["action", "local", "mode"]) def myaction(name, args): # Return an error so we can verify that status_wrapper captures it - return 'SomeDatasource', ['an error'] + return "SomeDatasource", ["an error"] - myargs = FakeArgs(('ignored_name', myaction), True, 'bogusmode') - cli.status_wrapper('init', myargs, data_d, link_d) + myargs = FakeArgs(("ignored_name", myaction), True, "bogusmode") + cli.status_wrapper("init", myargs, data_d, link_d) # The error returned by myaction is reported in status - status_v1 = load_json(load_file(status_link))['v1'] - self.assertEqual(['an error'], status_v1['init-local']['errors']) - self.assertEqual('SomeDatasource', status_v1['datasource']) + status_v1 = load_json(load_file(status_link))["v1"] + self.assertEqual(["an error"], status_v1["init-local"]["errors"]) + self.assertEqual("SomeDatasource", status_v1["datasource"]) self.assertFalse( - os.path.exists(self.tmp_path('result.json', data_d)), - 'unexpected result.json found') + os.path.exists(self.tmp_path("result.json", data_d)), + "unexpected result.json found", + ) self.assertFalse( - os.path.exists(self.tmp_path('result.json', link_d)), - 'unexpected result.json link found') + os.path.exists(self.tmp_path("result.json", link_d)), + "unexpected result.json link found", + ) def test_no_arguments_shows_usage(self): exit_code = self._call_main() - self.assertIn('usage: cloud-init', self.stderr.getvalue()) + self.assertIn("usage: cloud-init", self.stderr.getvalue()) self.assertEqual(2, exit_code) def test_no_arguments_shows_error_message(self): exit_code = self._call_main() missing_subcommand_message = [ - 'too few arguments', # python2.7 msg - 'the following arguments are required: subcommand' # python3 msg + "too few arguments", # python2.7 msg + "the following arguments are required: subcommand", # python3 msg ] error = self.stderr.getvalue() - matches = ([msg in error for msg in missing_subcommand_message]) + matches = [msg in error for msg in missing_subcommand_message] self.assertTrue( - any(matches), 'Did not find error message for missing subcommand') + any(matches), "Did not find error message for missing subcommand" + ) self.assertEqual(2, exit_code) def test_all_subcommands_represented_in_help(self): """All known subparsers are represented in the cloud-init help doc.""" self._call_main() error = self.stderr.getvalue() - expected_subcommands = ['analyze', 'clean', 'devel', 'dhclient-hook', - 'features', 'init', 'modules', 'single'] + expected_subcommands = [ + "analyze", + "clean", + "devel", + "dhclient-hook", + "features", + "init", + "modules", + "single", + ] for subcommand in expected_subcommands: self.assertIn(subcommand, error)
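The init-local test above pins down the shape of the status file that status_wrapper writes; a short sketch of reading it, assuming the conventional /run/cloud-init/status.json location (a path these tests do not assert):

    from cloudinit.util import load_file, load_json

    # status.json nests per-stage results under a "v1" key; the
    # init-local stage records the datasource name and any errors
    # returned by the wrapped action.
    status_v1 = load_json(load_file("/run/cloud-init/status.json"))["v1"]
    print(status_v1["datasource"], status_v1["init-local"]["errors"])

- @mock.patch('cloudinit.cmd.main.status_wrapper') +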
@mock.patch("cloudinit.cmd.main.status_wrapper") def test_init_subcommand_parser(self, m_status_wrapper): """The subcommand 'init' calls status_wrapper passing init.""" - self._call_main(['cloud-init', 'init']) + self._call_main(["cloud-init", "init"]) (name, parseargs) = m_status_wrapper.call_args_list[0][0] - self.assertEqual('init', name) - self.assertEqual('init', parseargs.subcommand) - self.assertEqual('init', parseargs.action[0]) - self.assertEqual('main_init', parseargs.action[1].__name__) + self.assertEqual("init", name) + self.assertEqual("init", parseargs.subcommand) + self.assertEqual("init", parseargs.action[0]) + self.assertEqual("main_init", parseargs.action[1].__name__) - @mock.patch('cloudinit.cmd.main.status_wrapper') + @mock.patch("cloudinit.cmd.main.status_wrapper") def test_modules_subcommand_parser(self, m_status_wrapper): """The subcommand 'modules' calls status_wrapper passing modules.""" - self._call_main(['cloud-init', 'modules']) + self._call_main(["cloud-init", "modules"]) (name, parseargs) = m_status_wrapper.call_args_list[0][0] - self.assertEqual('modules', name) - self.assertEqual('modules', parseargs.subcommand) - self.assertEqual('modules', parseargs.action[0]) - self.assertEqual('main_modules', parseargs.action[1].__name__) + self.assertEqual("modules", name) + self.assertEqual("modules", parseargs.subcommand) + self.assertEqual("modules", parseargs.action[0]) + self.assertEqual("main_modules", parseargs.action[1].__name__) def test_conditional_subcommands_from_entry_point_sys_argv(self): """Subcommands from entry-point are properly parsed from sys.argv.""" @@ -152,14 +164,22 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): self.patchStdoutAndStderr(stdout=stdout) expected_errors = [ - 'usage: cloud-init analyze', 'usage: cloud-init clean', - 'usage: cloud-init collect-logs', 'usage: cloud-init devel', - 'usage: cloud-init status'] + "usage: cloud-init analyze", + "usage: cloud-init clean", + "usage: cloud-init collect-logs", + "usage: cloud-init devel", + "usage: cloud-init status", + ] conditional_subcommands = [ - 'analyze', 'clean', 'collect-logs', 'devel', 'status'] + "analyze", + "clean", + "collect-logs", + "devel", + "status", + ] # The cloud-init entrypoint calls main without passing sys_argv for subcommand in conditional_subcommands: - with mock.patch('sys.argv', ['cloud-init', subcommand, '-h']): + with mock.patch("sys.argv", ["cloud-init", subcommand, "-h"]): try: cli.main() except SystemExit as e: @@ -169,9 +189,9 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): def test_analyze_subcommand_parser(self): """The subcommand cloud-init analyze calls the correct subparser.""" - self._call_main(['cloud-init', 'analyze']) + self._call_main(["cloud-init", "analyze"]) # These subcommands only valid for cloud-init analyze script - expected_subcommands = ['blame', 'show', 'dump'] + expected_subcommands = ["blame", "show", "dump"] error = self.stderr.getvalue() for subcommand in expected_subcommands: self.assertIn(subcommand, error) @@ -181,43 +201,44 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): # Provide -h param to collect-logs to avoid having to mock behavior. 
stdout = io.StringIO() self.patchStdoutAndStderr(stdout=stdout) - self._call_main(['cloud-init', 'collect-logs', '-h']) - self.assertIn('usage: cloud-init collect-log', stdout.getvalue()) + self._call_main(["cloud-init", "collect-logs", "-h"]) + self.assertIn("usage: cloud-init collect-log", stdout.getvalue()) def test_clean_subcommand_parser(self): """The subcommand cloud-init clean calls the subparser.""" # Provide -h param to clean to avoid having to mock behavior. stdout = io.StringIO() self.patchStdoutAndStderr(stdout=stdout) - self._call_main(['cloud-init', 'clean', '-h']) - self.assertIn('usage: cloud-init clean', stdout.getvalue()) + self._call_main(["cloud-init", "clean", "-h"]) + self.assertIn("usage: cloud-init clean", stdout.getvalue()) def test_status_subcommand_parser(self): """The subcommand cloud-init status calls the subparser.""" # Provide -h param to clean to avoid having to mock behavior. stdout = io.StringIO() self.patchStdoutAndStderr(stdout=stdout) - self._call_main(['cloud-init', 'status', '-h']) - self.assertIn('usage: cloud-init status', stdout.getvalue()) + self._call_main(["cloud-init", "status", "-h"]) + self.assertIn("usage: cloud-init status", stdout.getvalue()) def test_devel_subcommand_parser(self): """The subcommand cloud-init devel calls the correct subparser.""" - self._call_main(['cloud-init', 'devel']) + self._call_main(["cloud-init", "devel"]) # These subcommands only valid for cloud-init schema script - expected_subcommands = ['schema'] + expected_subcommands = ["schema"] error = self.stderr.getvalue() for subcommand in expected_subcommands: self.assertIn(subcommand, error) def test_wb_devel_schema_subcommand_parser(self): """The subcommand cloud-init schema calls the correct subparser.""" - exit_code = self._call_main(['cloud-init', 'devel', 'schema']) + exit_code = self._call_main(["cloud-init", "devel", "schema"]) self.assertEqual(1, exit_code) # Known whitebox output from schema subcommand self.assertEqual( - 'Error:\n' - 'Expected one of --config-file, --system or --docs arguments\n', - self.stderr.getvalue()) + "Error:\n" + "Expected one of --config-file, --system or --docs arguments\n", + self.stderr.getvalue(), + ) def test_wb_devel_schema_subcommand_doc_all_spot_check(self): """Validate that doc content has correct values from known examples. 
@@ -234,12 +255,10 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): self._call_main(["cloud-init", "devel", "schema", "--docs", "all"]) expected_doc_sections = [ "**Supported distros:** all", - ( - "**Supported distros:** almalinux, alpine, centos, " - "cloudlinux, debian, eurolinux, fedora, miraclelinux, " - "openEuler, opensuse, photon, rhel, rocky, sles, ubuntu, " - "virtuozzo" - ), + "**Supported distros:** almalinux, alpine, centos, " + "cloudlinux, debian, eurolinux, fedora, miraclelinux, " + "openEuler, opensuse, photon, rhel, rocky, sles, ubuntu, " + "virtuozzo", "**Config schema**:\n **resize_rootfs:** " "(true/false/noblock)", "**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n", @@ -316,42 +335,43 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): for expected in expected_doc_sections: self.assertIn(expected, stderr) - @mock.patch('cloudinit.cmd.main.main_single') + @mock.patch("cloudinit.cmd.main.main_single") def test_single_subcommand(self, m_main_single): """The subcommand 'single' calls main_single with valid args.""" - self._call_main(['cloud-init', 'single', '--name', 'cc_ntp']) + self._call_main(["cloud-init", "single", "--name", "cc_ntp"]) (name, parseargs) = m_main_single.call_args_list[0][0] - self.assertEqual('single', name) - self.assertEqual('single', parseargs.subcommand) - self.assertEqual('single', parseargs.action[0]) + self.assertEqual("single", name) + self.assertEqual("single", parseargs.subcommand) + self.assertEqual("single", parseargs.action[0]) self.assertFalse(parseargs.debug) self.assertFalse(parseargs.force) self.assertIsNone(parseargs.frequency) - self.assertEqual('cc_ntp', parseargs.name) + self.assertEqual("cc_ntp", parseargs.name) self.assertFalse(parseargs.report) - @mock.patch('cloudinit.cmd.main.dhclient_hook.handle_args') + @mock.patch("cloudinit.cmd.main.dhclient_hook.handle_args") def test_dhclient_hook_subcommand(self, m_handle_args): """The subcommand 'dhclient-hook' calls dhclient_hook with args.""" - self._call_main(['cloud-init', 'dhclient-hook', 'up', 'eth0']) + self._call_main(["cloud-init", "dhclient-hook", "up", "eth0"]) (name, parseargs) = m_handle_args.call_args_list[0][0] - self.assertEqual('dhclient-hook', name) - self.assertEqual('dhclient-hook', parseargs.subcommand) - self.assertEqual('dhclient-hook', parseargs.action[0]) + self.assertEqual("dhclient-hook", name) + self.assertEqual("dhclient-hook", parseargs.subcommand) + self.assertEqual("dhclient-hook", parseargs.action[0]) self.assertFalse(parseargs.debug) self.assertFalse(parseargs.force) - self.assertEqual('up', parseargs.event) - self.assertEqual('eth0', parseargs.interface) + self.assertEqual("up", parseargs.event) + self.assertEqual("eth0", parseargs.interface) - @mock.patch('cloudinit.cmd.main.main_features') + @mock.patch("cloudinit.cmd.main.main_features") def test_features_hook_subcommand(self, m_features): """The subcommand 'features' calls main_features with args.""" - self._call_main(['cloud-init', 'features']) + self._call_main(["cloud-init", "features"]) (name, parseargs) = m_features.call_args_list[0][0] - self.assertEqual('features', name) - self.assertEqual('features', parseargs.subcommand) - self.assertEqual('features', parseargs.action[0]) + self.assertEqual("features", name) + self.assertEqual("features", parseargs.subcommand) + self.assertEqual("features", parseargs.action[0]) self.assertFalse(parseargs.debug) self.assertFalse(parseargs.force) + # vi: ts=4 expandtab
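The next file exercises the subp-guard fixtures from the unit-test conftest; a minimal usage sketch, assuming the allow_subp_for marker that conftest registers:

    import pytest

    from cloudinit import subp

    @pytest.mark.allow_subp_for("whoami")
    def test_only_whoami_may_shell_out():
        subp.subp(["whoami"])  # on the allow-list, runs normally
        with pytest.raises(AssertionError):
            subp.subp(["ls"])  # not allowed, the guard raises

diff --git a/tests/unittests/test_conftest.py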
b/tests/unittests/test_conftest.py index 2e02b7a7..68903430 100644 --- a/tests/unittests/test_conftest.py +++ b/tests/unittests/test_conftest.py @@ -19,7 +19,7 @@ class TestDisableSubpUsage: @pytest.mark.allow_all_subp def test_subp_usage_can_be_reenabled(self): - subp.subp(['whoami']) + subp.subp(["whoami"]) @pytest.mark.allow_subp_for("whoami") def test_subp_usage_can_be_conditionally_reenabled(self): @@ -28,15 +28,15 @@ class TestDisableSubpUsage: with pytest.raises(AssertionError) as excinfo: subp.subp(["some", "args"]) assert "allowed: whoami" in str(excinfo.value) - subp.subp(['whoami']) + subp.subp(["whoami"]) @pytest.mark.allow_subp_for("whoami", "bash") def test_subp_usage_can_be_conditionally_reenabled_for_multiple_cmds(self): with pytest.raises(AssertionError) as excinfo: subp.subp(["some", "args"]) assert "allowed: whoami,bash" in str(excinfo.value) - subp.subp(['bash', '-c', 'true']) - subp.subp(['whoami']) + subp.subp(["bash", "-c", "true"]) + subp.subp(["whoami"]) @pytest.mark.allow_all_subp @pytest.mark.allow_subp_for("bash") @@ -60,6 +60,6 @@ class TestDisableSubpUsageInTestSubclass(CiTestCase): _old_allowed_subp = self.allow_subp self.allowed_subp = True try: - subp.subp(['bash', '-c', 'true']) + subp.subp(["bash", "-c", "true"]) finally: self.allowed_subp = _old_allowed_subp diff --git a/tests/unittests/test_cs_util.py b/tests/unittests/test_cs_util.py index be9da40c..109e0208 100644 --- a/tests/unittests/test_cs_util.py +++ b/tests/unittests/test_cs_util.py @@ -1,9 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. -from tests.unittests import helpers as test_helpers - from cloudinit.cs_utils import Cepko - +from tests.unittests import helpers as test_helpers SERVER_CONTEXT = { "cpu": 1000, @@ -16,7 +14,7 @@ SERVER_CONTEXT = { "smp": 1, "tags": ["much server", "very performance"], "uuid": "65b2fb23-8c03-4187-a3ba-8b7c919e889", - "vnc_password": "9e84d6cb49e46379" + "vnc_password": "9e84d6cb49e46379", } @@ -25,7 +23,7 @@ class CepkoMock(Cepko): return SERVER_CONTEXT def get(self, key="", request_pattern=None): - return SERVER_CONTEXT['tags'] + return SERVER_CONTEXT["tags"] # 2015-01-22 BAW: This test is completely useless because it only ever tests @@ -34,33 +32,36 @@ class CepkoMock(Cepko): class CepkoResultTests(test_helpers.TestCase): def setUp(self): self.c = Cepko() - raise test_helpers.SkipTest('This test is completely useless') + raise test_helpers.SkipTest("This test is completely useless") def test_getitem(self): result = self.c.all() - self.assertEqual("65b2fb23-8c03-4187-a3ba-8b7c919e889", result['uuid']) - self.assertEqual([], result['requirements']) - self.assertEqual("much server", result['tags'][0]) - self.assertEqual(1, result['smp']) + self.assertEqual("65b2fb23-8c03-4187-a3ba-8b7c919e889", result["uuid"]) + self.assertEqual([], result["requirements"]) + self.assertEqual("much server", result["tags"][0]) + self.assertEqual(1, result["smp"]) def test_len(self): self.assertEqual(len(SERVER_CONTEXT), len(self.c.all())) def test_contains(self): result = self.c.all() - self.assertTrue('uuid' in result) - self.assertFalse('uid' in result) - self.assertTrue('meta' in result) - self.assertFalse('ssh_public_key' in result) + self.assertTrue("uuid" in result) + self.assertFalse("uid" in result) + self.assertTrue("meta" in result) + self.assertFalse("ssh_public_key" in result) def test_iter(self): - self.assertEqual(sorted(SERVER_CONTEXT.keys()), - sorted([key for key in self.c.all()])) + self.assertEqual( + 
sorted(SERVER_CONTEXT.keys()), + sorted([key for key in self.c.all()]), + ) def test_with_list_as_result(self): - result = self.c.get('tags') - self.assertEqual('much server', result[0]) - self.assertTrue('very performance' in result) + result = self.c.get("tags") + self.assertEqual("much server", result[0]) + self.assertTrue("very performance" in result) self.assertEqual(2, len(result)) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index 2ee09bbb..a5018a42 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -5,37 +5,30 @@ import gzip import logging import os -from io import BytesIO, StringIO -from unittest import mock - from email import encoders from email.mime.application import MIMEApplication from email.mime.base import MIMEBase from email.mime.multipart import MIMEMultipart +from io import BytesIO, StringIO +from unittest import mock import httpretty from cloudinit import handlers from cloudinit import helpers as c_helpers -from cloudinit import log -from cloudinit.settings import (PER_INSTANCE) -from cloudinit import sources -from cloudinit import stages +from cloudinit import log, safeyaml, sources, stages from cloudinit import user_data as ud -from cloudinit import safeyaml from cloudinit import util - +from cloudinit.settings import PER_INSTANCE from tests.unittests import helpers - INSTANCE_ID = "i-testing" class FakeDataSource(sources.DataSource): - def __init__(self, userdata=None, vendordata=None, vendordata2=None): sources.DataSource.__init__(self, {}, None, None) - self.metadata = {'instance-id': INSTANCE_ID} + self.metadata = {"instance-id": INSTANCE_ID} self.userdata_raw = userdata self.vendordata_raw = vendordata self.vendordata2_raw = vendordata2 @@ -52,7 +45,7 @@ def count_messages(root): def gzip_text(text): contents = BytesIO() - f = gzip.GzipFile(fileobj=contents, mode='wb') + f = gzip.GzipFile(fileobj=contents, mode="wb") f.write(util.encode_text(text)) f.flush() f.close() @@ -62,7 +55,6 @@ def gzip_text(text): # FIXME: these tests shouldn't be checking log output?? # Weirddddd... 
class TestConsumeUserData(helpers.FilesystemMockingTestCase): - def setUp(self): super(TestConsumeUserData, self).setUp() self._log = None @@ -87,13 +79,13 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase): return log_file def test_simple_jsonp(self): - blob = ''' + blob = """ #cloud-config-jsonp [ { "op": "add", "path": "/baz", "value": "qux" }, { "op": "add", "path": "/bar", "value": "qux2" } ] -''' +""" ci = stages.Init() ci.datasource = FakeDataSource(blob) @@ -103,20 +95,20 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase): cc_contents = util.load_file(ci.paths.get_ipath("cloud_config")) cc = util.load_yaml(cc_contents) self.assertEqual(2, len(cc)) - self.assertEqual('qux', cc['baz']) - self.assertEqual('qux2', cc['bar']) + self.assertEqual("qux", cc["baz"]) + self.assertEqual("qux2", cc["bar"]) def test_simple_jsonp_vendor_and_vendor2_and_user(self): # test that user-data wins over vendor - user_blob = ''' + user_blob = """ #cloud-config-jsonp [ { "op": "add", "path": "/baz", "value": "qux" }, { "op": "add", "path": "/bar", "value": "qux2" }, { "op": "add", "path": "/foobar", "value": "qux3" } ] -''' - vendor_blob = ''' +""" + vendor_blob = """ #cloud-config-jsonp [ { "op": "add", "path": "/baz", "value": "quxA" }, @@ -124,61 +116,63 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase): { "op": "add", "path": "/foo", "value": "quxC" }, { "op": "add", "path": "/corge", "value": "quxEE" } ] -''' - vendor2_blob = ''' +""" + vendor2_blob = """ #cloud-config-jsonp [ { "op": "add", "path": "/corge", "value": "quxD" }, { "op": "add", "path": "/grault", "value": "quxFF" }, { "op": "add", "path": "/foobar", "value": "quxGG" } ] -''' +""" self.reRoot() initer = stages.Init() - initer.datasource = FakeDataSource(user_blob, - vendordata=vendor_blob, - vendordata2=vendor2_blob) + initer.datasource = FakeDataSource( + user_blob, vendordata=vendor_blob, vendordata2=vendor2_blob + ) initer.read_cfg() initer.initialize() initer.fetch() initer.instancify() initer.update() - initer.cloudify().run('consume_data', - initer.consume_data, - args=[PER_INSTANCE], - freq=PER_INSTANCE) + initer.cloudify().run( + "consume_data", + initer.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE, + ) mods = stages.Modules(initer) - (_which_ran, _failures) = mods.run_section('cloud_init_modules') + (_which_ran, _failures) = mods.run_section("cloud_init_modules") cfg = mods.cfg - self.assertIn('vendor_data', cfg) - self.assertIn('vendor_data2', cfg) + self.assertIn("vendor_data", cfg) + self.assertIn("vendor_data2", cfg) # Confirm that vendordata2 overrides vendordata, and that # userdata overrides both - self.assertEqual('qux', cfg['baz']) - self.assertEqual('qux2', cfg['bar']) - self.assertEqual('qux3', cfg['foobar']) - self.assertEqual('quxC', cfg['foo']) - self.assertEqual('quxD', cfg['corge']) - self.assertEqual('quxFF', cfg['grault']) + self.assertEqual("qux", cfg["baz"]) + self.assertEqual("qux2", cfg["bar"]) + self.assertEqual("qux3", cfg["foobar"]) + self.assertEqual("quxC", cfg["foo"]) + self.assertEqual("quxD", cfg["corge"]) + self.assertEqual("quxFF", cfg["grault"]) def test_simple_jsonp_no_vendor_consumed(self): # make sure that vendor data is not consumed - user_blob = ''' + user_blob = """ #cloud-config-jsonp [ { "op": "add", "path": "/baz", "value": "qux" }, { "op": "add", "path": "/bar", "value": "qux2" }, { "op": "add", "path": "/vendor_data", "value": {"enabled": "false"}} ] -''' - vendor_blob = ''' +""" + vendor_blob = """ #cloud-config-jsonp [ { "op": 
"add", "path": "/baz", "value": "quxA" }, { "op": "add", "path": "/bar", "value": "quxB" }, { "op": "add", "path": "/foo", "value": "quxC" } ] -''' +""" self.reRoot() initer = stages.Init() initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob) @@ -187,35 +181,37 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase): initer.fetch() initer.instancify() initer.update() - initer.cloudify().run('consume_data', - initer.consume_data, - args=[PER_INSTANCE], - freq=PER_INSTANCE) + initer.cloudify().run( + "consume_data", + initer.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE, + ) mods = stages.Modules(initer) - (_which_ran, _failures) = mods.run_section('cloud_init_modules') + (_which_ran, _failures) = mods.run_section("cloud_init_modules") cfg = mods.cfg - self.assertEqual('qux', cfg['baz']) - self.assertEqual('qux2', cfg['bar']) - self.assertNotIn('foo', cfg) + self.assertEqual("qux", cfg["baz"]) + self.assertEqual("qux2", cfg["bar"]) + self.assertNotIn("foo", cfg) def test_mixed_cloud_config(self): - blob_cc = ''' + blob_cc = """ #cloud-config a: b c: d -''' +""" message_cc = MIMEBase("text", "cloud-config") message_cc.set_payload(blob_cc) - blob_jp = ''' + blob_jp = """ #cloud-config-jsonp [ { "op": "replace", "path": "/a", "value": "c" }, { "op": "remove", "path": "/c" } ] -''' +""" - message_jp = MIMEBase('text', "cloud-config-jsonp") + message_jp = MIMEBase("text", "cloud-config-jsonp") message_jp.set_payload(blob_jp) message = MIMEMultipart() @@ -230,26 +226,26 @@ c: d cc_contents = util.load_file(ci.paths.get_ipath("cloud_config")) cc = util.load_yaml(cc_contents) self.assertEqual(1, len(cc)) - self.assertEqual('c', cc['a']) + self.assertEqual("c", cc["a"]) def test_cloud_config_as_x_shell_script(self): - blob_cc = ''' + blob_cc = """ #cloud-config a: b c: d -''' +""" message_cc = MIMEBase("text", "x-shellscript") message_cc.set_payload(blob_cc) - blob_jp = ''' + blob_jp = """ #cloud-config-jsonp [ { "op": "replace", "path": "/a", "value": "c" }, { "op": "remove", "path": "/c" } ] -''' +""" - message_jp = MIMEBase('text', "cloud-config-jsonp") + message_jp = MIMEBase("text", "cloud-config-jsonp") message_jp.set_payload(blob_jp) message = MIMEMultipart() @@ -264,19 +260,19 @@ c: d cc_contents = util.load_file(ci.paths.get_ipath("cloud_config")) cc = util.load_yaml(cc_contents) self.assertEqual(1, len(cc)) - self.assertEqual('c', cc['a']) + self.assertEqual("c", cc["a"]) def test_vendor_user_yaml_cloud_config(self): - vendor_blob = ''' + vendor_blob = """ #cloud-config a: b name: vendor run: - x - y -''' +""" - user_blob = ''' + user_blob = """ #cloud-config a: c vendor_data: @@ -285,7 +281,7 @@ vendor_data: name: user run: - z -''' +""" self.reRoot() initer = stages.Init() initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob) @@ -294,114 +290,122 @@ run: initer.fetch() initer.instancify() initer.update() - initer.cloudify().run('consume_data', - initer.consume_data, - args=[PER_INSTANCE], - freq=PER_INSTANCE) + initer.cloudify().run( + "consume_data", + initer.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE, + ) mods = stages.Modules(initer) - (_which_ran, _failures) = mods.run_section('cloud_init_modules') + (_which_ran, _failures) = mods.run_section("cloud_init_modules") cfg = mods.cfg - self.assertIn('vendor_data', cfg) - self.assertEqual('c', cfg['a']) - self.assertEqual('user', cfg['name']) - self.assertNotIn('x', cfg['run']) - self.assertNotIn('y', cfg['run']) - self.assertIn('z', cfg['run']) + self.assertIn("vendor_data", 
cfg) + self.assertEqual("c", cfg["a"]) + self.assertEqual("user", cfg["name"]) + self.assertNotIn("x", cfg["run"]) + self.assertNotIn("y", cfg["run"]) + self.assertIn("z", cfg["run"]) def test_vendordata_script(self): - vendor_blob = ''' + vendor_blob = """ #!/bin/bash echo "test" -''' - vendor2_blob = ''' +""" + vendor2_blob = """ #!/bin/bash echo "dynamic test" -''' +""" - user_blob = ''' + user_blob = """ #cloud-config vendor_data: enabled: True prefix: /bin/true -''' +""" new_root = self.reRoot() initer = stages.Init() - initer.datasource = FakeDataSource(user_blob, - vendordata=vendor_blob, - vendordata2=vendor2_blob) + initer.datasource = FakeDataSource( + user_blob, vendordata=vendor_blob, vendordata2=vendor2_blob + ) initer.read_cfg() initer.initialize() initer.fetch() initer.instancify() initer.update() - initer.cloudify().run('consume_data', - initer.consume_data, - args=[PER_INSTANCE], - freq=PER_INSTANCE) + initer.cloudify().run( + "consume_data", + initer.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE, + ) mods = stages.Modules(initer) - (_which_ran, _failures) = mods.run_section('cloud_init_modules') - vendor_script = initer.paths.get_ipath_cur('vendor_scripts') + (_which_ran, _failures) = mods.run_section("cloud_init_modules") + vendor_script = initer.paths.get_ipath_cur("vendor_scripts") vendor_script_fns = "%s%s/part-001" % (new_root, vendor_script) self.assertTrue(os.path.exists(vendor_script_fns)) def test_merging_cloud_config(self): - blob = ''' + blob = """ #cloud-config a: b e: f run: - b - c -''' +""" message1 = MIMEBase("text", "cloud-config") message1.set_payload(blob) - blob2 = ''' + blob2 = """ #cloud-config a: e e: g run: - stuff - morestuff -''' +""" message2 = MIMEBase("text", "cloud-config") - message2['X-Merge-Type'] = ('dict(recurse_array,' - 'recurse_str)+list(append)+str(append)') + message2[ + "X-Merge-Type" + ] = "dict(recurse_array,recurse_str)+list(append)+str(append)" message2.set_payload(blob2) - blob3 = ''' + blob3 = """ #cloud-config e: - 1 - 2 - 3 p: 1 -''' +""" message3 = MIMEBase("text", "cloud-config") message3.set_payload(blob3) messages = [message1, message2, message3] - paths = c_helpers.Paths({}, ds=FakeDataSource('')) + paths = c_helpers.Paths({}, ds=FakeDataSource("")) cloud_cfg = handlers.cloud_config.CloudConfigPartHandler(paths) self.reRoot() - cloud_cfg.handle_part(None, handlers.CONTENT_START, None, None, None, - None) + cloud_cfg.handle_part( + None, handlers.CONTENT_START, None, None, None, None + ) for i, m in enumerate(messages): headers = dict(m) fn = "part-%s" % (i + 1) payload = m.get_payload(decode=True) - cloud_cfg.handle_part(None, headers['Content-Type'], - fn, payload, None, headers) - cloud_cfg.handle_part(None, handlers.CONTENT_END, None, None, None, - None) - contents = util.load_file(paths.get_ipath('cloud_config')) + cloud_cfg.handle_part( + None, headers["Content-Type"], fn, payload, None, headers + ) + cloud_cfg.handle_part( + None, handlers.CONTENT_END, None, None, None, None + ) + contents = util.load_file(paths.get_ipath("cloud_config")) contents = util.load_yaml(contents) - self.assertEqual(contents['run'], ['b', 'c', 'stuff', 'morestuff']) - self.assertEqual(contents['a'], 'be') - self.assertEqual(contents['e'], [1, 2, 3]) - self.assertEqual(contents['p'], 1) + self.assertEqual(contents["run"], ["b", "c", "stuff", "morestuff"]) + self.assertEqual(contents["a"], "be") + self.assertEqual(contents["e"], [1, 2, 3]) + self.assertEqual(contents["p"], 1) def test_unhandled_type_warning(self): """Raw text 
without magic is ignored but shows warning.""" @@ -410,35 +414,37 @@ p: 1 data = "arbitrary text\n" ci.datasource = FakeDataSource(data) - with mock.patch('cloudinit.util.write_file') as mockobj: + with mock.patch("cloudinit.util.write_file") as mockobj: log_file = self.capture_log(logging.WARNING) ci.fetch() ci.consume_data() self.assertIn( "Unhandled non-multipart (text/x-not-multipart) userdata:", - log_file.getvalue()) + log_file.getvalue(), + ) mockobj.assert_called_once_with( - ci.paths.get_ipath("cloud_config"), "", 0o600) + ci.paths.get_ipath("cloud_config"), "", 0o600 + ) def test_mime_gzip_compressed(self): """Tests that individual message gzip encoding works.""" def gzip_part(text): - return MIMEApplication(gzip_text(text), 'gzip') + return MIMEApplication(gzip_text(text), "gzip") - base_content1 = ''' + base_content1 = """ #cloud-config a: 2 -''' +""" - base_content2 = ''' + base_content2 = """ #cloud-config b: 3 c: 4 -''' +""" - message = MIMEMultipart('test') + message = MIMEMultipart("test") message.attach(gzip_part(base_content1)) message.attach(gzip_part(base_content2)) ci = stages.Init() @@ -450,9 +456,9 @@ c: 4 contents = util.load_yaml(contents) self.assertTrue(isinstance(contents, dict)) self.assertEqual(3, len(contents)) - self.assertEqual(2, contents['a']) - self.assertEqual(3, contents['b']) - self.assertEqual(4, contents['c']) + self.assertEqual(2, contents["a"]) + self.assertEqual(3, contents["b"]) + self.assertEqual(4, contents["c"]) def test_mime_text_plain(self): """Mime message of type text/plain is ignored but shows warning.""" @@ -462,15 +468,17 @@ c: 4 message.set_payload("Just text") ci.datasource = FakeDataSource(message.as_string().encode()) - with mock.patch('cloudinit.util.write_file') as mockobj: + with mock.patch("cloudinit.util.write_file") as mockobj: log_file = self.capture_log(logging.WARNING) ci.fetch() ci.consume_data() self.assertIn( "Unhandled unknown content-type (text/plain)", - log_file.getvalue()) + log_file.getvalue(), + ) mockobj.assert_called_once_with( - ci.paths.get_ipath("cloud_config"), "", 0o600) + ci.paths.get_ipath("cloud_config"), "", 0o600 + ) def test_shellscript(self): """Raw text starting #!/bin/sh is treated as script.""" @@ -481,15 +489,18 @@ c: 4 outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001") - with mock.patch('cloudinit.util.write_file') as mockobj: + with mock.patch("cloudinit.util.write_file") as mockobj: log_file = self.capture_log(logging.WARNING) ci.fetch() ci.consume_data() self.assertEqual("", log_file.getvalue()) - mockobj.assert_has_calls([ - mock.call(outpath, script, 0o700), - mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600)]) + mockobj.assert_has_calls( + [ + mock.call(outpath, script, 0o700), + mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600), + ] + ) def test_mime_text_x_shellscript(self): """Mime message of type text/x-shellscript is treated as script.""" @@ -502,15 +513,18 @@ c: 4 outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001") - with mock.patch('cloudinit.util.write_file') as mockobj: + with mock.patch("cloudinit.util.write_file") as mockobj: log_file = self.capture_log(logging.WARNING) ci.fetch() ci.consume_data() self.assertEqual("", log_file.getvalue()) - mockobj.assert_has_calls([ - mock.call(outpath, script, 0o700), - mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600)]) + mockobj.assert_has_calls( + [ + mock.call(outpath, script, 0o700), + mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600), + ] + ) def 
test_mime_text_plain_shell(self): """Mime type text/plain starting #!/bin/sh is treated as script.""" @@ -523,41 +537,48 @@ c: 4 outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001") - with mock.patch('cloudinit.util.write_file') as mockobj: + with mock.patch("cloudinit.util.write_file") as mockobj: log_file = self.capture_log(logging.WARNING) ci.fetch() ci.consume_data() self.assertEqual("", log_file.getvalue()) - mockobj.assert_has_calls([ - mock.call(outpath, script, 0o700), - mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600)]) + mockobj.assert_has_calls( + [ + mock.call(outpath, script, 0o700), + mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600), + ] + ) def test_mime_application_octet_stream(self): """Mime type application/octet-stream is ignored but shows warning.""" self.reRoot() ci = stages.Init() message = MIMEBase("application", "octet-stream") - message.set_payload(b'\xbf\xe6\xb2\xc3\xd3\xba\x13\xa4\xd8\xa1\xcc') + message.set_payload(b"\xbf\xe6\xb2\xc3\xd3\xba\x13\xa4\xd8\xa1\xcc") encoders.encode_base64(message) ci.datasource = FakeDataSource(message.as_string().encode()) - with mock.patch('cloudinit.util.write_file') as mockobj: + with mock.patch("cloudinit.util.write_file") as mockobj: log_file = self.capture_log(logging.WARNING) ci.fetch() ci.consume_data() self.assertIn( "Unhandled unknown content-type (application/octet-stream)", - log_file.getvalue()) + log_file.getvalue(), + ) mockobj.assert_called_once_with( - ci.paths.get_ipath("cloud_config"), "", 0o600) + ci.paths.get_ipath("cloud_config"), "", 0o600 + ) def test_cloud_config_archive(self): - non_decodable = b'\x11\xc9\xb4gTH\xee\x12' - data = [{'content': '#cloud-config\npassword: gocubs\n'}, - {'content': '#cloud-config\nlocale: chicago\n'}, - {'content': non_decodable}] - message = b'#cloud-config-archive\n' + safeyaml.dumps(data).encode() + non_decodable = b"\x11\xc9\xb4gTH\xee\x12" + data = [ + {"content": "#cloud-config\npassword: gocubs\n"}, + {"content": "#cloud-config\nlocale: chicago\n"}, + {"content": non_decodable}, + ] + message = b"#cloud-config-archive\n" + safeyaml.dumps(data).encode() self.reRoot() ci = stages.Init() @@ -570,35 +591,35 @@ c: 4 # consuming the user-data provided should write 'cloud_config' file # which will have our yaml in it. 
- with mock.patch('cloudinit.util.write_file') as mockobj: + with mock.patch("cloudinit.util.write_file") as mockobj: mockobj.side_effect = fsstore ci.fetch() ci.consume_data() cfg = util.load_yaml(fs[ci.paths.get_ipath("cloud_config")]) - self.assertEqual(cfg.get('password'), 'gocubs') - self.assertEqual(cfg.get('locale'), 'chicago') + self.assertEqual(cfg.get("password"), "gocubs") + self.assertEqual(cfg.get("locale"), "chicago") - @mock.patch('cloudinit.util.read_conf_with_confd') + @mock.patch("cloudinit.util.read_conf_with_confd") def test_dont_allow_user_data(self, mock_cfg): mock_cfg.return_value = {"allow_userdata": False} # test that user-data is ignored but vendor-data is kept - user_blob = ''' + user_blob = """ #cloud-config-jsonp [ { "op": "add", "path": "/baz", "value": "qux" }, { "op": "add", "path": "/bar", "value": "qux2" } ] -''' - vendor_blob = ''' +""" + vendor_blob = """ #cloud-config-jsonp [ { "op": "add", "path": "/baz", "value": "quxA" }, { "op": "add", "path": "/bar", "value": "quxB" }, { "op": "add", "path": "/foo", "value": "quxC" } ] -''' +""" self.reRoot() initer = stages.Init() initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob) @@ -607,21 +628,22 @@ c: 4 initer.fetch() initer.instancify() initer.update() - initer.cloudify().run('consume_data', - initer.consume_data, - args=[PER_INSTANCE], - freq=PER_INSTANCE) + initer.cloudify().run( + "consume_data", + initer.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE, + ) mods = stages.Modules(initer) - (_which_ran, _failures) = mods.run_section('cloud_init_modules') + (_which_ran, _failures) = mods.run_section("cloud_init_modules") cfg = mods.cfg - self.assertIn('vendor_data', cfg) - self.assertEqual('quxA', cfg['baz']) - self.assertEqual('quxB', cfg['bar']) - self.assertEqual('quxC', cfg['foo']) + self.assertIn("vendor_data", cfg) + self.assertEqual("quxA", cfg["baz"]) + self.assertEqual("quxB", cfg["bar"]) + self.assertEqual("quxC", cfg["foo"]) class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase): - def setUp(self): TestConsumeUserData.setUp(self) helpers.HttprettyTestCase.setUp(self) @@ -630,14 +652,14 @@ class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase): TestConsumeUserData.tearDown(self) helpers.HttprettyTestCase.tearDown(self) - @mock.patch('cloudinit.url_helper.time.sleep') + @mock.patch("cloudinit.url_helper.time.sleep") def test_include(self, mock_sleep): """Test #include.""" - included_url = 'http://hostname/path' - included_data = '#cloud-config\nincluded: true\n' + included_url = "http://hostname/path" + included_data = "#cloud-config\nincluded: true\n" httpretty.register_uri(httpretty.GET, included_url, included_data) - blob = '#include\n%s\n' % included_url + blob = "#include\n%s\n" % included_url self.reRoot() ci = stages.Init() @@ -646,20 +668,20 @@ class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase): ci.consume_data() cc_contents = util.load_file(ci.paths.get_ipath("cloud_config")) cc = util.load_yaml(cc_contents) - self.assertTrue(cc.get('included')) + self.assertTrue(cc.get("included")) - @mock.patch('cloudinit.url_helper.time.sleep') + @mock.patch("cloudinit.url_helper.time.sleep") def test_include_bad_url(self, mock_sleep): """Test #include with a bad URL.""" - bad_url = 'http://bad/forbidden' - bad_data = '#cloud-config\nbad: true\n' + bad_url = "http://bad/forbidden" + bad_data = "#cloud-config\nbad: true\n" httpretty.register_uri(httpretty.GET, bad_url, bad_data, status=403) - included_url = 
'http://hostname/path' - included_data = '#cloud-config\nincluded: true\n' + included_url = "http://hostname/path" + included_data = "#cloud-config\nincluded: true\n" httpretty.register_uri(httpretty.GET, included_url, included_data) - blob = '#include\n%s\n%s' % (bad_url, included_url) + blob = "#include\n%s\n%s" % (bad_url, included_url) self.reRoot() ci = stages.Init() @@ -667,26 +689,26 @@ class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase): ci.fetch() with self.assertRaises(Exception) as context: ci.consume_data() - self.assertIn('403', str(context.exception)) + self.assertIn("403", str(context.exception)) with self.assertRaises(FileNotFoundError): util.load_file(ci.paths.get_ipath("cloud_config")) - @mock.patch('cloudinit.url_helper.time.sleep') + @mock.patch("cloudinit.url_helper.time.sleep") @mock.patch( "cloudinit.user_data.features.ERROR_ON_USER_DATA_FAILURE", False ) def test_include_bad_url_no_fail(self, mock_sleep): """Test #include with a bad URL and failure disabled""" - bad_url = 'http://bad/forbidden' - bad_data = '#cloud-config\nbad: true\n' + bad_url = "http://bad/forbidden" + bad_data = "#cloud-config\nbad: true\n" httpretty.register_uri(httpretty.GET, bad_url, bad_data, status=403) - included_url = 'http://hostname/path' - included_data = '#cloud-config\nincluded: true\n' + included_url = "http://hostname/path" + included_data = "#cloud-config\nincluded: true\n" httpretty.register_uri(httpretty.GET, included_url, included_data) - blob = '#include\n%s\n%s' % (bad_url, included_url) + blob = "#include\n%s\n%s" % (bad_url, included_url) self.reRoot() ci = stages.Init() @@ -695,32 +717,33 @@ class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase): ci.fetch() ci.consume_data() - self.assertIn("403 Client Error: Forbidden for url: %s" % bad_url, - log_file.getvalue()) + self.assertIn( + "403 Client Error: Forbidden for url: %s" % bad_url, + log_file.getvalue(), + ) cc_contents = util.load_file(ci.paths.get_ipath("cloud_config")) cc = util.load_yaml(cc_contents) - self.assertIsNone(cc.get('bad')) - self.assertTrue(cc.get('included')) + self.assertIsNone(cc.get("bad")) + self.assertTrue(cc.get("included")) class TestUDProcess(helpers.ResourceUsingTestCase): - def test_bytes_in_userdata(self): - msg = b'#cloud-config\napt_update: True\n' + msg = b"#cloud-config\napt_update: True\n" ud_proc = ud.UserDataProcessor(self.getCloudPaths()) message = ud_proc.process(msg) self.assertTrue(count_messages(message) == 1) def test_string_in_userdata(self): - msg = '#cloud-config\napt_update: True\n' + msg = "#cloud-config\napt_update: True\n" ud_proc = ud.UserDataProcessor(self.getCloudPaths()) message = ud_proc.process(msg) self.assertTrue(count_messages(message) == 1) def test_compressed_in_userdata(self): - msg = gzip_text('#cloud-config\napt_update: True\n') + msg = gzip_text("#cloud-config\napt_update: True\n") ud_proc = ud.UserDataProcessor(self.getCloudPaths()) message = ud_proc.process(msg) @@ -728,15 +751,14 @@ class TestUDProcess(helpers.ResourceUsingTestCase): class TestConvertString(helpers.TestCase): - def test_handles_binary_non_utf8_decodable(self): """Printable unicode (not utf8-decodable) is safely converted.""" - blob = b'#!/bin/bash\necho \xc3\x84\n' + blob = b"#!/bin/bash\necho \xc3\x84\n" msg = ud.convert_string(blob) self.assertEqual(blob, msg.get_payload(decode=True)) def test_handles_binary_utf8_decodable(self): - blob = b'\x32\x32' + blob = b"\x32\x32" msg = ud.convert_string(blob) self.assertEqual(blob, 
msg.get_payload(decode=True)) @@ -756,24 +778,31 @@ class TestConvertString(helpers.TestCase): class TestFetchBaseConfig(helpers.TestCase): def test_only_builtin_gets_builtin(self): ret = helpers.wrap_and_call( - 'cloudinit.stages', - {'util.read_conf_with_confd': None, - 'util.read_conf_from_cmdline': None, - 'read_runtime_config': {'return_value': {}}}, - stages.fetch_base_config) + "cloudinit.stages", + { + "util.read_conf_with_confd": None, + "util.read_conf_from_cmdline": None, + "read_runtime_config": {"return_value": {}}, + }, + stages.fetch_base_config, + ) self.assertEqual(util.get_builtin_cfg(), ret) def test_conf_d_overrides_defaults(self): builtin = util.get_builtin_cfg() test_key = sorted(builtin)[0] - test_value = 'test' + test_value = "test" ret = helpers.wrap_and_call( - 'cloudinit.stages', - {'util.read_conf_with_confd': - {'return_value': {test_key: test_value}}, - 'util.read_conf_from_cmdline': None, - 'read_runtime_config': {'return_value': {}}}, - stages.fetch_base_config) + "cloudinit.stages", + { + "util.read_conf_with_confd": { + "return_value": {test_key: test_value} + }, + "util.read_conf_from_cmdline": None, + "read_runtime_config": {"return_value": {}}, + }, + stages.fetch_base_config, + ) self.assertEqual(ret.get(test_key), test_value) builtin[test_key] = test_value self.assertEqual(ret, builtin) @@ -781,47 +810,64 @@ class TestFetchBaseConfig(helpers.TestCase): def test_cmdline_overrides_defaults(self): builtin = util.get_builtin_cfg() test_key = sorted(builtin)[0] - test_value = 'test' + test_value = "test" cmdline = {test_key: test_value} ret = helpers.wrap_and_call( - 'cloudinit.stages', - {'util.read_conf_from_cmdline': {'return_value': cmdline}, - 'util.read_conf_with_confd': None, - 'read_runtime_config': None}, - stages.fetch_base_config) + "cloudinit.stages", + { + "util.read_conf_from_cmdline": {"return_value": cmdline}, + "util.read_conf_with_confd": None, + "read_runtime_config": None, + }, + stages.fetch_base_config, + ) self.assertEqual(ret.get(test_key), test_value) builtin[test_key] = test_value self.assertEqual(ret, builtin) def test_cmdline_overrides_confd_runtime_and_defaults(self): - builtin = {'key1': 'value0', 'key3': 'other2'} - conf_d = {'key1': 'value1', 'key2': 'other1'} - cmdline = {'key3': 'other3', 'key2': 'other2'} - runtime = {'key3': 'runtime3'} + builtin = {"key1": "value0", "key3": "other2"} + conf_d = {"key1": "value1", "key2": "other1"} + cmdline = {"key3": "other3", "key2": "other2"} + runtime = {"key3": "runtime3"} ret = helpers.wrap_and_call( - 'cloudinit.stages', - {'util.read_conf_with_confd': {'return_value': conf_d}, - 'util.get_builtin_cfg': {'return_value': builtin}, - 'read_runtime_config': {'return_value': runtime}, - 'util.read_conf_from_cmdline': {'return_value': cmdline}}, - stages.fetch_base_config) - self.assertEqual(ret, {'key1': 'value1', 'key2': 'other2', - 'key3': 'other3'}) + "cloudinit.stages", + { + "util.read_conf_with_confd": {"return_value": conf_d}, + "util.get_builtin_cfg": {"return_value": builtin}, + "read_runtime_config": {"return_value": runtime}, + "util.read_conf_from_cmdline": {"return_value": cmdline}, + }, + stages.fetch_base_config, + ) + self.assertEqual( + ret, {"key1": "value1", "key2": "other2", "key3": "other3"} + ) def test_order_precedence_is_builtin_system_runtime_cmdline(self): - builtin = {'key1': 'builtin0', 'key3': 'builtin3'} - conf_d = {'key1': 'confd1', 'key2': 'confd2', 'keyconfd1': 'kconfd1'} - runtime = {'key1': 'runtime1', 'key2': 'runtime2'} - cmdline = {'key1': 
'cmdline1'} + builtin = {"key1": "builtin0", "key3": "builtin3"} + conf_d = {"key1": "confd1", "key2": "confd2", "keyconfd1": "kconfd1"} + runtime = {"key1": "runtime1", "key2": "runtime2"} + cmdline = {"key1": "cmdline1"} ret = helpers.wrap_and_call( - 'cloudinit.stages', - {'util.read_conf_with_confd': {'return_value': conf_d}, - 'util.get_builtin_cfg': {'return_value': builtin}, - 'util.read_conf_from_cmdline': {'return_value': cmdline}, - 'read_runtime_config': {'return_value': runtime}, - }, - stages.fetch_base_config) - self.assertEqual(ret, {'key1': 'cmdline1', 'key2': 'runtime2', - 'key3': 'builtin3', 'keyconfd1': 'kconfd1'}) + "cloudinit.stages", + { + "util.read_conf_with_confd": {"return_value": conf_d}, + "util.get_builtin_cfg": {"return_value": builtin}, + "util.read_conf_from_cmdline": {"return_value": cmdline}, + "read_runtime_config": {"return_value": runtime}, + }, + stages.fetch_base_config, + ) + self.assertEqual( + ret, + { + "key1": "cmdline1", + "key2": "runtime2", + "key3": "builtin3", + "keyconfd1": "kconfd1", + }, + ) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_dhclient_hook.py b/tests/unittests/test_dhclient_hook.py index 14549111..7e5b54c0 100644 --- a/tests/unittests/test_dhclient_hook.py +++ b/tests/unittests/test_dhclient_hook.py @@ -2,47 +2,48 @@ """Tests for cloudinit.dhclient_hook.""" -from cloudinit import dhclient_hook as dhc -from tests.unittests.helpers import CiTestCase, dir2dict, populate_dir - import argparse import json import os from unittest import mock +from cloudinit import dhclient_hook as dhc +from tests.unittests.helpers import CiTestCase, dir2dict, populate_dir + class TestDhclientHook(CiTestCase): ex_env = { - 'interface': 'eth0', - 'new_dhcp_lease_time': '3600', - 'new_host_name': 'x1', - 'new_ip_address': '10.145.210.163', - 'new_subnet_mask': '255.255.255.0', - 'old_host_name': 'x1', - 'PATH': '/usr/sbin:/usr/bin:/sbin:/bin', - 'pid': '614', - 'reason': 'BOUND', + "interface": "eth0", + "new_dhcp_lease_time": "3600", + "new_host_name": "x1", + "new_ip_address": "10.145.210.163", + "new_subnet_mask": "255.255.255.0", + "old_host_name": "x1", + "PATH": "/usr/sbin:/usr/bin:/sbin:/bin", + "pid": "614", + "reason": "BOUND", } # some older versions of dhclient put the same content, # but in upper case with DHCP4_ instead of new_ ex_env_dhcp4 = { - 'REASON': 'BOUND', - 'DHCP4_dhcp_lease_time': '3600', - 'DHCP4_host_name': 'x1', - 'DHCP4_ip_address': '10.145.210.163', - 'DHCP4_subnet_mask': '255.255.255.0', - 'INTERFACE': 'eth0', - 'PATH': '/usr/sbin:/usr/bin:/sbin:/bin', - 'pid': '614', + "REASON": "BOUND", + "DHCP4_dhcp_lease_time": "3600", + "DHCP4_host_name": "x1", + "DHCP4_ip_address": "10.145.210.163", + "DHCP4_subnet_mask": "255.255.255.0", + "INTERFACE": "eth0", + "PATH": "/usr/sbin:/usr/bin:/sbin:/bin", + "pid": "614", } expected = { - 'dhcp_lease_time': '3600', - 'host_name': 'x1', - 'ip_address': '10.145.210.163', - 'subnet_mask': '255.255.255.0'} + "dhcp_lease_time": "3600", + "host_name": "x1", + "ip_address": "10.145.210.163", + "subnet_mask": "255.255.255.0", + } def setUp(self): super(TestDhclientHook, self).setUp() @@ -50,7 +51,7 @@ class TestDhclientHook(CiTestCase): def test_handle_args(self): """quick test of call to handle_args.""" - nic = 'eth0' + nic = "eth0" args = argparse.Namespace(event=dhc.UP, interface=nic) with mock.patch.dict("os.environ", clear=True, values=self.ex_env): dhc.handle_args(dhc.NAME, args, data_d=self.tmp) @@ -61,45 +62,51 @@ class TestDhclientHook(CiTestCase): def 
test_run_hook_up_creates_dir(self): """If dir does not exist, run_hook should create it.""" subd = self.tmp_path("subdir", self.tmp) - nic = 'eth1' - dhc.run_hook(nic, 'up', data_d=subd, env=self.ex_env) + nic = "eth1" + dhc.run_hook(nic, "up", data_d=subd, env=self.ex_env) self.assertEqual( - set([nic + ".json"]), set(dir2dict(subd + os.path.sep))) + set([nic + ".json"]), set(dir2dict(subd + os.path.sep)) + ) def test_run_hook_up(self): """Test expected use of run_hook_up.""" - nic = 'eth0' - dhc.run_hook(nic, 'up', data_d=self.tmp, env=self.ex_env) + nic = "eth0" + dhc.run_hook(nic, "up", data_d=self.tmp, env=self.ex_env) found = dir2dict(self.tmp + os.path.sep) self.assertEqual([nic + ".json"], list(found.keys())) self.assertEqual(self.expected, json.loads(found[nic + ".json"])) def test_run_hook_up_dhcp4_prefix(self): """Test run_hook filters correctly with older DHCP4_ data.""" - nic = 'eth0' - dhc.run_hook(nic, 'up', data_d=self.tmp, env=self.ex_env_dhcp4) + nic = "eth0" + dhc.run_hook(nic, "up", data_d=self.tmp, env=self.ex_env_dhcp4) found = dir2dict(self.tmp + os.path.sep) self.assertEqual([nic + ".json"], list(found.keys())) self.assertEqual(self.expected, json.loads(found[nic + ".json"])) def test_run_hook_down_deletes(self): """down should delete the created json file.""" - nic = 'eth1' + nic = "eth1" populate_dir( - self.tmp, {nic + ".json": "{'abcd'}", 'myfile.txt': 'text'}) - dhc.run_hook(nic, 'down', data_d=self.tmp, env={'old_host_name': 'x1'}) + self.tmp, {nic + ".json": "{'abcd'}", "myfile.txt": "text"} + ) + dhc.run_hook(nic, "down", data_d=self.tmp, env={"old_host_name": "x1"}) self.assertEqual( - set(['myfile.txt']), - set(dir2dict(self.tmp + os.path.sep))) + set(["myfile.txt"]), set(dir2dict(self.tmp + os.path.sep)) + ) def test_get_parser(self): """Smoke test creation of get_parser.""" # cloud-init main uses 'action'. - event, interface = (dhc.UP, 'mynic0') + event, interface = (dhc.UP, "mynic0") self.assertEqual( - argparse.Namespace(event=event, interface=interface, - action=(dhc.NAME, dhc.handle_args)), - dhc.get_parser().parse_args([event, interface])) + argparse.Namespace( + event=event, + interface=interface, + action=(dhc.NAME, dhc.handle_args), + ), + dhc.get_parser().parse_args([event, interface]), + ) # vi: ts=4 expandtab diff --git a/tests/unittests/test_dmi.py b/tests/unittests/test_dmi.py index 674e7b98..6c28724a 100644 --- a/tests/unittests/test_dmi.py +++ b/tests/unittests/test_dmi.py @@ -1,16 +1,13 @@ -from tests.unittests import helpers -from cloudinit import dmi -from cloudinit import util -from cloudinit import subp - import os -import tempfile import shutil +import tempfile from unittest import mock +from cloudinit import dmi, subp, util +from tests.unittests import helpers -class TestReadDMIData(helpers.FilesystemMockingTestCase): +class TestReadDMIData(helpers.FilesystemMockingTestCase): def setUp(self): super(TestReadDMIData, self).setUp() self.new_root = tempfile.mkdtemp() @@ -24,7 +21,7 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase): self._m_is_FreeBSD = p.start() def _create_sysfs_parent_directory(self): - util.ensure_dir(os.path.join('sys', 'class', 'dmi', 'id')) + util.ensure_dir(os.path.join("sys", "class", "dmi", "id")) def _create_sysfs_file(self, key, content): """Mocks the sys path found on Linux systems.""" @@ -37,92 +34,109 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase): In order to test a missing sys path and call outs to dmidecode, this function fakes the results of dmidecode to test the results. 
""" + def _dmidecode_subp(cmd): if cmd[-1] != key: raise subp.ProcessExecutionError() return (content, error) self.patched_funcs.enter_context( - mock.patch("cloudinit.dmi.subp.which", side_effect=lambda _: True)) + mock.patch("cloudinit.dmi.subp.which", side_effect=lambda _: True) + ) self.patched_funcs.enter_context( - mock.patch("cloudinit.dmi.subp.subp", side_effect=_dmidecode_subp)) + mock.patch("cloudinit.dmi.subp.subp", side_effect=_dmidecode_subp) + ) def _configure_kenv_return(self, key, content, error=None): """ In order to test a FreeBSD system call outs to kenv, this function fakes the results of kenv to test the results. """ + def _kenv_subp(cmd): if cmd[-1] != dmi.DMIDECODE_TO_KERNEL[key].freebsd: raise subp.ProcessExecutionError() return (content, error) self.patched_funcs.enter_context( - mock.patch("cloudinit.dmi.subp.subp", side_effect=_kenv_subp)) + mock.patch("cloudinit.dmi.subp.subp", side_effect=_kenv_subp) + ) def patch_mapping(self, new_mapping): self.patched_funcs.enter_context( - mock.patch('cloudinit.dmi.DMIDECODE_TO_KERNEL', - new_mapping)) + mock.patch("cloudinit.dmi.DMIDECODE_TO_KERNEL", new_mapping) + ) def test_sysfs_used_with_key_in_mapping_and_file_on_disk(self): - self.patch_mapping({'mapped-key': dmi.kdmi('mapped-value', None)}) - expected_dmi_value = 'sys-used-correctly' - self._create_sysfs_file('mapped-value', expected_dmi_value) - self._configure_dmidecode_return('mapped-key', 'wrong-wrong-wrong') - self.assertEqual(expected_dmi_value, dmi.read_dmi_data('mapped-key')) + self.patch_mapping({"mapped-key": dmi.kdmi("mapped-value", None)}) + expected_dmi_value = "sys-used-correctly" + self._create_sysfs_file("mapped-value", expected_dmi_value) + self._configure_dmidecode_return("mapped-key", "wrong-wrong-wrong") + self.assertEqual(expected_dmi_value, dmi.read_dmi_data("mapped-key")) def test_dmidecode_used_if_no_sysfs_file_on_disk(self): self.patch_mapping({}) self._create_sysfs_parent_directory() - expected_dmi_value = 'dmidecode-used' - self._configure_dmidecode_return('use-dmidecode', expected_dmi_value) + expected_dmi_value = "dmidecode-used" + self._configure_dmidecode_return("use-dmidecode", expected_dmi_value) with mock.patch("cloudinit.util.os.uname") as m_uname: - m_uname.return_value = ('x-sysname', 'x-nodename', - 'x-release', 'x-version', 'x86_64') - self.assertEqual(expected_dmi_value, - dmi.read_dmi_data('use-dmidecode')) + m_uname.return_value = ( + "x-sysname", + "x-nodename", + "x-release", + "x-version", + "x86_64", + ) + self.assertEqual( + expected_dmi_value, dmi.read_dmi_data("use-dmidecode") + ) def test_dmidecode_not_used_on_arm(self): self.patch_mapping({}) print("current =%s", subp) self._create_sysfs_parent_directory() - dmi_val = 'from-dmidecode' - dmi_name = 'use-dmidecode' + dmi_val = "from-dmidecode" + dmi_name = "use-dmidecode" self._configure_dmidecode_return(dmi_name, dmi_val) print("now =%s", subp) - expected = {'armel': None, 'aarch64': dmi_val, 'x86_64': dmi_val} + expected = {"armel": None, "aarch64": dmi_val, "x86_64": dmi_val} found = {} # we do not run the 'dmi-decode' binary on some arches # verify that anything requested that is not in the sysfs dir # will return None on those arches. 
with mock.patch("cloudinit.util.os.uname") as m_uname: for arch in expected: - m_uname.return_value = ('x-sysname', 'x-nodename', - 'x-release', 'x-version', arch) + m_uname.return_value = ( + "x-sysname", + "x-nodename", + "x-release", + "x-version", + arch, + ) print("now2 =%s", subp) found[arch] = dmi.read_dmi_data(dmi_name) self.assertEqual(expected, found) def test_none_returned_if_neither_source_has_data(self): self.patch_mapping({}) - self._configure_dmidecode_return('key', 'value') - self.assertIsNone(dmi.read_dmi_data('expect-fail')) + self._configure_dmidecode_return("key", "value") + self.assertIsNone(dmi.read_dmi_data("expect-fail")) def test_none_returned_if_dmidecode_not_in_path(self): self.patched_funcs.enter_context( - mock.patch.object(subp, 'which', lambda _: False)) + mock.patch.object(subp, "which", lambda _: False) + ) self.patch_mapping({}) - self.assertIsNone(dmi.read_dmi_data('expect-fail')) + self.assertIsNone(dmi.read_dmi_data("expect-fail")) def test_empty_string_returned_instead_of_foxfox(self): # uninitialized dmi values show as \xff, return empty string my_len = 32 - dmi_value = b'\xff' * my_len + b'\n' + dmi_value = b"\xff" * my_len + b"\n" expected = "" - dmi_key = 'system-product-name' - sysfs_key = 'product_name' + dmi_key = "system-product-name" + sysfs_key = "product_name" self._create_sysfs_file(sysfs_key, dmi_value) self.assertEqual(expected, dmi.read_dmi_data(dmi_key)) @@ -132,7 +146,7 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase): # first verify we get the value if not in container self._m_is_container.return_value = False key, val = ("system-product-name", "my_product") - self._create_sysfs_file('product_name', val) + self._create_sysfs_file("product_name", val) self.assertEqual(val, dmi.read_dmi_data(key)) # then verify in container returns None @@ -142,7 +156,7 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase): def test_container_returns_none_on_unknown(self): """In a container even bogus keys return None.""" self._m_is_container.return_value = True - self._create_sysfs_file('product_name', "should-be-ignored") + self._create_sysfs_file("product_name", "should-be-ignored") self.assertIsNone(dmi.read_dmi_data("bogus")) self.assertIsNone(dmi.read_dmi_data("system-product-name")) diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index eb8992d9..f2d2b494 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -1,31 +1,34 @@ # This file is part of cloud-init. See LICENSE file for license information. 
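For reference, the \xff handling asserted by test_empty_string_returned_instead_of_foxfox above can be read as the following simplified filter; this is an illustrative sketch, not the cloudinit.dmi implementation:

def filter_uninitialized_dmi(raw):
    # Firmware leaves unset DMI strings as runs of 0xff bytes; report those
    # as an empty string instead of returning garbage to callers.
    stripped = raw.strip()
    if stripped and all(b == 0xFF for b in stripped):
        return ""
    return stripped.decode("utf-8", errors="replace")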
-from collections import namedtuple import copy import os +from collections import namedtuple from uuid import uuid4 -from cloudinit import safeyaml -from cloudinit import subp -from cloudinit import util +from cloudinit import safeyaml, subp, util +from cloudinit.sources import DataSourceIBMCloud as ds_ibm +from cloudinit.sources import DataSourceOracle as ds_oracle +from cloudinit.sources import DataSourceSmartOS as ds_smartos from tests.unittests.helpers import ( CiTestCase, + cloud_init_project_dir, dir2dict, populate_dir, populate_dir_with_ts, - cloud_init_project_dir, ) -from cloudinit.sources import DataSourceIBMCloud as ds_ibm -from cloudinit.sources import DataSourceSmartOS as ds_smartos -from cloudinit.sources import DataSourceOracle as ds_oracle -UNAME_MYSYS = ("Linux bart 4.4.0-62-generic #83-Ubuntu " - "SMP Wed Jan 18 14:10:15 UTC 2017 x86_64 GNU/Linux") -UNAME_PPC64EL = ("Linux diamond 4.4.0-83-generic #106-Ubuntu SMP " - "Mon Jun 26 17:53:54 UTC 2017 " - "ppc64le ppc64le ppc64le GNU/Linux") -UNAME_FREEBSD = ("FreeBSD fbsd12-1 12.1-RELEASE-p10 " - "FreeBSD 12.1-RELEASE-p10 GENERIC amd64") +UNAME_MYSYS = ( + "Linux bart 4.4.0-62-generic #83-Ubuntu " + "SMP Wed Jan 18 14:10:15 UTC 2017 x86_64 GNU/Linux" +) +UNAME_PPC64EL = ( + "Linux diamond 4.4.0-83-generic #106-Ubuntu SMP " + "Mon Jun 26 17:53:54 UTC 2017 " + "ppc64le ppc64le ppc64le GNU/Linux" +) +UNAME_FREEBSD = ( + "FreeBSD fbsd12-1 12.1-RELEASE-p10 FreeBSD 12.1-RELEASE-p10 GENERIC amd64" +) BLKID_EFI_ROOT = """ DEVNAME=/dev/sda1 @@ -41,10 +44,16 @@ PARTUUID=30c65c77-e07d-4039-b2fb-88b1fb5fa1fc # this is an Ubuntu 18.04 disk.img output (dual uefi and bios bootable) BLKID_UEFI_UBUNTU = [ - {'DEVNAME': 'vda1', 'TYPE': 'ext4', 'PARTUUID': uuid4(), 'UUID': uuid4()}, - {'DEVNAME': 'vda14', 'PARTUUID': uuid4()}, - {'DEVNAME': 'vda15', 'TYPE': 'vfat', 'LABEL': 'UEFI', 'PARTUUID': uuid4(), - 'UUID': '5F55-129B'}] + {"DEVNAME": "vda1", "TYPE": "ext4", "PARTUUID": uuid4(), "UUID": uuid4()}, + {"DEVNAME": "vda14", "PARTUUID": uuid4()}, + { + "DEVNAME": "vda15", + "TYPE": "vfat", + "LABEL": "UEFI", + "PARTUUID": uuid4(), + "UUID": "5F55-129B", + }, +] POLICY_FOUND_ONLY = "search,found=all,maybe=none,notfound=disabled" @@ -52,7 +61,7 @@ POLICY_FOUND_OR_MAYBE = "search,found=all,maybe=all,notfound=disabled" DI_DEFAULT_POLICY = "search,found=all,maybe=all,notfound=disabled" DI_DEFAULT_POLICY_NO_DMI = "search,found=all,maybe=all,notfound=enabled" DI_EC2_STRICT_ID_DEFAULT = "true" -OVF_MATCH_STRING = 'http://schemas.dmtf.org/ovf/environment/1' +OVF_MATCH_STRING = "http://schemas.dmtf.org/ovf/environment/1" SHELL_MOCK_TMPL = """\ %(name)s() { @@ -66,7 +75,7 @@ SHELL_MOCK_TMPL = """\ RC_FOUND = 0 RC_NOT_FOUND = 1 -DS_NONE = 'None' +DS_NONE = "None" P_CHASSIS_ASSET_TAG = "sys/class/dmi/id/chassis_asset_tag" P_PRODUCT_NAME = "sys/class/dmi/id/product_name" @@ -78,31 +87,42 @@ P_DSID_CFG = "etc/cloud/ds-identify.cfg" IBM_CONFIG_UUID = "9796-932E" -MOCK_VIRT_IS_CONTAINER_OTHER = {'name': 'detect_virt', - 'RET': 'container-other', 'ret': 0} -MOCK_VIRT_IS_KVM = {'name': 'detect_virt', 'RET': 'kvm', 'ret': 0} -MOCK_VIRT_IS_VMWARE = {'name': 'detect_virt', 'RET': 'vmware', 'ret': 0} +MOCK_VIRT_IS_CONTAINER_OTHER = { + "name": "detect_virt", + "RET": "container-other", + "ret": 0, +} +MOCK_VIRT_IS_KVM = {"name": "detect_virt", "RET": "kvm", "ret": 0} +MOCK_VIRT_IS_VMWARE = {"name": "detect_virt", "RET": "vmware", "ret": 0} # currently SmartOS hypervisor "bhyve" is unknown by systemd-detect-virt. 
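For reference, the policy strings above follow a "mode,key=value,..." layout; a rough Python reading of that format (the authoritative parser is the shell code in tools/ds-identify, so treat this as a sketch) is:

def parse_di_policy(policy):
    # "search,found=all,maybe=none,notfound=disabled" ->
    # {"mode": "search", "found": "all", "maybe": "none",
    #  "notfound": "disabled"}; a bare "disabled" yields {"mode": "disabled"}.
    mode, _, rest = policy.partition(",")
    settings = dict(kv.split("=", 1) for kv in rest.split(",") if kv)
    return {"mode": mode, **settings}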
-MOCK_VIRT_IS_VM_OTHER = {'name': 'detect_virt', 'RET': 'vm-other', 'ret': 0} -MOCK_VIRT_IS_XEN = {'name': 'detect_virt', 'RET': 'xen', 'ret': 0} -MOCK_UNAME_IS_PPC64 = {'name': 'uname', 'out': UNAME_PPC64EL, 'ret': 0} -MOCK_UNAME_IS_FREEBSD = {'name': 'uname', 'out': UNAME_FREEBSD, 'ret': 0} +MOCK_VIRT_IS_VM_OTHER = {"name": "detect_virt", "RET": "vm-other", "ret": 0} +MOCK_VIRT_IS_XEN = {"name": "detect_virt", "RET": "xen", "ret": 0} +MOCK_UNAME_IS_PPC64 = {"name": "uname", "out": UNAME_PPC64EL, "ret": 0} +MOCK_UNAME_IS_FREEBSD = {"name": "uname", "out": UNAME_FREEBSD, "ret": 0} shell_true = 0 shell_false = 1 -CallReturn = namedtuple('CallReturn', - ['rc', 'stdout', 'stderr', 'cfg', 'files']) +CallReturn = namedtuple( + "CallReturn", ["rc", "stdout", "stderr", "cfg", "files"] +) class DsIdentifyBase(CiTestCase): - dsid_path = cloud_init_project_dir('tools/ds-identify') - allowed_subp = ['sh'] - - def call(self, rootd=None, mocks=None, func="main", args=None, files=None, - policy_dmi=DI_DEFAULT_POLICY, - policy_no_dmi=DI_DEFAULT_POLICY_NO_DMI, - ec2_strict_id=DI_EC2_STRICT_ID_DEFAULT): + dsid_path = cloud_init_project_dir("tools/ds-identify") + allowed_subp = ["sh"] + + def call( + self, + rootd=None, + mocks=None, + func="main", + args=None, + files=None, + policy_dmi=DI_DEFAULT_POLICY, + policy_no_dmi=DI_DEFAULT_POLICY_NO_DMI, + ec2_strict_id=DI_EC2_STRICT_ID_DEFAULT, + ): if args is None: args = [] if mocks is None: @@ -114,7 +134,7 @@ class DsIdentifyBase(CiTestCase): if rootd is None: rootd = self.tmp_dir() - unset = '_unset' + unset = "_unset" wrap = self.tmp_path(path="_shwrap", dir=rootd) populate_dir(rootd, files) @@ -130,11 +150,11 @@ class DsIdentifyBase(CiTestCase): 'DI_DEFAULT_POLICY="%s"' % policy_dmi, 'DI_DEFAULT_POLICY_NO_DMI="%s"' % policy_no_dmi, 'DI_EC2_STRICT_ID_DEFAULT="%s"' % ec2_strict_id, - "" + "", ] def write_mock(data): - ddata = {'out': None, 'err': None, 'ret': 0, 'RET': None} + ddata = {"out": None, "err": None, "ret": 0, "RET": None} ddata.update(data) for k in ddata: if ddata[k] is None: @@ -143,56 +163,68 @@ class DsIdentifyBase(CiTestCase): mocklines = [] defaults = [ - {'name': 'detect_virt', 'RET': 'none', 'ret': 1}, - {'name': 'uname', 'out': UNAME_MYSYS}, - {'name': 'blkid', 'out': BLKID_EFI_ROOT}, - {'name': 'ovf_vmware_transport_guestinfo', - 'out': 'No value found', 'ret': 1}, - {'name': 'dmi_decode', 'ret': 1, - 'err': 'No dmidecode program. ERROR.'}, - {'name': 'get_kenv_field', 'ret': 1, - 'err': 'No kenv program. ERROR.'}, + {"name": "detect_virt", "RET": "none", "ret": 1}, + {"name": "uname", "out": UNAME_MYSYS}, + {"name": "blkid", "out": BLKID_EFI_ROOT}, + { + "name": "ovf_vmware_transport_guestinfo", + "out": "No value found", + "ret": 1, + }, + { + "name": "dmi_decode", + "ret": 1, + "err": "No dmidecode program. ERROR.", + }, + { + "name": "get_kenv_field", + "ret": 1, + "err": "No kenv program. ERROR.", + }, ] - written = [d['name'] for d in mocks] + written = [d["name"] for d in mocks] for data in mocks: mocklines.append(write_mock(data)) for d in defaults: - if d['name'] not in written: + if d["name"] not in written: mocklines.append(write_mock(d)) - endlines = [ - func + ' ' + ' '.join(['"%s"' % s for s in args]) - ] + endlines = [func + " " + " ".join(['"%s"' % s for s in args])] with open(wrap, "w") as fp: - fp.write('\n'.join(head + mocklines + endlines) + "\n") + fp.write("\n".join(head + mocklines + endlines) + "\n") rc = 0 try: - out, err = subp.subp(['sh', '-c', '. 
%s' % wrap], capture=True) + out, err = subp.subp(["sh", "-c", ". %s" % wrap], capture=True) except subp.ProcessExecutionError as e: rc = e.exit_code out = e.stdout err = e.stderr cfg = None - cfg_out = os.path.join(rootd, 'run/cloud-init/cloud.cfg') + cfg_out = os.path.join(rootd, "run/cloud-init/cloud.cfg") if os.path.exists(cfg_out): contents = util.load_file(cfg_out) try: cfg = safeyaml.load(contents) except Exception as e: - cfg = {"_INVALID_YAML": contents, - "_EXCEPTION": str(e)} + cfg = {"_INVALID_YAML": contents, "_EXCEPTION": str(e)} return CallReturn(rc, out, err, cfg, dir2dict(rootd)) def _call_via_dict(self, data, rootd=None, **kwargs): # return output of self.call with a dict input like VALID_CFG[item] - xwargs = {'rootd': rootd} - passthrough = ('mocks', 'func', 'args', 'policy_dmi', - 'policy_no_dmi', 'files') + xwargs = {"rootd": rootd} + passthrough = ( + "mocks", + "func", + "args", + "policy_dmi", + "policy_no_dmi", + "files", + ) for k in passthrough: if k in data: xwargs[k] = data[k] @@ -204,7 +236,8 @@ class DsIdentifyBase(CiTestCase): def _test_ds_found(self, name): data = copy.deepcopy(VALID_CFG[name]) return self._check_via_dict( - data, RC_FOUND, dslist=[data.get('ds'), DS_NONE]) + data, RC_FOUND, dslist=[data.get("ds"), DS_NONE] + ) def _test_ds_not_found(self, name): data = copy.deepcopy(VALID_CFG[name]) @@ -216,87 +249,104 @@ class DsIdentifyBase(CiTestCase): try: self.assertEqual(rc, ret.rc) if dslist is not None: - self.assertEqual(dslist, ret.cfg['datasource_list']) + self.assertEqual(dslist, ret.cfg["datasource_list"]) good = True finally: if not good: - _print_run_output(ret.rc, ret.stdout, ret.stderr, ret.cfg, - ret.files) + _print_run_output( + ret.rc, ret.stdout, ret.stderr, ret.cfg, ret.files + ) return ret class TestDsIdentify(DsIdentifyBase): def test_wb_print_variables(self): """_print_info reports an array of discovered variables to stderr.""" - data = VALID_CFG['Azure-dmi-detection'] + data = VALID_CFG["Azure-dmi-detection"] _, _, err, _, _ = self._call_via_dict(data) expected_vars = [ - 'DMI_PRODUCT_NAME', 'DMI_SYS_VENDOR', 'DMI_PRODUCT_SERIAL', - 'DMI_PRODUCT_UUID', 'PID_1_PRODUCT_NAME', 'DMI_CHASSIS_ASSET_TAG', - 'FS_LABELS', 'KERNEL_CMDLINE', 'VIRT', 'UNAME_KERNEL_NAME', - 'UNAME_KERNEL_RELEASE', 'UNAME_KERNEL_VERSION', 'UNAME_MACHINE', - 'UNAME_NODENAME', 'UNAME_OPERATING_SYSTEM', 'DSNAME', 'DSLIST', - 'MODE', 'ON_FOUND', 'ON_MAYBE', 'ON_NOTFOUND'] + "DMI_PRODUCT_NAME", + "DMI_SYS_VENDOR", + "DMI_PRODUCT_SERIAL", + "DMI_PRODUCT_UUID", + "PID_1_PRODUCT_NAME", + "DMI_CHASSIS_ASSET_TAG", + "FS_LABELS", + "KERNEL_CMDLINE", + "VIRT", + "UNAME_KERNEL_NAME", + "UNAME_KERNEL_RELEASE", + "UNAME_KERNEL_VERSION", + "UNAME_MACHINE", + "UNAME_NODENAME", + "UNAME_OPERATING_SYSTEM", + "DSNAME", + "DSLIST", + "MODE", + "ON_FOUND", + "ON_MAYBE", + "ON_NOTFOUND", + ] for var in expected_vars: - self.assertIn('{0}='.format(var), err) + self.assertIn("{0}=".format(var), err) def test_azure_dmi_detection_from_chassis_asset_tag(self): """Azure datasource is detected from DMI chassis-asset-tag""" - self._test_ds_found('Azure-dmi-detection') + self._test_ds_found("Azure-dmi-detection") def test_azure_seed_file_detection(self): """Azure datasource is detected due to presence of a seed file. 
The seed file tested is /var/lib/cloud/seed/azure/ovf-env.xml.""" - self._test_ds_found('Azure-seed-detection') + self._test_ds_found("Azure-seed-detection") def test_aws_ec2_hvm(self): """EC2: hvm instances use dmi serial and uuid starting with 'ec2'.""" - self._test_ds_found('Ec2-hvm') + self._test_ds_found("Ec2-hvm") def test_aws_ec2_xen(self): """EC2: sys/hypervisor/uuid starts with ec2.""" - self._test_ds_found('Ec2-xen') + self._test_ds_found("Ec2-xen") def test_brightbox_is_ec2(self): """EC2: product_serial ends with '.brightbox.com'""" - self._test_ds_found('Ec2-brightbox') + self._test_ds_found("Ec2-brightbox") def test_bobrightbox_is_not_brightbox(self): """EC2: bobrightbox.com in product_serial is not brightbox.""" - self._test_ds_not_found('Ec2-brightbox-negative') + self._test_ds_not_found("Ec2-brightbox-negative") def test_freebsd_nocloud(self): """NoCloud identified on FreeBSD via label by geom.""" - self._test_ds_found('NoCloud-fbsd') + self._test_ds_found("NoCloud-fbsd") def test_gce_by_product_name(self): """GCE identifies itself with product_name.""" - self._test_ds_found('GCE') + self._test_ds_found("GCE") def test_gce_by_serial(self): """Older gce compute instances must be identified by serial.""" - self._test_ds_found('GCE-serial') + self._test_ds_found("GCE-serial") def test_config_drive(self): """ConfigDrive datasource has a disk with LABEL=config-2.""" - self._test_ds_found('ConfigDrive') + self._test_ds_found("ConfigDrive") def test_rbx_cloud(self): """Rbx datasource has a disk with LABEL=CLOUDMD.""" - self._test_ds_found('RbxCloud') + self._test_ds_found("RbxCloud") def test_rbx_cloud_lower(self): """Rbx datasource has a disk with LABEL=cloudmd.""" - self._test_ds_found('RbxCloudLower') + self._test_ds_found("RbxCloudLower") def test_config_drive_upper(self): """ConfigDrive datasource has a disk with LABEL=CONFIG-2.""" - self._test_ds_found('ConfigDriveUpper') + self._test_ds_found("ConfigDriveUpper") def test_config_drive_seed(self): """Config Drive seed directory.""" - self._test_ds_found('ConfigDrive-seed') + self._test_ds_found("ConfigDrive-seed") def test_config_drive_interacts_with_ibmcloud_config_disk(self): """Verify ConfigDrive interaction with IBMCloud. @@ -304,34 +354,35 @@ class TestDsIdentify(DsIdentifyBase): If ConfigDrive is enabled and not IBMCloud, then ConfigDrive should claim the ibmcloud 'config-2' disk. If IBMCloud is enabled, then ConfigDrive should skip.""" - data = copy.deepcopy(VALID_CFG['IBMCloud-config-2']) - files = data.get('files', {}) + data = copy.deepcopy(VALID_CFG["IBMCloud-config-2"]) + files = data.get("files", {}) if not files: - data['files'] = files - cfgpath = 'etc/cloud/cloud.cfg.d/99_networklayer_common.cfg' + data["files"] = files + cfgpath = "etc/cloud/cloud.cfg.d/99_networklayer_common.cfg" # with list including IBMCloud, config drive should be not found. - files[cfgpath] = 'datasource_list: [ ConfigDrive, IBMCloud ]\n' + files[cfgpath] = "datasource_list: [ ConfigDrive, IBMCloud ]\n" ret = self._check_via_dict(data, shell_true) - self.assertEqual( - ret.cfg.get('datasource_list'), ['IBMCloud', 'None']) + self.assertEqual(ret.cfg.get("datasource_list"), ["IBMCloud", "None"]) # But if IBMCloud is not enabled, config drive should claim this. 
- files[cfgpath] = 'datasource_list: [ ConfigDrive, NoCloud ]\n' + files[cfgpath] = "datasource_list: [ ConfigDrive, NoCloud ]\n" ret = self._check_via_dict(data, shell_true) self.assertEqual( - ret.cfg.get('datasource_list'), ['ConfigDrive', 'None']) + ret.cfg.get("datasource_list"), ["ConfigDrive", "None"] + ) def test_ibmcloud_template_userdata_in_provisioning(self): """Template provisioned with user-data during provisioning stage. Template provisioning with user-data has METADATA disk, datasource should return not found.""" - data = copy.deepcopy(VALID_CFG['IBMCloud-metadata']) + data = copy.deepcopy(VALID_CFG["IBMCloud-metadata"]) # change the 'is_ibm_provisioning' mock to return 1 (false) - isprov_m = [m for m in data['mocks'] - if m["name"] == "is_ibm_provisioning"][0] - isprov_m['ret'] = shell_true + isprov_m = [ + m for m in data["mocks"] if m["name"] == "is_ibm_provisioning" + ][0] + isprov_m["ret"] = shell_true return self._check_via_dict(data, RC_NOT_FOUND) def test_ibmcloud_template_userdata(self): @@ -339,58 +390,61 @@ class TestDsIdentify(DsIdentifyBase): Template provisioning with user-data has METADATA disk. datasource should return found.""" - self._test_ds_found('IBMCloud-metadata') + self._test_ds_found("IBMCloud-metadata") def test_ibmcloud_template_no_userdata_in_provisioning(self): """Template provisioned with no user-data during provisioning. no disks attached. Datasource should return not found.""" - data = copy.deepcopy(VALID_CFG['IBMCloud-nodisks']) - data['mocks'].append( - {'name': 'is_ibm_provisioning', 'ret': shell_true}) + data = copy.deepcopy(VALID_CFG["IBMCloud-nodisks"]) + data["mocks"].append( + {"name": "is_ibm_provisioning", "ret": shell_true} + ) return self._check_via_dict(data, RC_NOT_FOUND) def test_ibmcloud_template_no_userdata(self): """Template provisioned with no user-data first boot. no disks attached. Datasource should return found.""" - self._check_via_dict(VALID_CFG['IBMCloud-nodisks'], RC_NOT_FOUND) + self._check_via_dict(VALID_CFG["IBMCloud-nodisks"], RC_NOT_FOUND) def test_ibmcloud_os_code(self): """Launched by os code always has config-2 disk.""" - self._test_ds_found('IBMCloud-config-2') + self._test_ds_found("IBMCloud-config-2") def test_ibmcloud_os_code_different_uuid(self): """IBM cloud config-2 disks must be explicit match on UUID. If the UUID is not 9796-932E then we actually expect ConfigDrive.""" - data = copy.deepcopy(VALID_CFG['IBMCloud-config-2']) + data = copy.deepcopy(VALID_CFG["IBMCloud-config-2"]) offset = None - for m, d in enumerate(data['mocks']): - if d.get('name') == "blkid": + for m, d in enumerate(data["mocks"]): + if d.get("name") == "blkid": offset = m break if not offset: raise ValueError("Expected to find 'blkid' mock, but did not.") - data['mocks'][offset]['out'] = d['out'].replace(ds_ibm.IBM_CONFIG_UUID, - "DEAD-BEEF") + data["mocks"][offset]["out"] = d["out"].replace( + ds_ibm.IBM_CONFIG_UUID, "DEAD-BEEF" + ) self._check_via_dict( - data, rc=RC_FOUND, dslist=['ConfigDrive', DS_NONE]) + data, rc=RC_FOUND, dslist=["ConfigDrive", DS_NONE] + ) def test_ibmcloud_with_nocloud_seed(self): """NoCloud seed should be preferred over IBMCloud. A nocloud seed should be preferred over IBMCloud even if enabled. Ubuntu 16.04 images have <vlc>/seed/nocloud-net. 
LP: #1766401.""" - data = copy.deepcopy(VALID_CFG['IBMCloud-config-2']) - files = data.get('files', {}) + data = copy.deepcopy(VALID_CFG["IBMCloud-config-2"]) + files = data.get("files", {}) if not files: - data['files'] = files - files.update(VALID_CFG['NoCloud-seed']['files']) + data["files"] = files + files.update(VALID_CFG["NoCloud-seed"]["files"]) ret = self._check_via_dict(data, shell_true) self.assertEqual( - ['NoCloud', 'IBMCloud', 'None'], - ret.cfg.get('datasource_list')) + ["NoCloud", "IBMCloud", "None"], ret.cfg.get("datasource_list") + ) def test_ibmcloud_with_configdrive_seed(self): """ConfigDrive seed should be preferred over IBMCloud. @@ -398,28 +452,28 @@ class TestDsIdentify(DsIdentifyBase): A ConfigDrive seed should be preferred over IBMCloud even if enabled. Ubuntu 16.04 images have an fstab entry that mounts the METADATA disk into <vlc>/seed/config_drive. LP: #1766401.""" - data = copy.deepcopy(VALID_CFG['IBMCloud-config-2']) - files = data.get('files', {}) + data = copy.deepcopy(VALID_CFG["IBMCloud-config-2"]) + files = data.get("files", {}) if not files: - data['files'] = files - files.update(VALID_CFG['ConfigDrive-seed']['files']) + data["files"] = files + files.update(VALID_CFG["ConfigDrive-seed"]["files"]) ret = self._check_via_dict(data, shell_true) self.assertEqual( - ['ConfigDrive', 'IBMCloud', 'None'], - ret.cfg.get('datasource_list')) + ["ConfigDrive", "IBMCloud", "None"], ret.cfg.get("datasource_list") + ) def test_policy_disabled(self): """A builtin policy of 'disabled' should return not found. Even though a search would find something, the builtin policy of disabled should cause the return of not found.""" - mydata = copy.deepcopy(VALID_CFG['Ec2-hvm']) + mydata = copy.deepcopy(VALID_CFG["Ec2-hvm"]) self._check_via_dict(mydata, rc=RC_NOT_FOUND, policy_dmi="disabled") def test_policy_config_disable_overrides_builtin(self): """explicit policy: disabled in config file should cause not found.""" - mydata = copy.deepcopy(VALID_CFG['Ec2-hvm']) - mydata['files'][P_DSID_CFG] = '\n'.join(['policy: disabled', '']) + mydata = copy.deepcopy(VALID_CFG["Ec2-hvm"]) + mydata["files"][P_DSID_CFG] = "\n".join(["policy: disabled", ""]) self._check_via_dict(mydata, rc=RC_NOT_FOUND) def test_single_entry_defines_datasource(self): @@ -428,54 +482,55 @@ class TestDsIdentify(DsIdentifyBase): Test the valid Ec2-hvm, but provide a config file that specifies a single entry in datasource_list. The configured value should be used.""" - mydata = copy.deepcopy(VALID_CFG['Ec2-hvm']) - cfgpath = 'etc/cloud/cloud.cfg.d/myds.cfg' - mydata['files'][cfgpath] = 'datasource_list: ["NoCloud"]\n' - self._check_via_dict(mydata, rc=RC_FOUND, dslist=['NoCloud', DS_NONE]) + mydata = copy.deepcopy(VALID_CFG["Ec2-hvm"]) + cfgpath = "etc/cloud/cloud.cfg.d/myds.cfg" + mydata["files"][cfgpath] = 'datasource_list: ["NoCloud"]\n' + self._check_via_dict(mydata, rc=RC_FOUND, dslist=["NoCloud", DS_NONE]) def test_configured_list_with_none(self): """When datasource_list already contains None, None is not added. The explicitly configured datasource_list has 'None' in it. 
That should not have None automatically added.""" - mydata = copy.deepcopy(VALID_CFG['GCE']) - cfgpath = 'etc/cloud/cloud.cfg.d/myds.cfg' - mydata['files'][cfgpath] = 'datasource_list: ["Ec2", "None"]\n' - self._check_via_dict(mydata, rc=RC_FOUND, dslist=['Ec2', DS_NONE]) + mydata = copy.deepcopy(VALID_CFG["GCE"]) + cfgpath = "etc/cloud/cloud.cfg.d/myds.cfg" + mydata["files"][cfgpath] = 'datasource_list: ["Ec2", "None"]\n' + self._check_via_dict(mydata, rc=RC_FOUND, dslist=["Ec2", DS_NONE]) def test_aliyun_identified(self): """Test that Aliyun cloud is identified by product id.""" - self._test_ds_found('AliYun') + self._test_ds_found("AliYun") def test_aliyun_over_ec2(self): """Even if all other factors identified Ec2, AliYun should be used.""" - mydata = copy.deepcopy(VALID_CFG['Ec2-xen']) - self._test_ds_found('AliYun') - prod_name = VALID_CFG['AliYun']['files'][P_PRODUCT_NAME] - mydata['files'][P_PRODUCT_NAME] = prod_name + mydata = copy.deepcopy(VALID_CFG["Ec2-xen"]) + self._test_ds_found("AliYun") + prod_name = VALID_CFG["AliYun"]["files"][P_PRODUCT_NAME] + mydata["files"][P_PRODUCT_NAME] = prod_name policy = "search,found=first,maybe=none,notfound=disabled" - self._check_via_dict(mydata, rc=RC_FOUND, dslist=['AliYun', DS_NONE], - policy_dmi=policy) + self._check_via_dict( + mydata, rc=RC_FOUND, dslist=["AliYun", DS_NONE], policy_dmi=policy + ) def test_default_openstack_intel_is_found(self): """On Intel, openstack must be identified.""" - self._test_ds_found('OpenStack') + self._test_ds_found("OpenStack") def test_openstack_open_telekom_cloud(self): """Open Telekom Cloud identification.""" - self._test_ds_found('OpenStack-OpenTelekom') + self._test_ds_found("OpenStack-OpenTelekom") def test_openstack_sap_ccloud(self): """SAP Converged Cloud identification""" - self._test_ds_found('OpenStack-SAPCCloud') + self._test_ds_found("OpenStack-SAPCCloud") def test_openstack_asset_tag_nova(self): """OpenStack identification via asset tag OpenStack Nova.""" - self._test_ds_found('OpenStack-AssetTag-Nova') + self._test_ds_found("OpenStack-AssetTag-Nova") def test_openstack_asset_tag_compute(self): """OpenStack identification via asset tag OpenStack Compute.""" - self._test_ds_found('OpenStack-AssetTag-Compute') + self._test_ds_found("OpenStack-AssetTag-Compute") def test_openstack_on_non_intel_is_maybe(self): """On non-Intel, openstack without dmi info is maybe. @@ -483,243 +538,282 @@ class TestDsIdentify(DsIdentifyBase): nova does not identify itself on platforms other than intel. https://bugs.launchpad.net/cloud-init/+bugs?field.tag=dsid-nova""" - data = copy.deepcopy(VALID_CFG['OpenStack']) - del data['files'][P_PRODUCT_NAME] - data.update({'policy_dmi': POLICY_FOUND_OR_MAYBE, - 'policy_no_dmi': POLICY_FOUND_OR_MAYBE}) + data = copy.deepcopy(VALID_CFG["OpenStack"]) + del data["files"][P_PRODUCT_NAME] + data.update( + { + "policy_dmi": POLICY_FOUND_OR_MAYBE, + "policy_no_dmi": POLICY_FOUND_OR_MAYBE, + } + ) # this should show not found as default uname in tests is intel. # and intel openstack requires positive identification. self._check_via_dict(data, RC_NOT_FOUND, dslist=None) # updating the uname to ppc64 though should get a maybe. 
- data.update({'mocks': [MOCK_VIRT_IS_KVM, MOCK_UNAME_IS_PPC64]}) + data.update({"mocks": [MOCK_VIRT_IS_KVM, MOCK_UNAME_IS_PPC64]}) (_, _, err, _, _) = self._check_via_dict( - data, RC_FOUND, dslist=['OpenStack', 'None']) + data, RC_FOUND, dslist=["OpenStack", "None"] + ) self.assertIn("check for 'OpenStack' returned maybe", err) def test_default_ovf_is_found(self): """OVF is identified found when ovf/ovf-env.xml seed file exists.""" - self._test_ds_found('OVF-seed') + self._test_ds_found("OVF-seed") def test_default_ovf_with_detect_virt_none_not_found(self): """OVF identifies not found when detect_virt returns "none".""" self._check_via_dict( - {'ds': 'OVF'}, rc=RC_NOT_FOUND, policy_dmi="disabled") + {"ds": "OVF"}, rc=RC_NOT_FOUND, policy_dmi="disabled" + ) def test_default_ovf_returns_not_found_on_azure(self): """OVF datasource won't be found as false positive on Azure.""" - ovfonazure = copy.deepcopy(VALID_CFG['OVF']) + ovfonazure = copy.deepcopy(VALID_CFG["OVF"]) # Set azure asset tag to assert OVF content not found - ovfonazure['files'][P_CHASSIS_ASSET_TAG] = ( - '7783-7084-3265-9085-8269-3286-77\n') - self._check_via_dict( - ovfonazure, RC_FOUND, dslist=['Azure', DS_NONE]) + ovfonazure["files"][ + P_CHASSIS_ASSET_TAG + ] = "7783-7084-3265-9085-8269-3286-77\n" + self._check_via_dict(ovfonazure, RC_FOUND, dslist=["Azure", DS_NONE]) def test_ovf_on_vmware_iso_found_by_cdrom_with_ovf_schema_match(self): """OVF is identified when iso9660 cdrom path contains ovf schema.""" - self._test_ds_found('OVF') + self._test_ds_found("OVF") def test_ovf_on_vmware_guestinfo_found(self): """OVF guest info is found on vmware.""" - self._test_ds_found('OVF-guestinfo') + self._test_ds_found("OVF-guestinfo") def test_ovf_on_vmware_iso_found_when_vmware_customization(self): """OVF is identified when vmware customization is enabled.""" - self._test_ds_found('OVF-vmware-customization') + self._test_ds_found("OVF-vmware-customization") def test_ovf_on_vmware_iso_found_open_vm_tools_64(self): """OVF is identified when open-vm-tools installed in /usr/lib64.""" - cust64 = copy.deepcopy(VALID_CFG['OVF-vmware-customization']) - p32 = 'usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so' - open64 = 'usr/lib64/open-vm-tools/plugins/vmsvc/libdeployPkgPlugin.so' - cust64['files'][open64] = cust64['files'][p32] - del cust64['files'][p32] + cust64 = copy.deepcopy(VALID_CFG["OVF-vmware-customization"]) + p32 = "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so" + open64 = "usr/lib64/open-vm-tools/plugins/vmsvc/libdeployPkgPlugin.so" + cust64["files"][open64] = cust64["files"][p32] + del cust64["files"][p32] return self._check_via_dict( - cust64, RC_FOUND, dslist=[cust64.get('ds'), DS_NONE]) + cust64, RC_FOUND, dslist=[cust64.get("ds"), DS_NONE] + ) def test_ovf_on_vmware_iso_found_open_vm_tools_x86_64_linux_gnu(self): """OVF is identified when open-vm-tools installed in /usr/lib/x86_64-linux-gnu.""" - cust64 = copy.deepcopy(VALID_CFG['OVF-vmware-customization']) - p32 = 'usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so' - x86 = 'usr/lib/x86_64-linux-gnu/open-vm-tools/plugins/vmsvc/' \ - 'libdeployPkgPlugin.so' - cust64['files'][x86] = cust64['files'][p32] - del cust64['files'][p32] + cust64 = copy.deepcopy(VALID_CFG["OVF-vmware-customization"]) + p32 = "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so" + x86 = ( + "usr/lib/x86_64-linux-gnu/open-vm-tools/plugins/vmsvc/" + "libdeployPkgPlugin.so" + ) + cust64["files"][x86] = cust64["files"][p32] + del cust64["files"][p32] return 
self._check_via_dict( - cust64, RC_FOUND, dslist=[cust64.get('ds'), DS_NONE]) + cust64, RC_FOUND, dslist=[cust64.get("ds"), DS_NONE] + ) def test_ovf_on_vmware_iso_found_open_vm_tools_aarch64_linux_gnu(self): """OVF is identified when open-vm-tools installed in /usr/lib/aarch64-linux-gnu.""" - cust64 = copy.deepcopy(VALID_CFG['OVF-vmware-customization']) - p32 = 'usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so' - aarch64 = 'usr/lib/aarch64-linux-gnu/open-vm-tools/plugins/vmsvc/' \ - 'libdeployPkgPlugin.so' - cust64['files'][aarch64] = cust64['files'][p32] - del cust64['files'][p32] + cust64 = copy.deepcopy(VALID_CFG["OVF-vmware-customization"]) + p32 = "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so" + aarch64 = ( + "usr/lib/aarch64-linux-gnu/open-vm-tools/plugins/vmsvc/" + "libdeployPkgPlugin.so" + ) + cust64["files"][aarch64] = cust64["files"][p32] + del cust64["files"][p32] return self._check_via_dict( - cust64, RC_FOUND, dslist=[cust64.get('ds'), DS_NONE]) + cust64, RC_FOUND, dslist=[cust64.get("ds"), DS_NONE] + ) def test_ovf_on_vmware_iso_found_by_cdrom_with_matching_fs_label(self): """OVF is identified by well-known iso9660 labels.""" - ovf_cdrom_by_label = copy.deepcopy(VALID_CFG['OVF']) + ovf_cdrom_by_label = copy.deepcopy(VALID_CFG["OVF"]) # Unset matching cdrom ovf schema content - ovf_cdrom_by_label['files']['dev/sr0'] = 'No content match' + ovf_cdrom_by_label["files"]["dev/sr0"] = "No content match" self._check_via_dict( - ovf_cdrom_by_label, rc=RC_NOT_FOUND, policy_dmi="disabled") + ovf_cdrom_by_label, rc=RC_NOT_FOUND, policy_dmi="disabled" + ) # Add recognized labels - valid_ovf_labels = ['ovf-transport', 'OVF-TRANSPORT', - "OVFENV", "ovfenv", "OVF ENV", "ovf env"] + valid_ovf_labels = [ + "ovf-transport", + "OVF-TRANSPORT", + "OVFENV", + "ovfenv", + "OVF ENV", + "ovf env", + ] for valid_ovf_label in valid_ovf_labels: - ovf_cdrom_by_label['mocks'][0]['out'] = blkid_out([ - {'DEVNAME': 'sda1', 'TYPE': 'ext4', 'LABEL': 'rootfs'}, - {'DEVNAME': 'sr0', 'TYPE': 'iso9660', - 'LABEL': valid_ovf_label}, - {'DEVNAME': 'vda1', 'TYPE': 'ntfs', 'LABEL': 'data'}]) + ovf_cdrom_by_label["mocks"][0]["out"] = blkid_out( + [ + {"DEVNAME": "sda1", "TYPE": "ext4", "LABEL": "rootfs"}, + { + "DEVNAME": "sr0", + "TYPE": "iso9660", + "LABEL": valid_ovf_label, + }, + {"DEVNAME": "vda1", "TYPE": "ntfs", "LABEL": "data"}, + ] + ) self._check_via_dict( - ovf_cdrom_by_label, rc=RC_FOUND, dslist=['OVF', DS_NONE]) + ovf_cdrom_by_label, rc=RC_FOUND, dslist=["OVF", DS_NONE] + ) def test_ovf_on_vmware_iso_found_by_cdrom_with_different_size(self): """OVF is identified only on cdrom devices smaller than 10MB.""" - ovf_cdrom_with_size = copy.deepcopy(VALID_CFG['OVF']) + ovf_cdrom_with_size = copy.deepcopy(VALID_CFG["OVF"]) # Set cdrom size to 20480 (10MB in 512 byte units) - ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '20480\n' + ovf_cdrom_with_size["files"]["sys/class/block/sr0/size"] = "20480\n" self._check_via_dict( - ovf_cdrom_with_size, rc=RC_NOT_FOUND, policy_dmi="disabled") + ovf_cdrom_with_size, rc=RC_NOT_FOUND, policy_dmi="disabled" + ) # Set cdrom size to 204800 (100MB in 512 byte units) - ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '204800\n' + ovf_cdrom_with_size["files"]["sys/class/block/sr0/size"] = "204800\n" self._check_via_dict( - ovf_cdrom_with_size, rc=RC_NOT_FOUND, policy_dmi="disabled") + ovf_cdrom_with_size, rc=RC_NOT_FOUND, policy_dmi="disabled" + ) # Set cdrom size to 18432 (9MB in 512 byte units) - 
ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '18432\n' + ovf_cdrom_with_size["files"]["sys/class/block/sr0/size"] = "18432\n" self._check_via_dict( - ovf_cdrom_with_size, rc=RC_FOUND, dslist=['OVF', DS_NONE]) + ovf_cdrom_with_size, rc=RC_FOUND, dslist=["OVF", DS_NONE] + ) # Set cdrom size to 2048 (1MB in 512 byte units) - ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '2048\n' + ovf_cdrom_with_size["files"]["sys/class/block/sr0/size"] = "2048\n" self._check_via_dict( - ovf_cdrom_with_size, rc=RC_FOUND, dslist=['OVF', DS_NONE]) + ovf_cdrom_with_size, rc=RC_FOUND, dslist=["OVF", DS_NONE] + ) def test_default_nocloud_as_vdb_iso9660(self): """NoCloud is found with iso9660 filesystem on non-cdrom disk.""" - self._test_ds_found('NoCloud') + self._test_ds_found("NoCloud") def test_nocloud_upper(self): """NoCloud is found with uppercase filesystem label.""" - self._test_ds_found('NoCloudUpper') + self._test_ds_found("NoCloudUpper") def test_nocloud_fatboot(self): """NoCloud fatboot label - LP: #184166.""" - self._test_ds_found('NoCloud-fatboot') + self._test_ds_found("NoCloud-fatboot") def test_nocloud_seed(self): """Nocloud seed directory.""" - self._test_ds_found('NoCloud-seed') + self._test_ds_found("NoCloud-seed") def test_nocloud_seed_ubuntu_core_writable(self): """Nocloud seed directory ubuntu core writable""" - self._test_ds_found('NoCloud-seed-ubuntu-core') + self._test_ds_found("NoCloud-seed-ubuntu-core") def test_hetzner_found(self): """Hetzner cloud is identified in sys_vendor.""" - self._test_ds_found('Hetzner') + self._test_ds_found("Hetzner") def test_smartos_bhyve(self): """SmartOS cloud identified by SmartDC in dmi.""" - self._test_ds_found('SmartOS-bhyve') + self._test_ds_found("SmartOS-bhyve") def test_smartos_lxbrand(self): """SmartOS cloud identified on lxbrand container.""" - self._test_ds_found('SmartOS-lxbrand') + self._test_ds_found("SmartOS-lxbrand") def test_smartos_lxbrand_requires_socket(self): """SmartOS cloud should not be identified if no socket file.""" - mycfg = copy.deepcopy(VALID_CFG['SmartOS-lxbrand']) - del mycfg['files'][ds_smartos.METADATA_SOCKFILE] + mycfg = copy.deepcopy(VALID_CFG["SmartOS-lxbrand"]) + del mycfg["files"][ds_smartos.METADATA_SOCKFILE] self._check_via_dict(mycfg, rc=RC_NOT_FOUND, policy_dmi="disabled") def test_path_env_gets_set_from_main(self): """PATH environment should always have some tokens when main is run. 
We explicitly call main as we want to ensure it updates PATH.""" - cust = copy.deepcopy(VALID_CFG['NoCloud']) + cust = copy.deepcopy(VALID_CFG["NoCloud"]) rootd = self.tmp_dir() - mpp = 'main-printpath' + mpp = "main-printpath" pre = "MYPATH=" - cust['files'][mpp] = ( - 'PATH="/mycust/path"; main; r=$?; echo ' + pre + '$PATH; exit $r;') + cust["files"][mpp] = ( + 'PATH="/mycust/path"; main; r=$?; echo ' + pre + "$PATH; exit $r;" + ) ret = self._check_via_dict( - cust, RC_FOUND, - func=".", args=[os.path.join(rootd, mpp)], rootd=rootd) + cust, + RC_FOUND, + func=".", + args=[os.path.join(rootd, mpp)], + rootd=rootd, + ) match = [ line for line in ret.stdout.splitlines() if line.startswith(pre) ][0] toks = match.replace(pre, "").split(":") expected = ["/sbin", "/bin", "/usr/sbin", "/usr/bin", "/mycust/path"] - self.assertEqual(expected, [p for p in expected if p in toks], - "path did not have expected tokens") + self.assertEqual( + expected, + [p for p in expected if p in toks], + "path did not have expected tokens", + ) def test_zstack_is_ec2(self): """EC2: chassis asset tag ends with 'zstack.io'""" - self._test_ds_found('Ec2-ZStack') + self._test_ds_found("Ec2-ZStack") def test_e24cloud_is_ec2(self): """EC2: e24cloud identified by sys_vendor""" - self._test_ds_found('Ec2-E24Cloud') + self._test_ds_found("Ec2-E24Cloud") def test_e24cloud_not_active(self): """EC2: e24cloud not identified when sys_vendor does not match""" - self._test_ds_not_found('Ec2-E24Cloud-negative') + self._test_ds_not_found("Ec2-E24Cloud-negative") def test_vmware_no_valid_transports(self): """VMware: no valid transports""" - self._test_ds_not_found('VMware-NoValidTransports') + self._test_ds_not_found("VMware-NoValidTransports") def test_vmware_envvar_no_data(self): """VMware: envvar transport no data""" - self._test_ds_not_found('VMware-EnvVar-NoData') + self._test_ds_not_found("VMware-EnvVar-NoData") def test_vmware_envvar_no_virt_id(self): """VMware: envvar transport success if no virt id""" - self._test_ds_found('VMware-EnvVar-NoVirtID') + self._test_ds_found("VMware-EnvVar-NoVirtID") def test_vmware_envvar_activated_by_metadata(self): """VMware: envvar transport activated by metadata""" - self._test_ds_found('VMware-EnvVar-Metadata') + self._test_ds_found("VMware-EnvVar-Metadata") def test_vmware_envvar_activated_by_userdata(self): """VMware: envvar transport activated by userdata""" - self._test_ds_found('VMware-EnvVar-Userdata') + self._test_ds_found("VMware-EnvVar-Userdata") def test_vmware_envvar_activated_by_vendordata(self): """VMware: envvar transport activated by vendordata""" - self._test_ds_found('VMware-EnvVar-Vendordata') + self._test_ds_found("VMware-EnvVar-Vendordata") def test_vmware_guestinfo_no_data(self): """VMware: guestinfo transport no data""" - self._test_ds_not_found('VMware-GuestInfo-NoData') + self._test_ds_not_found("VMware-GuestInfo-NoData") def test_vmware_guestinfo_no_virt_id(self): """VMware: guestinfo transport fails if no virt id""" - self._test_ds_not_found('VMware-GuestInfo-NoVirtID') + self._test_ds_not_found("VMware-GuestInfo-NoVirtID") def test_vmware_guestinfo_activated_by_metadata(self): """VMware: guestinfo transport activated by metadata""" - self._test_ds_found('VMware-GuestInfo-Metadata') + self._test_ds_found("VMware-GuestInfo-Metadata") def test_vmware_guestinfo_activated_by_userdata(self): """VMware: guestinfo transport activated by userdata""" - self._test_ds_found('VMware-GuestInfo-Userdata') + self._test_ds_found("VMware-GuestInfo-Userdata") def 
test_vmware_guestinfo_activated_by_vendordata(self): """VMware: guestinfo transport activated by vendordata""" - self._test_ds_found('VMware-GuestInfo-Vendordata') + self._test_ds_found("VMware-GuestInfo-Vendordata") class TestBSDNoSys(DsIdentifyBase): @@ -735,14 +829,14 @@ class TestBSDNoSys(DsIdentifyBase): This will be used on FreeBSD systems. """ - self._test_ds_found('Hetzner-kenv') + self._test_ds_found("Hetzner-kenv") def test_dmi_dmidecode(self): """Test that dmidecode(8) works on systems which don't have /sys This will be used on all other BSD systems. """ - self._test_ds_found('Hetzner-dmidecode') + self._test_ds_found("Hetzner-dmidecode") class TestIsIBMProvisioning(DsIdentifyBase): @@ -766,9 +860,11 @@ class TestIsIBMProvisioning(DsIdentifyBase): def test_config_with_old_log(self): """A config with a log from previous boot is not provisioning.""" rootd = self.tmp_dir() - data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10), - self.inst_log: ("log data\n", -30), - self.boot_ref: ("PWD=/", 0)} + data = { + self.prov_cfg: ("key=value\nkey2=val2\n", -10), + self.inst_log: ("log data\n", -30), + self.boot_ref: ("PWD=/", 0), + } populate_dir_with_ts(rootd, data) ret = self.call(rootd=rootd, func=self.funcname) self.assertEqual(shell_false, ret.rc) @@ -777,9 +873,11 @@ class TestIsIBMProvisioning(DsIdentifyBase): def test_config_with_new_log(self): """A config with a log from this boot is provisioning.""" rootd = self.tmp_dir() - data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10), - self.inst_log: ("log data\n", 30), - self.boot_ref: ("PWD=/", 0)} + data = { + self.prov_cfg: ("key=value\nkey2=val2\n", -10), + self.inst_log: ("log data\n", 30), + self.boot_ref: ("PWD=/", 0), + } populate_dir_with_ts(rootd, data) ret = self.call(rootd=rootd, func=self.funcname) self.assertEqual(shell_true, ret.rc) @@ -789,12 +887,12 @@ class TestIsIBMProvisioning(DsIdentifyBase): class TestOracle(DsIdentifyBase): def test_found_by_chassis(self): """Simple positive test of Oracle by chassis id.""" - self._test_ds_found('Oracle') + self._test_ds_found("Oracle") def test_not_found(self): """Simple negative test of Oracle.""" - mycfg = copy.deepcopy(VALID_CFG['Oracle']) - mycfg['files'][P_CHASSIS_ASSET_TAG] = "Not Oracle" + mycfg = copy.deepcopy(VALID_CFG["Oracle"]) + mycfg["files"][P_CHASSIS_ASSET_TAG] = "Not Oracle" self._check_via_dict(mycfg, rc=RC_NOT_FOUND) @@ -811,7 +909,7 @@ def blkid_out(disks=None): for key in [d for d in disk if d != "DEVNAME"]: lines.append("%s=%s" % (key, disk[key])) lines.append("") - return '\n'.join(lines) + return "\n".join(lines) def geom_out(disks=None): @@ -828,616 +926,809 @@ def geom_out(disks=None): disks = [] lines = [] for disk in disks: - lines.append("%s/%s N/A %s" % ( - disk["TYPE"], disk["LABEL"], disk["DEVNAME"])) + lines.append( + "%s/%s N/A %s" % (disk["TYPE"], disk["LABEL"], disk["DEVNAME"]) + ) lines.append("") - return '\n'.join(lines) + return "\n".join(lines) def _print_run_output(rc, out, err, cfg, files): """A helper to print return of TestDsIdentify. 
- _print_run_output(self.call())""" - print('\n'.join([ - '-- rc = %s --' % rc, - '-- out --', str(out), - '-- err --', str(err), - '-- cfg --', util.json_dumps(cfg)])) - print('-- files --') + _print_run_output(self.call())""" + print( + "\n".join( + [ + "-- rc = %s --" % rc, + "-- out --", + str(out), + "-- err --", + str(err), + "-- cfg --", + util.json_dumps(cfg), + ] + ) + ) + print("-- files --") for k, v in files.items(): if "/_shwrap" in k: continue - print(' === %s ===' % k) + print(" === %s ===" % k) for line in v.splitlines(): print(" " + line) VALID_CFG = { - 'AliYun': { - 'ds': 'AliYun', - 'files': {P_PRODUCT_NAME: 'Alibaba Cloud ECS\n'}, + "AliYun": { + "ds": "AliYun", + "files": {P_PRODUCT_NAME: "Alibaba Cloud ECS\n"}, }, - 'Azure-dmi-detection': { - 'ds': 'Azure', - 'files': { - P_CHASSIS_ASSET_TAG: '7783-7084-3265-9085-8269-3286-77\n', - } + "Azure-dmi-detection": { + "ds": "Azure", + "files": { + P_CHASSIS_ASSET_TAG: "7783-7084-3265-9085-8269-3286-77\n", + }, }, - 'Azure-seed-detection': { - 'ds': 'Azure', - 'files': { - P_CHASSIS_ASSET_TAG: 'No-match\n', - os.path.join(P_SEED_DIR, 'azure', 'ovf-env.xml'): 'present\n', - } + "Azure-seed-detection": { + "ds": "Azure", + "files": { + P_CHASSIS_ASSET_TAG: "No-match\n", + os.path.join(P_SEED_DIR, "azure", "ovf-env.xml"): "present\n", + }, }, - 'Ec2-hvm': { - 'ds': 'Ec2', - 'mocks': [{'name': 'detect_virt', 'RET': 'kvm', 'ret': 0}], - 'files': { - P_PRODUCT_SERIAL: 'ec23aef5-54be-4843-8d24-8c819f88453e\n', - P_PRODUCT_UUID: 'EC23AEF5-54BE-4843-8D24-8C819F88453E\n', - } + "Ec2-hvm": { + "ds": "Ec2", + "mocks": [{"name": "detect_virt", "RET": "kvm", "ret": 0}], + "files": { + P_PRODUCT_SERIAL: "ec23aef5-54be-4843-8d24-8c819f88453e\n", + P_PRODUCT_UUID: "EC23AEF5-54BE-4843-8D24-8C819F88453E\n", + }, }, - 'Ec2-xen': { - 'ds': 'Ec2', - 'mocks': [MOCK_VIRT_IS_XEN], - 'files': { - 'sys/hypervisor/uuid': 'ec2c6e2f-5fac-4fc7-9c82-74127ec14bbb\n' + "Ec2-xen": { + "ds": "Ec2", + "mocks": [MOCK_VIRT_IS_XEN], + "files": { + "sys/hypervisor/uuid": "ec2c6e2f-5fac-4fc7-9c82-74127ec14bbb\n" }, }, - 'Ec2-brightbox': { - 'ds': 'Ec2', - 'files': {P_PRODUCT_SERIAL: 'srv-otuxg.gb1.brightbox.com\n'}, + "Ec2-brightbox": { + "ds": "Ec2", + "files": {P_PRODUCT_SERIAL: "srv-otuxg.gb1.brightbox.com\n"}, }, - 'Ec2-brightbox-negative': { - 'ds': 'Ec2', - 'files': {P_PRODUCT_SERIAL: 'tricky-host.bobrightbox.com\n'}, + "Ec2-brightbox-negative": { + "ds": "Ec2", + "files": {P_PRODUCT_SERIAL: "tricky-host.bobrightbox.com\n"}, }, - 'GCE': { - 'ds': 'GCE', - 'files': {P_PRODUCT_NAME: 'Google Compute Engine\n'}, - 'mocks': [MOCK_VIRT_IS_KVM], + "GCE": { + "ds": "GCE", + "files": {P_PRODUCT_NAME: "Google Compute Engine\n"}, + "mocks": [MOCK_VIRT_IS_KVM], }, - 'GCE-serial': { - 'ds': 'GCE', - 'files': {P_PRODUCT_SERIAL: 'GoogleCloud-8f2e88f\n'}, - 'mocks': [MOCK_VIRT_IS_KVM], + "GCE-serial": { + "ds": "GCE", + "files": {P_PRODUCT_SERIAL: "GoogleCloud-8f2e88f\n"}, + "mocks": [MOCK_VIRT_IS_KVM], }, - 'NoCloud': { - 'ds': 'NoCloud', - 'mocks': [ + "NoCloud": { + "ds": "NoCloud", + "mocks": [ MOCK_VIRT_IS_KVM, - {'name': 'blkid', 'ret': 0, - 'out': blkid_out( - BLKID_UEFI_UBUNTU + - [{'DEVNAME': 'vdb', 'TYPE': 'iso9660', 'LABEL': 'cidata'}])}, + { + "name": "blkid", + "ret": 0, + "out": blkid_out( + BLKID_UEFI_UBUNTU + + [ + { + "DEVNAME": "vdb", + "TYPE": "iso9660", + "LABEL": "cidata", + } + ] + ), + }, ], - 'files': { - 'dev/vdb': 'pretend iso content for cidata\n', - } + "files": { + "dev/vdb": "pretend iso content for cidata\n", + }, }, - 'NoCloud-fbsd': { - 'ds': 
'NoCloud', - 'mocks': [ + "NoCloud-fbsd": { + "ds": "NoCloud", + "mocks": [ MOCK_VIRT_IS_KVM, MOCK_UNAME_IS_FREEBSD, - {'name': 'geom', 'ret': 0, - 'out': geom_out( - [{'DEVNAME': 'vtbd', 'TYPE': 'iso9660', 'LABEL': 'cidata'}])}, + { + "name": "geom", + "ret": 0, + "out": geom_out( + [{"DEVNAME": "vtbd", "TYPE": "iso9660", "LABEL": "cidata"}] + ), + }, ], - 'files': { - '/dev/vtdb': 'pretend iso content for cidata\n', - } + "files": { + "/dev/vtdb": "pretend iso content for cidata\n", + }, }, - 'NoCloudUpper': { - 'ds': 'NoCloud', - 'mocks': [ + "NoCloudUpper": { + "ds": "NoCloud", + "mocks": [ MOCK_VIRT_IS_KVM, - {'name': 'blkid', 'ret': 0, - 'out': blkid_out( - BLKID_UEFI_UBUNTU + - [{'DEVNAME': 'vdb', 'TYPE': 'iso9660', 'LABEL': 'CIDATA'}])}, + { + "name": "blkid", + "ret": 0, + "out": blkid_out( + BLKID_UEFI_UBUNTU + + [ + { + "DEVNAME": "vdb", + "TYPE": "iso9660", + "LABEL": "CIDATA", + } + ] + ), + }, ], - 'files': { - 'dev/vdb': 'pretend iso content for cidata\n', - } + "files": { + "dev/vdb": "pretend iso content for cidata\n", + }, }, - 'NoCloud-fatboot': { - 'ds': 'NoCloud', - 'mocks': [ + "NoCloud-fatboot": { + "ds": "NoCloud", + "mocks": [ MOCK_VIRT_IS_XEN, - {'name': 'blkid', 'ret': 0, - 'out': blkid_out( - BLKID_UEFI_UBUNTU + - [{'DEVNAME': 'xvdb', 'TYPE': 'vfat', 'SEC_TYPE': 'msdos', - 'UUID': '355a-4FC2', 'LABEL_FATBOOT': 'cidata'}])}, + { + "name": "blkid", + "ret": 0, + "out": blkid_out( + BLKID_UEFI_UBUNTU + + [ + { + "DEVNAME": "xvdb", + "TYPE": "vfat", + "SEC_TYPE": "msdos", + "UUID": "355a-4FC2", + "LABEL_FATBOOT": "cidata", + } + ] + ), + }, ], - 'files': { - 'dev/vdb': 'pretend iso content for cidata\n', - } + "files": { + "dev/vdb": "pretend iso content for cidata\n", + }, }, - 'NoCloud-seed': { - 'ds': 'NoCloud', - 'files': { - os.path.join(P_SEED_DIR, 'nocloud', 'user-data'): 'ud\n', - os.path.join(P_SEED_DIR, 'nocloud', 'meta-data'): 'md\n', - } + "NoCloud-seed": { + "ds": "NoCloud", + "files": { + os.path.join(P_SEED_DIR, "nocloud", "user-data"): "ud\n", + os.path.join(P_SEED_DIR, "nocloud", "meta-data"): "md\n", + }, }, - 'NoCloud-seed-ubuntu-core': { - 'ds': 'NoCloud', - 'files': { - os.path.join('writable/system-data', P_SEED_DIR, - 'nocloud-net', 'user-data'): 'ud\n', - os.path.join('writable/system-data', P_SEED_DIR, - 'nocloud-net', 'meta-data'): 'md\n', - } + "NoCloud-seed-ubuntu-core": { + "ds": "NoCloud", + "files": { + os.path.join( + "writable/system-data", P_SEED_DIR, "nocloud-net", "user-data" + ): "ud\n", + os.path.join( + "writable/system-data", P_SEED_DIR, "nocloud-net", "meta-data" + ): "md\n", + }, }, - 'OpenStack': { - 'ds': 'OpenStack', - 'files': {P_PRODUCT_NAME: 'OpenStack Nova\n'}, - 'mocks': [MOCK_VIRT_IS_KVM], - 'policy_dmi': POLICY_FOUND_ONLY, - 'policy_no_dmi': POLICY_FOUND_ONLY, + "OpenStack": { + "ds": "OpenStack", + "files": {P_PRODUCT_NAME: "OpenStack Nova\n"}, + "mocks": [MOCK_VIRT_IS_KVM], + "policy_dmi": POLICY_FOUND_ONLY, + "policy_no_dmi": POLICY_FOUND_ONLY, }, - 'OpenStack-OpenTelekom': { + "OpenStack-OpenTelekom": { # OTC gen1 (Xen) hosts use OpenStack datasource, LP: #1756471 - 'ds': 'OpenStack', - 'files': {P_CHASSIS_ASSET_TAG: 'OpenTelekomCloud\n'}, - 'mocks': [MOCK_VIRT_IS_XEN], + "ds": "OpenStack", + "files": {P_CHASSIS_ASSET_TAG: "OpenTelekomCloud\n"}, + "mocks": [MOCK_VIRT_IS_XEN], }, - 'OpenStack-SAPCCloud': { + "OpenStack-SAPCCloud": { # SAP CCloud hosts use OpenStack on VMware - 'ds': 'OpenStack', - 'files': {P_CHASSIS_ASSET_TAG: 'SAP CCloud VM\n'}, - 'mocks': [MOCK_VIRT_IS_VMWARE], + "ds": "OpenStack", + "files": 
{P_CHASSIS_ASSET_TAG: "SAP CCloud VM\n"}, + "mocks": [MOCK_VIRT_IS_VMWARE], }, - 'OpenStack-AssetTag-Nova': { + "OpenStack-AssetTag-Nova": { # VMware vSphere can't modify product-name, LP: #1669875 - 'ds': 'OpenStack', - 'files': {P_CHASSIS_ASSET_TAG: 'OpenStack Nova\n'}, - 'mocks': [MOCK_VIRT_IS_XEN], + "ds": "OpenStack", + "files": {P_CHASSIS_ASSET_TAG: "OpenStack Nova\n"}, + "mocks": [MOCK_VIRT_IS_XEN], }, - 'OpenStack-AssetTag-Compute': { + "OpenStack-AssetTag-Compute": { # VMware vSphere can't modify product-name, LP: #1669875 - 'ds': 'OpenStack', - 'files': {P_CHASSIS_ASSET_TAG: 'OpenStack Compute\n'}, - 'mocks': [MOCK_VIRT_IS_XEN], + "ds": "OpenStack", + "files": {P_CHASSIS_ASSET_TAG: "OpenStack Compute\n"}, + "mocks": [MOCK_VIRT_IS_XEN], }, - 'OVF-seed': { - 'ds': 'OVF', - 'files': { - os.path.join(P_SEED_DIR, 'ovf', 'ovf-env.xml'): 'present\n', - } + "OVF-seed": { + "ds": "OVF", + "files": { + os.path.join(P_SEED_DIR, "ovf", "ovf-env.xml"): "present\n", + }, }, - 'OVF-vmware-customization': { - 'ds': 'OVF', - 'mocks': [ + "OVF-vmware-customization": { + "ds": "OVF", + "mocks": [ # Include a mocked iso9660 candidate, even though its content is not OVF - {'name': 'blkid', 'ret': 0, - 'out': blkid_out( - [{'DEVNAME': 'sr0', 'TYPE': 'iso9660', 'LABEL': ''}]) - }, + { + "name": "blkid", + "ret": 0, + "out": blkid_out( + [{"DEVNAME": "sr0", "TYPE": "iso9660", "LABEL": ""}] + ), + }, MOCK_VIRT_IS_VMWARE, ], - 'files': { - 'dev/sr0': 'no match', + "files": { + "dev/sr0": "no match", # Set up vmware customization as enabled - 'usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so': 'here', - 'etc/cloud/cloud.cfg': 'disable_vmware_customization: false\n', - } + "usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so": "here", + "etc/cloud/cloud.cfg": "disable_vmware_customization: false\n", + }, }, - 'OVF': { - 'ds': 'OVF', - 'mocks': [ - {'name': 'blkid', 'ret': 0, - 'out': blkid_out( - [{'DEVNAME': 'sr0', 'TYPE': 'iso9660', 'LABEL': ''}, - {'DEVNAME': 'sr1', 'TYPE': 'iso9660', 'LABEL': 'ignoreme'}, - {'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}]), - }, + "OVF": { + "ds": "OVF", + "mocks": [ + { + "name": "blkid", + "ret": 0, + "out": blkid_out( + [ + {"DEVNAME": "sr0", "TYPE": "iso9660", "LABEL": ""}, + { + "DEVNAME": "sr1", + "TYPE": "iso9660", + "LABEL": "ignoreme", + }, + { + "DEVNAME": "vda1", + "TYPE": "vfat", + "PARTUUID": uuid4(), + }, + ] + ), + }, MOCK_VIRT_IS_VMWARE, ], - 'files': { - 'dev/sr0': 'pretend ovf iso has ' + OVF_MATCH_STRING + '\n', - 'sys/class/block/sr0/size': '2048\n', - } + "files": { + "dev/sr0": "pretend ovf iso has " + OVF_MATCH_STRING + "\n", + "sys/class/block/sr0/size": "2048\n", + }, }, - 'OVF-guestinfo': { - 'ds': 'OVF', - 'mocks': [ - {'name': 'ovf_vmware_transport_guestinfo', 'ret': 0, - 'out': '<?xml version="1.0" encoding="UTF-8"?>\n<Environment'}, + "OVF-guestinfo": { + "ds": "OVF", + "mocks": [ + { + "name": "ovf_vmware_transport_guestinfo", + "ret": 0, + "out": '<?xml version="1.0" encoding="UTF-8"?>\n<Environment', + }, MOCK_VIRT_IS_VMWARE, ], }, - 'ConfigDrive': { - 'ds': 'ConfigDrive', - 'mocks': [ - {'name': 'blkid', 'ret': 0, - 'out': blkid_out( - [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, - {'DEVNAME': 'vda2', 'TYPE': 'ext4', - 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()}, - {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'config-2'}]) - }, + "ConfigDrive": { + "ds": "ConfigDrive", + "mocks": [ + { + "name": "blkid", + "ret": 0, + "out": blkid_out( + [ + { + "DEVNAME": "vda1", + "TYPE": "vfat", + "PARTUUID": uuid4(), + 
}, + { + "DEVNAME": "vda2", + "TYPE": "ext4", + "LABEL": "cloudimg-rootfs", + "PARTUUID": uuid4(), + }, + { + "DEVNAME": "vdb", + "TYPE": "vfat", + "LABEL": "config-2", + }, + ] + ), + }, ], }, - 'ConfigDriveUpper': { - 'ds': 'ConfigDrive', - 'mocks': [ - {'name': 'blkid', 'ret': 0, - 'out': blkid_out( - [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, - {'DEVNAME': 'vda2', 'TYPE': 'ext4', - 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()}, - {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'CONFIG-2'}]) - }, + "ConfigDriveUpper": { + "ds": "ConfigDrive", + "mocks": [ + { + "name": "blkid", + "ret": 0, + "out": blkid_out( + [ + { + "DEVNAME": "vda1", + "TYPE": "vfat", + "PARTUUID": uuid4(), + }, + { + "DEVNAME": "vda2", + "TYPE": "ext4", + "LABEL": "cloudimg-rootfs", + "PARTUUID": uuid4(), + }, + { + "DEVNAME": "vdb", + "TYPE": "vfat", + "LABEL": "CONFIG-2", + }, + ] + ), + }, ], }, - 'ConfigDrive-seed': { - 'ds': 'ConfigDrive', - 'files': { - os.path.join(P_SEED_DIR, 'config_drive', 'openstack', - 'latest', 'meta_data.json'): 'md\n'}, + "ConfigDrive-seed": { + "ds": "ConfigDrive", + "files": { + os.path.join( + P_SEED_DIR, + "config_drive", + "openstack", + "latest", + "meta_data.json", + ): "md\n" + }, }, - 'RbxCloud': { - 'ds': 'RbxCloud', - 'mocks': [ - {'name': 'blkid', 'ret': 0, - 'out': blkid_out( - [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, - {'DEVNAME': 'vda2', 'TYPE': 'ext4', - 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()}, - {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'CLOUDMD'}] - )}, + "RbxCloud": { + "ds": "RbxCloud", + "mocks": [ + { + "name": "blkid", + "ret": 0, + "out": blkid_out( + [ + { + "DEVNAME": "vda1", + "TYPE": "vfat", + "PARTUUID": uuid4(), + }, + { + "DEVNAME": "vda2", + "TYPE": "ext4", + "LABEL": "cloudimg-rootfs", + "PARTUUID": uuid4(), + }, + {"DEVNAME": "vdb", "TYPE": "vfat", "LABEL": "CLOUDMD"}, + ] + ), + }, ], }, - 'RbxCloudLower': { - 'ds': 'RbxCloud', - 'mocks': [ - {'name': 'blkid', 'ret': 0, - 'out': blkid_out( - [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, - {'DEVNAME': 'vda2', 'TYPE': 'ext4', - 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()}, - {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'cloudmd'}] - )}, + "RbxCloudLower": { + "ds": "RbxCloud", + "mocks": [ + { + "name": "blkid", + "ret": 0, + "out": blkid_out( + [ + { + "DEVNAME": "vda1", + "TYPE": "vfat", + "PARTUUID": uuid4(), + }, + { + "DEVNAME": "vda2", + "TYPE": "ext4", + "LABEL": "cloudimg-rootfs", + "PARTUUID": uuid4(), + }, + {"DEVNAME": "vdb", "TYPE": "vfat", "LABEL": "cloudmd"}, + ] + ), + }, ], }, - 'Hetzner': { - 'ds': 'Hetzner', - 'files': {P_SYS_VENDOR: 'Hetzner\n'}, + "Hetzner": { + "ds": "Hetzner", + "files": {P_SYS_VENDOR: "Hetzner\n"}, }, - 'Hetzner-kenv': { - 'ds': 'Hetzner', - 'mocks': [ + "Hetzner-kenv": { + "ds": "Hetzner", + "mocks": [ MOCK_UNAME_IS_FREEBSD, - {'name': 'get_kenv_field', 'ret': 0, 'RET': 'Hetzner'} + {"name": "get_kenv_field", "ret": 0, "RET": "Hetzner"}, ], }, - 'Hetzner-dmidecode': { - 'ds': 'Hetzner', - 'mocks': [ - {'name': 'dmi_decode', 'ret': 0, 'RET': 'Hetzner'} - ], + "Hetzner-dmidecode": { + "ds": "Hetzner", + "mocks": [{"name": "dmi_decode", "ret": 0, "RET": "Hetzner"}], }, - 'IBMCloud-metadata': { - 'ds': 'IBMCloud', - 'mocks': [ + "IBMCloud-metadata": { + "ds": "IBMCloud", + "mocks": [ MOCK_VIRT_IS_XEN, - {'name': 'is_ibm_provisioning', 'ret': shell_false}, - {'name': 'blkid', 'ret': 0, - 'out': blkid_out( - [{'DEVNAME': 'xvda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, - {'DEVNAME': 'xvda2', 'TYPE': 'ext4', - 
'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()}, - {'DEVNAME': 'xvdb', 'TYPE': 'vfat', 'LABEL': 'METADATA'}]), - }, + {"name": "is_ibm_provisioning", "ret": shell_false}, + { + "name": "blkid", + "ret": 0, + "out": blkid_out( + [ + { + "DEVNAME": "xvda1", + "TYPE": "vfat", + "PARTUUID": uuid4(), + }, + { + "DEVNAME": "xvda2", + "TYPE": "ext4", + "LABEL": "cloudimg-rootfs", + "PARTUUID": uuid4(), + }, + { + "DEVNAME": "xvdb", + "TYPE": "vfat", + "LABEL": "METADATA", + }, + ] + ), + }, ], }, - 'IBMCloud-config-2': { - 'ds': 'IBMCloud', - 'mocks': [ + "IBMCloud-config-2": { + "ds": "IBMCloud", + "mocks": [ MOCK_VIRT_IS_XEN, - {'name': 'is_ibm_provisioning', 'ret': shell_false}, - {'name': 'blkid', 'ret': 0, - 'out': blkid_out( - [{'DEVNAME': 'xvda1', 'TYPE': 'ext3', 'PARTUUID': uuid4(), - 'UUID': uuid4(), 'LABEL': 'cloudimg-bootfs'}, - {'DEVNAME': 'xvdb', 'TYPE': 'vfat', 'LABEL': 'config-2', - 'UUID': ds_ibm.IBM_CONFIG_UUID}, - {'DEVNAME': 'xvda2', 'TYPE': 'ext4', - 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4(), - 'UUID': uuid4()}, - ]), - }, + {"name": "is_ibm_provisioning", "ret": shell_false}, + { + "name": "blkid", + "ret": 0, + "out": blkid_out( + [ + { + "DEVNAME": "xvda1", + "TYPE": "ext3", + "PARTUUID": uuid4(), + "UUID": uuid4(), + "LABEL": "cloudimg-bootfs", + }, + { + "DEVNAME": "xvdb", + "TYPE": "vfat", + "LABEL": "config-2", + "UUID": ds_ibm.IBM_CONFIG_UUID, + }, + { + "DEVNAME": "xvda2", + "TYPE": "ext4", + "LABEL": "cloudimg-rootfs", + "PARTUUID": uuid4(), + "UUID": uuid4(), + }, + ] + ), + }, ], }, - 'IBMCloud-nodisks': { - 'ds': 'IBMCloud', - 'mocks': [ + "IBMCloud-nodisks": { + "ds": "IBMCloud", + "mocks": [ MOCK_VIRT_IS_XEN, - {'name': 'is_ibm_provisioning', 'ret': shell_false}, - {'name': 'blkid', 'ret': 0, - 'out': blkid_out( - [{'DEVNAME': 'xvda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, - {'DEVNAME': 'xvda2', 'TYPE': 'ext4', - 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()}]), - }, + {"name": "is_ibm_provisioning", "ret": shell_false}, + { + "name": "blkid", + "ret": 0, + "out": blkid_out( + [ + { + "DEVNAME": "xvda1", + "TYPE": "vfat", + "PARTUUID": uuid4(), + }, + { + "DEVNAME": "xvda2", + "TYPE": "ext4", + "LABEL": "cloudimg-rootfs", + "PARTUUID": uuid4(), + }, + ] + ), + }, ], }, - 'Oracle': { - 'ds': 'Oracle', - 'files': { - P_CHASSIS_ASSET_TAG: ds_oracle.CHASSIS_ASSET_TAG + '\n', - } + "Oracle": { + "ds": "Oracle", + "files": { + P_CHASSIS_ASSET_TAG: ds_oracle.CHASSIS_ASSET_TAG + "\n", + }, }, - 'SmartOS-bhyve': { - 'ds': 'SmartOS', - 'mocks': [ + "SmartOS-bhyve": { + "ds": "SmartOS", + "mocks": [ MOCK_VIRT_IS_VM_OTHER, - {'name': 'blkid', 'ret': 0, - 'out': blkid_out( - [{'DEVNAME': 'vda1', 'TYPE': 'ext4', - 'PARTUUID': '49ec635a-01'}, - {'DEVNAME': 'vda2', 'TYPE': 'swap', - 'LABEL': 'cloudimg-swap', 'PARTUUID': '49ec635a-02'}]), - }, + { + "name": "blkid", + "ret": 0, + "out": blkid_out( + [ + { + "DEVNAME": "vda1", + "TYPE": "ext4", + "PARTUUID": "49ec635a-01", + }, + { + "DEVNAME": "vda2", + "TYPE": "swap", + "LABEL": "cloudimg-swap", + "PARTUUID": "49ec635a-02", + }, + ] + ), + }, ], - 'files': {P_PRODUCT_NAME: 'SmartDC HVM\n'}, + "files": {P_PRODUCT_NAME: "SmartDC HVM\n"}, }, - 'SmartOS-lxbrand': { - 'ds': 'SmartOS', - 'mocks': [ + "SmartOS-lxbrand": { + "ds": "SmartOS", + "mocks": [ MOCK_VIRT_IS_CONTAINER_OTHER, - {'name': 'uname', 'ret': 0, - 'out': ("Linux d43da87a-daca-60e8-e6d4-d2ed372662a3 4.3.0 " - "BrandZ virtual linux x86_64 GNU/Linux")}, - {'name': 'blkid', 'ret': 2, 'out': ''}, + { + "name": "uname", + "ret": 0, + "out": ( + "Linux 
d43da87a-daca-60e8-e6d4-d2ed372662a3 4.3.0 " + "BrandZ virtual linux x86_64 GNU/Linux" + ), + }, + {"name": "blkid", "ret": 2, "out": ""}, ], - 'files': {ds_smartos.METADATA_SOCKFILE: 'would be a socket\n'}, + "files": {ds_smartos.METADATA_SOCKFILE: "would be a socket\n"}, }, - 'Ec2-ZStack': { - 'ds': 'Ec2', - 'files': {P_CHASSIS_ASSET_TAG: '123456.zstack.io\n'}, + "Ec2-ZStack": { + "ds": "Ec2", + "files": {P_CHASSIS_ASSET_TAG: "123456.zstack.io\n"}, }, - 'Ec2-E24Cloud': { - 'ds': 'Ec2', - 'files': {P_SYS_VENDOR: 'e24cloud\n'}, + "Ec2-E24Cloud": { + "ds": "Ec2", + "files": {P_SYS_VENDOR: "e24cloud\n"}, }, - 'Ec2-E24Cloud-negative': { - 'ds': 'Ec2', - 'files': {P_SYS_VENDOR: 'e24cloudyday\n'}, + "Ec2-E24Cloud-negative": { + "ds": "Ec2", + "files": {P_SYS_VENDOR: "e24cloudyday\n"}, }, - 'VMware-NoValidTransports': { - 'ds': 'VMware', - 'mocks': [ + "VMware-NoValidTransports": { + "ds": "VMware", + "mocks": [ MOCK_VIRT_IS_VMWARE, ], }, - 'VMware-EnvVar-NoData': { - 'ds': 'VMware', - 'mocks': [ + "VMware-EnvVar-NoData": { + "ds": "VMware", + "mocks": [ { - 'name': 'vmware_has_envvar_vmx_guestinfo', - 'ret': 0, + "name": "vmware_has_envvar_vmx_guestinfo", + "ret": 0, }, { - 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', - 'ret': 1, + "name": "vmware_has_envvar_vmx_guestinfo_metadata", + "ret": 1, }, { - 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', - 'ret': 1, + "name": "vmware_has_envvar_vmx_guestinfo_userdata", + "ret": 1, }, { - 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', - 'ret': 1, + "name": "vmware_has_envvar_vmx_guestinfo_vendordata", + "ret": 1, }, MOCK_VIRT_IS_VMWARE, ], }, - 'VMware-EnvVar-NoVirtID': { - 'ds': 'VMware', - 'mocks': [ + "VMware-EnvVar-NoVirtID": { + "ds": "VMware", + "mocks": [ { - 'name': 'vmware_has_envvar_vmx_guestinfo', - 'ret': 0, + "name": "vmware_has_envvar_vmx_guestinfo", + "ret": 0, }, { - 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', - 'ret': 0, + "name": "vmware_has_envvar_vmx_guestinfo_metadata", + "ret": 0, }, { - 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', - 'ret': 1, + "name": "vmware_has_envvar_vmx_guestinfo_userdata", + "ret": 1, }, { - 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', - 'ret': 1, + "name": "vmware_has_envvar_vmx_guestinfo_vendordata", + "ret": 1, }, ], }, - 'VMware-EnvVar-Metadata': { - 'ds': 'VMware', - 'mocks': [ + "VMware-EnvVar-Metadata": { + "ds": "VMware", + "mocks": [ { - 'name': 'vmware_has_envvar_vmx_guestinfo', - 'ret': 0, + "name": "vmware_has_envvar_vmx_guestinfo", + "ret": 0, }, { - 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', - 'ret': 0, + "name": "vmware_has_envvar_vmx_guestinfo_metadata", + "ret": 0, }, { - 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', - 'ret': 1, + "name": "vmware_has_envvar_vmx_guestinfo_userdata", + "ret": 1, }, { - 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', - 'ret': 1, + "name": "vmware_has_envvar_vmx_guestinfo_vendordata", + "ret": 1, }, MOCK_VIRT_IS_VMWARE, ], }, - 'VMware-EnvVar-Userdata': { - 'ds': 'VMware', - 'mocks': [ + "VMware-EnvVar-Userdata": { + "ds": "VMware", + "mocks": [ { - 'name': 'vmware_has_envvar_vmx_guestinfo', - 'ret': 0, + "name": "vmware_has_envvar_vmx_guestinfo", + "ret": 0, }, { - 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', - 'ret': 1, + "name": "vmware_has_envvar_vmx_guestinfo_metadata", + "ret": 1, }, { - 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', - 'ret': 0, + "name": "vmware_has_envvar_vmx_guestinfo_userdata", + "ret": 0, }, { - 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', - 'ret': 
1, + "name": "vmware_has_envvar_vmx_guestinfo_vendordata", + "ret": 1, }, MOCK_VIRT_IS_VMWARE, ], }, - 'VMware-EnvVar-Vendordata': { - 'ds': 'VMware', - 'mocks': [ + "VMware-EnvVar-Vendordata": { + "ds": "VMware", + "mocks": [ { - 'name': 'vmware_has_envvar_vmx_guestinfo', - 'ret': 0, + "name": "vmware_has_envvar_vmx_guestinfo", + "ret": 0, }, { - 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', - 'ret': 1, + "name": "vmware_has_envvar_vmx_guestinfo_metadata", + "ret": 1, }, { - 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', - 'ret': 1, + "name": "vmware_has_envvar_vmx_guestinfo_userdata", + "ret": 1, }, { - 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', - 'ret': 0, + "name": "vmware_has_envvar_vmx_guestinfo_vendordata", + "ret": 0, }, MOCK_VIRT_IS_VMWARE, ], }, - 'VMware-GuestInfo-NoData': { - 'ds': 'VMware', - 'mocks': [ + "VMware-GuestInfo-NoData": { + "ds": "VMware", + "mocks": [ { - 'name': 'vmware_has_rpctool', - 'ret': 0, - 'out': '/usr/bin/vmware-rpctool', + "name": "vmware_has_rpctool", + "ret": 0, + "out": "/usr/bin/vmware-rpctool", }, { - 'name': 'vmware_rpctool_guestinfo_metadata', - 'ret': 1, + "name": "vmware_rpctool_guestinfo_metadata", + "ret": 1, }, { - 'name': 'vmware_rpctool_guestinfo_userdata', - 'ret': 1, + "name": "vmware_rpctool_guestinfo_userdata", + "ret": 1, }, { - 'name': 'vmware_rpctool_guestinfo_vendordata', - 'ret': 1, + "name": "vmware_rpctool_guestinfo_vendordata", + "ret": 1, }, MOCK_VIRT_IS_VMWARE, ], }, - 'VMware-GuestInfo-NoVirtID': { - 'ds': 'VMware', - 'mocks': [ + "VMware-GuestInfo-NoVirtID": { + "ds": "VMware", + "mocks": [ { - 'name': 'vmware_has_rpctool', - 'ret': 0, - 'out': '/usr/bin/vmware-rpctool', + "name": "vmware_has_rpctool", + "ret": 0, + "out": "/usr/bin/vmware-rpctool", }, { - 'name': 'vmware_rpctool_guestinfo_metadata', - 'ret': 0, - 'out': '---', + "name": "vmware_rpctool_guestinfo_metadata", + "ret": 0, + "out": "---", }, { - 'name': 'vmware_rpctool_guestinfo_userdata', - 'ret': 1, + "name": "vmware_rpctool_guestinfo_userdata", + "ret": 1, }, { - 'name': 'vmware_rpctool_guestinfo_vendordata', - 'ret': 1, + "name": "vmware_rpctool_guestinfo_vendordata", + "ret": 1, }, ], }, - 'VMware-GuestInfo-Metadata': { - 'ds': 'VMware', - 'mocks': [ + "VMware-GuestInfo-Metadata": { + "ds": "VMware", + "mocks": [ { - 'name': 'vmware_has_rpctool', - 'ret': 0, - 'out': '/usr/bin/vmware-rpctool', + "name": "vmware_has_rpctool", + "ret": 0, + "out": "/usr/bin/vmware-rpctool", }, { - 'name': 'vmware_rpctool_guestinfo_metadata', - 'ret': 0, - 'out': '---', + "name": "vmware_rpctool_guestinfo_metadata", + "ret": 0, + "out": "---", }, { - 'name': 'vmware_rpctool_guestinfo_userdata', - 'ret': 1, + "name": "vmware_rpctool_guestinfo_userdata", + "ret": 1, }, { - 'name': 'vmware_rpctool_guestinfo_vendordata', - 'ret': 1, + "name": "vmware_rpctool_guestinfo_vendordata", + "ret": 1, }, MOCK_VIRT_IS_VMWARE, ], }, - 'VMware-GuestInfo-Userdata': { - 'ds': 'VMware', - 'mocks': [ + "VMware-GuestInfo-Userdata": { + "ds": "VMware", + "mocks": [ { - 'name': 'vmware_has_rpctool', - 'ret': 0, - 'out': '/usr/bin/vmware-rpctool', + "name": "vmware_has_rpctool", + "ret": 0, + "out": "/usr/bin/vmware-rpctool", }, { - 'name': 'vmware_rpctool_guestinfo_metadata', - 'ret': 1, + "name": "vmware_rpctool_guestinfo_metadata", + "ret": 1, }, { - 'name': 'vmware_rpctool_guestinfo_userdata', - 'ret': 0, - 'out': '---', + "name": "vmware_rpctool_guestinfo_userdata", + "ret": 0, + "out": "---", }, { - 'name': 'vmware_rpctool_guestinfo_vendordata', - 'ret': 1, + "name": 
"vmware_rpctool_guestinfo_vendordata", + "ret": 1, }, MOCK_VIRT_IS_VMWARE, ], }, - 'VMware-GuestInfo-Vendordata': { - 'ds': 'VMware', - 'mocks': [ + "VMware-GuestInfo-Vendordata": { + "ds": "VMware", + "mocks": [ { - 'name': 'vmware_has_rpctool', - 'ret': 0, - 'out': '/usr/bin/vmware-rpctool', + "name": "vmware_has_rpctool", + "ret": 0, + "out": "/usr/bin/vmware-rpctool", }, { - 'name': 'vmware_rpctool_guestinfo_metadata', - 'ret': 1, + "name": "vmware_rpctool_guestinfo_metadata", + "ret": 1, }, { - 'name': 'vmware_rpctool_guestinfo_userdata', - 'ret': 1, + "name": "vmware_rpctool_guestinfo_userdata", + "ret": 1, }, { - 'name': 'vmware_rpctool_guestinfo_vendordata', - 'ret': 0, - 'out': '---', + "name": "vmware_rpctool_guestinfo_vendordata", + "ret": 0, + "out": "---", }, MOCK_VIRT_IS_VMWARE, ], diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py index e8e0b5b1..f447d295 100644 --- a/tests/unittests/test_ec2_util.py +++ b/tests/unittests/test_ec2_util.py @@ -2,178 +2,276 @@ import httpretty as hp -from tests.unittests import helpers - from cloudinit import ec2_utils as eu from cloudinit import url_helper as uh +from tests.unittests import helpers class TestEc2Util(helpers.HttprettyTestCase): - VERSION = 'latest' + VERSION = "latest" def test_userdata_fetch(self): - hp.register_uri(hp.GET, - 'http://169.254.169.254/%s/user-data' % (self.VERSION), - body='stuff', - status=200) + hp.register_uri( + hp.GET, + "http://169.254.169.254/%s/user-data" % (self.VERSION), + body="stuff", + status=200, + ) userdata = eu.get_instance_userdata(self.VERSION) - self.assertEqual('stuff', userdata.decode('utf-8')) + self.assertEqual("stuff", userdata.decode("utf-8")) def test_userdata_fetch_fail_not_found(self): - hp.register_uri(hp.GET, - 'http://169.254.169.254/%s/user-data' % (self.VERSION), - status=404) + hp.register_uri( + hp.GET, + "http://169.254.169.254/%s/user-data" % (self.VERSION), + status=404, + ) userdata = eu.get_instance_userdata(self.VERSION, retries=0) - self.assertEqual('', userdata) + self.assertEqual("", userdata) def test_userdata_fetch_fail_server_dead(self): - hp.register_uri(hp.GET, - 'http://169.254.169.254/%s/user-data' % (self.VERSION), - status=500) + hp.register_uri( + hp.GET, + "http://169.254.169.254/%s/user-data" % (self.VERSION), + status=500, + ) userdata = eu.get_instance_userdata(self.VERSION, retries=0) - self.assertEqual('', userdata) + self.assertEqual("", userdata) def test_userdata_fetch_fail_server_not_found(self): - hp.register_uri(hp.GET, - 'http://169.254.169.254/%s/user-data' % (self.VERSION), - status=404) + hp.register_uri( + hp.GET, + "http://169.254.169.254/%s/user-data" % (self.VERSION), + status=404, + ) userdata = eu.get_instance_userdata(self.VERSION) - self.assertEqual('', userdata) + self.assertEqual("", userdata) def test_metadata_fetch_no_keys(self): - base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION) - hp.register_uri(hp.GET, base_url, status=200, - body="\n".join(['hostname', - 'instance-id', - 'ami-launch-index'])) - hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'), - status=200, body='ec2.fake.host.name.com') - hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'), - status=200, body='123') - hp.register_uri(hp.GET, uh.combine_url(base_url, 'ami-launch-index'), - status=200, body='1') + base_url = "http://169.254.169.254/%s/meta-data/" % (self.VERSION) + hp.register_uri( + hp.GET, + base_url, + status=200, + body="\n".join(["hostname", "instance-id", "ami-launch-index"]), + ) + 
hp.register_uri( + hp.GET, + uh.combine_url(base_url, "hostname"), + status=200, + body="ec2.fake.host.name.com", + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "instance-id"), + status=200, + body="123", + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "ami-launch-index"), + status=200, + body="1", + ) md = eu.get_instance_metadata(self.VERSION, retries=0) - self.assertEqual(md['hostname'], 'ec2.fake.host.name.com') - self.assertEqual(md['instance-id'], '123') - self.assertEqual(md['ami-launch-index'], '1') + self.assertEqual(md["hostname"], "ec2.fake.host.name.com") + self.assertEqual(md["instance-id"], "123") + self.assertEqual(md["ami-launch-index"], "1") def test_metadata_fetch_key(self): - base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION) - hp.register_uri(hp.GET, base_url, status=200, - body="\n".join(['hostname', - 'instance-id', - 'public-keys/'])) - hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'), - status=200, body='ec2.fake.host.name.com') - hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'), - status=200, body='123') - hp.register_uri(hp.GET, uh.combine_url(base_url, 'public-keys/'), - status=200, body='0=my-public-key') - hp.register_uri(hp.GET, - uh.combine_url(base_url, 'public-keys/0/openssh-key'), - status=200, body='ssh-rsa AAAA.....wZEf my-public-key') + base_url = "http://169.254.169.254/%s/meta-data/" % (self.VERSION) + hp.register_uri( + hp.GET, + base_url, + status=200, + body="\n".join(["hostname", "instance-id", "public-keys/"]), + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "hostname"), + status=200, + body="ec2.fake.host.name.com", + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "instance-id"), + status=200, + body="123", + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "public-keys/"), + status=200, + body="0=my-public-key", + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "public-keys/0/openssh-key"), + status=200, + body="ssh-rsa AAAA.....wZEf my-public-key", + ) md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1) - self.assertEqual(md['hostname'], 'ec2.fake.host.name.com') - self.assertEqual(md['instance-id'], '123') - self.assertEqual(1, len(md['public-keys'])) + self.assertEqual(md["hostname"], "ec2.fake.host.name.com") + self.assertEqual(md["instance-id"], "123") + self.assertEqual(1, len(md["public-keys"])) def test_metadata_fetch_with_2_keys(self): - base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION) - hp.register_uri(hp.GET, base_url, status=200, - body="\n".join(['hostname', - 'instance-id', - 'public-keys/'])) - hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'), - status=200, body='ec2.fake.host.name.com') - hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'), - status=200, body='123') - hp.register_uri(hp.GET, uh.combine_url(base_url, 'public-keys/'), - status=200, - body="\n".join(['0=my-public-key', '1=my-other-key'])) - hp.register_uri(hp.GET, - uh.combine_url(base_url, 'public-keys/0/openssh-key'), - status=200, body='ssh-rsa AAAA.....wZEf my-public-key') - hp.register_uri(hp.GET, - uh.combine_url(base_url, 'public-keys/1/openssh-key'), - status=200, body='ssh-rsa AAAA.....wZEf my-other-key') + base_url = "http://169.254.169.254/%s/meta-data/" % (self.VERSION) + hp.register_uri( + hp.GET, + base_url, + status=200, + body="\n".join(["hostname", "instance-id", "public-keys/"]), + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "hostname"), + status=200, + 
body="ec2.fake.host.name.com", + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "instance-id"), + status=200, + body="123", + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "public-keys/"), + status=200, + body="\n".join(["0=my-public-key", "1=my-other-key"]), + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "public-keys/0/openssh-key"), + status=200, + body="ssh-rsa AAAA.....wZEf my-public-key", + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "public-keys/1/openssh-key"), + status=200, + body="ssh-rsa AAAA.....wZEf my-other-key", + ) md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1) - self.assertEqual(md['hostname'], 'ec2.fake.host.name.com') - self.assertEqual(md['instance-id'], '123') - self.assertEqual(2, len(md['public-keys'])) + self.assertEqual(md["hostname"], "ec2.fake.host.name.com") + self.assertEqual(md["instance-id"], "123") + self.assertEqual(2, len(md["public-keys"])) def test_metadata_fetch_bdm(self): - base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION) - hp.register_uri(hp.GET, base_url, status=200, - body="\n".join(['hostname', - 'instance-id', - 'block-device-mapping/'])) - hp.register_uri(hp.GET, uh.combine_url(base_url, 'hostname'), - status=200, body='ec2.fake.host.name.com') - hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'), - status=200, body='123') - hp.register_uri(hp.GET, - uh.combine_url(base_url, 'block-device-mapping/'), - status=200, - body="\n".join(['ami', 'ephemeral0'])) - hp.register_uri(hp.GET, - uh.combine_url(base_url, 'block-device-mapping/ami'), - status=200, - body="sdb") - hp.register_uri(hp.GET, - uh.combine_url(base_url, - 'block-device-mapping/ephemeral0'), - status=200, - body="sdc") + base_url = "http://169.254.169.254/%s/meta-data/" % (self.VERSION) + hp.register_uri( + hp.GET, + base_url, + status=200, + body="\n".join( + ["hostname", "instance-id", "block-device-mapping/"] + ), + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "hostname"), + status=200, + body="ec2.fake.host.name.com", + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "instance-id"), + status=200, + body="123", + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "block-device-mapping/"), + status=200, + body="\n".join(["ami", "ephemeral0"]), + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "block-device-mapping/ami"), + status=200, + body="sdb", + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "block-device-mapping/ephemeral0"), + status=200, + body="sdc", + ) md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1) - self.assertEqual(md['hostname'], 'ec2.fake.host.name.com') - self.assertEqual(md['instance-id'], '123') - bdm = md['block-device-mapping'] + self.assertEqual(md["hostname"], "ec2.fake.host.name.com") + self.assertEqual(md["instance-id"], "123") + bdm = md["block-device-mapping"] self.assertEqual(2, len(bdm)) - self.assertEqual(bdm['ami'], 'sdb') - self.assertEqual(bdm['ephemeral0'], 'sdc') + self.assertEqual(bdm["ami"], "sdb") + self.assertEqual(bdm["ephemeral0"], "sdc") def test_metadata_no_security_credentials(self): - base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION) - hp.register_uri(hp.GET, base_url, status=200, - body="\n".join(['instance-id', - 'iam/'])) - hp.register_uri(hp.GET, uh.combine_url(base_url, 'instance-id'), - status=200, body='i-0123451689abcdef0') - hp.register_uri(hp.GET, - uh.combine_url(base_url, 'iam/'), - status=200, - body="\n".join(['info/', 
'security-credentials/'])) - hp.register_uri(hp.GET, - uh.combine_url(base_url, 'iam/info/'), - status=200, - body='LastUpdated') - hp.register_uri(hp.GET, - uh.combine_url(base_url, 'iam/info/LastUpdated'), - status=200, body='2016-10-27T17:29:39Z') - hp.register_uri(hp.GET, - uh.combine_url(base_url, 'iam/security-credentials/'), - status=200, - body='ReadOnly/') - hp.register_uri(hp.GET, - uh.combine_url(base_url, - 'iam/security-credentials/ReadOnly/'), - status=200, - body="\n".join(['LastUpdated', 'Expiration'])) - hp.register_uri(hp.GET, - uh.combine_url( - base_url, - 'iam/security-credentials/ReadOnly/LastUpdated'), - status=200, body='2016-10-27T17:28:17Z') - hp.register_uri(hp.GET, - uh.combine_url( - base_url, - 'iam/security-credentials/ReadOnly/Expiration'), - status=200, body='2016-10-28T00:00:34Z') + base_url = "http://169.254.169.254/%s/meta-data/" % (self.VERSION) + hp.register_uri( + hp.GET, + base_url, + status=200, + body="\n".join(["instance-id", "iam/"]), + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "instance-id"), + status=200, + body="i-0123451689abcdef0", + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "iam/"), + status=200, + body="\n".join(["info/", "security-credentials/"]), + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "iam/info/"), + status=200, + body="LastUpdated", + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "iam/info/LastUpdated"), + status=200, + body="2016-10-27T17:29:39Z", + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "iam/security-credentials/"), + status=200, + body="ReadOnly/", + ) + hp.register_uri( + hp.GET, + uh.combine_url(base_url, "iam/security-credentials/ReadOnly/"), + status=200, + body="\n".join(["LastUpdated", "Expiration"]), + ) + hp.register_uri( + hp.GET, + uh.combine_url( + base_url, "iam/security-credentials/ReadOnly/LastUpdated" + ), + status=200, + body="2016-10-27T17:28:17Z", + ) + hp.register_uri( + hp.GET, + uh.combine_url( + base_url, "iam/security-credentials/ReadOnly/Expiration" + ), + status=200, + body="2016-10-28T00:00:34Z", + ) md = eu.get_instance_metadata(self.VERSION, retries=0, timeout=0.1) - self.assertEqual(md['instance-id'], 'i-0123451689abcdef0') - iam = md['iam'] + self.assertEqual(md["instance-id"], "i-0123451689abcdef0") + iam = md["iam"] self.assertEqual(1, len(iam)) - self.assertEqual(iam['info']['LastUpdated'], '2016-10-27T17:29:39Z') - self.assertNotIn('security-credentials', iam) + self.assertEqual(iam["info"]["LastUpdated"], "2016-10-27T17:29:39Z") + self.assertNotIn("security-credentials", iam) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_event.py b/tests/unittests/test_event.py index 3da4c70c..2ea91bb2 100644 --- a/tests/unittests/test_event.py +++ b/tests/unittests/test_event.py @@ -1,26 +1,26 @@ # This file is part of cloud-init. See LICENSE file for license information. """Tests related to cloudinit.event module.""" -from cloudinit.event import EventType, EventScope, userdata_to_events +from cloudinit.event import EventScope, EventType, userdata_to_events class TestEvent: def test_userdata_to_events(self): - userdata = {'network': {'when': ['boot']}} + userdata = {"network": {"when": ["boot"]}} expected = {EventScope.NETWORK: {EventType.BOOT}} assert expected == userdata_to_events(userdata) def test_invalid_scope(self, caplog): - userdata = {'networkasdfasdf': {'when': ['boot']}} + userdata = {"networkasdfasdf": {"when": ["boot"]}} userdata_to_events(userdata) assert ( "'networkasdfasdf' is not a valid EventScope! 
Update data " - "will be ignored for 'networkasdfasdf' scope" - ) in caplog.text + "will be ignored for 'networkasdfasdf' scope" in caplog.text + ) def test_invalid_event(self, caplog): - userdata = {'network': {'when': ['bootasdfasdf']}} + userdata = {"network": {"when": ["bootasdfasdf"]}} userdata_to_events(userdata) assert ( "'bootasdfasdf' is not a valid EventType! Update data " - "will be ignored for 'network' scope" - ) in caplog.text + "will be ignored for 'network' scope" in caplog.text + ) diff --git a/tests/unittests/test_features.py b/tests/unittests/test_features.py index d7a7226d..141de55b 100644 --- a/tests/unittests/test_features.py +++ b/tests/unittests/test_features.py @@ -4,10 +4,11 @@ This file is for testing the feature flag functionality itself, NOT for testing any individual feature flag """ -import pytest import sys from pathlib import Path +import pytest + import cloudinit @@ -24,37 +25,44 @@ def create_override(request): features and feature_overrides modules to how they were before the test started """ - override_path = Path(cloudinit.__file__).parent / 'feature_overrides.py' + override_path = Path(cloudinit.__file__).parent / "feature_overrides.py" if override_path.exists(): - raise Exception("feature_overrides.py unexpectedly exists! " - "Remove it to run this test.") - with override_path.open('w') as f: + raise Exception( + "feature_overrides.py unexpectedly exists! " + "Remove it to run this test." + ) + with override_path.open("w") as f: for key, value in request.param.items(): - f.write('{} = {}\n'.format(key, value)) + f.write("{} = {}\n".format(key, value)) - sys.modules.pop('cloudinit.features', None) + sys.modules.pop("cloudinit.features", None) yield override_path.unlink() - sys.modules.pop('cloudinit.feature_overrides', None) + sys.modules.pop("cloudinit.feature_overrides", None) class TestFeatures: def test_feature_without_override(self): from cloudinit.features import ERROR_ON_USER_DATA_FAILURE + assert ERROR_ON_USER_DATA_FAILURE is True - @pytest.mark.parametrize('create_override', - [{'ERROR_ON_USER_DATA_FAILURE': False}], - indirect=True) + @pytest.mark.parametrize( + "create_override", + [{"ERROR_ON_USER_DATA_FAILURE": False}], + indirect=True, + ) def test_feature_with_override(self, create_override): from cloudinit.features import ERROR_ON_USER_DATA_FAILURE + assert ERROR_ON_USER_DATA_FAILURE is False - @pytest.mark.parametrize('create_override', - [{'SPAM': True}], - indirect=True) + @pytest.mark.parametrize( + "create_override", [{"SPAM": True}], indirect=True + ) def test_feature_only_in_override(self, create_override): from cloudinit.features import SPAM + assert SPAM is True diff --git a/tests/unittests/test_gpg.py b/tests/unittests/test_gpg.py index ceada49a..c3772e3f 100644 --- a/tests/unittests/test_gpg.py +++ b/tests/unittests/test_gpg.py @@ -1,12 +1,11 @@ -import pytest from unittest import mock -from cloudinit import gpg -from cloudinit import subp +import pytest +from cloudinit import gpg, subp from tests.unittests.helpers import CiTestCase -TEST_KEY_HUMAN = ''' +TEST_KEY_HUMAN = """ /etc/apt/cloud-init.gpg.d/my_key.gpg -------------------------------------------- pub rsa4096 2021-10-22 [SC] @@ -14,9 +13,9 @@ pub rsa4096 2021-10-22 [SC] uid [ unknown] Brett Holman <brett.holman@canonical.com> sub rsa4096 2021-10-22 [A] sub rsa4096 2021-10-22 [E] -''' +""" -TEST_KEY_MACHINE = ''' +TEST_KEY_MACHINE = """ tru::1:1635129362:0:3:1:5 pub:-:4096:1:F83F77129A5EBD85:1634912922:::-:::scESCA::::::23::0: 
fpr:::::::::3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85: @@ -26,13 +25,13 @@ sub:-:4096:1:544B39C9A9141F04:1634912922::::::a::::::23: fpr:::::::::8BD901490D6EC986D03D6F0D544B39C9A9141F04: sub:-:4096:1:F45D9443F0A87092:1634912922::::::e::::::23: fpr:::::::::8CCCB332317324F030A45B19F45D9443F0A87092: -''' +""" -TEST_KEY_FINGERPRINT_HUMAN = \ - '3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85' +TEST_KEY_FINGERPRINT_HUMAN = ( + "3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85" +) -TEST_KEY_FINGERPRINT_MACHINE = \ - '3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85' +TEST_KEY_FINGERPRINT_MACHINE = "3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85" class TestGPGCommands: @@ -41,45 +40,45 @@ class TestGPGCommands: internally. """ with mock.patch.object( - subp, - 'subp', - side_effect=subp.ProcessExecutionError): + subp, "subp", side_effect=subp.ProcessExecutionError + ): with pytest.raises(subp.ProcessExecutionError): - gpg.dearmor('garbage key value') + gpg.dearmor("garbage key value") def test_gpg_list_args(self): - """Verify correct command gets called to list keys - """ + """Verify correct command gets called to list keys""" no_colons = [ - 'gpg', - '--with-fingerprint', - '--no-default-keyring', - '--list-keys', - '--keyring', - 'key'] + "gpg", + "--with-fingerprint", + "--no-default-keyring", + "--list-keys", + "--keyring", + "key", + ] colons = [ - 'gpg', - '--with-fingerprint', - '--no-default-keyring', - '--list-keys', - '--keyring', - '--with-colons', - 'key'] - with mock.patch.object(subp, 'subp', return_value=('', '')) as m_subp: - gpg.list('key') + "gpg", + "--with-fingerprint", + "--no-default-keyring", + "--list-keys", + "--keyring", + "--with-colons", + "key", + ] + with mock.patch.object(subp, "subp", return_value=("", "")) as m_subp: + gpg.list("key") assert mock.call(colons, capture=True) == m_subp.call_args - gpg.list('key', human_output=True) + gpg.list("key", human_output=True) test_calls = mock.call((no_colons), capture=True) assert test_calls == m_subp.call_args def test_gpg_dearmor_args(self): - """Verify correct command gets called to dearmor keys - """ - with mock.patch.object(subp, 'subp', return_value=('', '')) as m_subp: - gpg.dearmor('key') + """Verify correct command gets called to dearmor keys""" + with mock.patch.object(subp, "subp", return_value=("", "")) as m_subp: + gpg.dearmor("key") test_call = mock.call( - ["gpg", "--dearmor"], data='key', decode=False) + ["gpg", "--dearmor"], data="key", decode=False + ) assert test_call == m_subp.call_args @mock.patch("cloudinit.gpg.time.sleep") @@ -91,18 +90,21 @@ class TestGPGCommands: """retry should be done on gpg receive keys failure.""" retries = (1, 2, 4) my_exc = subp.ProcessExecutionError( - stdout='', stderr='', exit_code=2, cmd=['mycmd']) - m_subp.side_effect = (my_exc, my_exc, ('', '')) + stdout="", stderr="", exit_code=2, cmd=["mycmd"] + ) + m_subp.side_effect = (my_exc, my_exc, ("", "")) gpg.recv_key("ABCD", "keyserver.example.com", retries=retries) self.assertEqual( - [mock.call(1), mock.call(2)], m_sleep.call_args_list) + [mock.call(1), mock.call(2)], m_sleep.call_args_list + ) def test_raises_error_after_retries(self, m_subp, m_sleep): """If the final run fails, error should be raised.""" naplen = 1 keyid, keyserver = ("ABCD", "keyserver.example.com") m_subp.side_effect = subp.ProcessExecutionError( - stdout='', stderr='', exit_code=2, cmd=['mycmd']) + stdout="", stderr="", exit_code=2, cmd=["mycmd"] + ) with self.assertRaises(ValueError) as rcm: gpg.recv_key(keyid, keyserver, retries=(naplen,)) self.assertIn(keyid, 
str(rcm.exception)) @@ -112,7 +114,8 @@ class TestGPGCommands: def test_no_retries_on_none(self, m_subp, m_sleep): """retry should not be done if retries is None.""" m_subp.side_effect = subp.ProcessExecutionError( - stdout='', stderr='', exit_code=2, cmd=['mycmd']) + stdout="", stderr="", exit_code=2, cmd=["mycmd"] + ) with self.assertRaises(ValueError): gpg.recv_key("ABCD", "keyserver.example.com", retries=None) m_sleep.assert_not_called() @@ -121,10 +124,16 @@ class TestGPGCommands: """Verify gpg is called with expected args.""" key, keyserver = ("DEADBEEF", "keyserver.example.com") retries = (1, 2, 4) - m_subp.return_value = ('', '') + m_subp.return_value = ("", "") gpg.recv_key(key, keyserver, retries=retries) m_subp.assert_called_once_with( - ['gpg', '--no-tty', - '--keyserver=%s' % keyserver, '--recv-keys', key], - capture=True) + [ + "gpg", + "--no-tty", + "--keyserver=%s" % keyserver, + "--recv-keys", + key, + ], + capture=True, + ) m_sleep.assert_not_called() diff --git a/tests/unittests/test_helpers.py b/tests/unittests/test_helpers.py index f491f8cd..69291597 100644 --- a/tests/unittests/test_helpers.py +++ b/tests/unittests/test_helpers.py @@ -5,9 +5,8 @@ import os from pathlib import Path -from tests.unittests import helpers as test_helpers - from cloudinit import sources +from tests.unittests import helpers as test_helpers class MyDataSource(sources.DataSource): @@ -25,8 +24,9 @@ class TestPaths(test_helpers.ResourceUsingTestCase): mypaths = self.getCloudPaths(myds) self.assertEqual( - os.path.join(mypaths.cloud_dir, 'instances', safe_iid), - mypaths.get_ipath()) + os.path.join(mypaths.cloud_dir, "instances", safe_iid), + mypaths.get_ipath(), + ) def test_get_ipath_and_empty_instance_id_returns_none(self): myds = MyDataSource(sys_cfg={}, distro=None, paths={}) @@ -52,8 +52,7 @@ class Testcloud_init_project_dir: def test_top_level_dir(self): """Assert the location of the top project directory is correct""" - assert (self.top_dir == - self._get_top_level_dir_alt_implementation()) + assert self.top_dir == self._get_top_level_dir_alt_implementation() def test_cloud_init_project_dir(self): """Assert cloud_init_project_dir produces an expected location diff --git a/tests/unittests/test_log.py b/tests/unittests/test_log.py index 3d1b9582..87c69dbb 100644 --- a/tests/unittests/test_log.py +++ b/tests/unittests/test_log.py @@ -13,7 +13,6 @@ from tests.unittests.helpers import CiTestCase class TestCloudInitLogger(CiTestCase): - def setUp(self): # set up a logger like cloud-init does in setupLogging, but instead # of sys.stderr, we'll plug in a StringIO() object so we can see @@ -26,7 +25,7 @@ class TestCloudInitLogger(CiTestCase): console.setLevel(ci_logging.DEBUG) self.ci_root.addHandler(console) self.ci_root.setLevel(ci_logging.DEBUG) - self.LOG = logging.getLogger('test_cloudinit_logger') + self.LOG = logging.getLogger("test_cloudinit_logger") def test_logger_uses_gmtime(self): """Test that log messages have timestamps in UTC (gmtime)""" @@ -43,15 +42,16 @@ class TestCloudInitLogger(CiTestCase): # utc_after : 2017-08-23 14:19:43.570064 utc_before = datetime.datetime.utcnow() - datetime.timedelta(0, 0.5) - self.LOG.error('Test message') + self.LOG.error("Test message") utc_after = datetime.datetime.utcnow() + datetime.timedelta(0, 0.5) # extract timestamp from log: # 2017-08-23 14:19:43,069 - test_log.py[ERROR]: Test message logstr = self.ci_logs.getvalue().splitlines()[0] - timestampstr = logstr.split(' - ')[0] - parsed_dt = datetime.datetime.strptime(timestampstr, - 
CLOUD_INIT_ASCTIME_FMT) + timestampstr = logstr.split(" - ")[0] + parsed_dt = datetime.datetime.strptime( + timestampstr, CLOUD_INIT_ASCTIME_FMT + ) self.assertLess(utc_before, parsed_dt) self.assertLess(parsed_dt, utc_after) diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py index 48ab6602..cf484dda 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -1,13 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. -from tests.unittests import helpers - -from cloudinit.handlers import cloud_config -from cloudinit.handlers import (CONTENT_START, CONTENT_END) - -from cloudinit import helpers as c_helpers -from cloudinit import util - import collections import glob import os @@ -15,6 +7,11 @@ import random import re import string +from cloudinit import helpers as c_helpers +from cloudinit import util +from cloudinit.handlers import CONTENT_END, CONTENT_START, cloud_config +from tests.unittests import helpers + SOURCE_PAT = "source*.*yaml" EXPECTED_PAT = "expected%s.yaml" TYPES = [dict, str, list, tuple, None, int] @@ -43,7 +40,7 @@ def _old_mergemanydict(*args): def _random_str(rand): - base = '' + base = "" for _i in range(rand.randint(1, 2 ** 8)): base += rand.choice(string.ascii_letters + string.digits) return base @@ -98,7 +95,7 @@ def make_dict(max_depth, seed=None): class TestSimpleRun(helpers.ResourceUsingTestCase): def _load_merge_files(self): - merge_root = helpers.resourceLocation('merge_sources') + merge_root = helpers.resourceLocation("merge_sources") tests = [] source_ids = collections.defaultdict(list) expected_files = {} @@ -106,8 +103,9 @@ class TestSimpleRun(helpers.ResourceUsingTestCase): base_fn = os.path.basename(fn) file_id = re.match(r"source(\d+)\-(\d+)[.]yaml", base_fn) if not file_id: - raise IOError("File %s does not have a numeric identifier" - % (fn)) + raise IOError( + "File %s does not have a numeric identifier" % (fn) + ) file_id = int(file_id.group(1)) source_ids[file_id].append(fn) expected_fn = os.path.join(merge_root, EXPECTED_PAT % (file_id)) @@ -141,29 +139,31 @@ class TestSimpleRun(helpers.ResourceUsingTestCase): cc_handler = cloud_config.CloudConfigPartHandler(paths) cc_handler.cloud_fn = None for (payloads, (expected_merge, expected_fn)) in tests: - cc_handler.handle_part(None, CONTENT_START, None, - None, None, None) + cc_handler.handle_part(None, CONTENT_START, None, None, None, None) merging_fns = [] for (fn, contents) in payloads: - cc_handler.handle_part(None, None, "%s.yaml" % (fn), - contents, None, {}) + cc_handler.handle_part( + None, None, "%s.yaml" % (fn), contents, None, {} + ) merging_fns.append(fn) merged_buf = cc_handler.cloud_buf - cc_handler.handle_part(None, CONTENT_END, None, - None, None, None) + cc_handler.handle_part(None, CONTENT_END, None, None, None, None) fail_msg = "Equality failure on checking %s with %s: %s != %s" - fail_msg = fail_msg % (expected_fn, - ",".join(merging_fns), merged_buf, - expected_merge) + fail_msg = fail_msg % ( + expected_fn, + ",".join(merging_fns), + merged_buf, + expected_merge, + ) self.assertEqual(expected_merge, merged_buf, msg=fail_msg) def test_compat_merges_dict(self): a = { - '1': '2', - 'b': 'c', + "1": "2", + "b": "c", } b = { - 'b': 'e', + "b": "e", } c = _old_mergedict(a, b) d = util.mergemanydict([a, b]) @@ -171,53 +171,53 @@ class TestSimpleRun(helpers.ResourceUsingTestCase): def test_compat_merges_dict2(self): a = { - 'Blah': 1, - 'Blah2': 2, - 'Blah3': 3, + "Blah": 1, + "Blah2": 2, + "Blah3": 3, } b = 
{ - 'Blah': 1, - 'Blah2': 2, - 'Blah3': [1], + "Blah": 1, + "Blah2": 2, + "Blah3": [1], } c = _old_mergedict(a, b) d = util.mergemanydict([a, b]) self.assertEqual(c, d) def test_compat_merges_list(self): - a = {'b': [1, 2, 3]} - b = {'b': [4, 5]} - c = {'b': [6, 7]} + a = {"b": [1, 2, 3]} + b = {"b": [4, 5]} + c = {"b": [6, 7]} e = _old_mergemanydict(a, b, c) f = util.mergemanydict([a, b, c]) self.assertEqual(e, f) def test_compat_merges_str(self): - a = {'b': "hi"} - b = {'b': "howdy"} - c = {'b': "hallo"} + a = {"b": "hi"} + b = {"b": "howdy"} + c = {"b": "hallo"} e = _old_mergemanydict(a, b, c) f = util.mergemanydict([a, b, c]) self.assertEqual(e, f) def test_compat_merge_sub_dict(self): a = { - '1': '2', - 'b': { - 'f': 'g', - 'e': 'c', - 'h': 'd', - 'hh': { - '1': 2, + "1": "2", + "b": { + "f": "g", + "e": "c", + "h": "d", + "hh": { + "1": 2, }, - } + }, } b = { - 'b': { - 'e': 'c', - 'hh': { - '3': 4, - } + "b": { + "e": "c", + "hh": { + "3": 4, + }, } } c = _old_mergedict(a, b) @@ -226,14 +226,14 @@ class TestSimpleRun(helpers.ResourceUsingTestCase): def test_compat_merge_sub_dict2(self): a = { - '1': '2', - 'b': { - 'f': 'g', - } + "1": "2", + "b": { + "f": "g", + }, } b = { - 'b': { - 'e': 'c', + "b": { + "e": "c", } } c = _old_mergedict(a, b) @@ -242,18 +242,19 @@ class TestSimpleRun(helpers.ResourceUsingTestCase): def test_compat_merge_sub_list(self): a = { - '1': '2', - 'b': { - 'f': ['1'], - } + "1": "2", + "b": { + "f": ["1"], + }, } b = { - 'b': { - 'f': [], + "b": { + "f": [], } } c = _old_mergedict(a, b) d = util.mergemanydict([a, b]) self.assertEqual(c, d) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index b5c38c55..66a47b0f 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1,20 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. 
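# The import reshuffle below is the isort half of this commit: stdlib first,
# third-party next, first-party (cloudinit, tests) last, each group
# alphabetized, and long "from" imports wrapped in parentheses so Black
# accepts them. A minimal sketch of a Black-compatible isort configuration
# that yields this grouping (illustrative values; the exact settings live in
# this repo's tox.ini and may differ):
#
#   [isort]
#   profile = black
#   line_length = 79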
-from cloudinit import net -from cloudinit import distros -from cloudinit.net import cmdline -from cloudinit.net import ( - eni, interface_has_own_mac, natural_sort_key, netplan, network_state, - renderers, sysconfig, networkd) -from cloudinit.sources.helpers import openstack -from cloudinit import temp_utils -from cloudinit import subp -from cloudinit import util -from cloudinit import safeyaml as yaml - -from tests.unittests.helpers import ( - CiTestCase, FilesystemMockingTestCase, dir2dict, mock, populate_dir) - import base64 import copy import gzip @@ -23,9 +8,32 @@ import json import os import re import textwrap -from yaml.serializer import Serializer import pytest +from yaml.serializer import Serializer + +from cloudinit import distros, net +from cloudinit import safeyaml as yaml +from cloudinit import subp, temp_utils, util +from cloudinit.net import ( + cmdline, + eni, + interface_has_own_mac, + natural_sort_key, + netplan, + network_state, + networkd, + renderers, + sysconfig, +) +from cloudinit.sources.helpers import openstack +from tests.unittests.helpers import ( + CiTestCase, + FilesystemMockingTestCase, + dir2dict, + mock, + populate_dir, +) DHCP_CONTENT_1 = """ DEVICE='eth0' @@ -48,15 +56,19 @@ DOMAINSEARCH='foo.com' """ DHCP_EXPECTED_1 = { - 'name': 'eth0', - 'type': 'physical', - 'subnets': [{'broadcast': '192.168.122.255', - 'control': 'manual', - 'gateway': '192.168.122.1', - 'dns_search': ['foo.com'], - 'type': 'dhcp', - 'netmask': '255.255.255.0', - 'dns_nameservers': ['192.168.122.1']}], + "name": "eth0", + "type": "physical", + "subnets": [ + { + "broadcast": "192.168.122.255", + "control": "manual", + "gateway": "192.168.122.1", + "dns_search": ["foo.com"], + "type": "dhcp", + "netmask": "255.255.255.0", + "dns_nameservers": ["192.168.122.1"], + } + ], } DHCP6_CONTENT_1 = """ @@ -73,12 +85,17 @@ DNSDOMAIN= """ DHCP6_EXPECTED_1 = { - 'name': 'eno1', - 'type': 'physical', - 'subnets': [{'control': 'manual', - 'dns_nameservers': ['2001:67c:1562:8010::2:1'], - 'netmask': '64', - 'type': 'dhcp6'}]} + "name": "eno1", + "type": "physical", + "subnets": [ + { + "control": "manual", + "dns_nameservers": ["2001:67c:1562:8010::2:1"], + "netmask": "64", + "type": "dhcp6", + } + ], +} STATIC_CONTENT_1 = """ @@ -97,14 +114,20 @@ DOMAINSEARCH='foo.com' """ STATIC_EXPECTED_1 = { - 'name': 'eth1', - 'type': 'physical', - 'subnets': [{'broadcast': '10.0.0.255', 'control': 'manual', - 'gateway': '10.0.0.1', - 'dns_search': ['foo.com'], 'type': 'static', - 'netmask': '255.255.255.0', - 'dns_nameservers': ['10.0.1.1'], - 'address': '10.0.0.2'}], + "name": "eth1", + "type": "physical", + "subnets": [ + { + "broadcast": "10.0.0.255", + "control": "manual", + "gateway": "10.0.0.1", + "dns_search": ["foo.com"], + "type": "static", + "netmask": "255.255.255.0", + "dns_nameservers": ["10.0.1.1"], + "address": "10.0.0.2", + } + ], } V1_NAMESERVER_ALIAS = """ @@ -471,34 +494,42 @@ ethernets: # Examples (and expected outputs for various renderers). 
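# Each OS_SAMPLES entry below pairs OpenStack network_data JSON ("in_data")
# and a MAC-to-interface-name map ("in_macs") with the exact files a
# sysconfig renderer is expected to write ("out_sysconfig_opensuse" and
# "out_sysconfig_rhel"). A rough sketch of how a test consumes one entry,
# where render_and_read is a stand-in name for illustration, not the real
# helper:
#
#   ns = openstack.convert_net_json(
#       sample["in_data"], known_macs=sample["in_macs"]
#   )
#   rendered = render_and_read(ns)  # assumed to return {path: content}
#   for path, expected in sample["out_sysconfig_rhel"]:
#       assert rendered[path] == expected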
OS_SAMPLES = [ { - 'in_data': { + "in_data": { "services": [{"type": "dns", "address": "172.19.0.12"}], - "networks": [{ - "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4", - "type": "ipv4", "netmask": "255.255.252.0", - "link": "tap1a81968a-79", - "routes": [{ - "netmask": "0.0.0.0", - "network": "0.0.0.0", - "gateway": "172.19.3.254", - }], - "ip_address": "172.19.1.34", "id": "network0" - }], + "networks": [ + { + "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4", + "type": "ipv4", + "netmask": "255.255.252.0", + "link": "tap1a81968a-79", + "routes": [ + { + "netmask": "0.0.0.0", + "network": "0.0.0.0", + "gateway": "172.19.3.254", + } + ], + "ip_address": "172.19.1.34", + "id": "network0", + } + ], "links": [ { "ethernet_mac_address": "fa:16:3e:ed:9a:59", - "mtu": None, "type": "bridge", "id": - "tap1a81968a-79", - "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" + "mtu": None, + "type": "bridge", + "id": "tap1a81968a-79", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f", }, ], }, - 'in_macs': { - 'fa:16:3e:ed:9a:59': 'eth0', + "in_macs": { + "fa:16:3e:ed:9a:59": "eth0", }, - 'out_sysconfig_opensuse': [ - ('etc/sysconfig/network/ifcfg-eth0', - """ + "out_sysconfig_opensuse": [ + ( + "etc/sysconfig/network/ifcfg-eth0", + """ # Created by cloud-init on instance boot automatically, do not edit. # BOOTPROTO=static @@ -506,26 +537,39 @@ IPADDR=172.19.1.34 LLADDR=fa:16:3e:ed:9a:59 NETMASK=255.255.252.0 STARTMODE=auto -""".lstrip()), - ('etc/resolv.conf', - """ +""".lstrip(), + ), + ( + "etc/resolv.conf", + """ ; Created by cloud-init on instance boot automatically, do not edit. ; nameserver 172.19.0.12 -""".lstrip()), - ('etc/NetworkManager/conf.d/99-cloud-init.conf', - """ +""".lstrip(), + ), + ( + "etc/NetworkManager/conf.d/99-cloud-init.conf", + """ # Created by cloud-init on instance boot automatically, do not edit. # [main] dns = none -""".lstrip()), - ('etc/udev/rules.d/85-persistent-net-cloud-init.rules', - "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', - 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))], - 'out_sysconfig_rhel': [ - ('etc/sysconfig/network-scripts/ifcfg-eth0', - """ +""".lstrip(), + ), + ( + "etc/udev/rules.d/85-persistent-net-cloud-init.rules", + "".join( + [ + 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n', + ] + ), + ), + ], + "out_sysconfig_rhel": [ + ( + "etc/sysconfig/network-scripts/ifcfg-eth0", + """ # Created by cloud-init on instance boot automatically, do not edit. # BOOTPROTO=none @@ -539,60 +583,82 @@ NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no -""".lstrip()), - ('etc/resolv.conf', - """ +""".lstrip(), + ), + ( + "etc/resolv.conf", + """ ; Created by cloud-init on instance boot automatically, do not edit. ; nameserver 172.19.0.12 -""".lstrip()), - ('etc/NetworkManager/conf.d/99-cloud-init.conf', - """ +""".lstrip(), + ), + ( + "etc/NetworkManager/conf.d/99-cloud-init.conf", + """ # Created by cloud-init on instance boot automatically, do not edit. 
# [main] dns = none -""".lstrip()), - ('etc/udev/rules.d/70-persistent-net.rules', - "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', - 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))] - +""".lstrip(), + ), + ( + "etc/udev/rules.d/70-persistent-net.rules", + "".join( + [ + 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n', + ] + ), + ), + ], }, { - 'in_data': { + "in_data": { "services": [{"type": "dns", "address": "172.19.0.12"}], - "networks": [{ - "network_id": "public-ipv4", - "type": "ipv4", "netmask": "255.255.252.0", - "link": "tap1a81968a-79", - "routes": [{ - "netmask": "0.0.0.0", - "network": "0.0.0.0", - "gateway": "172.19.3.254", - }], - "ip_address": "172.19.1.34", "id": "network0" - }, { - "network_id": "private-ipv4", - "type": "ipv4", "netmask": "255.255.255.0", - "link": "tap1a81968a-79", - "routes": [], - "ip_address": "10.0.0.10", "id": "network1" - }], + "networks": [ + { + "network_id": "public-ipv4", + "type": "ipv4", + "netmask": "255.255.252.0", + "link": "tap1a81968a-79", + "routes": [ + { + "netmask": "0.0.0.0", + "network": "0.0.0.0", + "gateway": "172.19.3.254", + } + ], + "ip_address": "172.19.1.34", + "id": "network0", + }, + { + "network_id": "private-ipv4", + "type": "ipv4", + "netmask": "255.255.255.0", + "link": "tap1a81968a-79", + "routes": [], + "ip_address": "10.0.0.10", + "id": "network1", + }, + ], "links": [ { "ethernet_mac_address": "fa:16:3e:ed:9a:59", - "mtu": None, "type": "bridge", "id": - "tap1a81968a-79", - "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" + "mtu": None, + "type": "bridge", + "id": "tap1a81968a-79", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f", }, ], }, - 'in_macs': { - 'fa:16:3e:ed:9a:59': 'eth0', + "in_macs": { + "fa:16:3e:ed:9a:59": "eth0", }, - 'out_sysconfig_opensuse': [ - ('etc/sysconfig/network/ifcfg-eth0', - """ + "out_sysconfig_opensuse": [ + ( + "etc/sysconfig/network/ifcfg-eth0", + """ # Created by cloud-init on instance boot automatically, do not edit. # BOOTPROTO=static @@ -602,26 +668,39 @@ LLADDR=fa:16:3e:ed:9a:59 NETMASK=255.255.252.0 NETMASK1=255.255.255.0 STARTMODE=auto -""".lstrip()), - ('etc/resolv.conf', - """ +""".lstrip(), + ), + ( + "etc/resolv.conf", + """ ; Created by cloud-init on instance boot automatically, do not edit. ; nameserver 172.19.0.12 -""".lstrip()), - ('etc/NetworkManager/conf.d/99-cloud-init.conf', - """ +""".lstrip(), + ), + ( + "etc/NetworkManager/conf.d/99-cloud-init.conf", + """ # Created by cloud-init on instance boot automatically, do not edit. # [main] dns = none -""".lstrip()), - ('etc/udev/rules.d/85-persistent-net-cloud-init.rules', - "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', - 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))], - 'out_sysconfig_rhel': [ - ('etc/sysconfig/network-scripts/ifcfg-eth0', - """ +""".lstrip(), + ), + ( + "etc/udev/rules.d/85-persistent-net-cloud-init.rules", + "".join( + [ + 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n', + ] + ), + ), + ], + "out_sysconfig_rhel": [ + ( + "etc/sysconfig/network-scripts/ifcfg-eth0", + """ # Created by cloud-init on instance boot automatically, do not edit. # BOOTPROTO=none @@ -637,80 +716,106 @@ NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no -""".lstrip()), - ('etc/resolv.conf', - """ +""".lstrip(), + ), + ( + "etc/resolv.conf", + """ ; Created by cloud-init on instance boot automatically, do not edit. 
; nameserver 172.19.0.12 -""".lstrip()), - ('etc/NetworkManager/conf.d/99-cloud-init.conf', - """ +""".lstrip(), + ), + ( + "etc/NetworkManager/conf.d/99-cloud-init.conf", + """ # Created by cloud-init on instance boot automatically, do not edit. # [main] dns = none -""".lstrip()), - ('etc/udev/rules.d/70-persistent-net.rules', - "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', - 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))] - +""".lstrip(), + ), + ( + "etc/udev/rules.d/70-persistent-net.rules", + "".join( + [ + 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n', + ] + ), + ), + ], }, { - 'in_data': { + "in_data": { "services": [{"type": "dns", "address": "172.19.0.12"}], - "networks": [{ - "network_id": "public-ipv4", - "type": "ipv4", "netmask": "255.255.252.0", - "link": "tap1a81968a-79", - "routes": [{ - "netmask": "0.0.0.0", - "network": "0.0.0.0", - "gateway": "172.19.3.254", - }], - "ip_address": "172.19.1.34", "id": "network0" - }, { - "network_id": "public-ipv6-a", - "type": "ipv6", "netmask": "", - "link": "tap1a81968a-79", - "routes": [ - { - "gateway": "2001:DB8::1", - "netmask": "::", - "network": "::" - } - ], - "ip_address": "2001:DB8::10", "id": "network1" - }, { - "network_id": "public-ipv6-b", - "type": "ipv6", "netmask": "64", - "link": "tap1a81968a-79", - "routes": [ - ], - "ip_address": "2001:DB9::10", "id": "network2" - }, { - "network_id": "public-ipv6-c", - "type": "ipv6", "netmask": "64", - "link": "tap1a81968a-79", - "routes": [ - ], - "ip_address": "2001:DB10::10", "id": "network3" - }], + "networks": [ + { + "network_id": "public-ipv4", + "type": "ipv4", + "netmask": "255.255.252.0", + "link": "tap1a81968a-79", + "routes": [ + { + "netmask": "0.0.0.0", + "network": "0.0.0.0", + "gateway": "172.19.3.254", + } + ], + "ip_address": "172.19.1.34", + "id": "network0", + }, + { + "network_id": "public-ipv6-a", + "type": "ipv6", + "netmask": "", + "link": "tap1a81968a-79", + "routes": [ + { + "gateway": "2001:DB8::1", + "netmask": "::", + "network": "::", + } + ], + "ip_address": "2001:DB8::10", + "id": "network1", + }, + { + "network_id": "public-ipv6-b", + "type": "ipv6", + "netmask": "64", + "link": "tap1a81968a-79", + "routes": [], + "ip_address": "2001:DB9::10", + "id": "network2", + }, + { + "network_id": "public-ipv6-c", + "type": "ipv6", + "netmask": "64", + "link": "tap1a81968a-79", + "routes": [], + "ip_address": "2001:DB10::10", + "id": "network3", + }, + ], "links": [ { "ethernet_mac_address": "fa:16:3e:ed:9a:59", - "mtu": None, "type": "bridge", "id": - "tap1a81968a-79", - "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" + "mtu": None, + "type": "bridge", + "id": "tap1a81968a-79", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f", }, ], }, - 'in_macs': { - 'fa:16:3e:ed:9a:59': 'eth0', + "in_macs": { + "fa:16:3e:ed:9a:59": "eth0", }, - 'out_sysconfig_opensuse': [ - ('etc/sysconfig/network/ifcfg-eth0', - """ + "out_sysconfig_opensuse": [ + ( + "etc/sysconfig/network/ifcfg-eth0", + """ # Created by cloud-init on instance boot automatically, do not edit. # BOOTPROTO=static @@ -721,26 +826,39 @@ IPADDR6_2=2001:DB10::10/64 LLADDR=fa:16:3e:ed:9a:59 NETMASK=255.255.252.0 STARTMODE=auto -""".lstrip()), - ('etc/resolv.conf', - """ +""".lstrip(), + ), + ( + "etc/resolv.conf", + """ ; Created by cloud-init on instance boot automatically, do not edit. 
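The third sample mixes dotted-quad IPv4 netmasks with bare IPv6 prefix lengths; both spellings can be sanity-checked with the standard library alone:

import ipaddress

assert (
    ipaddress.ip_interface("172.19.1.34/255.255.252.0").with_prefixlen
    == "172.19.1.34/22"
)
assert ipaddress.ip_interface("2001:DB9::10/64").network.prefixlen == 64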
; nameserver 172.19.0.12 -""".lstrip()), - ('etc/NetworkManager/conf.d/99-cloud-init.conf', - """ +""".lstrip(), + ), + ( + "etc/NetworkManager/conf.d/99-cloud-init.conf", + """ # Created by cloud-init on instance boot automatically, do not edit. # [main] dns = none -""".lstrip()), - ('etc/udev/rules.d/85-persistent-net-cloud-init.rules', - "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', - 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))], - 'out_sysconfig_rhel': [ - ('etc/sysconfig/network-scripts/ifcfg-eth0', - """ +""".lstrip(), + ), + ( + "etc/udev/rules.d/85-persistent-net-cloud-init.rules", + "".join( + [ + 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n', + ] + ), + ), + ], + "out_sysconfig_rhel": [ + ( + "etc/sysconfig/network-scripts/ifcfg-eth0", + """ # Created by cloud-init on instance boot automatically, do not edit. # BOOTPROTO=none @@ -760,24 +878,36 @@ NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no -""".lstrip()), - ('etc/resolv.conf', - """ +""".lstrip(), + ), + ( + "etc/resolv.conf", + """ ; Created by cloud-init on instance boot automatically, do not edit. ; nameserver 172.19.0.12 -""".lstrip()), - ('etc/NetworkManager/conf.d/99-cloud-init.conf', - """ +""".lstrip(), + ), + ( + "etc/NetworkManager/conf.d/99-cloud-init.conf", + """ # Created by cloud-init on instance boot automatically, do not edit. # [main] dns = none -""".lstrip()), - ('etc/udev/rules.d/70-persistent-net.rules', - "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', - 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))] - } +""".lstrip(), + ), + ( + "etc/udev/rules.d/70-persistent-net.rules", + "".join( + [ + 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', + 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n', + ] + ), + ), + ], + }, ] EXAMPLE_ENI = """ @@ -820,8 +950,9 @@ iface eth1 inet static """.lstrip() NETWORK_CONFIGS = { - 'small': { - 'expected_networkd_eth99': textwrap.dedent("""\ + "small": { + "expected_networkd_eth99": textwrap.dedent( + """\ [Match] Name=eth99 MACAddress=c0:d6:9f:2c:e8:80 @@ -837,8 +968,10 @@ NETWORK_CONFIGS = { Gateway=65.61.151.37 Destination=0.0.0.0/0 Metric=10000 - """).rstrip(' '), - 'expected_networkd_eth1': textwrap.dedent("""\ + """ + ).rstrip(" "), + "expected_networkd_eth1": textwrap.dedent( + """\ [Match] Name=eth1 MACAddress=cf:d6:af:48:e8:80 @@ -846,8 +979,10 @@ NETWORK_CONFIGS = { DHCP=no Domains=wark.maas DNS=1.2.3.4 5.6.7.8 - """).rstrip(' '), - 'expected_eni': textwrap.dedent("""\ + """ + ).rstrip(" "), + "expected_eni": textwrap.dedent( + """\ auto lo iface lo inet loopback dns-nameservers 1.2.3.4 5.6.7.8 @@ -865,8 +1000,10 @@ NETWORK_CONFIGS = { dns-search barley.maas sach.maas post-up route add default gw 65.61.151.37 metric 10000 || true pre-down route del default gw 65.61.151.37 metric 10000 || true - """).rstrip(' '), - 'expected_netplan': textwrap.dedent(""" + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ network: version: 2 ethernets: @@ -892,29 +1029,37 @@ NETWORK_CONFIGS = { to: 0.0.0.0/0 via: 65.61.151.37 set-name: eth99 - """).rstrip(' '), - 'expected_sysconfig_opensuse': { - 'ifcfg-eth1': textwrap.dedent("""\ + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-eth1": textwrap.dedent( + """\ BOOTPROTO=static LLADDR=cf:d6:af:48:e8:80 - STARTMODE=auto"""), - 'ifcfg-eth99': textwrap.dedent("""\ + STARTMODE=auto""" + ), + "ifcfg-eth99": textwrap.dedent( + """\ BOOTPROTO=dhcp4 LLADDR=c0:d6:9f:2c:e8:80 IPADDR=192.168.21.3 
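From EXAMPLE_ENI onward, the NETWORK_CONFIGS fixtures share one idiom that Black merely re-indents: dedent a triple-quoted block, then strip only trailing spaces so the final newline survives. The pattern in isolation:

import textwrap

expected = textwrap.dedent(
    """\
    [Match]
    Name=eth99
    """
).rstrip(" ")
# dedent removes the common indent; rstrip(" ") drops only trailing spaces,
# so the terminating newline is preserved for file comparisons.
assert expected == "[Match]\nName=eth99\n"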
NETMASK=255.255.255.0 - STARTMODE=auto"""), + STARTMODE=auto""" + ), }, - 'expected_sysconfig_rhel': { - 'ifcfg-eth1': textwrap.dedent("""\ + "expected_sysconfig_rhel": { + "ifcfg-eth1": textwrap.dedent( + """\ BOOTPROTO=none DEVICE=eth1 HWADDR=cf:d6:af:48:e8:80 NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet - USERCTL=no"""), - 'ifcfg-eth99': textwrap.dedent("""\ + USERCTL=no""" + ), + "ifcfg-eth99": textwrap.dedent( + """\ BOOTPROTO=dhcp DEFROUTE=yes DEVICE=eth99 @@ -930,9 +1075,11 @@ NETWORK_CONFIGS = { NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet - USERCTL=no"""), + USERCTL=no""" + ), }, - 'yaml': textwrap.dedent(""" + "yaml": textwrap.dedent( + """ version: 1 config: # Physical interfaces. @@ -961,16 +1108,20 @@ NETWORK_CONFIGS = { - 5.6.7.8 search: - wark.maas - """), + """ + ), }, - 'v4_and_v6': { - 'expected_networkd': textwrap.dedent("""\ + "v4_and_v6": { + "expected_networkd": textwrap.dedent( + """\ [Match] Name=iface0 [Network] DHCP=yes - """).rstrip(' '), - 'expected_eni': textwrap.dedent("""\ + """ + ).rstrip(" "), + "expected_eni": textwrap.dedent( + """\ auto lo iface lo inet loopback @@ -979,22 +1130,28 @@ NETWORK_CONFIGS = { # control-alias iface0 iface iface0 inet6 dhcp - """).rstrip(' '), - 'expected_netplan': textwrap.dedent(""" + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ network: version: 2 ethernets: iface0: dhcp4: true dhcp6: true - """).rstrip(' '), - 'expected_sysconfig_opensuse': { - 'ifcfg-iface0': textwrap.dedent("""\ + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ BOOTPROTO=dhcp DHCLIENT6_MODE=managed - STARTMODE=auto""") + STARTMODE=auto""" + ) }, - 'yaml': textwrap.dedent("""\ + "yaml": textwrap.dedent( + """\ version: 1 config: - type: 'physical' @@ -1002,10 +1159,12 @@ NETWORK_CONFIGS = { subnets: - {'type': 'dhcp4'} - {'type': 'dhcp6'} - """).rstrip(' '), + """ + ).rstrip(" "), }, - 'v4_and_v6_static': { - 'expected_networkd': textwrap.dedent("""\ + "v4_and_v6_static": { + "expected_networkd": textwrap.dedent( + """\ [Match] Name=iface0 [Link] @@ -1015,8 +1174,10 @@ NETWORK_CONFIGS = { [Address] Address=192.168.14.2/24 Address=2001:1::1/64 - """).rstrip(' '), - 'expected_eni': textwrap.dedent("""\ + """ + ).rstrip(" "), + "expected_eni": textwrap.dedent( + """\ auto lo iface lo inet loopback @@ -1029,8 +1190,10 @@ NETWORK_CONFIGS = { iface iface0 inet6 static address 2001:1::1/64 mtu 1500 - """).rstrip(' '), - 'expected_netplan': textwrap.dedent(""" + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ network: version: 2 ethernets: @@ -1040,8 +1203,10 @@ NETWORK_CONFIGS = { - 2001:1::1/64 ipv6-mtu: 1500 mtu: 9000 - """).rstrip(' '), - 'yaml': textwrap.dedent("""\ + """ + ).rstrip(" "), + "yaml": textwrap.dedent( + """\ version: 1 config: - type: 'physical' @@ -1054,19 +1219,23 @@ NETWORK_CONFIGS = { - type: static address: 2001:1::1/64 mtu: 1500 - """).rstrip(' '), - 'expected_sysconfig_opensuse': { - 'ifcfg-iface0': textwrap.dedent("""\ + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ BOOTPROTO=static IPADDR=192.168.14.2 IPADDR6=2001:1::1/64 NETMASK=255.255.255.0 STARTMODE=auto MTU=9000 - """), + """ + ), }, - 'expected_sysconfig_rhel': { - 'ifcfg-iface0': textwrap.dedent("""\ + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ BOOTPROTO=none DEVICE=iface0 IPADDR=192.168.14.2 @@ -1081,17 +1250,21 @@ NETWORK_CONFIGS = { USERCTL=no MTU=9000 IPV6_MTU=1500 - """), + """ + ), }, }, - 'v6_and_v4': { - 
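Each NETWORK_CONFIGS entry pairs expected renderer output with a "yaml" source; the tests below parse that YAML and hand it to parse_net_config_data. A standalone sketch with PyYAML (safe_load here; the suite routes through its own yaml helper):

import yaml

cfg = yaml.safe_load(
    """
    version: 1
    config:
      - type: physical
        name: iface0
        subnets:
          - type: dhcp4
          - type: dhcp6
    """
)
assert cfg["version"] == 1
assert [s["type"] for s in cfg["config"][0]["subnets"]] == ["dhcp4", "dhcp6"]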
'expected_sysconfig_opensuse': { - 'ifcfg-iface0': textwrap.dedent("""\ + "v6_and_v4": { + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ BOOTPROTO=dhcp DHCLIENT6_MODE=managed - STARTMODE=auto""") + STARTMODE=auto""" + ) }, - 'yaml': textwrap.dedent("""\ + "yaml": textwrap.dedent( + """\ version: 1 config: - type: 'physical' @@ -1099,46 +1272,58 @@ NETWORK_CONFIGS = { subnets: - type: dhcp6 - type: dhcp4 - """).rstrip(' '), + """ + ).rstrip(" "), }, - 'dhcpv6_only': { - 'expected_networkd': textwrap.dedent("""\ + "dhcpv6_only": { + "expected_networkd": textwrap.dedent( + """\ [Match] Name=iface0 [Network] DHCP=ipv6 - """).rstrip(' '), - 'expected_eni': textwrap.dedent("""\ + """ + ).rstrip(" "), + "expected_eni": textwrap.dedent( + """\ auto lo iface lo inet loopback auto iface0 iface iface0 inet6 dhcp - """).rstrip(' '), - 'expected_netplan': textwrap.dedent(""" + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ network: version: 2 ethernets: iface0: dhcp6: true - """).rstrip(' '), - 'yaml': textwrap.dedent("""\ + """ + ).rstrip(" "), + "yaml": textwrap.dedent( + """\ version: 1 config: - type: 'physical' name: 'iface0' subnets: - {'type': 'dhcp6'} - """).rstrip(' '), - 'expected_sysconfig_opensuse': { - 'ifcfg-iface0': textwrap.dedent("""\ + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ BOOTPROTO=dhcp6 DHCLIENT6_MODE=managed STARTMODE=auto - """), + """ + ), }, - 'expected_sysconfig_rhel': { - 'ifcfg-iface0': textwrap.dedent("""\ + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ BOOTPROTO=none DEVICE=iface0 DHCPV6C=yes @@ -1148,27 +1333,33 @@ NETWORK_CONFIGS = { ONBOOT=yes TYPE=Ethernet USERCTL=no - """), + """ + ), }, }, - 'dhcpv6_accept_ra': { - 'expected_eni': textwrap.dedent("""\ + "dhcpv6_accept_ra": { + "expected_eni": textwrap.dedent( + """\ auto lo iface lo inet loopback auto iface0 iface iface0 inet6 dhcp accept_ra 1 - """).rstrip(' '), - 'expected_netplan': textwrap.dedent(""" + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ network: version: 2 ethernets: iface0: accept-ra: true dhcp6: true - """).rstrip(' '), - 'yaml_v1': textwrap.dedent("""\ + """ + ).rstrip(" "), + "yaml_v1": textwrap.dedent( + """\ version: 1 config: - type: 'physical' @@ -1176,23 +1367,29 @@ NETWORK_CONFIGS = { subnets: - {'type': 'dhcp6'} accept-ra: true - """).rstrip(' '), - 'yaml_v2': textwrap.dedent("""\ + """ + ).rstrip(" "), + "yaml_v2": textwrap.dedent( + """\ version: 2 ethernets: iface0: dhcp6: true accept-ra: true - """).rstrip(' '), - 'expected_sysconfig_opensuse': { - 'ifcfg-iface0': textwrap.dedent("""\ + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ BOOTPROTO=dhcp6 DHCLIENT6_MODE=managed STARTMODE=auto - """), + """ + ), }, - 'expected_sysconfig_rhel': { - 'ifcfg-iface0': textwrap.dedent("""\ + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ BOOTPROTO=none DEVICE=iface0 DHCPV6C=yes @@ -1203,34 +1400,42 @@ NETWORK_CONFIGS = { ONBOOT=yes TYPE=Ethernet USERCTL=no - """), + """ + ), }, - 'expected_networkd': textwrap.dedent("""\ + "expected_networkd": textwrap.dedent( + """\ [Match] Name=iface0 [Network] DHCP=ipv6 IPv6AcceptRA=True - """).rstrip(' '), + """ + ).rstrip(" "), }, - 'dhcpv6_reject_ra': { - 'expected_eni': textwrap.dedent("""\ + "dhcpv6_reject_ra": { + "expected_eni": textwrap.dedent( + """\ auto lo iface lo inet loopback auto iface0 iface iface0 inet6 dhcp accept_ra 0 - 
""").rstrip(' '), - 'expected_netplan': textwrap.dedent(""" + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ network: version: 2 ethernets: iface0: accept-ra: false dhcp6: true - """).rstrip(' '), - 'yaml_v1': textwrap.dedent("""\ + """ + ).rstrip(" "), + "yaml_v1": textwrap.dedent( + """\ version: 1 config: - type: 'physical' @@ -1238,23 +1443,29 @@ NETWORK_CONFIGS = { subnets: - {'type': 'dhcp6'} accept-ra: false - """).rstrip(' '), - 'yaml_v2': textwrap.dedent("""\ + """ + ).rstrip(" "), + "yaml_v2": textwrap.dedent( + """\ version: 2 ethernets: iface0: dhcp6: true accept-ra: false - """).rstrip(' '), - 'expected_sysconfig_opensuse': { - 'ifcfg-iface0': textwrap.dedent("""\ + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ BOOTPROTO=dhcp6 DHCLIENT6_MODE=managed STARTMODE=auto - """), + """ + ), }, - 'expected_sysconfig_rhel': { - 'ifcfg-iface0': textwrap.dedent("""\ + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ BOOTPROTO=none DEVICE=iface0 DHCPV6C=yes @@ -1265,49 +1476,61 @@ NETWORK_CONFIGS = { ONBOOT=yes TYPE=Ethernet USERCTL=no - """), + """ + ), }, - 'expected_networkd': textwrap.dedent("""\ + "expected_networkd": textwrap.dedent( + """\ [Match] Name=iface0 [Network] DHCP=ipv6 IPv6AcceptRA=False - """).rstrip(' '), + """ + ).rstrip(" "), }, - 'ipv6_slaac': { - 'expected_eni': textwrap.dedent("""\ + "ipv6_slaac": { + "expected_eni": textwrap.dedent( + """\ auto lo iface lo inet loopback auto iface0 iface iface0 inet6 auto dhcp 0 - """).rstrip(' '), - 'expected_netplan': textwrap.dedent(""" + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ network: version: 2 ethernets: iface0: dhcp6: true - """).rstrip(' '), - 'yaml': textwrap.dedent("""\ + """ + ).rstrip(" "), + "yaml": textwrap.dedent( + """\ version: 1 config: - type: 'physical' name: 'iface0' subnets: - {'type': 'ipv6_slaac'} - """).rstrip(' '), - 'expected_sysconfig_opensuse': { - 'ifcfg-iface0': textwrap.dedent("""\ + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ BOOTPROTO=dhcp6 DHCLIENT6_MODE=info STARTMODE=auto - """), + """ + ), }, - 'expected_sysconfig_rhel': { - 'ifcfg-iface0': textwrap.dedent("""\ + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ BOOTPROTO=none DEVICE=iface0 IPV6_AUTOCONF=yes @@ -1317,11 +1540,13 @@ NETWORK_CONFIGS = { ONBOOT=yes TYPE=Ethernet USERCTL=no - """), + """ + ), }, }, - 'static6': { - 'yaml': textwrap.dedent("""\ + "static6": { + "yaml": textwrap.dedent( + """\ version: 1 config: - type: 'physical' @@ -1330,9 +1555,11 @@ NETWORK_CONFIGS = { subnets: - type: 'static6' address: 2001:1::1/64 - """).rstrip(' '), - 'expected_sysconfig_rhel': { - 'ifcfg-iface0': textwrap.dedent("""\ + """ + ).rstrip(" "), + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ BOOTPROTO=none DEVICE=iface0 IPV6ADDR=2001:1::1/64 @@ -1344,42 +1571,52 @@ NETWORK_CONFIGS = { ONBOOT=yes TYPE=Ethernet USERCTL=no - """), + """ + ), }, }, - 'dhcpv6_stateless': { - 'expected_eni': textwrap.dedent("""\ + "dhcpv6_stateless": { + "expected_eni": textwrap.dedent( + """\ auto lo iface lo inet loopback auto iface0 iface iface0 inet6 auto dhcp 1 - """).rstrip(' '), - 'expected_netplan': textwrap.dedent(""" + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ network: version: 2 ethernets: iface0: dhcp6: true - """).rstrip(' '), - 'yaml': textwrap.dedent("""\ + """ + ).rstrip(" "), + "yaml": textwrap.dedent( + """\ 
version: 1 config: - type: 'physical' name: 'iface0' subnets: - {'type': 'ipv6_dhcpv6-stateless'} - """).rstrip(' '), - 'expected_sysconfig_opensuse': { - 'ifcfg-iface0': textwrap.dedent("""\ + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ BOOTPROTO=dhcp6 DHCLIENT6_MODE=info STARTMODE=auto - """), + """ + ), }, - 'expected_sysconfig_rhel': { - 'ifcfg-iface0': textwrap.dedent("""\ + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ BOOTPROTO=none DEVICE=iface0 DHCPV6C=yes @@ -1391,26 +1628,32 @@ NETWORK_CONFIGS = { ONBOOT=yes TYPE=Ethernet USERCTL=no - """), + """ + ), }, }, - 'dhcpv6_stateful': { - 'expected_eni': textwrap.dedent("""\ + "dhcpv6_stateful": { + "expected_eni": textwrap.dedent( + """\ auto lo iface lo inet loopback auto iface0 iface iface0 inet6 dhcp - """).rstrip(' '), - 'expected_netplan': textwrap.dedent(""" + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ network: version: 2 ethernets: iface0: accept-ra: true dhcp6: true - """).rstrip(' '), - 'yaml': textwrap.dedent("""\ + """ + ).rstrip(" "), + "yaml": textwrap.dedent( + """\ version: 1 config: - type: 'physical' @@ -1418,16 +1661,20 @@ NETWORK_CONFIGS = { subnets: - {'type': 'ipv6_dhcpv6-stateful'} accept-ra: true - """).rstrip(' '), - 'expected_sysconfig_opensuse': { - 'ifcfg-iface0': textwrap.dedent("""\ + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ BOOTPROTO=dhcp6 DHCLIENT6_MODE=managed STARTMODE=auto - """), + """ + ), }, - 'expected_sysconfig_rhel': { - 'ifcfg-iface0': textwrap.dedent("""\ + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ BOOTPROTO=dhcp DEVICE=iface0 DHCPV6C=yes @@ -1439,75 +1686,93 @@ NETWORK_CONFIGS = { ONBOOT=yes TYPE=Ethernet USERCTL=no - """), + """ + ), }, }, - 'wakeonlan_disabled': { - 'expected_eni': textwrap.dedent("""\ + "wakeonlan_disabled": { + "expected_eni": textwrap.dedent( + """\ auto lo iface lo inet loopback auto iface0 iface iface0 inet dhcp - """).rstrip(' '), - 'expected_netplan': textwrap.dedent(""" + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ network: ethernets: iface0: dhcp4: true wakeonlan: false version: 2 - """), - 'expected_sysconfig_opensuse': { - 'ifcfg-iface0': textwrap.dedent("""\ + """ + ), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ BOOTPROTO=dhcp4 STARTMODE=auto - """), + """ + ), }, - 'expected_sysconfig_rhel': { - 'ifcfg-iface0': textwrap.dedent("""\ + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ BOOTPROTO=dhcp DEVICE=iface0 NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet USERCTL=no - """), + """ + ), }, - 'yaml_v2': textwrap.dedent("""\ + "yaml_v2": textwrap.dedent( + """\ version: 2 ethernets: iface0: dhcp4: true wakeonlan: false - """).rstrip(' '), + """ + ).rstrip(" "), }, - 'wakeonlan_enabled': { - 'expected_eni': textwrap.dedent("""\ + "wakeonlan_enabled": { + "expected_eni": textwrap.dedent( + """\ auto lo iface lo inet loopback auto iface0 iface iface0 inet dhcp ethernet-wol g - """).rstrip(' '), - 'expected_netplan': textwrap.dedent(""" + """ + ).rstrip(" "), + "expected_netplan": textwrap.dedent( + """ network: ethernets: iface0: dhcp4: true wakeonlan: true version: 2 - """), - 'expected_sysconfig_opensuse': { - 'ifcfg-iface0': textwrap.dedent("""\ + """ + ), + "expected_sysconfig_opensuse": { + "ifcfg-iface0": textwrap.dedent( + """\ BOOTPROTO=dhcp4 ETHTOOL_OPTS="wol g" STARTMODE=auto - """), + """ + ), }, - 
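The two wakeonlan cases differ only in a boolean that each renderer spells its own way: "ethernet-wol g" for ENI, ETHTOOL_OPTS="wol g" for sysconfig, "wakeonlan: true" for netplan. A quick PyYAML check against the v2 fixture shape:

import yaml

v2 = yaml.safe_load(
    "version: 2\nethernets:\n  iface0:\n    dhcp4: true\n    wakeonlan: true\n"
)
assert v2["ethernets"]["iface0"]["wakeonlan"] is True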
'expected_sysconfig_rhel': { - 'ifcfg-iface0': textwrap.dedent("""\ + "expected_sysconfig_rhel": { + "ifcfg-iface0": textwrap.dedent( + """\ BOOTPROTO=dhcp DEVICE=iface0 ETHTOOL_OPTS="wol g" @@ -1515,18 +1780,21 @@ NETWORK_CONFIGS = { ONBOOT=yes TYPE=Ethernet USERCTL=no - """), + """ + ), }, - 'yaml_v2': textwrap.dedent("""\ + "yaml_v2": textwrap.dedent( + """\ version: 2 ethernets: iface0: dhcp4: true wakeonlan: true - """).rstrip(' '), + """ + ).rstrip(" "), }, - 'all': { - 'expected_eni': ("""\ + "all": { + "expected_eni": """\ auto lo iface lo inet loopback dns-nameservers 8.8.8.8 4.4.4.4 8.8.4.4 @@ -1616,8 +1884,9 @@ iface eth0.101 inet static post-up route add -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true -"""), - 'expected_netplan': textwrap.dedent(""" +""", + "expected_netplan": textwrap.dedent( + """ network: version: 2 ethernets: @@ -1713,25 +1982,31 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true - barley.maas - sacchromyces.maas - brettanomyces.maas - """).rstrip(' '), - 'expected_sysconfig_opensuse': { - 'ifcfg-bond0': textwrap.dedent("""\ + """ + ).rstrip(" "), + "expected_sysconfig_opensuse": { + "ifcfg-bond0": textwrap.dedent( + """\ BONDING_MASTER=yes BONDING_MODULE_OPTS="mode=active-backup """ - """xmit_hash_policy=layer3+4 """ - """miimon=100" + """xmit_hash_policy=layer3+4 """ + """miimon=100" BONDING_SLAVE_0=eth1 BONDING_SLAVE_1=eth2 BOOTPROTO=dhcp6 DHCLIENT6_MODE=managed LLADDR=aa:bb:cc:dd:ee:ff - STARTMODE=auto"""), - 'ifcfg-bond0.200': textwrap.dedent("""\ + STARTMODE=auto""" + ), + "ifcfg-bond0.200": textwrap.dedent( + """\ BOOTPROTO=dhcp4 ETHERDEVICE=bond0 STARTMODE=auto - VLAN_ID=200"""), - 'ifcfg-br0': textwrap.dedent("""\ + VLAN_ID=200""" + ), + "ifcfg-br0": textwrap.dedent( + """\ BRIDGE_AGEINGTIME=250 BOOTPROTO=static IPADDR=192.168.14.2 @@ -1741,12 +2016,16 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true BRIDGE_PRIORITY=22 BRIDGE_PORTS='eth3 eth4' STARTMODE=auto - BRIDGE_STP=off"""), - 'ifcfg-eth0': textwrap.dedent("""\ + BRIDGE_STP=off""" + ), + "ifcfg-eth0": textwrap.dedent( + """\ BOOTPROTO=static LLADDR=c0:d6:9f:2c:e8:80 - STARTMODE=auto"""), - 'ifcfg-eth0.101': textwrap.dedent("""\ + STARTMODE=auto""" + ), + "ifcfg-eth0.101": textwrap.dedent( + """\ BOOTPROTO=static IPADDR=192.168.0.2 IPADDR1=192.168.2.10 @@ -1755,44 +2034,58 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true NETMASK1=255.255.255.0 ETHERDEVICE=eth0 STARTMODE=auto - VLAN_ID=101"""), - 'ifcfg-eth1': textwrap.dedent("""\ + VLAN_ID=101""" + ), + "ifcfg-eth1": textwrap.dedent( + """\ BOOTPROTO=none LLADDR=aa:d6:9f:2c:e8:80 - STARTMODE=hotplug"""), - 'ifcfg-eth2': textwrap.dedent("""\ + STARTMODE=hotplug""" + ), + "ifcfg-eth2": textwrap.dedent( + """\ BOOTPROTO=none LLADDR=c0:bb:9f:2c:e8:80 - STARTMODE=hotplug"""), - 'ifcfg-eth3': textwrap.dedent("""\ + STARTMODE=hotplug""" + ), + "ifcfg-eth3": textwrap.dedent( + """\ BOOTPROTO=static BRIDGE=yes LLADDR=66:bb:9f:2c:e8:80 - STARTMODE=auto"""), - 'ifcfg-eth4': textwrap.dedent("""\ + STARTMODE=auto""" + ), + "ifcfg-eth4": textwrap.dedent( + """\ BOOTPROTO=static BRIDGE=yes LLADDR=98:bb:9f:2c:e8:80 - STARTMODE=auto"""), - 'ifcfg-eth5': textwrap.dedent("""\ + STARTMODE=auto""" + ), + "ifcfg-eth5": textwrap.dedent( + """\ BOOTPROTO=dhcp LLADDR=98:bb:9f:2c:e8:8a - STARTMODE=manual"""), - 'ifcfg-ib0': textwrap.dedent("""\ + STARTMODE=manual""" + ), + "ifcfg-ib0": textwrap.dedent( + """\ BOOTPROTO=static 
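The BONDING_*OPTS fixtures lean on implicit concatenation of adjacent string literals, which Black re-indents but never merges; the pieces still join with no separator inserted between them:

opts = (
    'BONDING_MODULE_OPTS="mode=active-backup '
    "xmit_hash_policy=layer3+4 "
    'miimon=100"'
)
assert opts == (
    'BONDING_MODULE_OPTS='
    '"mode=active-backup xmit_hash_policy=layer3+4 miimon=100"'
)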
LLADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 IPADDR=192.168.200.7 MTU=9000 NETMASK=255.255.255.0 STARTMODE=auto - TYPE=InfiniBand"""), + TYPE=InfiniBand""" + ), }, - 'expected_sysconfig_rhel': { - 'ifcfg-bond0': textwrap.dedent("""\ + "expected_sysconfig_rhel": { + "ifcfg-bond0": textwrap.dedent( + """\ BONDING_MASTER=yes BONDING_OPTS="mode=active-backup """ - """xmit_hash_policy=layer3+4 """ - """miimon=100" + """xmit_hash_policy=layer3+4 """ + """miimon=100" BONDING_SLAVE0=eth1 BONDING_SLAVE1=eth2 BOOTPROTO=none @@ -1803,8 +2096,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes TYPE=Bond - USERCTL=no"""), - 'ifcfg-bond0.200': textwrap.dedent("""\ + USERCTL=no""" + ), + "ifcfg-bond0.200": textwrap.dedent( + """\ BOOTPROTO=dhcp DEVICE=bond0.200 DHCLIENT_SET_DEFAULT_ROUTE=no @@ -1812,8 +2107,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true ONBOOT=yes PHYSDEV=bond0 USERCTL=no - VLAN=yes"""), - 'ifcfg-br0': textwrap.dedent("""\ + VLAN=yes""" + ), + "ifcfg-br0": textwrap.dedent( + """\ AGEING=250 BOOTPROTO=none DEFROUTE=yes @@ -1831,16 +2128,20 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true PRIO=22 STP=no TYPE=Bridge - USERCTL=no"""), - 'ifcfg-eth0': textwrap.dedent("""\ + USERCTL=no""" + ), + "ifcfg-eth0": textwrap.dedent( + """\ BOOTPROTO=none DEVICE=eth0 HWADDR=c0:d6:9f:2c:e8:80 NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet - USERCTL=no"""), - 'ifcfg-eth0.101': textwrap.dedent("""\ + USERCTL=no""" + ), + "ifcfg-eth0.101": textwrap.dedent( + """\ BOOTPROTO=none DEFROUTE=yes DEVICE=eth0.101 @@ -1857,8 +2158,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true ONBOOT=yes PHYSDEV=eth0 USERCTL=no - VLAN=yes"""), - 'ifcfg-eth1': textwrap.dedent("""\ + VLAN=yes""" + ), + "ifcfg-eth1": textwrap.dedent( + """\ BOOTPROTO=none DEVICE=eth1 HWADDR=aa:d6:9f:2c:e8:80 @@ -1867,8 +2170,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true ONBOOT=yes SLAVE=yes TYPE=Ethernet - USERCTL=no"""), - 'ifcfg-eth2': textwrap.dedent("""\ + USERCTL=no""" + ), + "ifcfg-eth2": textwrap.dedent( + """\ BOOTPROTO=none DEVICE=eth2 HWADDR=c0:bb:9f:2c:e8:80 @@ -1877,8 +2182,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true ONBOOT=yes SLAVE=yes TYPE=Ethernet - USERCTL=no"""), - 'ifcfg-eth3': textwrap.dedent("""\ + USERCTL=no""" + ), + "ifcfg-eth3": textwrap.dedent( + """\ BOOTPROTO=none BRIDGE=br0 DEVICE=eth3 @@ -1886,8 +2193,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet - USERCTL=no"""), - 'ifcfg-eth4': textwrap.dedent("""\ + USERCTL=no""" + ), + "ifcfg-eth4": textwrap.dedent( + """\ BOOTPROTO=none BRIDGE=br0 DEVICE=eth4 @@ -1895,8 +2204,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet - USERCTL=no"""), - 'ifcfg-eth5': textwrap.dedent("""\ + USERCTL=no""" + ), + "ifcfg-eth5": textwrap.dedent( + """\ BOOTPROTO=dhcp DEVICE=eth5 DHCLIENT_SET_DEFAULT_ROUTE=no @@ -1904,8 +2215,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=no TYPE=Ethernet - USERCTL=no"""), - 'ifcfg-ib0': textwrap.dedent("""\ + USERCTL=no""" + ), + "ifcfg-ib0": textwrap.dedent( + """\ BOOTPROTO=none DEVICE=ib0 HWADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 @@ -1915,9 +2228,11 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes TYPE=InfiniBand - USERCTL=no"""), + 
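The ib0 entries carry a 20-octet hardware address, which is what earns them TYPE=InfiniBand rather than TYPE=Ethernet in these expected files; a one-line check:

IB_MAC = "a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1"
assert len(IB_MAC.split(":")) == 20  # Ethernet hardware addresses have 6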
USERCTL=no""" + ), }, - 'yaml': textwrap.dedent(""" + "yaml": textwrap.dedent( + """ version: 1 config: # Physical interfaces. @@ -2060,10 +2375,12 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true destination: 10.0.0.0/8 gateway: 11.0.0.1 metric: 3 - """).lstrip(), + """ + ).lstrip(), }, - 'bond': { - 'yaml': textwrap.dedent(""" + "bond": { + "yaml": textwrap.dedent( + """ version: 1 config: - type: physical @@ -2109,8 +2426,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true network: 3001:67c:1 netmask: ffff:ffff:0 metric: 10000 - """), - 'expected_netplan': textwrap.dedent(""" + """ + ), + "expected_netplan": textwrap.dedent( + """ network: version: 2 ethernets: @@ -2152,8 +2471,10 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true - metric: 10000 to: 3001:67c:1/32 via: 3001:67c:1562:1 - """), - 'expected_eni': textwrap.dedent("""\ + """ + ), + "expected_eni": textwrap.dedent( + """\ auto lo iface lo inet loopback @@ -2215,8 +2536,10 @@ iface bond0 inet6 static || true pre-down route del -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \ || true - """), - 'yaml-v2': textwrap.dedent(""" + """ + ), + "yaml-v2": textwrap.dedent( + """ version: 2 ethernets: eth0: @@ -2256,8 +2579,10 @@ iface bond0 inet6 static - metric: 10000 to: 3001:67c:1562:8007::1/64 via: 3001:67c:1562:8007::aac:40b2 - """), - 'expected_netplan-v2': textwrap.dedent(""" + """ + ), + "expected_netplan-v2": textwrap.dedent( + """ network: bonds: bond0: @@ -2298,17 +2623,18 @@ iface bond0 inet6 static macaddress: aa:bb:cc:dd:e8:01 set-name: vf0 version: 2 - """), - - 'expected_sysconfig_opensuse': { - 'ifcfg-bond0': textwrap.dedent("""\ + """ + ), + "expected_sysconfig_opensuse": { + "ifcfg-bond0": textwrap.dedent( + """\ BONDING_MASTER=yes BONDING_MODULE_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ - """miimon=100 num_grat_arp=5 """ - """downdelay=10 updelay=20 """ - """fail_over_mac=active """ - """primary=bond0s0 """ - """primary_reselect=always" + """miimon=100 num_grat_arp=5 """ + """downdelay=10 updelay=20 """ + """fail_over_mac=active """ + """primary=bond0s0 """ + """primary_reselect=always" BONDING_SLAVE_0=bond0s0 BONDING_SLAVE_1=bond0s1 BOOTPROTO=static @@ -2320,27 +2646,33 @@ iface bond0 inet6 static NETMASK=255.255.255.0 NETMASK1=255.255.255.0 STARTMODE=auto - """), - 'ifcfg-bond0s0': textwrap.dedent("""\ + """ + ), + "ifcfg-bond0s0": textwrap.dedent( + """\ BOOTPROTO=none LLADDR=aa:bb:cc:dd:e8:00 STARTMODE=hotplug - """), - 'ifcfg-bond0s1': textwrap.dedent("""\ + """ + ), + "ifcfg-bond0s1": textwrap.dedent( + """\ BOOTPROTO=none LLADDR=aa:bb:cc:dd:e8:01 STARTMODE=hotplug - """), + """ + ), }, - 'expected_sysconfig_rhel': { - 'ifcfg-bond0': textwrap.dedent("""\ + "expected_sysconfig_rhel": { + "ifcfg-bond0": textwrap.dedent( + """\ BONDING_MASTER=yes BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ - """miimon=100 num_grat_arp=5 """ - """downdelay=10 updelay=20 """ - """fail_over_mac=active """ - """primary=bond0s0 """ - """primary_reselect=always" + """miimon=100 num_grat_arp=5 """ + """downdelay=10 updelay=20 """ + """fail_over_mac=active """ + """primary=bond0s0 """ + """primary_reselect=always" BONDING_SLAVE0=bond0s0 BONDING_SLAVE1=bond0s1 BOOTPROTO=none @@ -2361,8 +2693,10 @@ iface bond0 inet6 static ONBOOT=yes TYPE=Bond USERCTL=no - """), - 'ifcfg-bond0s0': textwrap.dedent("""\ + """ + ), + "ifcfg-bond0s0": textwrap.dedent( + """\ BOOTPROTO=none DEVICE=bond0s0 HWADDR=aa:bb:cc:dd:e8:00 @@ -2372,19 +2706,25 @@ iface bond0 inet6 
static SLAVE=yes TYPE=Ethernet USERCTL=no - """), - 'route6-bond0': textwrap.dedent("""\ + """ + ), + "route6-bond0": textwrap.dedent( + """\ # Created by cloud-init on instance boot automatically, do not edit. # 2001:67c:1/ffff:ffff:0 via 2001:67c:1562:1 dev bond0 3001:67c:1/ffff:ffff:0 via 3001:67c:1562:1 metric 10000 dev bond0 - """), - 'route-bond0': textwrap.dedent("""\ + """ + ), + "route-bond0": textwrap.dedent( + """\ ADDRESS0=10.1.3.0 GATEWAY0=192.168.0.3 NETMASK0=255.255.255.0 - """), - 'ifcfg-bond0s1': textwrap.dedent("""\ + """ + ), + "ifcfg-bond0s1": textwrap.dedent( + """\ BOOTPROTO=none DEVICE=bond0s1 HWADDR=aa:bb:cc:dd:e8:01 @@ -2394,11 +2734,13 @@ iface bond0 inet6 static SLAVE=yes TYPE=Ethernet USERCTL=no - """), + """ + ), }, }, - 'vlan': { - 'yaml': textwrap.dedent(""" + "vlan": { + "yaml": textwrap.dedent( + """ version: 1 config: - type: physical @@ -2421,14 +2763,18 @@ iface bond0 inet6 static - gateway: 2001:1::1 netmask: '::' network: '::' - """), - 'expected_sysconfig_opensuse': { + """ + ), + "expected_sysconfig_opensuse": { # TODO RJS: unknown proper BOOTPROTO setting ask Marius - 'ifcfg-en0': textwrap.dedent("""\ + "ifcfg-en0": textwrap.dedent( + """\ BOOTPROTO=static LLADDR=aa:bb:cc:dd:e8:00 - STARTMODE=auto"""), - 'ifcfg-en0.99': textwrap.dedent("""\ + STARTMODE=auto""" + ), + "ifcfg-en0.99": textwrap.dedent( + """\ BOOTPROTO=static IPADDR=192.168.2.2 IPADDR1=192.168.1.2 @@ -2439,18 +2785,22 @@ iface bond0 inet6 static STARTMODE=auto ETHERDEVICE=en0 VLAN_ID=99 - """), + """ + ), }, - 'expected_sysconfig_rhel': { - 'ifcfg-en0': textwrap.dedent("""\ + "expected_sysconfig_rhel": { + "ifcfg-en0": textwrap.dedent( + """\ BOOTPROTO=none DEVICE=en0 HWADDR=aa:bb:cc:dd:e8:00 NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet - USERCTL=no"""), - 'ifcfg-en0.99': textwrap.dedent("""\ + USERCTL=no""" + ), + "ifcfg-en0.99": textwrap.dedent( + """\ BOOTPROTO=none DEFROUTE=yes DEVICE=en0.99 @@ -2469,11 +2819,13 @@ iface bond0 inet6 static ONBOOT=yes PHYSDEV=en0 USERCTL=no - VLAN=yes"""), + VLAN=yes""" + ), }, }, - 'bridge': { - 'yaml': textwrap.dedent(""" + "bridge": { + "yaml": textwrap.dedent( + """ version: 1 config: - type: physical @@ -2498,9 +2850,11 @@ iface bond0 inet6 static bridge_bridgeprio: 22 subnets: - type: static - address: 192.168.2.2/24"""), - 'expected_sysconfig_opensuse': { - 'ifcfg-br0': textwrap.dedent("""\ + address: 192.168.2.2/24""" + ), + "expected_sysconfig_opensuse": { + "ifcfg-br0": textwrap.dedent( + """\ BOOTPROTO=static IPADDR=192.168.2.2 NETMASK=255.255.255.0 @@ -2508,24 +2862,30 @@ iface bond0 inet6 static BRIDGE_STP=off BRIDGE_PRIORITY=22 BRIDGE_PORTS='eth0 eth1' - """), - 'ifcfg-eth0': textwrap.dedent("""\ + """ + ), + "ifcfg-eth0": textwrap.dedent( + """\ BOOTPROTO=static BRIDGE=yes LLADDR=52:54:00:12:34:00 IPADDR6=2001:1::100/96 STARTMODE=auto - """), - 'ifcfg-eth1': textwrap.dedent("""\ + """ + ), + "ifcfg-eth1": textwrap.dedent( + """\ BOOTPROTO=static BRIDGE=yes LLADDR=52:54:00:12:34:01 IPADDR6=2001:1::101/96 STARTMODE=auto - """), + """ + ), }, - 'expected_sysconfig_rhel': { - 'ifcfg-br0': textwrap.dedent("""\ + "expected_sysconfig_rhel": { + "ifcfg-br0": textwrap.dedent( + """\ BOOTPROTO=none DEVICE=br0 IPADDR=192.168.2.2 @@ -2536,8 +2896,10 @@ iface bond0 inet6 static STP=no TYPE=Bridge USERCTL=no - """), - 'ifcfg-eth0': textwrap.dedent("""\ + """ + ), + "ifcfg-eth0": textwrap.dedent( + """\ BOOTPROTO=none BRIDGE=br0 DEVICE=eth0 @@ -2550,8 +2912,10 @@ iface bond0 inet6 static ONBOOT=yes TYPE=Ethernet USERCTL=no - """), - 'ifcfg-eth1': 
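The route-bond0 fixture uses the initscripts numbered-key layout, one ADDRESSn/GATEWAYn/NETMASKn triple per static route. A hypothetical helper, not cloud-init source, restating that layout:

def rhel_route_entry(index, address, gateway, netmask):
    # One numbered triple per route, matching the route-bond0 file above.
    return "ADDRESS{i}={a}\nGATEWAY{i}={g}\nNETMASK{i}={n}\n".format(
        i=index, a=address, g=gateway, n=netmask
    )

assert rhel_route_entry(0, "10.1.3.0", "192.168.0.3", "255.255.255.0") == (
    "ADDRESS0=10.1.3.0\nGATEWAY0=192.168.0.3\nNETMASK0=255.255.255.0\n"
)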
textwrap.dedent("""\ + """ + ), + "ifcfg-eth1": textwrap.dedent( + """\ BOOTPROTO=none BRIDGE=br0 DEVICE=eth1 @@ -2564,11 +2928,13 @@ iface bond0 inet6 static ONBOOT=yes TYPE=Ethernet USERCTL=no - """), + """ + ), }, }, - 'manual': { - 'yaml': textwrap.dedent(""" + "manual": { + "yaml": textwrap.dedent( + """ version: 1 config: - type: physical @@ -2590,8 +2956,10 @@ iface bond0 inet6 static subnets: - type: manual control: manual - """), - 'expected_eni': textwrap.dedent("""\ + """ + ), + "expected_eni": textwrap.dedent( + """\ auto lo iface lo inet loopback @@ -2605,8 +2973,10 @@ iface bond0 inet6 static # control-manual eth2 iface eth2 inet manual - """), - 'expected_netplan': textwrap.dedent("""\ + """ + ), + "expected_netplan": textwrap.dedent( + """\ network: version: 2 @@ -2626,29 +2996,37 @@ iface bond0 inet6 static match: macaddress: 52:54:00:12:34:ff set-name: eth2 - """), - 'expected_sysconfig_opensuse': { - 'ifcfg-eth0': textwrap.dedent("""\ + """ + ), + "expected_sysconfig_opensuse": { + "ifcfg-eth0": textwrap.dedent( + """\ BOOTPROTO=static LLADDR=52:54:00:12:34:00 IPADDR=192.168.1.2 NETMASK=255.255.255.0 STARTMODE=manual - """), - 'ifcfg-eth1': textwrap.dedent("""\ + """ + ), + "ifcfg-eth1": textwrap.dedent( + """\ BOOTPROTO=static LLADDR=52:54:00:12:34:aa MTU=1480 STARTMODE=auto - """), - 'ifcfg-eth2': textwrap.dedent("""\ + """ + ), + "ifcfg-eth2": textwrap.dedent( + """\ BOOTPROTO=static LLADDR=52:54:00:12:34:ff STARTMODE=manual - """), + """ + ), }, - 'expected_sysconfig_rhel': { - 'ifcfg-eth0': textwrap.dedent("""\ + "expected_sysconfig_rhel": { + "ifcfg-eth0": textwrap.dedent( + """\ BOOTPROTO=none DEVICE=eth0 HWADDR=52:54:00:12:34:00 @@ -2658,8 +3036,10 @@ iface bond0 inet6 static ONBOOT=no TYPE=Ethernet USERCTL=no - """), - 'ifcfg-eth1': textwrap.dedent("""\ + """ + ), + "ifcfg-eth1": textwrap.dedent( + """\ BOOTPROTO=none DEVICE=eth1 HWADDR=52:54:00:12:34:aa @@ -2668,8 +3048,10 @@ iface bond0 inet6 static ONBOOT=yes TYPE=Ethernet USERCTL=no - """), - 'ifcfg-eth2': textwrap.dedent("""\ + """ + ), + "ifcfg-eth2": textwrap.dedent( + """\ BOOTPROTO=none DEVICE=eth2 HWADDR=52:54:00:12:34:ff @@ -2677,51 +3059,85 @@ iface bond0 inet6 static ONBOOT=no TYPE=Ethernet USERCTL=no - """), + """ + ), }, }, } CONFIG_V1_EXPLICIT_LOOPBACK = { - 'version': 1, - 'config': [{'name': 'eth0', 'type': 'physical', - 'subnets': [{'control': 'auto', 'type': 'dhcp'}]}, - {'name': 'lo', 'type': 'loopback', - 'subnets': [{'control': 'auto', 'type': 'loopback'}]}, - ]} + "version": 1, + "config": [ + { + "name": "eth0", + "type": "physical", + "subnets": [{"control": "auto", "type": "dhcp"}], + }, + { + "name": "lo", + "type": "loopback", + "subnets": [{"control": "auto", "type": "loopback"}], + }, + ], +} CONFIG_V1_SIMPLE_SUBNET = { - 'version': 1, - 'config': [{'mac_address': '52:54:00:12:34:00', - 'name': 'interface0', - 'subnets': [{'address': '10.0.2.15', - 'gateway': '10.0.2.2', - 'netmask': '255.255.255.0', - 'type': 'static'}], - 'type': 'physical'}]} + "version": 1, + "config": [ + { + "mac_address": "52:54:00:12:34:00", + "name": "interface0", + "subnets": [ + { + "address": "10.0.2.15", + "gateway": "10.0.2.2", + "netmask": "255.255.255.0", + "type": "static", + } + ], + "type": "physical", + } + ], +} CONFIG_V1_MULTI_IFACE = { - 'version': 1, - 'config': [{'type': 'physical', - 'mtu': 1500, - 'subnets': [{'type': 'static', - 'netmask': '255.255.240.0', - 'routes': [{'netmask': '0.0.0.0', - 'network': '0.0.0.0', - 'gateway': '51.68.80.1'}], - 'address': '51.68.89.122', - 'ipv4': 
True}], - 'mac_address': 'fa:16:3e:25:b4:59', - 'name': 'eth0'}, - {'type': 'physical', - 'mtu': 9000, - 'subnets': [{'type': 'dhcp4'}], - 'mac_address': 'fa:16:3e:b1:ca:29', 'name': 'eth1'}]} + "version": 1, + "config": [ + { + "type": "physical", + "mtu": 1500, + "subnets": [ + { + "type": "static", + "netmask": "255.255.240.0", + "routes": [ + { + "netmask": "0.0.0.0", + "network": "0.0.0.0", + "gateway": "51.68.80.1", + } + ], + "address": "51.68.89.122", + "ipv4": True, + } + ], + "mac_address": "fa:16:3e:25:b4:59", + "name": "eth0", + }, + { + "type": "physical", + "mtu": 9000, + "subnets": [{"type": "dhcp4"}], + "mac_address": "fa:16:3e:b1:ca:29", + "name": "eth1", + }, + ], +} DEFAULT_DEV_ATTRS = { - 'eth1000': { + "eth1000": { "bridge": False, "carrier": False, "dormant": False, @@ -2734,16 +3150,26 @@ DEFAULT_DEV_ATTRS = { } -def _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, - mock_sys_dev_path, dev_attrs=None): +def _setup_test( + tmp_dir, + mock_get_devicelist, + mock_read_sys_net, + mock_sys_dev_path, + dev_attrs=None, +): if not dev_attrs: dev_attrs = DEFAULT_DEV_ATTRS mock_get_devicelist.return_value = dev_attrs.keys() - def fake_read(devname, path, translate=None, - on_enoent=None, on_keyerror=None, - on_einval=None): + def fake_read( + devname, + path, + translate=None, + on_enoent=None, + on_keyerror=None, + on_einval=None, + ): return dev_attrs[devname][path] mock_read_sys_net.side_effect = fake_read @@ -2753,99 +3179,137 @@ def _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, for dev in dev_attrs: os.makedirs(os.path.join(tmp_dir, dev)) - with open(os.path.join(tmp_dir, dev, 'operstate'), 'w') as fh: - fh.write(dev_attrs[dev]['operstate']) + with open(os.path.join(tmp_dir, dev, "operstate"), "w") as fh: + fh.write(dev_attrs[dev]["operstate"]) os.makedirs(os.path.join(tmp_dir, dev, "device")) - for key in ['device/driver']: + for key in ["device/driver"]: if key in dev_attrs[dev] and dev_attrs[dev][key]: target = dev_attrs[dev][key] link = os.path.join(tmp_dir, dev, key) - print('symlink %s -> %s' % (link, target)) + print("symlink %s -> %s" % (link, target)) os.symlink(target, link) mock_sys_dev_path.side_effect = sys_dev_path class TestGenerateFallbackConfig(CiTestCase): - def setUp(self): super(TestGenerateFallbackConfig, self).setUp() self.add_patch( - "cloudinit.util.get_cmdline", "m_get_cmdline", - return_value="root=/dev/sda1") + "cloudinit.util.get_cmdline", + "m_get_cmdline", + return_value="root=/dev/sda1", + ) @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") - def test_device_driver_v2(self, mock_get_devicelist, mock_read_sys_net, - mock_sys_dev_path): + def test_device_driver_v2( + self, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path + ): """Network configuration for generate_fallback_config is version 2.""" devices = { - 'eth0': { - 'bridge': False, 'carrier': False, 'dormant': False, - 'operstate': 'down', 'address': '00:11:22:33:44:55', - 'device/driver': 'hv_netsvc', 'device/device': '0x3', - 'name_assign_type': '4'}, - 'eth1': { - 'bridge': False, 'carrier': False, 'dormant': False, - 'operstate': 'down', 'address': '00:11:22:33:44:55', - 'device/driver': 'mlx4_core', 'device/device': '0x7', - 'name_assign_type': '4'}, - + "eth0": { + "bridge": False, + "carrier": False, + "dormant": False, + "operstate": "down", + "address": "00:11:22:33:44:55", + "device/driver": "hv_netsvc", + "device/device": "0x3", + "name_assign_type": "4", + }, + 
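_setup_test wires the three net-layer mocks to closures over dev_attrs, so every fake sysfs read is a table lookup. The same side_effect pattern in miniature, self-contained:

from unittest import mock

dev_attrs = {"eth1000": {"address": "07-1c-c6-75-a4-be", "operstate": "down"}}
read_sys_net = mock.Mock()
# Like fake_read above: route every call through the dev_attrs table.
read_sys_net.side_effect = lambda devname, path, **kw: dev_attrs[devname][path]
assert read_sys_net("eth1000", "operstate") == "down"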
"eth1": { + "bridge": False, + "carrier": False, + "dormant": False, + "operstate": "down", + "address": "00:11:22:33:44:55", + "device/driver": "mlx4_core", + "device/device": "0x7", + "name_assign_type": "4", + }, } tmp_dir = self.tmp_dir() - _setup_test(tmp_dir, mock_get_devicelist, - mock_read_sys_net, mock_sys_dev_path, - dev_attrs=devices) + _setup_test( + tmp_dir, + mock_get_devicelist, + mock_read_sys_net, + mock_sys_dev_path, + dev_attrs=devices, + ) network_cfg = net.generate_fallback_config(config_driver=True) expected = { - 'ethernets': {'eth0': {'dhcp4': True, 'set-name': 'eth0', - 'match': {'macaddress': '00:11:22:33:44:55', - 'driver': 'hv_netsvc'}}}, - 'version': 2} + "ethernets": { + "eth0": { + "dhcp4": True, + "set-name": "eth0", + "match": { + "macaddress": "00:11:22:33:44:55", + "driver": "hv_netsvc", + }, + } + }, + "version": 2, + } self.assertEqual(expected, network_cfg) @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") - def test_device_driver(self, mock_get_devicelist, mock_read_sys_net, - mock_sys_dev_path): + def test_device_driver( + self, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path + ): devices = { - 'eth0': { - 'bridge': False, 'carrier': False, 'dormant': False, - 'operstate': 'down', 'address': '00:11:22:33:44:55', - 'device/driver': 'hv_netsvc', 'device/device': '0x3', - 'name_assign_type': '4'}, - 'eth1': { - 'bridge': False, 'carrier': False, 'dormant': False, - 'operstate': 'down', 'address': '00:11:22:33:44:55', - 'device/driver': 'mlx4_core', 'device/device': '0x7', - 'name_assign_type': '4'}, - + "eth0": { + "bridge": False, + "carrier": False, + "dormant": False, + "operstate": "down", + "address": "00:11:22:33:44:55", + "device/driver": "hv_netsvc", + "device/device": "0x3", + "name_assign_type": "4", + }, + "eth1": { + "bridge": False, + "carrier": False, + "dormant": False, + "operstate": "down", + "address": "00:11:22:33:44:55", + "device/driver": "mlx4_core", + "device/device": "0x7", + "name_assign_type": "4", + }, } tmp_dir = self.tmp_dir() - _setup_test(tmp_dir, mock_get_devicelist, - mock_read_sys_net, mock_sys_dev_path, - dev_attrs=devices) + _setup_test( + tmp_dir, + mock_get_devicelist, + mock_read_sys_net, + mock_sys_dev_path, + dev_attrs=devices, + ) network_cfg = net.generate_fallback_config(config_driver=True) - ns = network_state.parse_net_config_data(network_cfg, - skip_broken=False) + ns = network_state.parse_net_config_data( + network_cfg, skip_broken=False + ) render_dir = os.path.join(tmp_dir, "render") os.makedirs(render_dir) # don't set rulepath so eni writes them renderer = eni.Renderer( - {'eni_path': 'interfaces', 'netrules_path': 'netrules'}) + {"eni_path": "interfaces", "netrules_path": "netrules"} + ) renderer.render_network_state(ns, target=render_dir) - self.assertTrue(os.path.exists(os.path.join(render_dir, - 'interfaces'))) - with open(os.path.join(render_dir, 'interfaces')) as fh: + self.assertTrue(os.path.exists(os.path.join(render_dir, "interfaces"))) + with open(os.path.join(render_dir, "interfaces")) as fh: contents = fh.read() print(contents) expected = """ @@ -2857,8 +3321,8 @@ iface eth0 inet dhcp """ self.assertEqual(expected.lstrip(), contents.lstrip()) - self.assertTrue(os.path.exists(os.path.join(render_dir, 'netrules'))) - with open(os.path.join(render_dir, 'netrules')) as fh: + self.assertTrue(os.path.exists(os.path.join(render_dir, "netrules"))) + with open(os.path.join(render_dir, "netrules")) as fh: contents 
= fh.read() print(contents) expected_rule = [ @@ -2868,48 +3332,65 @@ iface eth0 inet dhcp 'ATTR{address}=="00:11:22:33:44:55"', 'NAME="eth0"', ] - self.assertEqual(", ".join(expected_rule) + '\n', contents.lstrip()) + self.assertEqual(", ".join(expected_rule) + "\n", contents.lstrip()) @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") - def test_device_driver_blacklist(self, mock_get_devicelist, - mock_read_sys_net, mock_sys_dev_path): + def test_device_driver_blacklist( + self, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path + ): devices = { - 'eth1': { - 'bridge': False, 'carrier': False, 'dormant': False, - 'operstate': 'down', 'address': '00:11:22:33:44:55', - 'device/driver': 'hv_netsvc', 'device/device': '0x3', - 'name_assign_type': '4'}, - 'eth0': { - 'bridge': False, 'carrier': False, 'dormant': False, - 'operstate': 'down', 'address': '00:11:22:33:44:55', - 'device/driver': 'mlx4_core', 'device/device': '0x7', - 'name_assign_type': '4'}, + "eth1": { + "bridge": False, + "carrier": False, + "dormant": False, + "operstate": "down", + "address": "00:11:22:33:44:55", + "device/driver": "hv_netsvc", + "device/device": "0x3", + "name_assign_type": "4", + }, + "eth0": { + "bridge": False, + "carrier": False, + "dormant": False, + "operstate": "down", + "address": "00:11:22:33:44:55", + "device/driver": "mlx4_core", + "device/device": "0x7", + "name_assign_type": "4", + }, } tmp_dir = self.tmp_dir() - _setup_test(tmp_dir, mock_get_devicelist, - mock_read_sys_net, mock_sys_dev_path, - dev_attrs=devices) + _setup_test( + tmp_dir, + mock_get_devicelist, + mock_read_sys_net, + mock_sys_dev_path, + dev_attrs=devices, + ) - blacklist = ['mlx4_core'] - network_cfg = net.generate_fallback_config(blacklist_drivers=blacklist, - config_driver=True) - ns = network_state.parse_net_config_data(network_cfg, - skip_broken=False) + blacklist = ["mlx4_core"] + network_cfg = net.generate_fallback_config( + blacklist_drivers=blacklist, config_driver=True + ) + ns = network_state.parse_net_config_data( + network_cfg, skip_broken=False + ) render_dir = os.path.join(tmp_dir, "render") os.makedirs(render_dir) # don't set rulepath so eni writes them renderer = eni.Renderer( - {'eni_path': 'interfaces', 'netrules_path': 'netrules'}) + {"eni_path": "interfaces", "netrules_path": "netrules"} + ) renderer.render_network_state(ns, target=render_dir) - self.assertTrue(os.path.exists(os.path.join(render_dir, - 'interfaces'))) - with open(os.path.join(render_dir, 'interfaces')) as fh: + self.assertTrue(os.path.exists(os.path.join(render_dir, "interfaces"))) + with open(os.path.join(render_dir, "interfaces")) as fh: contents = fh.read() print(contents) expected = """ @@ -2921,8 +3402,8 @@ iface eth1 inet dhcp """ self.assertEqual(expected.lstrip(), contents.lstrip()) - self.assertTrue(os.path.exists(os.path.join(render_dir, 'netrules'))) - with open(os.path.join(render_dir, 'netrules')) as fh: + self.assertTrue(os.path.exists(os.path.join(render_dir, "netrules"))) + with open(os.path.join(render_dir, "netrules")) as fh: contents = fh.read() print(contents) expected_rule = [ @@ -2932,35 +3413,54 @@ iface eth1 inet dhcp 'ATTR{address}=="00:11:22:33:44:55"', 'NAME="eth1"', ] - self.assertEqual(", ".join(expected_rule) + '\n', contents.lstrip()) + self.assertEqual(", ".join(expected_rule) + "\n", contents.lstrip()) @mock.patch("cloudinit.util.get_cmdline") @mock.patch("cloudinit.util.udevadm_settle") 
@mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") - def test_unstable_names(self, mock_get_devicelist, mock_read_sys_net, - mock_sys_dev_path, mock_settle, m_get_cmdline): + def test_unstable_names( + self, + mock_get_devicelist, + mock_read_sys_net, + mock_sys_dev_path, + mock_settle, + m_get_cmdline, + ): """verify that udevadm settle is called when we find unstable names""" devices = { - 'eth0': { - 'bridge': False, 'carrier': False, 'dormant': False, - 'operstate': 'down', 'address': '00:11:22:33:44:55', - 'device/driver': 'hv_netsvc', 'device/device': '0x3', - 'name_assign_type': False}, - 'ens4': { - 'bridge': False, 'carrier': False, 'dormant': False, - 'operstate': 'down', 'address': '00:11:22:33:44:55', - 'device/driver': 'mlx4_core', 'device/device': '0x7', - 'name_assign_type': '4'}, - + "eth0": { + "bridge": False, + "carrier": False, + "dormant": False, + "operstate": "down", + "address": "00:11:22:33:44:55", + "device/driver": "hv_netsvc", + "device/device": "0x3", + "name_assign_type": False, + }, + "ens4": { + "bridge": False, + "carrier": False, + "dormant": False, + "operstate": "down", + "address": "00:11:22:33:44:55", + "device/driver": "mlx4_core", + "device/device": "0x7", + "name_assign_type": "4", + }, } - m_get_cmdline.return_value = '' + m_get_cmdline.return_value = "" tmp_dir = self.tmp_dir() - _setup_test(tmp_dir, mock_get_devicelist, - mock_read_sys_net, mock_sys_dev_path, - dev_attrs=devices) + _setup_test( + tmp_dir, + mock_get_devicelist, + mock_read_sys_net, + mock_sys_dev_path, + dev_attrs=devices, + ) net.generate_fallback_config(config_driver=True) self.assertEqual(1, mock_settle.call_count) @@ -2969,52 +3469,73 @@ iface eth1 inet dhcp @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") - def test_unstable_names_disabled(self, mock_get_devicelist, - mock_read_sys_net, mock_sys_dev_path, - mock_settle, m_get_cmdline): + def test_unstable_names_disabled( + self, + mock_get_devicelist, + mock_read_sys_net, + mock_sys_dev_path, + mock_settle, + m_get_cmdline, + ): """verify udevadm settle not called when cmdline has net.ifnames=0""" devices = { - 'eth0': { - 'bridge': False, 'carrier': False, 'dormant': False, - 'operstate': 'down', 'address': '00:11:22:33:44:55', - 'device/driver': 'hv_netsvc', 'device/device': '0x3', - 'name_assign_type': False}, - 'ens4': { - 'bridge': False, 'carrier': False, 'dormant': False, - 'operstate': 'down', 'address': '00:11:22:33:44:55', - 'device/driver': 'mlx4_core', 'device/device': '0x7', - 'name_assign_type': '4'}, - + "eth0": { + "bridge": False, + "carrier": False, + "dormant": False, + "operstate": "down", + "address": "00:11:22:33:44:55", + "device/driver": "hv_netsvc", + "device/device": "0x3", + "name_assign_type": False, + }, + "ens4": { + "bridge": False, + "carrier": False, + "dormant": False, + "operstate": "down", + "address": "00:11:22:33:44:55", + "device/driver": "mlx4_core", + "device/device": "0x7", + "name_assign_type": "4", + }, } - m_get_cmdline.return_value = 'net.ifnames=0' + m_get_cmdline.return_value = "net.ifnames=0" tmp_dir = self.tmp_dir() - _setup_test(tmp_dir, mock_get_devicelist, - mock_read_sys_net, mock_sys_dev_path, - dev_attrs=devices) + _setup_test( + tmp_dir, + mock_get_devicelist, + mock_read_sys_net, + mock_sys_dev_path, + dev_attrs=devices, + ) net.generate_fallback_config(config_driver=True) self.assertEqual(0, 
mock_settle.call_count) @mock.patch( "cloudinit.net.is_openvswitch_internal_interface", - mock.Mock(return_value=False) + mock.Mock(return_value=False), ) class TestRhelSysConfigRendering(CiTestCase): with_logs = True nm_cfg_file = "/etc/NetworkManager/NetworkManager.conf" - scripts_dir = '/etc/sysconfig/network-scripts' - header = ('# Created by cloud-init on instance boot automatically, ' - 'do not edit.\n#\n') + scripts_dir = "/etc/sysconfig/network-scripts" + header = ( + "# Created by cloud-init on instance boot automatically, " + "do not edit.\n#\n" + ) - expected_name = 'expected_sysconfig_rhel' + expected_name = "expected_sysconfig_rhel" def _get_renderer(self): - distro_cls = distros.fetch('rhel') + distro_cls = distros.fetch("rhel") return sysconfig.Renderer( - config=distro_cls.renderer_configs.get('sysconfig')) + config=distro_cls.renderer_configs.get("sysconfig") + ) def _render_and_read(self, network_config=None, state=None, dir=None): if dir is None: @@ -3032,9 +3553,8 @@ class TestRhelSysConfigRendering(CiTestCase): return dir2dict(dir) def _compare_files_to_expected(self, expected, found): - def _try_load(f): - ''' Attempt to load shell content, otherwise return as-is ''' + """Attempt to load shell content, otherwise return as-is""" try: return util.load_shell_content(f) except ValueError: @@ -3045,12 +3565,15 @@ class TestRhelSysConfigRendering(CiTestCase): orig_maxdiff = self.maxDiff expected_d = dict( (os.path.join(self.scripts_dir, k), _try_load(v)) - for k, v in expected.items()) + for k, v in expected.items() + ) # only compare the files in scripts_dir scripts_found = dict( - (k, _try_load(v)) for k, v in found.items() - if k.startswith(self.scripts_dir)) + (k, _try_load(v)) + for k, v in found.items() + if k.startswith(self.scripts_dir) + ) try: self.maxDiff = None self.assertEqual(expected_d, scripts_found) @@ -3058,9 +3581,14 @@ class TestRhelSysConfigRendering(CiTestCase): self.maxDiff = orig_maxdiff def _assert_headers(self, found): - missing = [f for f in found - if (f.startswith(self.scripts_dir) and - not found[f].startswith(self.header))] + missing = [ + f + for f in found + if ( + f.startswith(self.scripts_dir) + and not found[f].startswith(self.header) + ) + ] if missing: raise AssertionError("Missing headers in: %s" % missing) @@ -3068,16 +3596,22 @@ class TestRhelSysConfigRendering(CiTestCase): @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") - def test_default_generation(self, mock_get_devicelist, - mock_read_sys_net, - mock_sys_dev_path, m_get_cmdline): + def test_default_generation( + self, + mock_get_devicelist, + mock_read_sys_net, + mock_sys_dev_path, + m_get_cmdline, + ): tmp_dir = self.tmp_dir() - _setup_test(tmp_dir, mock_get_devicelist, - mock_read_sys_net, mock_sys_dev_path) + _setup_test( + tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path + ) network_cfg = net.generate_fallback_config() - ns = network_state.parse_net_config_data(network_cfg, - skip_broken=False) + ns = network_state.parse_net_config_data( + network_cfg, skip_broken=False + ) render_dir = os.path.join(tmp_dir, "render") os.makedirs(render_dir) @@ -3085,7 +3619,7 @@ class TestRhelSysConfigRendering(CiTestCase): renderer = self._get_renderer() renderer.render_network_state(ns, target=render_dir) - render_file = 'etc/sysconfig/network-scripts/ifcfg-eth1000' + render_file = "etc/sysconfig/network-scripts/ifcfg-eth1000" with open(os.path.join(render_dir, render_file)) as fh: content = 
fh.read() expected_content = """ @@ -3105,35 +3639,44 @@ USERCTL=no """ValueError is raised when duplicate ipv4 gateways exist.""" net_json = { "services": [{"type": "dns", "address": "172.19.0.12"}], - "networks": [{ - "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4", - "type": "ipv4", "netmask": "255.255.252.0", - "link": "tap1a81968a-79", - "routes": [{ - "netmask": "0.0.0.0", - "network": "0.0.0.0", - "gateway": "172.19.3.254", - }, { - "netmask": "0.0.0.0", # A second default gateway - "network": "0.0.0.0", - "gateway": "172.20.3.254", - }], - "ip_address": "172.19.1.34", "id": "network0" - }], + "networks": [ + { + "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4", + "type": "ipv4", + "netmask": "255.255.252.0", + "link": "tap1a81968a-79", + "routes": [ + { + "netmask": "0.0.0.0", + "network": "0.0.0.0", + "gateway": "172.19.3.254", + }, + { + "netmask": "0.0.0.0", # A second default gateway + "network": "0.0.0.0", + "gateway": "172.20.3.254", + }, + ], + "ip_address": "172.19.1.34", + "id": "network0", + } + ], "links": [ { "ethernet_mac_address": "fa:16:3e:ed:9a:59", - "mtu": None, "type": "bridge", "id": - "tap1a81968a-79", - "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" + "mtu": None, + "type": "bridge", + "id": "tap1a81968a-79", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f", }, ], } - macs = {'fa:16:3e:ed:9a:59': 'eth0'} + macs = {"fa:16:3e:ed:9a:59": "eth0"} render_dir = self.tmp_dir() network_cfg = openstack.convert_net_json(net_json, known_macs=macs) - ns = network_state.parse_net_config_data(network_cfg, - skip_broken=False) + ns = network_state.parse_net_config_data( + network_cfg, skip_broken=False + ) renderer = self._get_renderer() with self.assertRaises(ValueError): renderer.render_network_state(ns, target=render_dir) @@ -3143,35 +3686,44 @@ USERCTL=no """ValueError is raised when duplicate ipv6 gateways exist.""" net_json = { "services": [{"type": "dns", "address": "172.19.0.12"}], - "networks": [{ - "network_id": "public-ipv6", - "type": "ipv6", "netmask": "", - "link": "tap1a81968a-79", - "routes": [{ - "gateway": "2001:DB8::1", - "netmask": "::", - "network": "::" - }, { - "gateway": "2001:DB9::1", - "netmask": "::", - "network": "::" - }], - "ip_address": "2001:DB8::10", "id": "network1" - }], + "networks": [ + { + "network_id": "public-ipv6", + "type": "ipv6", + "netmask": "", + "link": "tap1a81968a-79", + "routes": [ + { + "gateway": "2001:DB8::1", + "netmask": "::", + "network": "::", + }, + { + "gateway": "2001:DB9::1", + "netmask": "::", + "network": "::", + }, + ], + "ip_address": "2001:DB8::10", + "id": "network1", + } + ], "links": [ { "ethernet_mac_address": "fa:16:3e:ed:9a:59", - "mtu": None, "type": "bridge", "id": - "tap1a81968a-79", - "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" + "mtu": None, + "type": "bridge", + "id": "tap1a81968a-79", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f", }, ], } - macs = {'fa:16:3e:ed:9a:59': 'eth0'} + macs = {"fa:16:3e:ed:9a:59": "eth0"} render_dir = self.tmp_dir() network_cfg = openstack.convert_net_json(net_json, known_macs=macs) - ns = network_state.parse_net_config_data(network_cfg, - skip_broken=False) + ns = network_state.parse_net_config_data( + network_cfg, skip_broken=False + ) renderer = self._get_renderer() with self.assertRaises(ValueError): renderer.render_network_state(ns, target=render_dir) @@ -3180,19 +3732,22 @@ USERCTL=no def test_openstack_rendering_samples(self): for os_sample in OS_SAMPLES: render_dir = self.tmp_dir() - ex_input = os_sample['in_data'] - ex_mac_addrs = 
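The duplicate-gateway test above boils down to spotting two 0.0.0.0/0 routes on one interface; an editorial restatement of the condition the renderer rejects with ValueError:

def assert_single_ipv4_default(routes):
    defaults = [
        r for r in routes if r["network"] == r["netmask"] == "0.0.0.0"
    ]
    if len(defaults) > 1:
        raise ValueError("duplicate ipv4 default gateways")

assert_single_ipv4_default(
    [{"network": "0.0.0.0", "netmask": "0.0.0.0", "gateway": "172.19.3.254"}]
)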
+            ex_input = os_sample["in_data"]
+            ex_mac_addrs = os_sample["in_macs"]
             network_cfg = openstack.convert_net_json(
-                ex_input, known_macs=ex_mac_addrs)
-            ns = network_state.parse_net_config_data(network_cfg,
-                                                     skip_broken=False)
+                ex_input, known_macs=ex_mac_addrs
+            )
+            ns = network_state.parse_net_config_data(
+                network_cfg, skip_broken=False
+            )
             renderer = self._get_renderer()
             # render multiple times to simulate reboots
             renderer.render_network_state(ns, target=render_dir)
             renderer.render_network_state(ns, target=render_dir)
             renderer.render_network_state(ns, target=render_dir)
-            for fn, expected_content in os_sample.get('out_sysconfig_rhel',
-                                                      []):
+            for fn, expected_content in os_sample.get(
+                "out_sysconfig_rhel", []
+            ):
                 with open(os.path.join(render_dir, fn)) as fh:
                     self.assertEqual(expected_content, fh.read())

@@ -3203,8 +3758,8 @@ USERCTL=no
         renderer = self._get_renderer()
         renderer.render_network_state(ns, target=render_dir)
         found = dir2dict(render_dir)
-        nspath = '/etc/sysconfig/network-scripts/'
-        self.assertNotIn(nspath + 'ifcfg-lo', found.keys())
+        nspath = "/etc/sysconfig/network-scripts/"
+        self.assertNotIn(nspath + "ifcfg-lo", found.keys())
         expected = """\
 # Created by cloud-init on instance boot automatically, do not edit.
 #
@@ -3220,10 +3775,10 @@ ONBOOT=yes
 TYPE=Ethernet
 USERCTL=no
 """
-        self.assertEqual(expected, found[nspath + 'ifcfg-interface0'])
+        self.assertEqual(expected, found[nspath + "ifcfg-interface0"])
         # The configuration has no nameserver information, so make sure we
         # do not write the resolv.conf file
-        respath = '/etc/resolv.conf'
+        respath = "/etc/resolv.conf"
         self.assertNotIn(respath, found.keys())

     def test_network_config_v1_multi_iface_samples(self):
@@ -3233,8 +3788,8 @@ USERCTL=no
         renderer = self._get_renderer()
         renderer.render_network_state(ns, target=render_dir)
         found = dir2dict(render_dir)
-        nspath = '/etc/sysconfig/network-scripts/'
-        self.assertNotIn(nspath + 'ifcfg-lo', found.keys())
+        nspath = "/etc/sysconfig/network-scripts/"
+        self.assertNotIn(nspath + "ifcfg-lo", found.keys())
         expected_i1 = """\
 # Created by cloud-init on instance boot automatically, do not edit.
 #
@@ -3251,7 +3806,7 @@ ONBOOT=yes
 TYPE=Ethernet
 USERCTL=no
 """
-        self.assertEqual(expected_i1, found[nspath + 'ifcfg-eth0'])
+        self.assertEqual(expected_i1, found[nspath + "ifcfg-eth0"])
         expected_i2 = """\
 # Created by cloud-init on instance boot automatically, do not edit.
 #
@@ -3265,21 +3820,21 @@ ONBOOT=yes
 TYPE=Ethernet
 USERCTL=no
 """
-        self.assertEqual(expected_i2, found[nspath + 'ifcfg-eth1'])
+        self.assertEqual(expected_i2, found[nspath + "ifcfg-eth1"])

     def test_config_with_explicit_loopback(self):
         ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK)
         render_dir = self.tmp_path("render")
         os.makedirs(render_dir)
         # write an etc/resolv.conf and expect it to not be modified
-        resolvconf = os.path.join(render_dir, 'etc/resolv.conf')
+        resolvconf = os.path.join(render_dir, "etc/resolv.conf")
         resolvconf_content = "# Original Content"
         util.write_file(resolvconf, resolvconf_content)
         renderer = self._get_renderer()
         renderer.render_network_state(ns, target=render_dir)
         found = dir2dict(render_dir)
-        nspath = '/etc/sysconfig/network-scripts/'
-        self.assertNotIn(nspath + 'ifcfg-lo', found.keys())
+        nspath = "/etc/sysconfig/network-scripts/"
+        self.assertNotIn(nspath + "ifcfg-lo", found.keys())
         expected = """\
 # Created by cloud-init on instance boot automatically, do not edit.
 #
@@ -3290,171 +3845,188 @@ ONBOOT=yes
 TYPE=Ethernet
 USERCTL=no
 """
-        self.assertEqual(expected, found[nspath + 'ifcfg-eth0'])
+        self.assertEqual(expected, found[nspath + "ifcfg-eth0"])
         # a dhcp only config should not modify resolv.conf
-        self.assertEqual(resolvconf_content, found['/etc/resolv.conf'])
+        self.assertEqual(resolvconf_content, found["/etc/resolv.conf"])

     def test_bond_config(self):
-        entry = NETWORK_CONFIGS['bond']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        entry = NETWORK_CONFIGS["bond"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_vlan_config(self):
-        entry = NETWORK_CONFIGS['vlan']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        entry = NETWORK_CONFIGS["vlan"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_bridge_config(self):
-        entry = NETWORK_CONFIGS['bridge']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        entry = NETWORK_CONFIGS["bridge"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_manual_config(self):
-        entry = NETWORK_CONFIGS['manual']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        entry = NETWORK_CONFIGS["manual"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_all_config(self):
-        entry = NETWORK_CONFIGS['all']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        entry = NETWORK_CONFIGS["all"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)
         self.assertNotIn(
-            'WARNING: Network config: ignoring eth0.101 device-level mtu',
-            self.logs.getvalue())
+            "WARNING: Network config: ignoring eth0.101 device-level mtu",
+            self.logs.getvalue(),
+        )

     def test_small_config(self):
-        entry = NETWORK_CONFIGS['small']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        entry = NETWORK_CONFIGS["small"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_v4_and_v6_static_config(self):
-        entry = NETWORK_CONFIGS['v4_and_v6_static']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        entry = NETWORK_CONFIGS["v4_and_v6_static"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)
         expected_msg = (
-            'WARNING: Network config: ignoring iface0 device-level mtu:8999'
-            ' because ipv4 subnet-level mtu:9000 provided.')
+            "WARNING: Network config: ignoring iface0 device-level mtu:8999"
+            " because ipv4 subnet-level mtu:9000 provided."
+        )
         self.assertIn(expected_msg, self.logs.getvalue())

     def test_dhcpv6_only_config(self):
-        entry = NETWORK_CONFIGS['dhcpv6_only']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        entry = NETWORK_CONFIGS["dhcpv6_only"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_dhcpv6_accept_ra_config_v1(self):
-        entry = NETWORK_CONFIGS['dhcpv6_accept_ra']
-        found = self._render_and_read(network_config=yaml.load(
-            entry['yaml_v1']))
+        entry = NETWORK_CONFIGS["dhcpv6_accept_ra"]
+        found = self._render_and_read(
+            network_config=yaml.load(entry["yaml_v1"])
+        )
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_dhcpv6_accept_ra_config_v2(self):
-        entry = NETWORK_CONFIGS['dhcpv6_accept_ra']
-        found = self._render_and_read(network_config=yaml.load(
-            entry['yaml_v2']))
+        entry = NETWORK_CONFIGS["dhcpv6_accept_ra"]
+        found = self._render_and_read(
+            network_config=yaml.load(entry["yaml_v2"])
+        )
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_dhcpv6_reject_ra_config_v1(self):
-        entry = NETWORK_CONFIGS['dhcpv6_reject_ra']
-        found = self._render_and_read(network_config=yaml.load(
-            entry['yaml_v1']))
+        entry = NETWORK_CONFIGS["dhcpv6_reject_ra"]
+        found = self._render_and_read(
+            network_config=yaml.load(entry["yaml_v1"])
+        )
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_stattic6_from_json(self):
         net_json = {
             "services": [{"type": "dns", "address": "172.19.0.12"}],
-            "networks": [{
-                "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
-                "type": "ipv4", "netmask": "255.255.252.0",
-                "link": "tap1a81968a-79",
-                "routes": [{
-                    "netmask": "0.0.0.0",
-                    "network": "0.0.0.0",
-                    "gateway": "172.19.3.254",
-                }, {
-                    "netmask": "0.0.0.0",  # A second default gateway
-                    "network": "0.0.0.0",
-                    "gateway": "172.20.3.254",
-                }],
-                "ip_address": "172.19.1.34", "id": "network0"
-            }, {
-                "network_id": "mgmt",
-                "netmask": "ffff:ffff:ffff:ffff::",
-                "link": "interface1",
-                "mode": "link-local",
-                "routes": [],
-                "ip_address": "fe80::c096:67ff:fe5c:6e84",
-                "type": "static6",
-                "id": "network1",
-                "services": [],
-                "accept-ra": "false"
-            }],
+            "networks": [
+                {
+                    "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
+                    "type": "ipv4",
+                    "netmask": "255.255.252.0",
+                    "link": "tap1a81968a-79",
+                    "routes": [
+                        {
+                            "netmask": "0.0.0.0",
+                            "network": "0.0.0.0",
+                            "gateway": "172.19.3.254",
+                        },
+                        {
+                            "netmask": "0.0.0.0",  # A second default gateway
+                            "network": "0.0.0.0",
+                            "gateway": "172.20.3.254",
+                        },
+                    ],
+                    "ip_address": "172.19.1.34",
+                    "id": "network0",
+                },
+                {
+                    "network_id": "mgmt",
+                    "netmask": "ffff:ffff:ffff:ffff::",
+                    "link": "interface1",
+                    "mode": "link-local",
+                    "routes": [],
+                    "ip_address": "fe80::c096:67ff:fe5c:6e84",
+                    "type": "static6",
+                    "id": "network1",
+                    "services": [],
+                    "accept-ra": "false",
+                },
+            ],
             "links": [
                 {
                     "ethernet_mac_address": "fa:16:3e:ed:9a:59",
-                    "mtu": None, "type": "bridge", "id":
-                    "tap1a81968a-79",
-                    "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+                    "mtu": None,
+                    "type": "bridge",
+                    "id": "tap1a81968a-79",
+                    "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f",
                 },
             ],
         }
-        macs = {'fa:16:3e:ed:9a:59': 'eth0'}
+        macs = {"fa:16:3e:ed:9a:59": "eth0"}
         render_dir = self.tmp_dir()
         network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
-        ns = network_state.parse_net_config_data(network_cfg,
-                                                 skip_broken=False)
+        ns = network_state.parse_net_config_data(
+            network_cfg, skip_broken=False
+        )
         renderer = self._get_renderer()
         with self.assertRaises(ValueError):
             renderer.render_network_state(ns, target=render_dir)
         self.assertEqual([], os.listdir(render_dir))

     def test_static6_from_yaml(self):
-        entry = NETWORK_CONFIGS['static6']
-        found = self._render_and_read(network_config=yaml.load(
-            entry['yaml']))
+        entry = NETWORK_CONFIGS["static6"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_dhcpv6_reject_ra_config_v2(self):
-        entry = NETWORK_CONFIGS['dhcpv6_reject_ra']
-        found = self._render_and_read(network_config=yaml.load(
-            entry['yaml_v2']))
+        entry = NETWORK_CONFIGS["dhcpv6_reject_ra"]
+        found = self._render_and_read(
+            network_config=yaml.load(entry["yaml_v2"])
+        )
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_dhcpv6_stateless_config(self):
-        entry = NETWORK_CONFIGS['dhcpv6_stateless']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        entry = NETWORK_CONFIGS["dhcpv6_stateless"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_dhcpv6_stateful_config(self):
-        entry = NETWORK_CONFIGS['dhcpv6_stateful']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        entry = NETWORK_CONFIGS["dhcpv6_stateful"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_wakeonlan_disabled_config_v2(self):
-        entry = NETWORK_CONFIGS['wakeonlan_disabled']
-        found = self._render_and_read(network_config=yaml.load(
-            entry['yaml_v2']))
+        entry = NETWORK_CONFIGS["wakeonlan_disabled"]
+        found = self._render_and_read(
+            network_config=yaml.load(entry["yaml_v2"])
+        )
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_wakeonlan_enabled_config_v2(self):
-        entry = NETWORK_CONFIGS['wakeonlan_enabled']
-        found = self._render_and_read(network_config=yaml.load(
-            entry['yaml_v2']))
+        entry = NETWORK_CONFIGS["wakeonlan_enabled"]
+        found = self._render_and_read(
+            network_config=yaml.load(entry["yaml_v2"])
+        )
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

@@ -3465,20 +4037,21 @@ USERCTL=no
         util.ensure_dir(os.path.dirname(nm_cfg))

         # write a template nm.conf, note plugins is a list here
-        with open(nm_cfg, 'w') as fh:
-            fh.write('# test_check_ifcfg_rh\n[main]\nplugins=foo,bar\n')
+        with open(nm_cfg, "w") as fh:
+            fh.write("# test_check_ifcfg_rh\n[main]\nplugins=foo,bar\n")
         self.assertTrue(os.path.exists(nm_cfg))

         # render and read
-        entry = NETWORK_CONFIGS['small']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']),
-                                      dir=render_dir)
+        entry = NETWORK_CONFIGS["small"]
+        found = self._render_and_read(
+            network_config=yaml.load(entry["yaml"]), dir=render_dir
+        )
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

         # check ifcfg-rh is in the 'plugins' list
         config = sysconfig.ConfigObj(nm_cfg)
-        self.assertIn('ifcfg-rh', config['main']['plugins'])
+        self.assertIn("ifcfg-rh", config["main"]["plugins"])

     def test_check_ifcfg_rh_plugins_string(self):
"""ifcfg-rh plugin is append when plugins is a string.""" @@ -3488,22 +4061,23 @@ USERCTL=no util.ensure_dir(os.path.dirname(nm_cfg)) # write a template nm.conf, note plugins is a value here - util.write_file(nm_cfg, '# test_check_ifcfg_rh\n[main]\nplugins=foo\n') + util.write_file(nm_cfg, "# test_check_ifcfg_rh\n[main]\nplugins=foo\n") # render and read - entry = NETWORK_CONFIGS['small'] - found = self._render_and_read(network_config=yaml.load(entry['yaml']), - dir=render_dir) + entry = NETWORK_CONFIGS["small"] + found = self._render_and_read( + network_config=yaml.load(entry["yaml"]), dir=render_dir + ) self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) # check raw content has plugin nm_file_content = util.load_file(nm_cfg) - self.assertIn('ifcfg-rh', nm_file_content) + self.assertIn("ifcfg-rh", nm_file_content) # check ifcfg-rh is in the 'plugins' list config = sysconfig.ConfigObj(nm_cfg) - self.assertIn('ifcfg-rh', config['main']['plugins']) + self.assertIn("ifcfg-rh", config["main"]["plugins"]) def test_check_ifcfg_rh_plugins_no_plugins(self): """enable_ifcfg_plugin creates plugins value if missing.""" @@ -3513,28 +4087,32 @@ USERCTL=no util.ensure_dir(os.path.dirname(nm_cfg)) # write a template nm.conf, note plugins is missing - util.write_file(nm_cfg, '# test_check_ifcfg_rh\n[main]\n') + util.write_file(nm_cfg, "# test_check_ifcfg_rh\n[main]\n") self.assertTrue(os.path.exists(nm_cfg)) # render and read - entry = NETWORK_CONFIGS['small'] - found = self._render_and_read(network_config=yaml.load(entry['yaml']), - dir=render_dir) + entry = NETWORK_CONFIGS["small"] + found = self._render_and_read( + network_config=yaml.load(entry["yaml"]), dir=render_dir + ) self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) # check ifcfg-rh is in the 'plugins' list config = sysconfig.ConfigObj(nm_cfg) - self.assertIn('ifcfg-rh', config['main']['plugins']) + self.assertIn("ifcfg-rh", config["main"]["plugins"]) def test_netplan_dhcp_false_disable_dhcp_in_state(self): """netplan config with dhcp[46]: False should not add dhcp in state""" net_config = yaml.load(NETPLAN_DHCP_FALSE) - ns = network_state.parse_net_config_data(net_config, - skip_broken=False) + ns = network_state.parse_net_config_data(net_config, skip_broken=False) - dhcp_found = [snet for iface in ns.iter_interfaces() - for snet in iface['subnets'] if 'dhcp' in snet['type']] + dhcp_found = [ + snet + for iface in ns.iter_interfaces() + for snet in iface["subnets"] + if "dhcp" in snet["type"] + ] self.assertEqual([], dhcp_found) @@ -3542,9 +4120,10 @@ USERCTL=no """netplan cfg with dhcp[46]: False should not have bootproto=dhcp""" entry = { - 'yaml': NETPLAN_DHCP_FALSE, - 'expected_sysconfig': { - 'ifcfg-ens3': textwrap.dedent("""\ + "yaml": NETPLAN_DHCP_FALSE, + "expected_sysconfig": { + "ifcfg-ens3": textwrap.dedent( + """\ BOOTPROTO=none DEFROUTE=yes DEVICE=ens3 @@ -3564,33 +4143,42 @@ USERCTL=no ONBOOT=yes TYPE=Ethernet USERCTL=no - """), - } + """ + ), + }, } - found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry['expected_sysconfig'], found) + found = self._render_and_read(network_config=yaml.load(entry["yaml"])) + self._compare_files_to_expected(entry["expected_sysconfig"], found) self._assert_headers(found) def test_from_v2_vlan_mtu(self): """verify mtu gets rendered on bond when source is netplan.""" v2data = { - 'version': 2, - 'ethernets': {'eno1': {}}, - 'vlans': { - 'eno1.1000': { - 
-                    'addresses': ["192.6.1.9/24"],
-                    'id': 1000, 'link': 'eno1', 'mtu': 1495}}}
+            "version": 2,
+            "ethernets": {"eno1": {}},
+            "vlans": {
+                "eno1.1000": {
+                    "addresses": ["192.6.1.9/24"],
+                    "id": 1000,
+                    "link": "eno1",
+                    "mtu": 1495,
+                }
+            },
+        }
         expected = {
-            'ifcfg-eno1': textwrap.dedent("""\
+            "ifcfg-eno1": textwrap.dedent(
+                """\
                 BOOTPROTO=none
                 DEVICE=eno1
                 NM_CONTROLLED=no
                 ONBOOT=yes
                 TYPE=Ethernet
                 USERCTL=no
-                """),
-            'ifcfg-eno1.1000': textwrap.dedent("""\
+                """
+            ),
+            "ifcfg-eno1.1000": textwrap.dedent(
+                """\
                 BOOTPROTO=none
                 DEVICE=eno1.1000
                 IPADDR=192.6.1.9
@@ -3601,23 +4189,29 @@ USERCTL=no
                 PHYSDEV=eno1
                 USERCTL=no
                 VLAN=yes
-                """)
+                """
+            ),
         }
         self._compare_files_to_expected(
-            expected, self._render_and_read(network_config=v2data))
+            expected, self._render_and_read(network_config=v2data)
+        )

     def test_from_v2_bond_mtu(self):
         """verify mtu gets rendered on bond when source is netplan."""
         v2data = {
-            'version': 2,
-            'bonds': {
-                'bond0': {'addresses': ['10.101.8.65/26'],
-                          'interfaces': ['enp0s0', 'enp0s1'],
-                          'mtu': 1334,
-                          'parameters': {}}}
+            "version": 2,
+            "bonds": {
+                "bond0": {
+                    "addresses": ["10.101.8.65/26"],
+                    "interfaces": ["enp0s0", "enp0s1"],
+                    "mtu": 1334,
+                    "parameters": {},
+                }
+            },
         }
         expected = {
-            'ifcfg-bond0': textwrap.dedent("""\
+            "ifcfg-bond0": textwrap.dedent(
+                """\
                 BONDING_MASTER=yes
                 BONDING_SLAVE0=enp0s0
                 BONDING_SLAVE1=enp0s1
@@ -3630,8 +4224,10 @@ USERCTL=no
                 ONBOOT=yes
                 TYPE=Bond
                 USERCTL=no
-                """),
-            'ifcfg-enp0s0': textwrap.dedent("""\
+                """
+            ),
+            "ifcfg-enp0s0": textwrap.dedent(
+                """\
                 BONDING_MASTER=yes
                 BOOTPROTO=none
                 DEVICE=enp0s0
@@ -3641,8 +4237,10 @@ USERCTL=no
                 SLAVE=yes
                 TYPE=Bond
                 USERCTL=no
-                """),
-            'ifcfg-enp0s1': textwrap.dedent("""\
+                """
+            ),
+            "ifcfg-enp0s1": textwrap.dedent(
+                """\
                 BONDING_MASTER=yes
                 BOOTPROTO=none
                 DEVICE=enp0s1
@@ -3652,21 +4250,28 @@ USERCTL=no
                 SLAVE=yes
                 TYPE=Bond
                 USERCTL=no
-                """)
+                """
+            ),
         }
         self._compare_files_to_expected(
-            expected, self._render_and_read(network_config=v2data))
+            expected, self._render_and_read(network_config=v2data)
+        )

     def test_from_v2_route_metric(self):
         """verify route-metric gets rendered on nic when source is netplan."""
-        overrides = {'route-metric': 100}
+        overrides = {"route-metric": 100}
         v2base = {
-            'version': 2,
-            'ethernets': {
-                'eno1': {'dhcp4': True,
-                         'match': {'macaddress': '07-1c-c6-75-a4-be'}}}}
+            "version": 2,
+            "ethernets": {
+                "eno1": {
+                    "dhcp4": True,
+                    "match": {"macaddress": "07-1c-c6-75-a4-be"},
+                }
+            },
+        }
         expected = {
-            'ifcfg-eno1': textwrap.dedent("""\
+            "ifcfg-eno1": textwrap.dedent(
+                """\
                 BOOTPROTO=dhcp
                 DEVICE=eno1
                 HWADDR=07-1c-c6-75-a4-be
@@ -3675,36 +4280,42 @@ USERCTL=no
                 ONBOOT=yes
                 TYPE=Ethernet
                 USERCTL=no
-                """),
+                """
+            ),
         }
-        for dhcp_ver in ('dhcp4', 'dhcp6'):
+        for dhcp_ver in ("dhcp4", "dhcp6"):
             v2data = copy.deepcopy(v2base)
-            if dhcp_ver == 'dhcp6':
-                expected['ifcfg-eno1'] += "IPV6INIT=yes\nDHCPV6C=yes\n"
-            v2data['ethernets']['eno1'].update(
-                {dhcp_ver: True, '{0}-overrides'.format(dhcp_ver): overrides})
+            if dhcp_ver == "dhcp6":
+                expected["ifcfg-eno1"] += "IPV6INIT=yes\nDHCPV6C=yes\n"
+            v2data["ethernets"]["eno1"].update(
+                {dhcp_ver: True, "{0}-overrides".format(dhcp_ver): overrides}
+            )
            self._compare_files_to_expected(
-                expected, self._render_and_read(network_config=v2data))
+                expected, self._render_and_read(network_config=v2data)
+            )


 @mock.patch(
     "cloudinit.net.is_openvswitch_internal_interface",
-    mock.Mock(return_value=False)
+    mock.Mock(return_value=False),
 )
 class TestOpenSuseSysConfigRendering(CiTestCase):

     with_logs = True

-    scripts_dir = '/etc/sysconfig/network'
-    header = ('# Created by cloud-init on instance boot automatically, '
-              'do not edit.\n#\n')
+    scripts_dir = "/etc/sysconfig/network"
+    header = (
+        "# Created by cloud-init on instance boot automatically, "
+        "do not edit.\n#\n"
+    )

-    expected_name = 'expected_sysconfig_opensuse'
+    expected_name = "expected_sysconfig_opensuse"

     def _get_renderer(self):
-        distro_cls = distros.fetch('opensuse')
+        distro_cls = distros.fetch("opensuse")
         return sysconfig.Renderer(
-            config=distro_cls.renderer_configs.get('sysconfig'))
+            config=distro_cls.renderer_configs.get("sysconfig")
+        )

     def _render_and_read(self, network_config=None, state=None, dir=None):
         if dir is None:
@@ -3725,12 +4336,15 @@ class TestOpenSuseSysConfigRendering(CiTestCase):
         orig_maxdiff = self.maxDiff
         expected_d = dict(
             (os.path.join(self.scripts_dir, k), util.load_shell_content(v))
-            for k, v in expected.items())
+            for k, v in expected.items()
+        )

         # only compare the files in scripts_dir
         scripts_found = dict(
-            (k, util.load_shell_content(v)) for k, v in found.items()
-            if k.startswith(self.scripts_dir))
+            (k, util.load_shell_content(v))
+            for k, v in found.items()
+            if k.startswith(self.scripts_dir)
+        )
         try:
             self.maxDiff = None
             self.assertEqual(expected_d, scripts_found)
@@ -3738,9 +4352,14 @@ class TestOpenSuseSysConfigRendering(CiTestCase):
             self.maxDiff = orig_maxdiff

     def _assert_headers(self, found):
-        missing = [f for f in found
-                   if (f.startswith(self.scripts_dir) and
-                       not found[f].startswith(self.header))]
+        missing = [
+            f
+            for f in found
+            if (
+                f.startswith(self.scripts_dir)
+                and not found[f].startswith(self.header)
+            )
+        ]
         if missing:
             raise AssertionError("Missing headers in: %s" % missing)

@@ -3748,16 +4367,22 @@ class TestOpenSuseSysConfigRendering(CiTestCase):
     @mock.patch("cloudinit.net.sys_dev_path")
     @mock.patch("cloudinit.net.read_sys_net")
     @mock.patch("cloudinit.net.get_devicelist")
-    def test_default_generation(self, mock_get_devicelist,
-                                mock_read_sys_net,
-                                mock_sys_dev_path, m_get_cmdline):
+    def test_default_generation(
+        self,
+        mock_get_devicelist,
+        mock_read_sys_net,
+        mock_sys_dev_path,
+        m_get_cmdline,
+    ):
         tmp_dir = self.tmp_dir()
-        _setup_test(tmp_dir, mock_get_devicelist,
-                    mock_read_sys_net, mock_sys_dev_path)
+        _setup_test(
+            tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path
+        )

         network_cfg = net.generate_fallback_config()
-        ns = network_state.parse_net_config_data(network_cfg,
-                                                 skip_broken=False)
+        ns = network_state.parse_net_config_data(
+            network_cfg, skip_broken=False
+        )
         render_dir = os.path.join(tmp_dir, "render")
         os.makedirs(render_dir)

@@ -3765,7 +4390,7 @@ class TestOpenSuseSysConfigRendering(CiTestCase):
         renderer = self._get_renderer()
         renderer.render_network_state(ns, target=render_dir)

-        render_file = 'etc/sysconfig/network/ifcfg-eth1000'
+        render_file = "etc/sysconfig/network/ifcfg-eth1000"
         with open(os.path.join(render_dir, render_file)) as fh:
             content = fh.read()
         expected_content = """
@@ -3779,98 +4404,101 @@ STARTMODE=auto

 # TODO(rjschwei): re-enable test once route writing is implemented
 # for SUSE distros
-# def test_multiple_ipv4_default_gateways(self):
-#     """ValueError is raised when duplicate ipv4 gateways exist."""
-#     net_json = {
-#         "services": [{"type": "dns", "address": "172.19.0.12"}],
-#         "networks": [{
-#             "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
-#             "type": "ipv4", "netmask": "255.255.252.0",
-#             "link": "tap1a81968a-79",
-#             "routes": [{
-#                 "netmask": "0.0.0.0",
-#                 "network": "0.0.0.0",
-#                 "gateway": "172.19.3.254",
-#             }, {
-#                 "netmask": "0.0.0.0",  # A second default gateway
-#                 "network": "0.0.0.0",
-#                 "gateway": "172.20.3.254",
-#             }],
-#             "ip_address": "172.19.1.34", "id": "network0"
-#         }],
-#         "links": [
-#             {
-#                 "ethernet_mac_address": "fa:16:3e:ed:9a:59",
-#                 "mtu": None, "type": "bridge", "id":
-#                 "tap1a81968a-79",
-#                 "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
-#             },
-#         ],
-#     }
-#     macs = {'fa:16:3e:ed:9a:59': 'eth0'}
-#     render_dir = self.tmp_dir()
-#     network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
-#     ns = network_state.parse_net_config_data(network_cfg,
-#                                              skip_broken=False)
-#     renderer = self._get_renderer()
-#     with self.assertRaises(ValueError):
-#         renderer.render_network_state(ns, target=render_dir)
-#     self.assertEqual([], os.listdir(render_dir))
-#
-# def test_multiple_ipv6_default_gateways(self):
-#     """ValueError is raised when duplicate ipv6 gateways exist."""
-#     net_json = {
-#         "services": [{"type": "dns", "address": "172.19.0.12"}],
-#         "networks": [{
-#             "network_id": "public-ipv6",
-#             "type": "ipv6", "netmask": "",
-#             "link": "tap1a81968a-79",
-#             "routes": [{
-#                 "gateway": "2001:DB8::1",
-#                 "netmask": "::",
-#                 "network": "::"
-#             }, {
-#                 "gateway": "2001:DB9::1",
-#                 "netmask": "::",
-#                 "network": "::"
-#             }],
-#             "ip_address": "2001:DB8::10", "id": "network1"
-#         }],
-#         "links": [
-#             {
-#                 "ethernet_mac_address": "fa:16:3e:ed:9a:59",
-#                 "mtu": None, "type": "bridge", "id":
-#                 "tap1a81968a-79",
-#                 "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
-#             },
-#         ],
-#     }
-#     macs = {'fa:16:3e:ed:9a:59': 'eth0'}
-#     render_dir = self.tmp_dir()
-#     network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
-#     ns = network_state.parse_net_config_data(network_cfg,
-#                                              skip_broken=False)
-#     renderer = self._get_renderer()
-#     with self.assertRaises(ValueError):
-#         renderer.render_network_state(ns, target=render_dir)
-#     self.assertEqual([], os.listdir(render_dir))
+    # def test_multiple_ipv4_default_gateways(self):
+    #     """ValueError is raised when duplicate ipv4 gateways exist."""
+    #     net_json = {
+    #         "services": [{"type": "dns", "address": "172.19.0.12"}],
+    #         "networks": [{
+    #             "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
+    #             "type": "ipv4", "netmask": "255.255.252.0",
+    #             "link": "tap1a81968a-79",
+    #             "routes": [{
+    #                 "netmask": "0.0.0.0",
+    #                 "network": "0.0.0.0",
+    #                 "gateway": "172.19.3.254",
+    #             }, {
+    #                 "netmask": "0.0.0.0",  # A second default gateway
+    #                 "network": "0.0.0.0",
+    #                 "gateway": "172.20.3.254",
+    #             }],
+    #             "ip_address": "172.19.1.34", "id": "network0"
+    #         }],
+    #         "links": [
+    #             {
+    #                 "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+    #                 "mtu": None, "type": "bridge", "id":
+    #                 "tap1a81968a-79",
+    #                 "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+    #             },
+    #         ],
+    #     }
+    #     macs = {'fa:16:3e:ed:9a:59': 'eth0'}
+    #     render_dir = self.tmp_dir()
+    #     network_cfg = openstack.convert_net_json(net_json, known_macs=macs) # noqa: E501
+    #     ns = network_state.parse_net_config_data(network_cfg,
+    #                                              skip_broken=False)
+    #     renderer = self._get_renderer()
+    #     with self.assertRaises(ValueError):
+    #         renderer.render_network_state(ns, target=render_dir)
+    #     self.assertEqual([], os.listdir(render_dir))
+    #
+    # def test_multiple_ipv6_default_gateways(self):
+    #     """ValueError is raised when duplicate ipv6 gateways exist."""
+    #     net_json = {
+    #         "services": [{"type": "dns", "address": "172.19.0.12"}],
+    #         "networks": [{
+    #             "network_id": "public-ipv6",
+    #             "type": "ipv6", "netmask": "",
+    #             "link": "tap1a81968a-79",
+    #             "routes": [{
+    #                 "gateway": "2001:DB8::1",
+    #                 "netmask": "::",
"network": "::" + # }, { + # "gateway": "2001:DB9::1", + # "netmask": "::", + # "network": "::" + # }], + # "ip_address": "2001:DB8::10", "id": "network1" + # }], + # "links": [ + # { + # "ethernet_mac_address": "fa:16:3e:ed:9a:59", + # "mtu": None, "type": "bridge", "id": + # "tap1a81968a-79", + # "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" + # }, + # ], + # } + # macs = {'fa:16:3e:ed:9a:59': 'eth0'} + # render_dir = self.tmp_dir() + # network_cfg = openstack.convert_net_json(net_json, known_macs=macs) # noqa: E501 + # ns = network_state.parse_net_config_data(network_cfg, + # skip_broken=False) + # renderer = self._get_renderer() + # with self.assertRaises(ValueError): + # renderer.render_network_state(ns, target=render_dir) + # self.assertEqual([], os.listdir(render_dir)) def test_openstack_rendering_samples(self): for os_sample in OS_SAMPLES: render_dir = self.tmp_dir() - ex_input = os_sample['in_data'] - ex_mac_addrs = os_sample['in_macs'] + ex_input = os_sample["in_data"] + ex_mac_addrs = os_sample["in_macs"] network_cfg = openstack.convert_net_json( - ex_input, known_macs=ex_mac_addrs) - ns = network_state.parse_net_config_data(network_cfg, - skip_broken=False) + ex_input, known_macs=ex_mac_addrs + ) + ns = network_state.parse_net_config_data( + network_cfg, skip_broken=False + ) renderer = self._get_renderer() # render a multiple times to simulate reboots renderer.render_network_state(ns, target=render_dir) renderer.render_network_state(ns, target=render_dir) renderer.render_network_state(ns, target=render_dir) - for fn, expected_content in os_sample.get('out_sysconfig_opensuse', - []): + for fn, expected_content in os_sample.get( + "out_sysconfig_opensuse", [] + ): with open(os.path.join(render_dir, fn)) as fh: self.assertEqual(expected_content, fh.read()) @@ -3881,8 +4509,8 @@ STARTMODE=auto renderer = self._get_renderer() renderer.render_network_state(ns, target=render_dir) found = dir2dict(render_dir) - nspath = '/etc/sysconfig/network/' - self.assertNotIn(nspath + 'ifcfg-lo', found.keys()) + nspath = "/etc/sysconfig/network/" + self.assertNotIn(nspath + "ifcfg-lo", found.keys()) expected = """\ # Created by cloud-init on instance boot automatically, do not edit. # @@ -3892,10 +4520,10 @@ LLADDR=52:54:00:12:34:00 NETMASK=255.255.255.0 STARTMODE=auto """ - self.assertEqual(expected, found[nspath + 'ifcfg-interface0']) + self.assertEqual(expected, found[nspath + "ifcfg-interface0"]) # The configuration has no nameserver information make sure we # do not write the resolv.conf file - respath = '/etc/resolv.conf' + respath = "/etc/resolv.conf" self.assertNotIn(respath, found.keys()) def test_config_with_explicit_loopback(self): @@ -3903,33 +4531,33 @@ STARTMODE=auto render_dir = self.tmp_path("render") os.makedirs(render_dir) # write an etc/resolv.conf and expect it to not be modified - resolvconf = os.path.join(render_dir, 'etc/resolv.conf') + resolvconf = os.path.join(render_dir, "etc/resolv.conf") resolvconf_content = "# Original Content" util.write_file(resolvconf, resolvconf_content) renderer = self._get_renderer() renderer.render_network_state(ns, target=render_dir) found = dir2dict(render_dir) - nspath = '/etc/sysconfig/network/' - self.assertNotIn(nspath + 'ifcfg-lo', found.keys()) + nspath = "/etc/sysconfig/network/" + self.assertNotIn(nspath + "ifcfg-lo", found.keys()) expected = """\ # Created by cloud-init on instance boot automatically, do not edit. 
 #
 BOOTPROTO=dhcp
 STARTMODE=auto
 """
-        self.assertEqual(expected, found[nspath + 'ifcfg-eth0'])
+        self.assertEqual(expected, found[nspath + "ifcfg-eth0"])
         # a dhcp only config should not modify resolv.conf
-        self.assertEqual(resolvconf_content, found['/etc/resolv.conf'])
+        self.assertEqual(resolvconf_content, found["/etc/resolv.conf"])

     def test_bond_config(self):
-        expected_name = 'expected_sysconfig_opensuse'
-        entry = NETWORK_CONFIGS['bond']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        expected_name = "expected_sysconfig_opensuse"
+        entry = NETWORK_CONFIGS["bond"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         for fname, contents in entry[expected_name].items():
             print(fname)
             print(contents)
             print()
-        print('-- expected ^ | v rendered --')
+        print("-- expected ^ | v rendered --")
         for fname, contents in found.items():
             print(fname)
             print(contents)
@@ -3938,120 +4566,129 @@ STARTMODE=auto
         self._assert_headers(found)

     def test_vlan_config(self):
-        entry = NETWORK_CONFIGS['vlan']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        entry = NETWORK_CONFIGS["vlan"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_bridge_config(self):
-        entry = NETWORK_CONFIGS['bridge']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        entry = NETWORK_CONFIGS["bridge"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_manual_config(self):
-        entry = NETWORK_CONFIGS['manual']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        entry = NETWORK_CONFIGS["manual"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_all_config(self):
-        entry = NETWORK_CONFIGS['all']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        entry = NETWORK_CONFIGS["all"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)
         self.assertNotIn(
-            'WARNING: Network config: ignoring eth0.101 device-level mtu',
-            self.logs.getvalue())
+            "WARNING: Network config: ignoring eth0.101 device-level mtu",
+            self.logs.getvalue(),
+        )

     def test_small_config(self):
-        entry = NETWORK_CONFIGS['small']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        entry = NETWORK_CONFIGS["small"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_v4_and_v6_static_config(self):
-        entry = NETWORK_CONFIGS['v4_and_v6_static']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        entry = NETWORK_CONFIGS["v4_and_v6_static"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)
         expected_msg = (
-            'WARNING: Network config: ignoring iface0 device-level mtu:8999'
-            ' because ipv4 subnet-level mtu:9000 provided.')
+            "WARNING: Network config: ignoring iface0 device-level mtu:8999"
+            " because ipv4 subnet-level mtu:9000 provided."
+        )
         self.assertIn(expected_msg, self.logs.getvalue())

     def test_dhcpv6_only_config(self):
-        entry = NETWORK_CONFIGS['dhcpv6_only']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        entry = NETWORK_CONFIGS["dhcpv6_only"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_simple_render_ipv6_slaac(self):
-        entry = NETWORK_CONFIGS['ipv6_slaac']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        entry = NETWORK_CONFIGS["ipv6_slaac"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_dhcpv6_stateless_config(self):
-        entry = NETWORK_CONFIGS['dhcpv6_stateless']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        entry = NETWORK_CONFIGS["dhcpv6_stateless"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_wakeonlan_disabled_config_v2(self):
-        entry = NETWORK_CONFIGS['wakeonlan_disabled']
-        found = self._render_and_read(network_config=yaml.load(
-            entry['yaml_v2']))
+        entry = NETWORK_CONFIGS["wakeonlan_disabled"]
+        found = self._render_and_read(
+            network_config=yaml.load(entry["yaml_v2"])
+        )
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_wakeonlan_enabled_config_v2(self):
-        entry = NETWORK_CONFIGS['wakeonlan_enabled']
-        found = self._render_and_read(network_config=yaml.load(
-            entry['yaml_v2']))
+        entry = NETWORK_CONFIGS["wakeonlan_enabled"]
+        found = self._render_and_read(
+            network_config=yaml.load(entry["yaml_v2"])
+        )
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_render_v4_and_v6(self):
-        entry = NETWORK_CONFIGS['v4_and_v6']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        entry = NETWORK_CONFIGS["v4_and_v6"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)

     def test_render_v6_and_v4(self):
-        entry = NETWORK_CONFIGS['v6_and_v4']
-        found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        entry = NETWORK_CONFIGS["v6_and_v4"]
+        found = self._render_and_read(network_config=yaml.load(entry["yaml"]))
         self._compare_files_to_expected(entry[self.expected_name], found)
         self._assert_headers(found)


 class TestEniNetRendering(CiTestCase):
-
     @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot")
     @mock.patch("cloudinit.net.sys_dev_path")
     @mock.patch("cloudinit.net.read_sys_net")
     @mock.patch("cloudinit.net.get_devicelist")
-    def test_default_generation(self, mock_get_devicelist,
-                                mock_read_sys_net,
-                                mock_sys_dev_path, m_get_cmdline):
+    def test_default_generation(
+        self,
+        mock_get_devicelist,
+        mock_read_sys_net,
+        mock_sys_dev_path,
+        m_get_cmdline,
+    ):
         tmp_dir = self.tmp_dir()
-        _setup_test(tmp_dir, mock_get_devicelist,
-                    mock_read_sys_net, mock_sys_dev_path)
+        _setup_test(
+            tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path
+        )

         network_cfg = net.generate_fallback_config()
-        ns = network_state.parse_net_config_data(network_cfg,
-                                                 skip_broken=False)
+        ns = network_state.parse_net_config_data(
+            network_cfg, skip_broken=False
+        )

         render_dir = os.path.join(tmp_dir, "render")
"render") os.makedirs(render_dir) renderer = eni.Renderer( - {'eni_path': 'interfaces', 'netrules_path': None}) + {"eni_path": "interfaces", "netrules_path": None} + ) renderer.render_network_state(ns, target=render_dir) - self.assertTrue(os.path.exists(os.path.join(render_dir, - 'interfaces'))) - with open(os.path.join(render_dir, 'interfaces')) as fh: + self.assertTrue(os.path.exists(os.path.join(render_dir, "interfaces"))) + with open(os.path.join(render_dir, "interfaces")) as fh: contents = fh.read() expected = """ @@ -4076,62 +4713,74 @@ auto eth0 iface eth0 inet dhcp """ self.assertEqual( - expected, dir2dict(tmp_dir)['/etc/network/interfaces']) + expected, dir2dict(tmp_dir)["/etc/network/interfaces"] + ) def test_v2_route_metric_to_eni(self): """Network v2 route-metric overrides are preserved in eni output""" tmp_dir = self.tmp_dir() renderer = eni.Renderer() - expected_tmpl = textwrap.dedent("""\ + expected_tmpl = textwrap.dedent( + """\ auto lo iface lo inet loopback auto eth0 iface eth0 inet{suffix} dhcp metric 100 - """) - for dhcp_ver in ('dhcp4', 'dhcp6'): - suffix = '6' if dhcp_ver == 'dhcp6' else '' + """ + ) + for dhcp_ver in ("dhcp4", "dhcp6"): + suffix = "6" if dhcp_ver == "dhcp6" else "" dhcp_cfg = { dhcp_ver: True, - '{ver}-overrides'.format(ver=dhcp_ver): {'route-metric': 100}} - v2_input = {'version': 2, 'ethernets': {'eth0': dhcp_cfg}} + "{ver}-overrides".format(ver=dhcp_ver): {"route-metric": 100}, + } + v2_input = {"version": 2, "ethernets": {"eth0": dhcp_cfg}} ns = network_state.parse_net_config_data(v2_input) renderer.render_network_state(ns, target=tmp_dir) self.assertEqual( expected_tmpl.format(suffix=suffix), - dir2dict(tmp_dir)['/etc/network/interfaces']) + dir2dict(tmp_dir)["/etc/network/interfaces"], + ) class TestNetplanNetRendering(CiTestCase): - @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") @mock.patch("cloudinit.net.netplan._clean_default") @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") - def test_default_generation(self, mock_get_devicelist, - mock_read_sys_net, - mock_sys_dev_path, - mock_clean_default, m_get_cmdline): + def test_default_generation( + self, + mock_get_devicelist, + mock_read_sys_net, + mock_sys_dev_path, + mock_clean_default, + m_get_cmdline, + ): tmp_dir = self.tmp_dir() - _setup_test(tmp_dir, mock_get_devicelist, - mock_read_sys_net, mock_sys_dev_path) + _setup_test( + tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path + ) network_cfg = net.generate_fallback_config() - ns = network_state.parse_net_config_data(network_cfg, - skip_broken=False) + ns = network_state.parse_net_config_data( + network_cfg, skip_broken=False + ) render_dir = os.path.join(tmp_dir, "render") os.makedirs(render_dir) - render_target = 'netplan.yaml' + render_target = "netplan.yaml" renderer = netplan.Renderer( - {'netplan_path': render_target, 'postcmds': False}) + {"netplan_path": render_target, "postcmds": False} + ) renderer.render_network_state(ns, target=render_dir) - self.assertTrue(os.path.exists(os.path.join(render_dir, - render_target))) + self.assertTrue( + os.path.exists(os.path.join(render_dir, render_target)) + ) with open(os.path.join(render_dir, render_target)) as fh: contents = fh.read() print(contents) @@ -4151,8 +4800,9 @@ network: class TestNetplanCleanDefault(CiTestCase): - snapd_known_path = 'etc/netplan/00-snapd-config.yaml' - snapd_known_content = textwrap.dedent("""\ + snapd_known_path = 
"etc/netplan/00-snapd-config.yaml" + snapd_known_content = textwrap.dedent( + """\ # This is the initial network config. # It can be overwritten by cloud-init or console-conf. network: @@ -4166,15 +4816,18 @@ class TestNetplanCleanDefault(CiTestCase): match: name: "eth*" dhcp4: true - """) + """ + ) stub_known = { - 'run/systemd/network/10-netplan-all-en.network': 'foo-en', - 'run/systemd/network/10-netplan-all-eth.network': 'foo-eth', - 'run/systemd/generator/netplan.stamp': 'stamp', + "run/systemd/network/10-netplan-all-en.network": "foo-en", + "run/systemd/network/10-netplan-all-eth.network": "foo-eth", + "run/systemd/generator/netplan.stamp": "stamp", } def test_clean_known_config_cleaned(self): - content = {self.snapd_known_path: self.snapd_known_content, } + content = { + self.snapd_known_path: self.snapd_known_content, + } content.update(self.stub_known) tmpd = self.tmp_dir() files = sorted(populate_dir(tmpd, content)) @@ -4183,7 +4836,9 @@ class TestNetplanCleanDefault(CiTestCase): self.assertEqual([], found) def test_clean_unknown_config_not_cleaned(self): - content = {self.snapd_known_path: self.snapd_known_content, } + content = { + self.snapd_known_path: self.snapd_known_content, + } content.update(self.stub_known) content[self.snapd_known_path] += "# user put a comment\n" tmpd = self.tmp_dir() @@ -4214,78 +4869,100 @@ class TestNetplanCleanDefault(CiTestCase): class TestNetplanPostcommands(CiTestCase): mycfg = { - 'config': [{"type": "physical", "name": "eth0", - "mac_address": "c0:d6:9f:2c:e8:80", - "subnets": [{"type": "dhcp"}]}], - 'version': 1} - - @mock.patch.object(netplan.Renderer, '_netplan_generate') - @mock.patch.object(netplan.Renderer, '_net_setup_link') - @mock.patch('cloudinit.subp.subp') - def test_netplan_render_calls_postcmds(self, mock_subp, - mock_netplan_generate, - mock_net_setup_link): + "config": [ + { + "type": "physical", + "name": "eth0", + "mac_address": "c0:d6:9f:2c:e8:80", + "subnets": [{"type": "dhcp"}], + } + ], + "version": 1, + } + + @mock.patch.object(netplan.Renderer, "_netplan_generate") + @mock.patch.object(netplan.Renderer, "_net_setup_link") + @mock.patch("cloudinit.subp.subp") + def test_netplan_render_calls_postcmds( + self, mock_subp, mock_netplan_generate, mock_net_setup_link + ): tmp_dir = self.tmp_dir() - ns = network_state.parse_net_config_data(self.mycfg, - skip_broken=False) + ns = network_state.parse_net_config_data(self.mycfg, skip_broken=False) render_dir = os.path.join(tmp_dir, "render") os.makedirs(render_dir) - render_target = 'netplan.yaml' + render_target = "netplan.yaml" renderer = netplan.Renderer( - {'netplan_path': render_target, 'postcmds': True}) + {"netplan_path": render_target, "postcmds": True} + ) mock_subp.side_effect = iter([subp.ProcessExecutionError]) renderer.render_network_state(ns, target=render_dir) mock_netplan_generate.assert_called_with(run=True) mock_net_setup_link.assert_called_with(run=True) - @mock.patch('cloudinit.util.SeLinuxGuard') + @mock.patch("cloudinit.util.SeLinuxGuard") @mock.patch.object(netplan, "get_devicelist") - @mock.patch('cloudinit.subp.subp') + @mock.patch("cloudinit.subp.subp") def test_netplan_postcmds(self, mock_subp, mock_devlist, mock_sel): mock_sel.__enter__ = mock.Mock(return_value=False) mock_sel.__exit__ = mock.Mock() - mock_devlist.side_effect = [['lo']] + mock_devlist.side_effect = [["lo"]] tmp_dir = self.tmp_dir() - ns = network_state.parse_net_config_data(self.mycfg, - skip_broken=False) + ns = network_state.parse_net_config_data(self.mycfg, skip_broken=False) 
         render_dir = os.path.join(tmp_dir, "render")
         os.makedirs(render_dir)

-        render_target = 'netplan.yaml'
+        render_target = "netplan.yaml"
         renderer = netplan.Renderer(
-            {'netplan_path': render_target, 'postcmds': True})
-        mock_subp.side_effect = iter([
-            subp.ProcessExecutionError,
-            ('', ''),
-            ('', ''),
-        ])
+            {"netplan_path": render_target, "postcmds": True}
+        )
+        mock_subp.side_effect = iter(
+            [
+                subp.ProcessExecutionError,
+                ("", ""),
+                ("", ""),
+            ]
+        )
         expected = [
-            mock.call(['netplan', 'info'], capture=True),
-            mock.call(['netplan', 'generate'], capture=True),
-            mock.call(['udevadm', 'test-builtin', 'net_setup_link',
-                       '/sys/class/net/lo'], capture=True),
+            mock.call(["netplan", "info"], capture=True),
+            mock.call(["netplan", "generate"], capture=True),
+            mock.call(
+                [
+                    "udevadm",
+                    "test-builtin",
+                    "net_setup_link",
+                    "/sys/class/net/lo",
+                ],
+                capture=True,
+            ),
         ]

-        with mock.patch.object(os.path, 'islink', return_value=True):
+        with mock.patch.object(os.path, "islink", return_value=True):
             renderer.render_network_state(ns, target=render_dir)
             mock_subp.assert_has_calls(expected)


 class TestEniNetworkStateToEni(CiTestCase):
     mycfg = {
-        'config': [{"type": "physical", "name": "eth0",
-                    "mac_address": "c0:d6:9f:2c:e8:80",
-                    "subnets": [{"type": "dhcp"}]}],
-        'version': 1}
-    my_mac = 'c0:d6:9f:2c:e8:80'
+        "config": [
+            {
+                "type": "physical",
+                "name": "eth0",
+                "mac_address": "c0:d6:9f:2c:e8:80",
+                "subnets": [{"type": "dhcp"}],
+            }
+        ],
+        "version": 1,
+    }
+    my_mac = "c0:d6:9f:2c:e8:80"

     def test_no_header(self):
         rendered = eni.network_state_to_eni(
             network_state=network_state.parse_net_config_data(self.mycfg),
-            render_hwaddress=True)
+            render_hwaddress=True,
+        )
         self.assertIn(self.my_mac, rendered)
         self.assertIn("hwaddress", rendered)

@@ -4293,14 +4970,17 @@ class TestEniNetworkStateToEni(CiTestCase):
         header = "# hello world\n"
         rendered = eni.network_state_to_eni(
             network_state=network_state.parse_net_config_data(self.mycfg),
-            header=header, render_hwaddress=True)
+            header=header,
+            render_hwaddress=True,
+        )
         self.assertIn(header, rendered)
         self.assertIn(self.my_mac, rendered)

     def test_no_hwaddress(self):
         rendered = eni.network_state_to_eni(
             network_state=network_state.parse_net_config_data(self.mycfg),
-            render_hwaddress=False)
+            render_hwaddress=False,
+        )
         self.assertNotIn(self.my_mac, rendered)
         self.assertNotIn("hwaddress", rendered)

@@ -4309,189 +4989,218 @@ class TestCmdlineConfigParsing(CiTestCase):
     with_logs = True

     simple_cfg = {
-        'config': [{"type": "physical", "name": "eth0",
-                    "mac_address": "c0:d6:9f:2c:e8:80",
-                    "subnets": [{"type": "dhcp"}]}]}
+        "config": [
+            {
+                "type": "physical",
+                "name": "eth0",
+                "mac_address": "c0:d6:9f:2c:e8:80",
+                "subnets": [{"type": "dhcp"}],
+            }
+        ]
+    }

     def test_cmdline_convert_dhcp(self):
         found = cmdline._klibc_to_config_entry(DHCP_CONTENT_1)
-        self.assertEqual(found, ('eth0', DHCP_EXPECTED_1))
+        self.assertEqual(found, ("eth0", DHCP_EXPECTED_1))

     def test_cmdline_convert_dhcp6(self):
         found = cmdline._klibc_to_config_entry(DHCP6_CONTENT_1)
-        self.assertEqual(found, ('eno1', DHCP6_EXPECTED_1))
+        self.assertEqual(found, ("eno1", DHCP6_EXPECTED_1))

     def test_cmdline_convert_static(self):
         found = cmdline._klibc_to_config_entry(STATIC_CONTENT_1)
-        self.assertEqual(found, ('eth1', STATIC_EXPECTED_1))
+        self.assertEqual(found, ("eth1", STATIC_EXPECTED_1))

     def test_config_from_cmdline_net_cfg(self):
         files = []
-        pairs = (('net-eth0.cfg', DHCP_CONTENT_1),
-                 ('net-eth1.cfg', STATIC_CONTENT_1))
+        pairs = (
+            ("net-eth0.cfg", DHCP_CONTENT_1),
("net-eth1.cfg", STATIC_CONTENT_1), + ) - macs = {'eth1': 'b8:ae:ed:75:ff:2b', - 'eth0': 'b8:ae:ed:75:ff:2a'} + macs = {"eth1": "b8:ae:ed:75:ff:2b", "eth0": "b8:ae:ed:75:ff:2a"} dhcp = copy.deepcopy(DHCP_EXPECTED_1) - dhcp['mac_address'] = macs['eth0'] + dhcp["mac_address"] = macs["eth0"] static = copy.deepcopy(STATIC_EXPECTED_1) - static['mac_address'] = macs['eth1'] + static["mac_address"] = macs["eth1"] - expected = {'version': 1, 'config': [dhcp, static]} + expected = {"version": 1, "config": [dhcp, static]} with temp_utils.tempdir() as tmpd: for fname, content in pairs: fp = os.path.join(tmpd, fname) files.append(fp) util.write_file(fp, content) - found = cmdline.config_from_klibc_net_cfg(files=files, - mac_addrs=macs) + found = cmdline.config_from_klibc_net_cfg( + files=files, mac_addrs=macs + ) self.assertEqual(found, expected) def test_cmdline_with_b64(self): data = base64.b64encode(json.dumps(self.simple_cfg).encode()) encoded_text = data.decode() - raw_cmdline = 'ro network-config=' + encoded_text + ' root=foo' + raw_cmdline = "ro network-config=" + encoded_text + " root=foo" found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline) self.assertEqual(found, self.simple_cfg) def test_cmdline_with_net_config_disabled(self): - raw_cmdline = 'ro network-config=disabled root=foo' + raw_cmdline = "ro network-config=disabled root=foo" found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline) - self.assertEqual(found, {'config': 'disabled'}) + self.assertEqual(found, {"config": "disabled"}) def test_cmdline_with_net_config_unencoded_logs_error(self): """network-config cannot be unencoded besides 'disabled'.""" - raw_cmdline = 'ro network-config={config:disabled} root=foo' + raw_cmdline = "ro network-config={config:disabled} root=foo" found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline) self.assertIsNone(found) expected_log = ( - 'ERROR: Expected base64 encoded kernel commandline parameter' - ' network-config. Ignoring network-config={config:disabled}.') + "ERROR: Expected base64 encoded kernel commandline parameter" + " network-config. Ignoring network-config={config:disabled}." 
+        )
         self.assertIn(expected_log, self.logs.getvalue())

     def test_cmdline_with_b64_gz(self):
         data = _gzip_data(json.dumps(self.simple_cfg).encode())
         encoded_text = base64.b64encode(data).decode()
-        raw_cmdline = 'ro network-config=' + encoded_text + ' root=foo'
+        raw_cmdline = "ro network-config=" + encoded_text + " root=foo"
         found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline)
         self.assertEqual(found, self.simple_cfg)


 class TestCmdlineKlibcNetworkConfigSource(FilesystemMockingTestCase):
     macs = {
-        'eth0': '14:02:ec:42:48:00',
-        'eno1': '14:02:ec:42:48:01',
+        "eth0": "14:02:ec:42:48:00",
+        "eno1": "14:02:ec:42:48:01",
     }

     def test_without_ip(self):
-        content = {'/run/net-eth0.conf': DHCP_CONTENT_1,
-                   cmdline._OPEN_ISCSI_INTERFACE_FILE: "eth0\n"}
+        content = {
+            "/run/net-eth0.conf": DHCP_CONTENT_1,
+            cmdline._OPEN_ISCSI_INTERFACE_FILE: "eth0\n",
+        }
         exp1 = copy.deepcopy(DHCP_EXPECTED_1)
-        exp1['mac_address'] = self.macs['eth0']
+        exp1["mac_address"] = self.macs["eth0"]

         root = self.tmp_dir()
         populate_dir(root, content)
         self.reRoot(root)

         src = cmdline.KlibcNetworkConfigSource(
-            _cmdline='foo root=/root/bar', _mac_addrs=self.macs,
+            _cmdline="foo root=/root/bar",
+            _mac_addrs=self.macs,
         )
         self.assertTrue(src.is_applicable())
         found = src.render_config()
-        self.assertEqual(found['version'], 1)
-        self.assertEqual(found['config'], [exp1])
+        self.assertEqual(found["version"], 1)
+        self.assertEqual(found["config"], [exp1])

     def test_with_ip(self):
-        content = {'/run/net-eth0.conf': DHCP_CONTENT_1}
+        content = {"/run/net-eth0.conf": DHCP_CONTENT_1}
         exp1 = copy.deepcopy(DHCP_EXPECTED_1)
-        exp1['mac_address'] = self.macs['eth0']
+        exp1["mac_address"] = self.macs["eth0"]

         root = self.tmp_dir()
         populate_dir(root, content)
         self.reRoot(root)

         src = cmdline.KlibcNetworkConfigSource(
-            _cmdline='foo ip=dhcp', _mac_addrs=self.macs,
+            _cmdline="foo ip=dhcp",
+            _mac_addrs=self.macs,
         )
         self.assertTrue(src.is_applicable())
         found = src.render_config()
-        self.assertEqual(found['version'], 1)
-        self.assertEqual(found['config'], [exp1])
+        self.assertEqual(found["version"], 1)
+        self.assertEqual(found["config"], [exp1])

     def test_with_ip6(self):
-        content = {'/run/net6-eno1.conf': DHCP6_CONTENT_1}
+        content = {"/run/net6-eno1.conf": DHCP6_CONTENT_1}
         root = self.tmp_dir()
         populate_dir(root, content)
         self.reRoot(root)

         src = cmdline.KlibcNetworkConfigSource(
-            _cmdline='foo ip6=dhcp root=/dev/sda', _mac_addrs=self.macs,
+            _cmdline="foo ip6=dhcp root=/dev/sda",
+            _mac_addrs=self.macs,
         )
         self.assertTrue(src.is_applicable())
         found = src.render_config()
         self.assertEqual(
             found,
-            {'version': 1, 'config': [
-             {'type': 'physical', 'name': 'eno1',
-              'mac_address': self.macs['eno1'],
-              'subnets': [
-                  {'dns_nameservers': ['2001:67c:1562:8010::2:1'],
-                   'control': 'manual', 'type': 'dhcp6', 'netmask': '64'}]}]})
+            {
+                "version": 1,
+                "config": [
+                    {
+                        "type": "physical",
+                        "name": "eno1",
+                        "mac_address": self.macs["eno1"],
+                        "subnets": [
+                            {
+                                "dns_nameservers": ["2001:67c:1562:8010::2:1"],
+                                "control": "manual",
+                                "type": "dhcp6",
+                                "netmask": "64",
+                            }
+                        ],
+                    }
+                ],
+            },
+        )

     def test_with_no_ip_or_ip6(self):
         # if there is no ip= or ip6= on cmdline, return value should be None
-        content = {'net6-eno1.conf': DHCP6_CONTENT_1}
+        content = {"net6-eno1.conf": DHCP6_CONTENT_1}
         files = sorted(populate_dir(self.tmp_dir(), content))
         src = cmdline.KlibcNetworkConfigSource(
-            _files=files, _cmdline='foo root=/dev/sda', _mac_addrs=self.macs,
+            _files=files,
+            _cmdline="foo root=/dev/sda",
+            _mac_addrs=self.macs,
         )
         self.assertFalse(src.is_applicable())

     def test_with_faux_ip(self):
-        content = {'net6-eno1.conf': DHCP6_CONTENT_1}
+        content = {"net6-eno1.conf": DHCP6_CONTENT_1}
         files = sorted(populate_dir(self.tmp_dir(), content))
         src = cmdline.KlibcNetworkConfigSource(
             _files=files,
-            _cmdline='foo iscsi_target_ip=root=/dev/sda',
+            _cmdline="foo iscsi_target_ip=root=/dev/sda",
             _mac_addrs=self.macs,
         )
         self.assertFalse(src.is_applicable())

     def test_empty_cmdline(self):
-        content = {'net6-eno1.conf': DHCP6_CONTENT_1}
+        content = {"net6-eno1.conf": DHCP6_CONTENT_1}
         files = sorted(populate_dir(self.tmp_dir(), content))
         src = cmdline.KlibcNetworkConfigSource(
             _files=files,
-            _cmdline='',
+            _cmdline="",
             _mac_addrs=self.macs,
         )
         self.assertFalse(src.is_applicable())

     def test_whitespace_cmdline(self):
-        content = {'net6-eno1.conf': DHCP6_CONTENT_1}
+        content = {"net6-eno1.conf": DHCP6_CONTENT_1}
         files = sorted(populate_dir(self.tmp_dir(), content))
         src = cmdline.KlibcNetworkConfigSource(
             _files=files,
-            _cmdline=' ',
+            _cmdline=" ",
             _mac_addrs=self.macs,
         )
         self.assertFalse(src.is_applicable())

     def test_cmdline_no_lhand(self):
-        content = {'net6-eno1.conf': DHCP6_CONTENT_1}
+        content = {"net6-eno1.conf": DHCP6_CONTENT_1}
         files = sorted(populate_dir(self.tmp_dir(), content))
         src = cmdline.KlibcNetworkConfigSource(
             _files=files,
-            _cmdline='=wut',
+            _cmdline="=wut",
             _mac_addrs=self.macs,
         )
         self.assertFalse(src.is_applicable())

     def test_cmdline_embedded_ip(self):
-        content = {'net6-eno1.conf': DHCP6_CONTENT_1}
+        content = {"net6-eno1.conf": DHCP6_CONTENT_1}
         files = sorted(populate_dir(self.tmp_dir(), content))
         src = cmdline.KlibcNetworkConfigSource(
             _files=files,
@@ -4502,13 +5211,19 @@ class TestCmdlineKlibcNetworkConfigSource(FilesystemMockingTestCase):

     def test_with_both_ip_ip6(self):
         content = {
-            '/run/net-eth0.conf': DHCP_CONTENT_1,
-            '/run/net6-eth0.conf': DHCP6_CONTENT_1.replace('eno1', 'eth0')}
+            "/run/net-eth0.conf": DHCP_CONTENT_1,
+            "/run/net6-eth0.conf": DHCP6_CONTENT_1.replace("eno1", "eth0"),
+        }
         eth0 = copy.deepcopy(DHCP_EXPECTED_1)
-        eth0['mac_address'] = self.macs['eth0']
-        eth0['subnets'].append(
-            {'control': 'manual', 'type': 'dhcp6',
-             'netmask': '64', 'dns_nameservers': ['2001:67c:1562:8010::2:1']})
+        eth0["mac_address"] = self.macs["eth0"]
+        eth0["subnets"].append(
+            {
+                "control": "manual",
+                "type": "dhcp6",
+                "netmask": "64",
+                "dns_nameservers": ["2001:67c:1562:8010::2:1"],
+            }
+        )
         expected = [eth0]

         root = self.tmp_dir()
@@ -4516,17 +5231,17 @@ class TestCmdlineKlibcNetworkConfigSource(FilesystemMockingTestCase):
         self.reRoot(root)

         src = cmdline.KlibcNetworkConfigSource(
-            _cmdline='foo ip=dhcp ip6=dhcp', _mac_addrs=self.macs,
+            _cmdline="foo ip=dhcp ip6=dhcp",
+            _mac_addrs=self.macs,
         )

         self.assertTrue(src.is_applicable())
         found = src.render_config()
-        self.assertEqual(found['version'], 1)
-        self.assertEqual(found['config'], expected)
+        self.assertEqual(found["version"], 1)
+        self.assertEqual(found["config"], expected)


 class TestReadInitramfsConfig(CiTestCase):
-
     def _config_source_cls_mock(self, is_applicable, render_config=None):
         return lambda: mock.Mock(
             is_applicable=lambda: is_applicable,
@@ -4534,7 +5249,7 @@ class TestReadInitramfsConfig(CiTestCase):
         )

     def test_no_sources(self):
-        with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES', []):
+        with mock.patch("cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", []):
             self.assertIsNone(cmdline.read_initramfs_config())

     def test_no_applicable_sources(self):
@@ -4543,19 +5258,22 @@
self._config_source_cls_mock(is_applicable=False), self._config_source_cls_mock(is_applicable=False), ] - with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES', - sources): + with mock.patch( + "cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", sources + ): self.assertIsNone(cmdline.read_initramfs_config()) def test_one_applicable_source(self): expected_config = object() sources = [ self._config_source_cls_mock( - is_applicable=True, render_config=expected_config, + is_applicable=True, + render_config=expected_config, ), ] - with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES', - sources): + with mock.patch( + "cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", sources + ): self.assertEqual(expected_config, cmdline.read_initramfs_config()) def test_one_applicable_source_after_inapplicable_sources(self): @@ -4564,45 +5282,53 @@ class TestReadInitramfsConfig(CiTestCase): self._config_source_cls_mock(is_applicable=False), self._config_source_cls_mock(is_applicable=False), self._config_source_cls_mock( - is_applicable=True, render_config=expected_config, + is_applicable=True, + render_config=expected_config, ), ] - with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES', - sources): + with mock.patch( + "cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", sources + ): self.assertEqual(expected_config, cmdline.read_initramfs_config()) def test_first_applicable_source_is_used(self): first_config, second_config = object(), object() sources = [ self._config_source_cls_mock( - is_applicable=True, render_config=first_config, + is_applicable=True, + render_config=first_config, ), self._config_source_cls_mock( - is_applicable=True, render_config=second_config, + is_applicable=True, + render_config=second_config, ), ] - with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES', - sources): + with mock.patch( + "cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES", sources + ): self.assertEqual(first_config, cmdline.read_initramfs_config()) class TestNetplanRoundTrip(CiTestCase): - NETPLAN_INFO_OUT = textwrap.dedent(""" + NETPLAN_INFO_OUT = textwrap.dedent( + """ netplan.io: features: - dhcp-use-domains - ipv6-mtu website: https://netplan.io/ - """) + """ + ) def setUp(self): super(TestNetplanRoundTrip, self).setUp() - self.add_patch('cloudinit.net.netplan.subp.subp', 'm_subp') - self.m_subp.return_value = (self.NETPLAN_INFO_OUT, '') + self.add_patch("cloudinit.net.netplan.subp.subp", "m_subp") + self.m_subp.return_value = (self.NETPLAN_INFO_OUT, "") - def _render_and_read(self, network_config=None, state=None, - netplan_path=None, target=None): + def _render_and_read( + self, network_config=None, state=None, netplan_path=None, target=None + ): if target is None: target = self.tmp_dir() @@ -4614,188 +5340,212 @@ class TestNetplanRoundTrip(CiTestCase): raise ValueError("Expected data or state, got neither") if netplan_path is None: - netplan_path = 'etc/netplan/50-cloud-init.yaml' + netplan_path = "etc/netplan/50-cloud-init.yaml" - renderer = netplan.Renderer( - config={'netplan_path': netplan_path}) + renderer = netplan.Renderer(config={"netplan_path": netplan_path}) renderer.render_network_state(ns, target=target) return dir2dict(target) def testsimple_render_bond_netplan(self): - entry = NETWORK_CONFIGS['bond'] - files = self._render_and_read(network_config=yaml.load(entry['yaml'])) - print(entry['expected_netplan']) - print('-- expected ^ | v rendered --') - print(files['/etc/netplan/50-cloud-init.yaml']) + entry = NETWORK_CONFIGS["bond"] + files = 
self._render_and_read(network_config=yaml.load(entry["yaml"])) + print(entry["expected_netplan"]) + print("-- expected ^ | v rendered --") + print(files["/etc/netplan/50-cloud-init.yaml"]) self.assertEqual( - entry['expected_netplan'].splitlines(), - files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + entry["expected_netplan"].splitlines(), + files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + ) def testsimple_render_bond_v2_input_netplan(self): - entry = NETWORK_CONFIGS['bond'] + entry = NETWORK_CONFIGS["bond"] files = self._render_and_read( - network_config=yaml.load(entry['yaml-v2'])) - print(entry['expected_netplan-v2']) - print('-- expected ^ | v rendered --') - print(files['/etc/netplan/50-cloud-init.yaml']) + network_config=yaml.load(entry["yaml-v2"]) + ) + print(entry["expected_netplan-v2"]) + print("-- expected ^ | v rendered --") + print(files["/etc/netplan/50-cloud-init.yaml"]) self.assertEqual( - entry['expected_netplan-v2'].splitlines(), - files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + entry["expected_netplan-v2"].splitlines(), + files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + ) def testsimple_render_small_netplan(self): - entry = NETWORK_CONFIGS['small'] - files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + entry = NETWORK_CONFIGS["small"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) self.assertEqual( - entry['expected_netplan'].splitlines(), - files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + entry["expected_netplan"].splitlines(), + files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + ) def testsimple_render_v4_and_v6(self): - entry = NETWORK_CONFIGS['v4_and_v6'] - files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + entry = NETWORK_CONFIGS["v4_and_v6"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) self.assertEqual( - entry['expected_netplan'].splitlines(), - files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + entry["expected_netplan"].splitlines(), + files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + ) def testsimple_render_v4_and_v6_static(self): - entry = NETWORK_CONFIGS['v4_and_v6_static'] - files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + entry = NETWORK_CONFIGS["v4_and_v6_static"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) self.assertEqual( - entry['expected_netplan'].splitlines(), - files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + entry["expected_netplan"].splitlines(), + files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + ) def testsimple_render_dhcpv6_only(self): - entry = NETWORK_CONFIGS['dhcpv6_only'] - files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + entry = NETWORK_CONFIGS["dhcpv6_only"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) self.assertEqual( - entry['expected_netplan'].splitlines(), - files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + entry["expected_netplan"].splitlines(), + files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + ) def testsimple_render_dhcpv6_accept_ra(self): - entry = NETWORK_CONFIGS['dhcpv6_accept_ra'] - files = self._render_and_read(network_config=yaml.load( - entry['yaml_v1'])) + entry = NETWORK_CONFIGS["dhcpv6_accept_ra"] + files = self._render_and_read( + network_config=yaml.load(entry["yaml_v1"]) + ) self.assertEqual( - entry['expected_netplan'].splitlines(), - files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + entry["expected_netplan"].splitlines(), + 
files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + ) def testsimple_render_dhcpv6_reject_ra(self): - entry = NETWORK_CONFIGS['dhcpv6_reject_ra'] - files = self._render_and_read(network_config=yaml.load( - entry['yaml_v1'])) + entry = NETWORK_CONFIGS["dhcpv6_reject_ra"] + files = self._render_and_read( + network_config=yaml.load(entry["yaml_v1"]) + ) self.assertEqual( - entry['expected_netplan'].splitlines(), - files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + entry["expected_netplan"].splitlines(), + files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + ) def testsimple_render_ipv6_slaac(self): - entry = NETWORK_CONFIGS['ipv6_slaac'] - files = self._render_and_read(network_config=yaml.load( - entry['yaml'])) + entry = NETWORK_CONFIGS["ipv6_slaac"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) self.assertEqual( - entry['expected_netplan'].splitlines(), - files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + entry["expected_netplan"].splitlines(), + files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + ) def testsimple_render_dhcpv6_stateless(self): - entry = NETWORK_CONFIGS['dhcpv6_stateless'] - files = self._render_and_read(network_config=yaml.load( - entry['yaml'])) + entry = NETWORK_CONFIGS["dhcpv6_stateless"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) self.assertEqual( - entry['expected_netplan'].splitlines(), - files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + entry["expected_netplan"].splitlines(), + files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + ) def testsimple_render_dhcpv6_stateful(self): - entry = NETWORK_CONFIGS['dhcpv6_stateful'] - files = self._render_and_read(network_config=yaml.load( - entry['yaml'])) + entry = NETWORK_CONFIGS["dhcpv6_stateful"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) self.assertEqual( - entry['expected_netplan'].splitlines(), - files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + entry["expected_netplan"].splitlines(), + files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + ) def testsimple_wakeonlan_disabled_config_v2(self): - entry = NETWORK_CONFIGS['wakeonlan_disabled'] - files = self._render_and_read(network_config=yaml.load( - entry['yaml_v2'])) + entry = NETWORK_CONFIGS["wakeonlan_disabled"] + files = self._render_and_read( + network_config=yaml.load(entry["yaml_v2"]) + ) self.assertEqual( - entry['expected_netplan'].splitlines(), - files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + entry["expected_netplan"].splitlines(), + files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + ) def testsimple_wakeonlan_enabled_config_v2(self): - entry = NETWORK_CONFIGS['wakeonlan_enabled'] - files = self._render_and_read(network_config=yaml.load( - entry['yaml_v2'])) + entry = NETWORK_CONFIGS["wakeonlan_enabled"] + files = self._render_and_read( + network_config=yaml.load(entry["yaml_v2"]) + ) self.assertEqual( - entry['expected_netplan'].splitlines(), - files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + entry["expected_netplan"].splitlines(), + files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + ) def testsimple_render_all(self): - entry = NETWORK_CONFIGS['all'] - files = self._render_and_read(network_config=yaml.load(entry['yaml'])) - print(entry['expected_netplan']) - print('-- expected ^ | v rendered --') - print(files['/etc/netplan/50-cloud-init.yaml']) + entry = NETWORK_CONFIGS["all"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) + print(entry["expected_netplan"]) + print("-- 
expected ^ | v rendered --") + print(files["/etc/netplan/50-cloud-init.yaml"]) self.assertEqual( - entry['expected_netplan'].splitlines(), - files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + entry["expected_netplan"].splitlines(), + files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + ) def testsimple_render_manual(self): - entry = NETWORK_CONFIGS['manual'] - files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + entry = NETWORK_CONFIGS["manual"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) self.assertEqual( - entry['expected_netplan'].splitlines(), - files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + entry["expected_netplan"].splitlines(), + files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + ) def test_render_output_has_yaml_no_aliases(self): entry = { - 'yaml': V1_NAMESERVER_ALIAS, - 'expected_netplan': NETPLAN_NO_ALIAS, + "yaml": V1_NAMESERVER_ALIAS, + "expected_netplan": NETPLAN_NO_ALIAS, } - network_config = yaml.load(entry['yaml']) + network_config = yaml.load(entry["yaml"]) ns = network_state.parse_net_config_data(network_config) files = self._render_and_read(state=ns) # check for alias - content = files['/etc/netplan/50-cloud-init.yaml'] + content = files["/etc/netplan/50-cloud-init.yaml"] # test load the yaml to ensure we don't render something not loadable # this allows single aliases, but not duplicate ones - parsed = yaml.load(files['/etc/netplan/50-cloud-init.yaml']) + parsed = yaml.load(files["/etc/netplan/50-cloud-init.yaml"]) self.assertNotEqual(None, parsed) # now look for any alias, avoid rendering them entirely # generate the first anchor string using the template # as of this writing, looks like "&id001" - anchor = r'&' + Serializer.ANCHOR_TEMPLATE % 1 + anchor = r"&" + Serializer.ANCHOR_TEMPLATE % 1 found_alias = re.search(anchor, content, re.MULTILINE) if found_alias: msg = "Error at: %s\nContent:\n%s" % (found_alias, content) - raise ValueError('Found yaml alias in rendered netplan: ' + msg) + raise ValueError("Found yaml alias in rendered netplan: " + msg) - print(entry['expected_netplan']) - print('-- expected ^ | v rendered --') - print(files['/etc/netplan/50-cloud-init.yaml']) + print(entry["expected_netplan"]) + print("-- expected ^ | v rendered --") + print(files["/etc/netplan/50-cloud-init.yaml"]) self.assertEqual( - entry['expected_netplan'].splitlines(), - files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + entry["expected_netplan"].splitlines(), + files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + ) def test_render_output_supports_both_grat_arp_spelling(self): entry = { - 'yaml': NETPLAN_BOND_GRAT_ARP, - 'expected_netplan': NETPLAN_BOND_GRAT_ARP.replace('gratuitous', - 'gratuitious'), + "yaml": NETPLAN_BOND_GRAT_ARP, + "expected_netplan": NETPLAN_BOND_GRAT_ARP.replace( + "gratuitous", "gratuitious" + ), } - network_config = yaml.load(entry['yaml']).get('network') + network_config = yaml.load(entry["yaml"]).get("network") files = self._render_and_read(network_config=network_config) - print(entry['expected_netplan']) - print('-- expected ^ | v rendered --') - print(files['/etc/netplan/50-cloud-init.yaml']) + print(entry["expected_netplan"]) + print("-- expected ^ | v rendered --") + print(files["/etc/netplan/50-cloud-init.yaml"]) self.assertEqual( - entry['expected_netplan'].splitlines(), - files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + entry["expected_netplan"].splitlines(), + files["/etc/netplan/50-cloud-init.yaml"].splitlines(), + ) class TestEniRoundTrip(CiTestCase): 
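A note on the no-alias assertion in TestNetplanRoundTrip above: pyyaml emits an anchor ("&id001", per the Serializer.ANCHOR_TEMPLATE the test builds its regex from) plus aliases ("*id001") whenever one Python object occurs twice in the dumped tree, and consumers such as netplan have historically rejected the alias form. A small illustration with plain pyyaml, independent of whatever dumper cloud-init itself uses:

import yaml

ns = ["8.8.8.8"]  # one list object shared by two interfaces
print(yaml.safe_dump({"eth0": ns, "eth1": ns}))
# eth0: &id001   <- anchor on first occurrence
# - 8.8.8.8
# eth1: *id001   <- alias on reuse
#
# One way to suppress aliases entirely is a dumper that refuses to reuse
# anchors (shown as an option, not necessarily cloud-init's approach):

class NoAliasDumper(yaml.SafeDumper):
    def ignore_aliases(self, data):
        return True

print(yaml.dump({"eth0": ns, "eth1": ns}, Dumper=NoAliasDumper))
# eth0:
# - 8.8.8.8
# eth1:
# - 8.8.8.8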
- - def _render_and_read(self, network_config=None, state=None, eni_path=None, - netrules_path=None, dir=None): + def _render_and_read( + self, + network_config=None, + state=None, + eni_path=None, + netrules_path=None, + dir=None, + ): if dir is None: dir = self.tmp_dir() @@ -4807,10 +5557,11 @@ class TestEniRoundTrip(CiTestCase): raise ValueError("Expected data or state, got neither") if eni_path is None: - eni_path = 'etc/network/interfaces' + eni_path = "etc/network/interfaces" renderer = eni.Renderer( - config={'eni_path': eni_path, 'netrules_path': netrules_path}) + config={"eni_path": eni_path, "netrules_path": netrules_path} + ) renderer.render_network_state(ns, target=dir) return dir2dict(dir) @@ -4820,95 +5571,112 @@ class TestEniRoundTrip(CiTestCase): files = self._render_and_read(network_config=network_config) self.assertEqual( RENDERED_ENI.splitlines(), - files['/etc/network/interfaces'].splitlines()) + files["/etc/network/interfaces"].splitlines(), + ) def testsimple_render_all(self): - entry = NETWORK_CONFIGS['all'] - files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + entry = NETWORK_CONFIGS["all"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) self.assertEqual( - entry['expected_eni'].splitlines(), - files['/etc/network/interfaces'].splitlines()) + entry["expected_eni"].splitlines(), + files["/etc/network/interfaces"].splitlines(), + ) def testsimple_render_small(self): - entry = NETWORK_CONFIGS['small'] - files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + entry = NETWORK_CONFIGS["small"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) self.assertEqual( - entry['expected_eni'].splitlines(), - files['/etc/network/interfaces'].splitlines()) + entry["expected_eni"].splitlines(), + files["/etc/network/interfaces"].splitlines(), + ) def testsimple_render_v4_and_v6(self): - entry = NETWORK_CONFIGS['v4_and_v6'] - files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + entry = NETWORK_CONFIGS["v4_and_v6"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) self.assertEqual( - entry['expected_eni'].splitlines(), - files['/etc/network/interfaces'].splitlines()) + entry["expected_eni"].splitlines(), + files["/etc/network/interfaces"].splitlines(), + ) def testsimple_render_dhcpv6_only(self): - entry = NETWORK_CONFIGS['dhcpv6_only'] - files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + entry = NETWORK_CONFIGS["dhcpv6_only"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) self.assertEqual( - entry['expected_eni'].splitlines(), - files['/etc/network/interfaces'].splitlines()) + entry["expected_eni"].splitlines(), + files["/etc/network/interfaces"].splitlines(), + ) def testsimple_render_v4_and_v6_static(self): - entry = NETWORK_CONFIGS['v4_and_v6_static'] - files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + entry = NETWORK_CONFIGS["v4_and_v6_static"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) self.assertEqual( - entry['expected_eni'].splitlines(), - files['/etc/network/interfaces'].splitlines()) + entry["expected_eni"].splitlines(), + files["/etc/network/interfaces"].splitlines(), + ) def testsimple_render_dhcpv6_stateless(self): - entry = NETWORK_CONFIGS['dhcpv6_stateless'] - files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + entry = NETWORK_CONFIGS["dhcpv6_stateless"] + files = 
self._render_and_read(network_config=yaml.load(entry["yaml"])) self.assertEqual( - entry['expected_eni'].splitlines(), - files['/etc/network/interfaces'].splitlines()) + entry["expected_eni"].splitlines(), + files["/etc/network/interfaces"].splitlines(), + ) def testsimple_render_ipv6_slaac(self): - entry = NETWORK_CONFIGS['ipv6_slaac'] - files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + entry = NETWORK_CONFIGS["ipv6_slaac"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) self.assertEqual( - entry['expected_eni'].splitlines(), - files['/etc/network/interfaces'].splitlines()) + entry["expected_eni"].splitlines(), + files["/etc/network/interfaces"].splitlines(), + ) def testsimple_render_dhcpv6_stateful(self): - entry = NETWORK_CONFIGS['dhcpv6_stateless'] - files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + entry = NETWORK_CONFIGS["dhcpv6_stateless"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) self.assertEqual( - entry['expected_eni'].splitlines(), - files['/etc/network/interfaces'].splitlines()) + entry["expected_eni"].splitlines(), + files["/etc/network/interfaces"].splitlines(), + ) def testsimple_render_dhcpv6_accept_ra(self): - entry = NETWORK_CONFIGS['dhcpv6_accept_ra'] - files = self._render_and_read(network_config=yaml.load( - entry['yaml_v1'])) + entry = NETWORK_CONFIGS["dhcpv6_accept_ra"] + files = self._render_and_read( + network_config=yaml.load(entry["yaml_v1"]) + ) self.assertEqual( - entry['expected_eni'].splitlines(), - files['/etc/network/interfaces'].splitlines()) + entry["expected_eni"].splitlines(), + files["/etc/network/interfaces"].splitlines(), + ) def testsimple_render_dhcpv6_reject_ra(self): - entry = NETWORK_CONFIGS['dhcpv6_reject_ra'] - files = self._render_and_read(network_config=yaml.load( - entry['yaml_v1'])) + entry = NETWORK_CONFIGS["dhcpv6_reject_ra"] + files = self._render_and_read( + network_config=yaml.load(entry["yaml_v1"]) + ) self.assertEqual( - entry['expected_eni'].splitlines(), - files['/etc/network/interfaces'].splitlines()) + entry["expected_eni"].splitlines(), + files["/etc/network/interfaces"].splitlines(), + ) def testsimple_wakeonlan_disabled_config_v2(self): - entry = NETWORK_CONFIGS['wakeonlan_disabled'] - files = self._render_and_read(network_config=yaml.load( - entry['yaml_v2'])) + entry = NETWORK_CONFIGS["wakeonlan_disabled"] + files = self._render_and_read( + network_config=yaml.load(entry["yaml_v2"]) + ) self.assertEqual( - entry['expected_eni'].splitlines(), - files['/etc/network/interfaces'].splitlines()) + entry["expected_eni"].splitlines(), + files["/etc/network/interfaces"].splitlines(), + ) def testsimple_wakeonlan_enabled_config_v2(self): - entry = NETWORK_CONFIGS['wakeonlan_enabled'] - files = self._render_and_read(network_config=yaml.load( - entry['yaml_v2'])) + entry = NETWORK_CONFIGS["wakeonlan_enabled"] + files = self._render_and_read( + network_config=yaml.load(entry["yaml_v2"]) + ) self.assertEqual( - entry['expected_eni'].splitlines(), - files['/etc/network/interfaces'].splitlines()) + entry["expected_eni"].splitlines(), + files["/etc/network/interfaces"].splitlines(), + ) def testsimple_render_manual(self): """Test rendering of 'manual' for 'type' and 'control'. @@ -4918,145 +5686,179 @@ class TestEniRoundTrip(CiTestCase): if there were no addresses to configure. 
Also strange is the fact that in order to apply that MTU the ifupdown device must be set to 'auto', or the MTU would not be set.""" - entry = NETWORK_CONFIGS['manual'] - files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + entry = NETWORK_CONFIGS["manual"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) self.assertEqual( - entry['expected_eni'].splitlines(), - files['/etc/network/interfaces'].splitlines()) + entry["expected_eni"].splitlines(), + files["/etc/network/interfaces"].splitlines(), + ) def test_routes_rendered(self): # as reported in bug 1649652 conf = [ - {'name': 'eth0', 'type': 'physical', - 'subnets': [{ - 'address': '172.23.31.42/26', - 'dns_nameservers': [], 'gateway': '172.23.31.2', - 'type': 'static'}]}, - {'type': 'route', 'id': 4, - 'metric': 0, 'destination': '10.0.0.0/12', - 'gateway': '172.23.31.1'}, - {'type': 'route', 'id': 5, - 'metric': 0, 'destination': '192.168.2.0/16', - 'gateway': '172.23.31.1'}, - {'type': 'route', 'id': 6, - 'metric': 1, 'destination': '10.0.200.0/16', - 'gateway': '172.23.31.1'}, - {'type': 'route', 'id': 7, - 'metric': 1, 'destination': '10.0.0.100/32', - 'gateway': '172.23.31.1'}, + { + "name": "eth0", + "type": "physical", + "subnets": [ + { + "address": "172.23.31.42/26", + "dns_nameservers": [], + "gateway": "172.23.31.2", + "type": "static", + } + ], + }, + { + "type": "route", + "id": 4, + "metric": 0, + "destination": "10.0.0.0/12", + "gateway": "172.23.31.1", + }, + { + "type": "route", + "id": 5, + "metric": 0, + "destination": "192.168.2.0/16", + "gateway": "172.23.31.1", + }, + { + "type": "route", + "id": 6, + "metric": 1, + "destination": "10.0.200.0/16", + "gateway": "172.23.31.1", + }, + { + "type": "route", + "id": 7, + "metric": 1, + "destination": "10.0.0.100/32", + "gateway": "172.23.31.1", + }, ] files = self._render_and_read( - network_config={'config': conf, 'version': 1}) + network_config={"config": conf, "version": 1} + ) expected = [ - 'auto lo', - 'iface lo inet loopback', - 'auto eth0', - 'iface eth0 inet static', - ' address 172.23.31.42/26', - ' gateway 172.23.31.2', - ('post-up route add -net 10.0.0.0/12 gw ' - '172.23.31.1 metric 0 || true'), - ('pre-down route del -net 10.0.0.0/12 gw ' - '172.23.31.1 metric 0 || true'), - ('post-up route add -net 192.168.2.0/16 gw ' - '172.23.31.1 metric 0 || true'), - ('pre-down route del -net 192.168.2.0/16 gw ' - '172.23.31.1 metric 0 || true'), - ('post-up route add -net 10.0.200.0/16 gw ' - '172.23.31.1 metric 1 || true'), - ('pre-down route del -net 10.0.200.0/16 gw ' - '172.23.31.1 metric 1 || true'), - ('post-up route add -host 10.0.0.100/32 gw ' - '172.23.31.1 metric 1 || true'), - ('pre-down route del -host 10.0.0.100/32 gw ' - '172.23.31.1 metric 1 || true'), + "auto lo", + "iface lo inet loopback", + "auto eth0", + "iface eth0 inet static", + " address 172.23.31.42/26", + " gateway 172.23.31.2", + "post-up route add -net 10.0.0.0/12 gw " + "172.23.31.1 metric 0 || true", + "pre-down route del -net 10.0.0.0/12 gw " + "172.23.31.1 metric 0 || true", + "post-up route add -net 192.168.2.0/16 gw " + "172.23.31.1 metric 0 || true", + "pre-down route del -net 192.168.2.0/16 gw " + "172.23.31.1 metric 0 || true", + "post-up route add -net 10.0.200.0/16 gw " + "172.23.31.1 metric 1 || true", + "pre-down route del -net 10.0.200.0/16 gw " + "172.23.31.1 metric 1 || true", + "post-up route add -host 10.0.0.100/32 gw " + "172.23.31.1 metric 1 || true", + "pre-down route del -host 10.0.0.100/32 gw " + "172.23.31.1 metric 1 || 
true", ] - found = files['/etc/network/interfaces'].splitlines() + found = files["/etc/network/interfaces"].splitlines() - self.assertEqual( - expected, [line for line in found if line]) + self.assertEqual(expected, [line for line in found if line]) def test_ipv6_static_routes(self): # as reported in bug 1818669 conf = [ - {'name': 'eno3', 'type': 'physical', - 'subnets': [{ - 'address': 'fd00::12/64', - 'dns_nameservers': ['fd00:2::15'], - 'gateway': 'fd00::1', - 'ipv6': True, - 'type': 'static', - 'routes': [{'netmask': '32', - 'network': 'fd00:12::', - 'gateway': 'fd00::2'}, - {'network': 'fd00:14::', - 'gateway': 'fd00::3'}, - {'destination': 'fe00:14::/48', - 'gateway': 'fe00::4', - 'metric': 500}, - {'gateway': '192.168.23.1', - 'metric': 999, - 'netmask': 24, - 'network': '192.168.23.0'}, - {'destination': '10.23.23.0/24', - 'gateway': '10.23.23.2', - 'metric': 300}]}]}, + { + "name": "eno3", + "type": "physical", + "subnets": [ + { + "address": "fd00::12/64", + "dns_nameservers": ["fd00:2::15"], + "gateway": "fd00::1", + "ipv6": True, + "type": "static", + "routes": [ + { + "netmask": "32", + "network": "fd00:12::", + "gateway": "fd00::2", + }, + {"network": "fd00:14::", "gateway": "fd00::3"}, + { + "destination": "fe00:14::/48", + "gateway": "fe00::4", + "metric": 500, + }, + { + "gateway": "192.168.23.1", + "metric": 999, + "netmask": 24, + "network": "192.168.23.0", + }, + { + "destination": "10.23.23.0/24", + "gateway": "10.23.23.2", + "metric": 300, + }, + ], + } + ], + }, ] files = self._render_and_read( - network_config={'config': conf, 'version': 1}) + network_config={"config": conf, "version": 1} + ) expected = [ - 'auto lo', - 'iface lo inet loopback', - 'auto eno3', - 'iface eno3 inet6 static', - ' address fd00::12/64', - ' dns-nameservers fd00:2::15', - ' gateway fd00::1', - (' post-up route add -A inet6 fd00:12::/32 gw ' - 'fd00::2 || true'), - (' pre-down route del -A inet6 fd00:12::/32 gw ' - 'fd00::2 || true'), - (' post-up route add -A inet6 fd00:14::/64 gw ' - 'fd00::3 || true'), - (' pre-down route del -A inet6 fd00:14::/64 gw ' - 'fd00::3 || true'), - (' post-up route add -A inet6 fe00:14::/48 gw ' - 'fe00::4 metric 500 || true'), - (' pre-down route del -A inet6 fe00:14::/48 gw ' - 'fe00::4 metric 500 || true'), - (' post-up route add -net 192.168.23.0/24 gw ' - '192.168.23.1 metric 999 || true'), - (' pre-down route del -net 192.168.23.0/24 gw ' - '192.168.23.1 metric 999 || true'), - (' post-up route add -net 10.23.23.0/24 gw ' - '10.23.23.2 metric 300 || true'), - (' pre-down route del -net 10.23.23.0/24 gw ' - '10.23.23.2 metric 300 || true'), - + "auto lo", + "iface lo inet loopback", + "auto eno3", + "iface eno3 inet6 static", + " address fd00::12/64", + " dns-nameservers fd00:2::15", + " gateway fd00::1", + " post-up route add -A inet6 fd00:12::/32 gw fd00::2 || true", + " pre-down route del -A inet6 fd00:12::/32 gw fd00::2 || true", + " post-up route add -A inet6 fd00:14::/64 gw fd00::3 || true", + " pre-down route del -A inet6 fd00:14::/64 gw fd00::3 || true", + " post-up route add -A inet6 fe00:14::/48 gw " + "fe00::4 metric 500 || true", + " pre-down route del -A inet6 fe00:14::/48 gw " + "fe00::4 metric 500 || true", + " post-up route add -net 192.168.23.0/24 gw " + "192.168.23.1 metric 999 || true", + " pre-down route del -net 192.168.23.0/24 gw " + "192.168.23.1 metric 999 || true", + " post-up route add -net 10.23.23.0/24 gw " + "10.23.23.2 metric 300 || true", + " pre-down route del -net 10.23.23.0/24 gw " + "10.23.23.2 metric 300 || true", ] - 
found = files['/etc/network/interfaces'].splitlines() + found = files["/etc/network/interfaces"].splitlines() - self.assertEqual( - expected, [line for line in found if line]) + self.assertEqual(expected, [line for line in found if line]) def testsimple_render_bond(self): - entry = NETWORK_CONFIGS['bond'] - files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + entry = NETWORK_CONFIGS["bond"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) self.assertEqual( - entry['expected_eni'].splitlines(), - files['/etc/network/interfaces'].splitlines()) + entry["expected_eni"].splitlines(), + files["/etc/network/interfaces"].splitlines(), + ) class TestNetworkdNetRendering(CiTestCase): - def create_conf_dict(self, contents): content_dict = {} for line in contents: if line: line = line.strip() - if line and re.search(r'^\[(.+)\]$', line): + if line and re.search(r"^\[(.+)\]$", line): content_dict[line] = [] key = line elif line: @@ -5073,40 +5875,48 @@ class TestNetworkdNetRendering(CiTestCase): @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") - def test_networkd_default_generation(self, mock_get_devicelist, - mock_read_sys_net, - mock_sys_dev_path, - m_get_cmdline, - m_chown): + def test_networkd_default_generation( + self, + mock_get_devicelist, + mock_read_sys_net, + mock_sys_dev_path, + m_get_cmdline, + m_chown, + ): tmp_dir = self.tmp_dir() - _setup_test(tmp_dir, mock_get_devicelist, - mock_read_sys_net, mock_sys_dev_path) + _setup_test( + tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path + ) network_cfg = net.generate_fallback_config() - ns = network_state.parse_net_config_data(network_cfg, - skip_broken=False) + ns = network_state.parse_net_config_data( + network_cfg, skip_broken=False + ) render_dir = os.path.join(tmp_dir, "render") os.makedirs(render_dir) - render_target = 'etc/systemd/network/10-cloud-init-eth1000.network' + render_target = "etc/systemd/network/10-cloud-init-eth1000.network" renderer = networkd.Renderer({}) renderer.render_network_state(ns, target=render_dir) - self.assertTrue(os.path.exists(os.path.join(render_dir, - render_target))) + self.assertTrue( + os.path.exists(os.path.join(render_dir, render_target)) + ) with open(os.path.join(render_dir, render_target)) as fh: contents = fh.readlines() actual = self.create_conf_dict(contents) print(actual) - expected = textwrap.dedent("""\ + expected = textwrap.dedent( + """\ [Match] Name=eth1000 MACAddress=07-1c-c6-75-a4-be [Network] - DHCP=ipv4""").rstrip(' ') + DHCP=ipv4""" + ).rstrip(" ") expected = self.create_conf_dict(expected.splitlines()) @@ -5114,13 +5924,12 @@ class TestNetworkdNetRendering(CiTestCase): class TestNetworkdRoundTrip(CiTestCase): - def create_conf_dict(self, contents): content_dict = {} for line in contents: if line: line = line.strip() - if line and re.search(r'^\[(.+)\]$', line): + if line and re.search(r"^\[(.+)\]$", line): content_dict[line] = [] key = line elif line: @@ -5132,8 +5941,9 @@ class TestNetworkdRoundTrip(CiTestCase): for k, v in actual.items(): self.assertEqual(sorted(expected[k]), sorted(v)) - def _render_and_read(self, network_config=None, state=None, nwkd_path=None, - dir=None): + def _render_and_read( + self, network_config=None, state=None, nwkd_path=None, dir=None + ): if dir is None: dir = self.tmp_dir() @@ -5145,24 +5955,24 @@ class TestNetworkdRoundTrip(CiTestCase): raise ValueError("Expected data or state, got neither") if not nwkd_path: - 
nwkd_path = '/etc/systemd/network/' + nwkd_path = "/etc/systemd/network/" - renderer = networkd.Renderer(config={'network_conf_dir': nwkd_path}) + renderer = networkd.Renderer(config={"network_conf_dir": nwkd_path}) renderer.render_network_state(ns, target=dir) return dir2dict(dir) @mock.patch("cloudinit.net.util.chownbyname", return_value=True) def testsimple_render_small_networkd(self, m_chown): - nwk_fn1 = '/etc/systemd/network/10-cloud-init-eth99.network' - nwk_fn2 = '/etc/systemd/network/10-cloud-init-eth1.network' - entry = NETWORK_CONFIGS['small'] - files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + nwk_fn1 = "/etc/systemd/network/10-cloud-init-eth99.network" + nwk_fn2 = "/etc/systemd/network/10-cloud-init-eth1.network" + entry = NETWORK_CONFIGS["small"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) actual = files[nwk_fn1].splitlines() actual = self.create_conf_dict(actual) - expected = entry['expected_networkd_eth99'].splitlines() + expected = entry["expected_networkd_eth99"].splitlines() expected = self.create_conf_dict(expected) self.compare_dicts(actual, expected) @@ -5170,135 +5980,146 @@ class TestNetworkdRoundTrip(CiTestCase): actual = files[nwk_fn2].splitlines() actual = self.create_conf_dict(actual) - expected = entry['expected_networkd_eth1'].splitlines() + expected = entry["expected_networkd_eth1"].splitlines() expected = self.create_conf_dict(expected) self.compare_dicts(actual, expected) @mock.patch("cloudinit.net.util.chownbyname", return_value=True) def testsimple_render_v4_and_v6(self, m_chown): - nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network' - entry = NETWORK_CONFIGS['v4_and_v6'] - files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" + entry = NETWORK_CONFIGS["v4_and_v6"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) actual = files[nwk_fn].splitlines() actual = self.create_conf_dict(actual) - expected = entry['expected_networkd'].splitlines() + expected = entry["expected_networkd"].splitlines() expected = self.create_conf_dict(expected) self.compare_dicts(actual, expected) @mock.patch("cloudinit.net.util.chownbyname", return_value=True) def testsimple_render_v4_and_v6_static(self, m_chown): - nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network' - entry = NETWORK_CONFIGS['v4_and_v6_static'] - files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" + entry = NETWORK_CONFIGS["v4_and_v6_static"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) actual = files[nwk_fn].splitlines() actual = self.create_conf_dict(actual) - expected = entry['expected_networkd'].splitlines() + expected = entry["expected_networkd"].splitlines() expected = self.create_conf_dict(expected) self.compare_dicts(actual, expected) @mock.patch("cloudinit.net.util.chownbyname", return_value=True) def testsimple_render_dhcpv6_only(self, m_chown): - nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network' - entry = NETWORK_CONFIGS['dhcpv6_only'] - files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" + entry = NETWORK_CONFIGS["dhcpv6_only"] + files = self._render_and_read(network_config=yaml.load(entry["yaml"])) actual = files[nwk_fn].splitlines() actual = self.create_conf_dict(actual) - expected = entry['expected_networkd'].splitlines() + 
expected = entry["expected_networkd"].splitlines() expected = self.create_conf_dict(expected) self.compare_dicts(actual, expected) @mock.patch("cloudinit.net.util.chownbyname", return_value=True) def test_dhcpv6_accept_ra_config_v1(self, m_chown): - nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network' - entry = NETWORK_CONFIGS['dhcpv6_accept_ra'] - files = self._render_and_read(network_config=yaml.load( - entry['yaml_v1'])) + nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" + entry = NETWORK_CONFIGS["dhcpv6_accept_ra"] + files = self._render_and_read( + network_config=yaml.load(entry["yaml_v1"]) + ) actual = files[nwk_fn].splitlines() actual = self.create_conf_dict(actual) - expected = entry['expected_networkd'].splitlines() + expected = entry["expected_networkd"].splitlines() expected = self.create_conf_dict(expected) self.compare_dicts(actual, expected) @mock.patch("cloudinit.net.util.chownbyname", return_value=True) def test_dhcpv6_accept_ra_config_v2(self, m_chown): - nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network' - entry = NETWORK_CONFIGS['dhcpv6_accept_ra'] - files = self._render_and_read(network_config=yaml.load( - entry['yaml_v2'])) + nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" + entry = NETWORK_CONFIGS["dhcpv6_accept_ra"] + files = self._render_and_read( + network_config=yaml.load(entry["yaml_v2"]) + ) actual = files[nwk_fn].splitlines() actual = self.create_conf_dict(actual) - expected = entry['expected_networkd'].splitlines() + expected = entry["expected_networkd"].splitlines() expected = self.create_conf_dict(expected) self.compare_dicts(actual, expected) @mock.patch("cloudinit.net.util.chownbyname", return_value=True) def test_dhcpv6_reject_ra_config_v1(self, m_chown): - nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network' - entry = NETWORK_CONFIGS['dhcpv6_reject_ra'] - files = self._render_and_read(network_config=yaml.load( - entry['yaml_v1'])) + nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" + entry = NETWORK_CONFIGS["dhcpv6_reject_ra"] + files = self._render_and_read( + network_config=yaml.load(entry["yaml_v1"]) + ) actual = files[nwk_fn].splitlines() actual = self.create_conf_dict(actual) - expected = entry['expected_networkd'].splitlines() + expected = entry["expected_networkd"].splitlines() expected = self.create_conf_dict(expected) self.compare_dicts(actual, expected) @mock.patch("cloudinit.net.util.chownbyname", return_value=True) def test_dhcpv6_reject_ra_config_v2(self, m_chown): - nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network' - entry = NETWORK_CONFIGS['dhcpv6_reject_ra'] - files = self._render_and_read(network_config=yaml.load( - entry['yaml_v2'])) + nwk_fn = "/etc/systemd/network/10-cloud-init-iface0.network" + entry = NETWORK_CONFIGS["dhcpv6_reject_ra"] + files = self._render_and_read( + network_config=yaml.load(entry["yaml_v2"]) + ) actual = files[nwk_fn].splitlines() actual = self.create_conf_dict(actual) - expected = entry['expected_networkd'].splitlines() + expected = entry["expected_networkd"].splitlines() expected = self.create_conf_dict(expected) self.compare_dicts(actual, expected) class TestRenderersSelect: - @pytest.mark.parametrize( - 'renderer_selected,netplan,eni,nm,scfg,sys,networkd', ( + "renderer_selected,netplan,eni,nm,scfg,sys,networkd", + ( # -netplan -ifupdown -nm -scfg -sys raises error - (net.RendererNotFoundError, False, False, False, False, False, - False), + ( + net.RendererNotFoundError, + False, + False, + False, + False, + False, + False, + ), # 
-netplan +ifupdown -nm -scfg -sys selects eni - ('eni', False, True, False, False, False, False), + ("eni", False, True, False, False, False, False), # +netplan +ifupdown -nm -scfg -sys selects eni - ('eni', True, True, False, False, False, False), + ("eni", True, True, False, False, False, False), # +netplan -ifupdown -nm -scfg -sys selects netplan - ('netplan', True, False, False, False, False, False), + ("netplan", True, False, False, False, False, False), # Ubuntu with Network-Manager installed # +netplan -ifupdown +nm -scfg -sys selects netplan - ('netplan', True, False, True, False, False, False), + ("netplan", True, False, True, False, False, False), # Centos/OpenSuse with Network-Manager installed selects sysconfig # -netplan -ifupdown +nm -scfg +sys selects netplan - ('sysconfig', False, False, True, False, True, False), + ("sysconfig", False, False, True, False, True, False), # -netplan -ifupdown -nm -scfg -sys +networkd selects networkd - ('networkd', False, False, False, False, False, True), + ("networkd", False, False, False, False, False, True), ), ) @mock.patch("cloudinit.net.renderers.networkd.available") @@ -5308,15 +6129,26 @@ class TestRenderersSelect: @mock.patch("cloudinit.net.renderers.sysconfig.available_nm") @mock.patch("cloudinit.net.renderers.eni.available") def test_valid_renderer_from_defaults_depending_on_availability( - self, m_eni_avail, m_nm_avail, m_scfg_avail, m_sys_avail, - m_netplan_avail, m_networkd_avail, renderer_selected, - netplan, eni, nm, scfg, sys, networkd + self, + m_eni_avail, + m_nm_avail, + m_scfg_avail, + m_sys_avail, + m_netplan_avail, + m_networkd_avail, + renderer_selected, + netplan, + eni, + nm, + scfg, + sys, + networkd, ): """Assert proper renderer per DEFAULT_PRIORITY given availability.""" - m_eni_avail.return_value = eni # ifupdown pkg presence - m_nm_avail.return_value = nm # network-manager presence - m_scfg_avail.return_value = scfg # sysconfig presence - m_sys_avail.return_value = sys # sysconfig/ifup/down presence + m_eni_avail.return_value = eni # ifupdown pkg presence + m_nm_avail.return_value = nm # network-manager presence + m_scfg_avail.return_value = scfg # sysconfig presence + m_sys_avail.return_value = sys # sysconfig/ifup/down presence m_netplan_avail.return_value = netplan # netplan presence m_networkd_avail.return_value = networkd # networkd presence if isinstance(renderer_selected, str): @@ -5335,14 +6167,14 @@ class TestNetRenderers(CiTestCase): def test_eni_and_sysconfig_available(self, m_eni_avail, m_sysc_avail): m_eni_avail.return_value = True m_sysc_avail.return_value = True - found = renderers.search(priority=['sysconfig', 'eni'], first=False) + found = renderers.search(priority=["sysconfig", "eni"], first=False) names = [f[0] for f in found] - self.assertEqual(['sysconfig', 'eni'], names) + self.assertEqual(["sysconfig", "eni"], names) @mock.patch("cloudinit.net.renderers.eni.available") def test_search_returns_empty_on_none(self, m_eni_avail): m_eni_avail.return_value = False - found = renderers.search(priority=['eni'], first=False) + found = renderers.search(priority=["eni"], first=False) self.assertEqual([], found) @mock.patch("cloudinit.net.renderers.sysconfig.available") @@ -5351,16 +6183,16 @@ class TestNetRenderers(CiTestCase): # available should only be called until one is found. 
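The availability matrix above and the search/select tests in this class reduce to one ordered scan with early exit. A sketch of that shape (parameter shapes assumed; the real functions live in cloudinit.net.renderers and resolve renderer names to their modules):

class RendererNotFoundError(RuntimeError):
    """Raised when no renderer in the priority list is available."""

def search(priority, first=False):
    # priority: ordered (name, module) pairs; module.available() probes
    # whether that renderer can actually work on this system.
    found = []
    for name, module in priority:
        if module.available():
            found.append((name, module))
            if first:
                break  # stop probing: later availability checks may be costly
    return found

def select(priority):
    found = search(priority, first=True)
    if not found:
        raise RendererNotFoundError(
            "No available network renderers found. Searched: %s"
            % [name for name, _ in priority]
        )
    return found[0]

The "should not call me" side effect below is exactly the early-exit guarantee: with first=True, sysconfig.available() must never be probed once eni has matched.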
m_eni_avail.return_value = True m_sysc_avail.side_effect = Exception("Should not call me") - found = renderers.search(priority=['eni', 'sysconfig'], first=True)[0] - self.assertEqual(['eni'], [found[0]]) + found = renderers.search(priority=["eni", "sysconfig"], first=True)[0] + self.assertEqual(["eni"], [found[0]]) @mock.patch("cloudinit.net.renderers.sysconfig.available") @mock.patch("cloudinit.net.renderers.eni.available") def test_select_positive(self, m_eni_avail, m_sysc_avail): m_eni_avail.return_value = True m_sysc_avail.return_value = False - found = renderers.select(priority=['sysconfig', 'eni']) - self.assertEqual('eni', found[0]) + found = renderers.select(priority=["sysconfig", "eni"]) + self.assertEqual("eni", found[0]) @mock.patch("cloudinit.net.renderers.sysconfig.available") @mock.patch("cloudinit.net.renderers.eni.available") @@ -5369,24 +6201,25 @@ class TestNetRenderers(CiTestCase): m_eni_avail.return_value = False m_sysc_avail.return_value = False - self.assertRaises(net.RendererNotFoundError, renderers.select, - priority=['sysconfig', 'eni']) + self.assertRaises( + net.RendererNotFoundError, + renderers.select, + priority=["sysconfig", "eni"], + ) @mock.patch("cloudinit.net.sysconfig.available_sysconfig") @mock.patch("cloudinit.util.system_info") def test_sysconfig_available_uses_variant_mapping(self, m_info, m_avail): m_avail.return_value = True variants = [ - 'suse', - 'centos', - 'eurolinux', - 'fedora', - 'rhel', + "suse", + "centos", + "eurolinux", + "fedora", + "rhel", ] for distro_name in variants: - m_info.return_value = { - "variant": distro_name - } + m_info.return_value = {"variant": distro_name} if hasattr(util.system_info, "cache_clear"): util.system_info.cache_clear() result = sysconfig.available() @@ -5395,73 +6228,93 @@ class TestNetRenderers(CiTestCase): @mock.patch("cloudinit.net.renderers.networkd.available") def test_networkd_available(self, m_nwkd_avail): m_nwkd_avail.return_value = True - found = renderers.search(priority=['networkd'], first=False) - self.assertEqual('networkd', found[0][0]) + found = renderers.search(priority=["networkd"], first=False) + self.assertEqual("networkd", found[0][0]) @mock.patch( "cloudinit.net.is_openvswitch_internal_interface", - mock.Mock(return_value=False) + mock.Mock(return_value=False), ) class TestGetInterfaces(CiTestCase): - _data = {'bonds': ['bond1'], - 'bridges': ['bridge1'], - 'vlans': ['bond1.101'], - 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1', - 'bond1.101', 'lo', 'eth1'], - 'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01', - 'enp0s2': 'aa:aa:aa:aa:aa:02', - 'bond1': 'aa:aa:aa:aa:aa:01', - 'bond1.101': 'aa:aa:aa:aa:aa:01', - 'bridge1': 'aa:aa:aa:aa:aa:03', - 'bridge1-nic': 'aa:aa:aa:aa:aa:03', - 'lo': '00:00:00:00:00:00', - 'greptap0': '00:00:00:00:00:00', - 'eth1': 'aa:aa:aa:aa:aa:01', - 'tun0': None}, - 'drivers': {'enp0s1': 'virtio_net', - 'enp0s2': 'e1000', - 'bond1': None, - 'bond1.101': None, - 'bridge1': None, - 'bridge1-nic': None, - 'lo': None, - 'greptap0': None, - 'eth1': 'mlx4_core', - 'tun0': None}} + _data = { + "bonds": ["bond1"], + "bridges": ["bridge1"], + "vlans": ["bond1.101"], + "own_macs": [ + "enp0s1", + "enp0s2", + "bridge1-nic", + "bridge1", + "bond1.101", + "lo", + "eth1", + ], + "macs": { + "enp0s1": "aa:aa:aa:aa:aa:01", + "enp0s2": "aa:aa:aa:aa:aa:02", + "bond1": "aa:aa:aa:aa:aa:01", + "bond1.101": "aa:aa:aa:aa:aa:01", + "bridge1": "aa:aa:aa:aa:aa:03", + "bridge1-nic": "aa:aa:aa:aa:aa:03", + "lo": "00:00:00:00:00:00", + "greptap0": "00:00:00:00:00:00", + "eth1": 
"aa:aa:aa:aa:aa:01", + "tun0": None, + }, + "drivers": { + "enp0s1": "virtio_net", + "enp0s2": "e1000", + "bond1": None, + "bond1.101": None, + "bridge1": None, + "bridge1-nic": None, + "lo": None, + "greptap0": None, + "eth1": "mlx4_core", + "tun0": None, + }, + } data = {} def _se_get_devicelist(self): - return list(self.data['devices']) + return list(self.data["devices"]) def _se_device_driver(self, name): - return self.data['drivers'][name] + return self.data["drivers"][name] def _se_device_devid(self, name): - return '0x%s' % sorted(list(self.data['drivers'].keys())).index(name) + return "0x%s" % sorted(list(self.data["drivers"].keys())).index(name) def _se_get_interface_mac(self, name): - return self.data['macs'][name] + return self.data["macs"][name] def _se_is_bridge(self, name): - return name in self.data['bridges'] + return name in self.data["bridges"] def _se_is_vlan(self, name): - return name in self.data['vlans'] + return name in self.data["vlans"] def _se_interface_has_own_mac(self, name): - return name in self.data['own_macs'] + return name in self.data["own_macs"] def _mock_setup(self): self.data = copy.deepcopy(self._data) - self.data['devices'] = set(list(self.data['macs'].keys())) - mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge', - 'interface_has_own_mac', 'is_vlan', 'device_driver', - 'device_devid') + self.data["devices"] = set(list(self.data["macs"].keys())) + mocks = ( + "get_devicelist", + "get_interface_mac", + "is_bridge", + "interface_has_own_mac", + "is_vlan", + "device_driver", + "device_devid", + ) self.mocks = {} for n in mocks: - m = mock.patch('cloudinit.net.' + n, - side_effect=getattr(self, '_se_' + n)) + m = mock.patch( + "cloudinit.net." + n, side_effect=getattr(self, "_se_" + n) + ) self.addCleanup(m.stop) self.mocks[n] = m.start() @@ -5469,30 +6322,31 @@ class TestGetInterfaces(CiTestCase): self._mock_setup() ret = net.get_interfaces() - self.assertIn('enp0s1', self._se_get_devicelist()) - self.assertIn('eth1', self._se_get_devicelist()) - found = [ent for ent in ret if 'aa:aa:aa:aa:aa:01' in ent] + self.assertIn("enp0s1", self._se_get_devicelist()) + self.assertIn("eth1", self._se_get_devicelist()) + found = [ent for ent in ret if "aa:aa:aa:aa:aa:01" in ent] self.assertEqual(len(found), 2) def test_gi_excludes_any_without_mac_address(self): self._mock_setup() ret = net.get_interfaces() - self.assertIn('tun0', self._se_get_devicelist()) - found = [ent for ent in ret if 'tun0' in ent] + self.assertIn("tun0", self._se_get_devicelist()) + found = [ent for ent in ret if "tun0" in ent] self.assertEqual(len(found), 0) def test_gi_excludes_stolen_macs(self): self._mock_setup() ret = net.get_interfaces() - self.mocks['interface_has_own_mac'].assert_has_calls( - [mock.call('enp0s1'), mock.call('bond1')], any_order=True) + self.mocks["interface_has_own_mac"].assert_has_calls( + [mock.call("enp0s1"), mock.call("bond1")], any_order=True + ) expected = [ - ('enp0s2', 'aa:aa:aa:aa:aa:02', 'e1000', '0x5'), - ('enp0s1', 'aa:aa:aa:aa:aa:01', 'virtio_net', '0x4'), - ('eth1', 'aa:aa:aa:aa:aa:01', 'mlx4_core', '0x6'), - ('lo', '00:00:00:00:00:00', None, '0x8'), - ('bridge1-nic', 'aa:aa:aa:aa:aa:03', None, '0x3'), + ("enp0s2", "aa:aa:aa:aa:aa:02", "e1000", "0x5"), + ("enp0s1", "aa:aa:aa:aa:aa:01", "virtio_net", "0x4"), + ("eth1", "aa:aa:aa:aa:aa:01", "mlx4_core", "0x6"), + ("lo", "00:00:00:00:00:00", None, "0x8"), + ("bridge1-nic", "aa:aa:aa:aa:aa:03", None, "0x3"), ] self.assertEqual(sorted(expected), sorted(ret)) @@ -5501,24 +6355,29 @@ class 
TestGetInterfaces(CiTestCase): # add a device 'b1', make all return they have their "own mac", # set everything other than 'b1' to be a bridge. # then expect b1 is the only thing left. - self.data['macs']['b1'] = 'aa:aa:aa:aa:aa:b1' - self.data['drivers']['b1'] = None - self.data['devices'].add('b1') - self.data['bonds'] = [] - self.data['own_macs'] = self.data['devices'] - self.data['bridges'] = [f for f in self.data['devices'] if f != "b1"] + self.data["macs"]["b1"] = "aa:aa:aa:aa:aa:b1" + self.data["drivers"]["b1"] = None + self.data["devices"].add("b1") + self.data["bonds"] = [] + self.data["own_macs"] = self.data["devices"] + self.data["bridges"] = [f for f in self.data["devices"] if f != "b1"] ret = net.get_interfaces() - self.assertEqual([('b1', 'aa:aa:aa:aa:aa:b1', None, '0x0')], ret) - self.mocks['is_bridge'].assert_has_calls( - [mock.call('bridge1'), mock.call('enp0s1'), mock.call('bond1'), - mock.call('b1')], - any_order=True) + self.assertEqual([("b1", "aa:aa:aa:aa:aa:b1", None, "0x0")], ret) + self.mocks["is_bridge"].assert_has_calls( + [ + mock.call("bridge1"), + mock.call("enp0s1"), + mock.call("bond1"), + mock.call("b1"), + ], + any_order=True, + ) class TestInterfaceHasOwnMac(CiTestCase): """Test interface_has_own_mac. This is admittedly a bit whitebox.""" - @mock.patch('cloudinit.net.read_sys_net_int', return_value=None) + @mock.patch("cloudinit.net.read_sys_net_int", return_value=None) def test_non_strict_with_no_addr_assign_type(self, m_read_sys_net_int): """If nic does not have addr_assign_type, it is not "stolen". @@ -5535,18 +6394,19 @@ class TestInterfaceHasOwnMac(CiTestCase): """ self.assertTrue(interface_has_own_mac("eth0")) - @mock.patch('cloudinit.net.read_sys_net_int', return_value=None) + @mock.patch("cloudinit.net.read_sys_net_int", return_value=None) def test_strict_with_no_addr_assign_type_raises(self, m_read_sys_net_int): with self.assertRaises(ValueError): interface_has_own_mac("eth0", True) - @mock.patch('cloudinit.net.read_sys_net_int') + @mock.patch("cloudinit.net.read_sys_net_int") def test_expected_values(self, m_read_sys_net_int): msg = "address_assign_type=%d said to not have own mac" for address_assign_type in (0, 1, 3): m_read_sys_net_int.return_value = address_assign_type self.assertTrue( - interface_has_own_mac("eth0", msg % address_assign_type)) + interface_has_own_mac("eth0", msg % address_assign_type) + ) m_read_sys_net_int.return_value = 2 self.assertFalse(interface_has_own_mac("eth0")) @@ -5554,218 +6414,281 @@ class TestInterfaceHasOwnMac(CiTestCase): @mock.patch( "cloudinit.net.is_openvswitch_internal_interface", - mock.Mock(return_value=False) + mock.Mock(return_value=False), ) class TestGetInterfacesByMac(CiTestCase): - _data = {'bonds': ['bond1'], - 'bridges': ['bridge1'], - 'vlans': ['bond1.101'], - 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1', - 'bond1.101', 'lo'], - 'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01', - 'enp0s2': 'aa:aa:aa:aa:aa:02', - 'bond1': 'aa:aa:aa:aa:aa:01', - 'bond1.101': 'aa:aa:aa:aa:aa:01', - 'bridge1': 'aa:aa:aa:aa:aa:03', - 'bridge1-nic': 'aa:aa:aa:aa:aa:03', - 'lo': '00:00:00:00:00:00', - 'greptap0': '00:00:00:00:00:00', - 'tun0': None}} + _data = { + "bonds": ["bond1"], + "bridges": ["bridge1"], + "vlans": ["bond1.101"], + "own_macs": [ + "enp0s1", + "enp0s2", + "bridge1-nic", + "bridge1", + "bond1.101", + "lo", + ], + "macs": { + "enp0s1": "aa:aa:aa:aa:aa:01", + "enp0s2": "aa:aa:aa:aa:aa:02", + "bond1": "aa:aa:aa:aa:aa:01", + "bond1.101": "aa:aa:aa:aa:aa:01", + "bridge1": "aa:aa:aa:aa:aa:03", + 
"bridge1-nic": "aa:aa:aa:aa:aa:03", + "lo": "00:00:00:00:00:00", + "greptap0": "00:00:00:00:00:00", + "tun0": None, + }, + } data = {} def _se_get_devicelist(self): - return list(self.data['devices']) + return list(self.data["devices"]) def _se_get_interface_mac(self, name): - return self.data['macs'][name] + return self.data["macs"][name] def _se_is_bridge(self, name): - return name in self.data['bridges'] + return name in self.data["bridges"] def _se_is_vlan(self, name): - return name in self.data['vlans'] + return name in self.data["vlans"] def _se_interface_has_own_mac(self, name): - return name in self.data['own_macs'] + return name in self.data["own_macs"] def _se_get_ib_interface_hwaddr(self, name, ethernet_format): - ib_hwaddr = self.data.get('ib_hwaddr', {}) + ib_hwaddr = self.data.get("ib_hwaddr", {}) return ib_hwaddr.get(name, {}).get(ethernet_format) def _mock_setup(self): self.data = copy.deepcopy(self._data) - self.data['devices'] = set(list(self.data['macs'].keys())) - mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge', - 'interface_has_own_mac', 'is_vlan', 'get_ib_interface_hwaddr') + self.data["devices"] = set(list(self.data["macs"].keys())) + mocks = ( + "get_devicelist", + "get_interface_mac", + "is_bridge", + "interface_has_own_mac", + "is_vlan", + "get_ib_interface_hwaddr", + ) self.mocks = {} for n in mocks: - m = mock.patch('cloudinit.net.' + n, - side_effect=getattr(self, '_se_' + n)) + m = mock.patch( + "cloudinit.net." + n, side_effect=getattr(self, "_se_" + n) + ) self.addCleanup(m.stop) self.mocks[n] = m.start() def test_raise_exception_on_duplicate_macs(self): self._mock_setup() - self.data['macs']['bridge1-nic'] = self.data['macs']['enp0s1'] + self.data["macs"]["bridge1-nic"] = self.data["macs"]["enp0s1"] self.assertRaises(RuntimeError, net.get_interfaces_by_mac) def test_excludes_any_without_mac_address(self): self._mock_setup() ret = net.get_interfaces_by_mac() - self.assertIn('tun0', self._se_get_devicelist()) - self.assertNotIn('tun0', ret.values()) + self.assertIn("tun0", self._se_get_devicelist()) + self.assertNotIn("tun0", ret.values()) def test_excludes_stolen_macs(self): self._mock_setup() ret = net.get_interfaces_by_mac() - self.mocks['interface_has_own_mac'].assert_has_calls( - [mock.call('enp0s1'), mock.call('bond1')], any_order=True) + self.mocks["interface_has_own_mac"].assert_has_calls( + [mock.call("enp0s1"), mock.call("bond1")], any_order=True + ) self.assertEqual( - {'aa:aa:aa:aa:aa:01': 'enp0s1', 'aa:aa:aa:aa:aa:02': 'enp0s2', - 'aa:aa:aa:aa:aa:03': 'bridge1-nic', '00:00:00:00:00:00': 'lo'}, - ret) + { + "aa:aa:aa:aa:aa:01": "enp0s1", + "aa:aa:aa:aa:aa:02": "enp0s2", + "aa:aa:aa:aa:aa:03": "bridge1-nic", + "00:00:00:00:00:00": "lo", + }, + ret, + ) def test_excludes_bridges(self): self._mock_setup() # add a device 'b1', make all return they have their "own mac", # set everything other than 'b1' to be a bridge. # then expect b1 is the only thing left. 
- self.data['macs']['b1'] = 'aa:aa:aa:aa:aa:b1' - self.data['devices'].add('b1') - self.data['bonds'] = [] - self.data['own_macs'] = self.data['devices'] - self.data['bridges'] = [f for f in self.data['devices'] if f != "b1"] + self.data["macs"]["b1"] = "aa:aa:aa:aa:aa:b1" + self.data["devices"].add("b1") + self.data["bonds"] = [] + self.data["own_macs"] = self.data["devices"] + self.data["bridges"] = [f for f in self.data["devices"] if f != "b1"] ret = net.get_interfaces_by_mac() - self.assertEqual({'aa:aa:aa:aa:aa:b1': 'b1'}, ret) - self.mocks['is_bridge'].assert_has_calls( - [mock.call('bridge1'), mock.call('enp0s1'), mock.call('bond1'), - mock.call('b1')], - any_order=True) + self.assertEqual({"aa:aa:aa:aa:aa:b1": "b1"}, ret) + self.mocks["is_bridge"].assert_has_calls( + [ + mock.call("bridge1"), + mock.call("enp0s1"), + mock.call("bond1"), + mock.call("b1"), + ], + any_order=True, + ) def test_excludes_vlans(self): self._mock_setup() # add a device 'b1', make all return they have their "own mac", # set everything other than 'b1' to be a vlan. # then expect b1 is the only thing left. - self.data['macs']['b1'] = 'aa:aa:aa:aa:aa:b1' - self.data['devices'].add('b1') - self.data['bonds'] = [] - self.data['bridges'] = [] - self.data['own_macs'] = self.data['devices'] - self.data['vlans'] = [f for f in self.data['devices'] if f != "b1"] + self.data["macs"]["b1"] = "aa:aa:aa:aa:aa:b1" + self.data["devices"].add("b1") + self.data["bonds"] = [] + self.data["bridges"] = [] + self.data["own_macs"] = self.data["devices"] + self.data["vlans"] = [f for f in self.data["devices"] if f != "b1"] ret = net.get_interfaces_by_mac() - self.assertEqual({'aa:aa:aa:aa:aa:b1': 'b1'}, ret) - self.mocks['is_vlan'].assert_has_calls( - [mock.call('bridge1'), mock.call('enp0s1'), mock.call('bond1'), - mock.call('b1')], - any_order=True) + self.assertEqual({"aa:aa:aa:aa:aa:b1": "b1"}, ret) + self.mocks["is_vlan"].assert_has_calls( + [ + mock.call("bridge1"), + mock.call("enp0s1"), + mock.call("bond1"), + mock.call("b1"), + ], + any_order=True, + ) def test_duplicates_of_empty_mac_are_ok(self): """Duplicate macs of 00:00:00:00:00:00 should be skipped.""" self._mock_setup() empty_mac = "00:00:00:00:00:00" - addnics = ('greptap1', 'lo', 'greptap2') - self.data['macs'].update(dict((k, empty_mac) for k in addnics)) - self.data['devices'].update(set(addnics)) - self.data['own_macs'].extend(list(addnics)) + addnics = ("greptap1", "lo", "greptap2") + self.data["macs"].update(dict((k, empty_mac) for k in addnics)) + self.data["devices"].update(set(addnics)) + self.data["own_macs"].extend(list(addnics)) ret = net.get_interfaces_by_mac() - self.assertEqual('lo', ret[empty_mac]) + self.assertEqual("lo", ret[empty_mac]) def test_skip_all_zeros(self): """Any mac of 00:... 
should be skipped.""" self._mock_setup() emac1, emac2, emac4, emac6 = ( - '00', '00:00', '00:00:00:00', '00:00:00:00:00:00') - addnics = {'empty1': emac1, 'emac2a': emac2, 'emac2b': emac2, - 'emac4': emac4, 'emac6': emac6} - self.data['macs'].update(addnics) - self.data['devices'].update(set(addnics)) - self.data['own_macs'].extend(addnics.keys()) + "00", + "00:00", + "00:00:00:00", + "00:00:00:00:00:00", + ) + addnics = { + "empty1": emac1, + "emac2a": emac2, + "emac2b": emac2, + "emac4": emac4, + "emac6": emac6, + } + self.data["macs"].update(addnics) + self.data["devices"].update(set(addnics)) + self.data["own_macs"].extend(addnics.keys()) ret = net.get_interfaces_by_mac() - self.assertEqual('lo', ret['00:00:00:00:00:00']) + self.assertEqual("lo", ret["00:00:00:00:00:00"]) def test_ib(self): - ib_addr = '80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56' - ib_addr_eth_format = '00:11:22:33:44:56' + ib_addr = "80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56" + ib_addr_eth_format = "00:11:22:33:44:56" self._mock_setup() - self.data['devices'] = ['enp0s1', 'ib0'] - self.data['own_macs'].append('ib0') - self.data['macs']['ib0'] = ib_addr - self.data['ib_hwaddr'] = {'ib0': {True: ib_addr_eth_format, - False: ib_addr}} + self.data["devices"] = ["enp0s1", "ib0"] + self.data["own_macs"].append("ib0") + self.data["macs"]["ib0"] = ib_addr + self.data["ib_hwaddr"] = { + "ib0": {True: ib_addr_eth_format, False: ib_addr} + } result = net.get_interfaces_by_mac() - expected = {'aa:aa:aa:aa:aa:01': 'enp0s1', - ib_addr_eth_format: 'ib0', ib_addr: 'ib0'} + expected = { + "aa:aa:aa:aa:aa:01": "enp0s1", + ib_addr_eth_format: "ib0", + ib_addr: "ib0", + } self.assertEqual(expected, result) class TestInterfacesSorting(CiTestCase): - def test_natural_order(self): - data = ['ens5', 'ens6', 'ens3', 'ens20', 'ens13', 'ens2'] + data = ["ens5", "ens6", "ens3", "ens20", "ens13", "ens2"] self.assertEqual( sorted(data, key=natural_sort_key), - ['ens2', 'ens3', 'ens5', 'ens6', 'ens13', 'ens20']) - data2 = ['enp2s0', 'enp2s3', 'enp0s3', 'enp0s13', 'enp0s8', 'enp1s2'] + ["ens2", "ens3", "ens5", "ens6", "ens13", "ens20"], + ) + data2 = ["enp2s0", "enp2s3", "enp0s3", "enp0s13", "enp0s8", "enp1s2"] self.assertEqual( sorted(data2, key=natural_sort_key), - ['enp0s3', 'enp0s8', 'enp0s13', 'enp1s2', 'enp2s0', 'enp2s3']) + ["enp0s3", "enp0s8", "enp0s13", "enp1s2", "enp2s0", "enp2s3"], + ) @mock.patch( "cloudinit.net.is_openvswitch_internal_interface", - mock.Mock(return_value=False) + mock.Mock(return_value=False), ) class TestGetIBHwaddrsByInterface(CiTestCase): - _ib_addr = '80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56' - _ib_addr_eth_format = '00:11:22:33:44:56' - _data = {'devices': ['enp0s1', 'enp0s2', 'bond1', 'bridge1', - 'bridge1-nic', 'tun0', 'ib0'], - 'bonds': ['bond1'], - 'bridges': ['bridge1'], - 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1', 'ib0'], - 'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01', - 'enp0s2': 'aa:aa:aa:aa:aa:02', - 'bond1': 'aa:aa:aa:aa:aa:01', - 'bridge1': 'aa:aa:aa:aa:aa:03', - 'bridge1-nic': 'aa:aa:aa:aa:aa:03', - 'tun0': None, - 'ib0': _ib_addr}, - 'ib_hwaddr': {'ib0': {True: _ib_addr_eth_format, - False: _ib_addr}}} + _ib_addr = "80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56" + _ib_addr_eth_format = "00:11:22:33:44:56" + _data = { + "devices": [ + "enp0s1", + "enp0s2", + "bond1", + "bridge1", + "bridge1-nic", + "tun0", + "ib0", + ], + "bonds": ["bond1"], + "bridges": ["bridge1"], + "own_macs": ["enp0s1", "enp0s2", "bridge1-nic", "bridge1", 
"ib0"], + "macs": { + "enp0s1": "aa:aa:aa:aa:aa:01", + "enp0s2": "aa:aa:aa:aa:aa:02", + "bond1": "aa:aa:aa:aa:aa:01", + "bridge1": "aa:aa:aa:aa:aa:03", + "bridge1-nic": "aa:aa:aa:aa:aa:03", + "tun0": None, + "ib0": _ib_addr, + }, + "ib_hwaddr": {"ib0": {True: _ib_addr_eth_format, False: _ib_addr}}, + } data = {} def _mock_setup(self): self.data = copy.deepcopy(self._data) - mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge', - 'interface_has_own_mac', 'get_ib_interface_hwaddr') + mocks = ( + "get_devicelist", + "get_interface_mac", + "is_bridge", + "interface_has_own_mac", + "get_ib_interface_hwaddr", + ) self.mocks = {} for n in mocks: - m = mock.patch('cloudinit.net.' + n, - side_effect=getattr(self, '_se_' + n)) + m = mock.patch( + "cloudinit.net." + n, side_effect=getattr(self, "_se_" + n) + ) self.addCleanup(m.stop) self.mocks[n] = m.start() def _se_get_devicelist(self): - return self.data['devices'] + return self.data["devices"] def _se_get_interface_mac(self, name): - return self.data['macs'][name] + return self.data["macs"][name] def _se_is_bridge(self, name): - return name in self.data['bridges'] + return name in self.data["bridges"] def _se_interface_has_own_mac(self, name): - return name in self.data['own_macs'] + return name in self.data["own_macs"] def _se_get_ib_interface_hwaddr(self, name, ethernet_format): - ib_hwaddr = self.data.get('ib_hwaddr', {}) + ib_hwaddr = self.data.get("ib_hwaddr", {}) return ib_hwaddr.get(name, {}).get(ethernet_format) def test_ethernet(self): self._mock_setup() - self.data['devices'].remove('ib0') + self.data["devices"].remove("ib0") result = net.get_ib_hwaddrs_by_interface() expected = {} self.assertEqual(expected, result) @@ -5773,7 +6696,7 @@ class TestGetIBHwaddrsByInterface(CiTestCase): def test_ib(self): self._mock_setup() result = net.get_ib_hwaddrs_by_interface() - expected = {'ib0': self._ib_addr} + expected = {"ib0": self._ib_addr} self.assertEqual(expected, result) @@ -5786,239 +6709,305 @@ def _gzip_data(data): class TestRenameInterfaces(CiTestCase): - - @mock.patch('cloudinit.subp.subp') + @mock.patch("cloudinit.subp.subp") def test_rename_all(self, mock_subp): renames = [ - ('00:11:22:33:44:55', 'interface0', 'virtio_net', '0x3'), - ('00:11:22:33:44:aa', 'interface2', 'virtio_net', '0x5'), + ("00:11:22:33:44:55", "interface0", "virtio_net", "0x3"), + ("00:11:22:33:44:aa", "interface2", "virtio_net", "0x5"), ] current_info = { - 'ens3': { - 'downable': True, - 'device_id': '0x3', - 'driver': 'virtio_net', - 'mac': '00:11:22:33:44:55', - 'name': 'ens3', - 'up': False}, - 'ens5': { - 'downable': True, - 'device_id': '0x5', - 'driver': 'virtio_net', - 'mac': '00:11:22:33:44:aa', - 'name': 'ens5', - 'up': False}, + "ens3": { + "downable": True, + "device_id": "0x3", + "driver": "virtio_net", + "mac": "00:11:22:33:44:55", + "name": "ens3", + "up": False, + }, + "ens5": { + "downable": True, + "device_id": "0x5", + "driver": "virtio_net", + "mac": "00:11:22:33:44:aa", + "name": "ens5", + "up": False, + }, } net._rename_interfaces(renames, current_info=current_info) print(mock_subp.call_args_list) - mock_subp.assert_has_calls([ - mock.call(['ip', 'link', 'set', 'ens3', 'name', 'interface0'], - capture=True), - mock.call(['ip', 'link', 'set', 'ens5', 'name', 'interface2'], - capture=True), - ]) - - @mock.patch('cloudinit.subp.subp') + mock_subp.assert_has_calls( + [ + mock.call( + ["ip", "link", "set", "ens3", "name", "interface0"], + capture=True, + ), + mock.call( + ["ip", "link", "set", "ens5", "name", "interface2"], + 
capture=True, + ), + ] + ) + + @mock.patch("cloudinit.subp.subp") def test_rename_no_driver_no_device_id(self, mock_subp): renames = [ - ('00:11:22:33:44:55', 'interface0', None, None), - ('00:11:22:33:44:aa', 'interface1', None, None), + ("00:11:22:33:44:55", "interface0", None, None), + ("00:11:22:33:44:aa", "interface1", None, None), ] current_info = { - 'eth0': { - 'downable': True, - 'device_id': None, - 'driver': None, - 'mac': '00:11:22:33:44:55', - 'name': 'eth0', - 'up': False}, - 'eth1': { - 'downable': True, - 'device_id': None, - 'driver': None, - 'mac': '00:11:22:33:44:aa', - 'name': 'eth1', - 'up': False}, + "eth0": { + "downable": True, + "device_id": None, + "driver": None, + "mac": "00:11:22:33:44:55", + "name": "eth0", + "up": False, + }, + "eth1": { + "downable": True, + "device_id": None, + "driver": None, + "mac": "00:11:22:33:44:aa", + "name": "eth1", + "up": False, + }, } net._rename_interfaces(renames, current_info=current_info) print(mock_subp.call_args_list) - mock_subp.assert_has_calls([ - mock.call(['ip', 'link', 'set', 'eth0', 'name', 'interface0'], - capture=True), - mock.call(['ip', 'link', 'set', 'eth1', 'name', 'interface1'], - capture=True), - ]) - - @mock.patch('cloudinit.subp.subp') + mock_subp.assert_has_calls( + [ + mock.call( + ["ip", "link", "set", "eth0", "name", "interface0"], + capture=True, + ), + mock.call( + ["ip", "link", "set", "eth1", "name", "interface1"], + capture=True, + ), + ] + ) + + @mock.patch("cloudinit.subp.subp") def test_rename_all_bounce(self, mock_subp): renames = [ - ('00:11:22:33:44:55', 'interface0', 'virtio_net', '0x3'), - ('00:11:22:33:44:aa', 'interface2', 'virtio_net', '0x5'), + ("00:11:22:33:44:55", "interface0", "virtio_net", "0x3"), + ("00:11:22:33:44:aa", "interface2", "virtio_net", "0x5"), ] current_info = { - 'ens3': { - 'downable': True, - 'device_id': '0x3', - 'driver': 'virtio_net', - 'mac': '00:11:22:33:44:55', - 'name': 'ens3', - 'up': True}, - 'ens5': { - 'downable': True, - 'device_id': '0x5', - 'driver': 'virtio_net', - 'mac': '00:11:22:33:44:aa', - 'name': 'ens5', - 'up': True}, + "ens3": { + "downable": True, + "device_id": "0x3", + "driver": "virtio_net", + "mac": "00:11:22:33:44:55", + "name": "ens3", + "up": True, + }, + "ens5": { + "downable": True, + "device_id": "0x5", + "driver": "virtio_net", + "mac": "00:11:22:33:44:aa", + "name": "ens5", + "up": True, + }, } net._rename_interfaces(renames, current_info=current_info) print(mock_subp.call_args_list) - mock_subp.assert_has_calls([ - mock.call(['ip', 'link', 'set', 'ens3', 'down'], capture=True), - mock.call(['ip', 'link', 'set', 'ens3', 'name', 'interface0'], - capture=True), - mock.call(['ip', 'link', 'set', 'ens5', 'down'], capture=True), - mock.call(['ip', 'link', 'set', 'ens5', 'name', 'interface2'], - capture=True), - mock.call(['ip', 'link', 'set', 'interface0', 'up'], capture=True), - mock.call(['ip', 'link', 'set', 'interface2', 'up'], capture=True) - ]) - - @mock.patch('cloudinit.subp.subp') + mock_subp.assert_has_calls( + [ + mock.call(["ip", "link", "set", "ens3", "down"], capture=True), + mock.call( + ["ip", "link", "set", "ens3", "name", "interface0"], + capture=True, + ), + mock.call(["ip", "link", "set", "ens5", "down"], capture=True), + mock.call( + ["ip", "link", "set", "ens5", "name", "interface2"], + capture=True, + ), + mock.call( + ["ip", "link", "set", "interface0", "up"], capture=True + ), + mock.call( + ["ip", "link", "set", "interface2", "up"], capture=True + ), + ] + ) + + @mock.patch("cloudinit.subp.subp") def 
test_rename_duplicate_macs(self, mock_subp): renames = [ - ('00:11:22:33:44:55', 'eth0', 'hv_netsvc', '0x3'), - ('00:11:22:33:44:55', 'vf1', 'mlx4_core', '0x5'), + ("00:11:22:33:44:55", "eth0", "hv_netsvc", "0x3"), + ("00:11:22:33:44:55", "vf1", "mlx4_core", "0x5"), ] current_info = { - 'eth0': { - 'downable': True, - 'device_id': '0x3', - 'driver': 'hv_netsvc', - 'mac': '00:11:22:33:44:55', - 'name': 'eth0', - 'up': False}, - 'eth1': { - 'downable': True, - 'device_id': '0x5', - 'driver': 'mlx4_core', - 'mac': '00:11:22:33:44:55', - 'name': 'eth1', - 'up': False}, + "eth0": { + "downable": True, + "device_id": "0x3", + "driver": "hv_netsvc", + "mac": "00:11:22:33:44:55", + "name": "eth0", + "up": False, + }, + "eth1": { + "downable": True, + "device_id": "0x5", + "driver": "mlx4_core", + "mac": "00:11:22:33:44:55", + "name": "eth1", + "up": False, + }, } net._rename_interfaces(renames, current_info=current_info) print(mock_subp.call_args_list) - mock_subp.assert_has_calls([ - mock.call(['ip', 'link', 'set', 'eth1', 'name', 'vf1'], - capture=True), - ]) + mock_subp.assert_has_calls( + [ + mock.call( + ["ip", "link", "set", "eth1", "name", "vf1"], capture=True + ), + ] + ) - @mock.patch('cloudinit.subp.subp') + @mock.patch("cloudinit.subp.subp") def test_rename_duplicate_macs_driver_no_devid(self, mock_subp): renames = [ - ('00:11:22:33:44:55', 'eth0', 'hv_netsvc', None), - ('00:11:22:33:44:55', 'vf1', 'mlx4_core', None), + ("00:11:22:33:44:55", "eth0", "hv_netsvc", None), + ("00:11:22:33:44:55", "vf1", "mlx4_core", None), ] current_info = { - 'eth0': { - 'downable': True, - 'device_id': '0x3', - 'driver': 'hv_netsvc', - 'mac': '00:11:22:33:44:55', - 'name': 'eth0', - 'up': False}, - 'eth1': { - 'downable': True, - 'device_id': '0x5', - 'driver': 'mlx4_core', - 'mac': '00:11:22:33:44:55', - 'name': 'eth1', - 'up': False}, + "eth0": { + "downable": True, + "device_id": "0x3", + "driver": "hv_netsvc", + "mac": "00:11:22:33:44:55", + "name": "eth0", + "up": False, + }, + "eth1": { + "downable": True, + "device_id": "0x5", + "driver": "mlx4_core", + "mac": "00:11:22:33:44:55", + "name": "eth1", + "up": False, + }, } net._rename_interfaces(renames, current_info=current_info) print(mock_subp.call_args_list) - mock_subp.assert_has_calls([ - mock.call(['ip', 'link', 'set', 'eth1', 'name', 'vf1'], - capture=True), - ]) + mock_subp.assert_has_calls( + [ + mock.call( + ["ip", "link", "set", "eth1", "name", "vf1"], capture=True + ), + ] + ) - @mock.patch('cloudinit.subp.subp') + @mock.patch("cloudinit.subp.subp") def test_rename_multi_mac_dups(self, mock_subp): renames = [ - ('00:11:22:33:44:55', 'eth0', 'hv_netsvc', '0x3'), - ('00:11:22:33:44:55', 'vf1', 'mlx4_core', '0x5'), - ('00:11:22:33:44:55', 'vf2', 'mlx4_core', '0x7'), + ("00:11:22:33:44:55", "eth0", "hv_netsvc", "0x3"), + ("00:11:22:33:44:55", "vf1", "mlx4_core", "0x5"), + ("00:11:22:33:44:55", "vf2", "mlx4_core", "0x7"), ] current_info = { - 'eth0': { - 'downable': True, - 'device_id': '0x3', - 'driver': 'hv_netsvc', - 'mac': '00:11:22:33:44:55', - 'name': 'eth0', - 'up': False}, - 'eth1': { - 'downable': True, - 'device_id': '0x5', - 'driver': 'mlx4_core', - 'mac': '00:11:22:33:44:55', - 'name': 'eth1', - 'up': False}, - 'eth2': { - 'downable': True, - 'device_id': '0x7', - 'driver': 'mlx4_core', - 'mac': '00:11:22:33:44:55', - 'name': 'eth2', - 'up': False}, + "eth0": { + "downable": True, + "device_id": "0x3", + "driver": "hv_netsvc", + "mac": "00:11:22:33:44:55", + "name": "eth0", + "up": False, + }, + "eth1": { + "downable": True, + 
"device_id": "0x5", + "driver": "mlx4_core", + "mac": "00:11:22:33:44:55", + "name": "eth1", + "up": False, + }, + "eth2": { + "downable": True, + "device_id": "0x7", + "driver": "mlx4_core", + "mac": "00:11:22:33:44:55", + "name": "eth2", + "up": False, + }, } net._rename_interfaces(renames, current_info=current_info) print(mock_subp.call_args_list) - mock_subp.assert_has_calls([ - mock.call(['ip', 'link', 'set', 'eth1', 'name', 'vf1'], - capture=True), - mock.call(['ip', 'link', 'set', 'eth2', 'name', 'vf2'], - capture=True), - ]) - - @mock.patch('cloudinit.subp.subp') + mock_subp.assert_has_calls( + [ + mock.call( + ["ip", "link", "set", "eth1", "name", "vf1"], capture=True + ), + mock.call( + ["ip", "link", "set", "eth2", "name", "vf2"], capture=True + ), + ] + ) + + @mock.patch("cloudinit.subp.subp") def test_rename_macs_case_insensitive(self, mock_subp): """_rename_interfaces must support upper or lower case macs.""" renames = [ - ('aa:aa:aa:aa:aa:aa', 'en0', None, None), - ('BB:BB:BB:BB:BB:BB', 'en1', None, None), - ('cc:cc:cc:cc:cc:cc', 'en2', None, None), - ('DD:DD:DD:DD:DD:DD', 'en3', None, None), + ("aa:aa:aa:aa:aa:aa", "en0", None, None), + ("BB:BB:BB:BB:BB:BB", "en1", None, None), + ("cc:cc:cc:cc:cc:cc", "en2", None, None), + ("DD:DD:DD:DD:DD:DD", "en3", None, None), ] current_info = { - 'eth0': {'downable': True, 'mac': 'AA:AA:AA:AA:AA:AA', - 'name': 'eth0', 'up': False}, - 'eth1': {'downable': True, 'mac': 'bb:bb:bb:bb:bb:bb', - 'name': 'eth1', 'up': False}, - 'eth2': {'downable': True, 'mac': 'cc:cc:cc:cc:cc:cc', - 'name': 'eth2', 'up': False}, - 'eth3': {'downable': True, 'mac': 'DD:DD:DD:DD:DD:DD', - 'name': 'eth3', 'up': False}, + "eth0": { + "downable": True, + "mac": "AA:AA:AA:AA:AA:AA", + "name": "eth0", + "up": False, + }, + "eth1": { + "downable": True, + "mac": "bb:bb:bb:bb:bb:bb", + "name": "eth1", + "up": False, + }, + "eth2": { + "downable": True, + "mac": "cc:cc:cc:cc:cc:cc", + "name": "eth2", + "up": False, + }, + "eth3": { + "downable": True, + "mac": "DD:DD:DD:DD:DD:DD", + "name": "eth3", + "up": False, + }, } net._rename_interfaces(renames, current_info=current_info) expected = [ - mock.call(['ip', 'link', 'set', 'eth%d' % i, 'name', 'en%d' % i], - capture=True) - for i in range(len(renames))] + mock.call( + ["ip", "link", "set", "eth%d" % i, "name", "en%d" % i], + capture=True, + ) + for i in range(len(renames)) + ] mock_subp.assert_has_calls(expected) class TestNetworkState(CiTestCase): - def test_bcast_addr(self): """Test mask_and_ipv4_to_bcast_addr proper execution.""" bcast_addr = network_state.mask_and_ipv4_to_bcast_addr - self.assertEqual("192.168.1.255", - bcast_addr("255.255.255.0", "192.168.1.1")) - self.assertEqual("128.42.7.255", - bcast_addr("255.255.248.0", "128.42.5.4")) - self.assertEqual("10.1.21.255", - bcast_addr("255.255.255.0", "10.1.21.4")) + self.assertEqual( + "192.168.1.255", bcast_addr("255.255.255.0", "192.168.1.1") + ) + self.assertEqual( + "128.42.7.255", bcast_addr("255.255.248.0", "128.42.5.4") + ) + self.assertEqual( + "10.1.21.255", bcast_addr("255.255.255.0", "10.1.21.4") + ) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py index 9da21195..0e3ab43f 100644 --- a/tests/unittests/test_net_activators.py +++ b/tests/unittests/test_net_activators.py @@ -5,20 +5,17 @@ import pytest from cloudinit.net.activators import ( DEFAULT_PRIORITY, - search_activator, - select_activator, -) -from cloudinit.net.activators import ( IfUpDownActivator, NetplanActivator, - 
NetworkManagerActivator, NetworkdActivator, + NetworkManagerActivator, NoActivatorException, + search_activator, + select_activator, ) from cloudinit.net.network_state import parse_net_config_data from cloudinit.safeyaml import load - V1_CONFIG = """\ version: 1 config: @@ -38,23 +35,23 @@ ethernets: """ NETPLAN_CALL_LIST = [ - ((['netplan', 'apply'], ), {}), + ((["netplan", "apply"],), {}), ] @pytest.yield_fixture def available_mocks(): - mocks = namedtuple('Mocks', 'm_which, m_file') - with patch('cloudinit.subp.which', return_value=True) as m_which: - with patch('os.path.isfile', return_value=True) as m_file: + mocks = namedtuple("Mocks", "m_which, m_file") + with patch("cloudinit.subp.which", return_value=True) as m_which: + with patch("os.path.isfile", return_value=True) as m_file: yield mocks(m_which, m_file) @pytest.yield_fixture def unavailable_mocks(): - mocks = namedtuple('Mocks', 'm_which, m_file') - with patch('cloudinit.subp.which', return_value=False) as m_which: - with patch('os.path.isfile', return_value=False) as m_file: + mocks = namedtuple("Mocks", "m_which, m_file") + with patch("cloudinit.subp.which", return_value=False) as m_which: + with patch("os.path.isfile", return_value=False) as m_file: yield mocks(m_which, m_file) @@ -75,14 +72,16 @@ class TestSearchAndSelect: assert activator == new_order[0] def test_target(self, available_mocks): - search_activator(target='/tmp') - assert '/tmp' == available_mocks.m_which.call_args[1]['target'] + search_activator(target="/tmp") + assert "/tmp" == available_mocks.m_which.call_args[1]["target"] - select_activator(target='/tmp') - assert '/tmp' == available_mocks.m_which.call_args[1]['target'] + select_activator(target="/tmp") + assert "/tmp" == available_mocks.m_which.call_args[1]["target"] - @patch('cloudinit.net.activators.IfUpDownActivator.available', - return_value=False) + @patch( + "cloudinit.net.activators.IfUpDownActivator.available", + return_value=False, + ) def test_first_not_available(self, m_available, available_mocks): resp = search_activator() assert resp == DEFAULT_PRIORITY[1:] @@ -92,9 +91,9 @@ class TestSearchAndSelect: def test_priority_not_exist(self, available_mocks): with pytest.raises(ValueError): - search_activator(priority=['spam', 'eggs']) + search_activator(priority=["spam", "eggs"]) with pytest.raises(ValueError): - select_activator(priority=['spam', 'eggs']) + select_activator(priority=["spam", "eggs"]) def test_none_available(self, unavailable_mocks): resp = search_activator() @@ -105,82 +104,86 @@ class TestSearchAndSelect: IF_UP_DOWN_AVAILABLE_CALLS = [ - (('ifquery',), {'search': ['/sbin', '/usr/sbin'], 'target': None}), - (('ifup',), {'search': ['/sbin', '/usr/sbin'], 'target': None}), - (('ifdown',), {'search': ['/sbin', '/usr/sbin'], 'target': None}), + (("ifquery",), {"search": ["/sbin", "/usr/sbin"], "target": None}), + (("ifup",), {"search": ["/sbin", "/usr/sbin"], "target": None}), + (("ifdown",), {"search": ["/sbin", "/usr/sbin"], "target": None}), ] NETPLAN_AVAILABLE_CALLS = [ - (('netplan',), {'search': ['/usr/sbin', '/sbin'], 'target': None}), + (("netplan",), {"search": ["/usr/sbin", "/sbin"], "target": None}), ] NETWORK_MANAGER_AVAILABLE_CALLS = [ - (('nmcli',), {'target': None}), + (("nmcli",), {"target": None}), ] NETWORKD_AVAILABLE_CALLS = [ - (('ip',), {'search': ['/usr/sbin', '/bin'], 'target': None}), - (('systemctl',), {'search': ['/usr/sbin', '/bin'], 'target': None}), + (("ip",), {"search": ["/usr/sbin", "/bin"], "target": None}), + (("systemctl",), {"search": ["/usr/sbin", 
"/bin"], "target": None}), ] -@pytest.mark.parametrize('activator, available_calls', [ - (IfUpDownActivator, IF_UP_DOWN_AVAILABLE_CALLS), - (NetplanActivator, NETPLAN_AVAILABLE_CALLS), - (NetworkManagerActivator, NETWORK_MANAGER_AVAILABLE_CALLS), - (NetworkdActivator, NETWORKD_AVAILABLE_CALLS), -]) +@pytest.mark.parametrize( + "activator, available_calls", + [ + (IfUpDownActivator, IF_UP_DOWN_AVAILABLE_CALLS), + (NetplanActivator, NETPLAN_AVAILABLE_CALLS), + (NetworkManagerActivator, NETWORK_MANAGER_AVAILABLE_CALLS), + (NetworkdActivator, NETWORKD_AVAILABLE_CALLS), + ], +) class TestActivatorsAvailable: - def test_available( - self, activator, available_calls, available_mocks - ): + def test_available(self, activator, available_calls, available_mocks): activator.available() assert available_mocks.m_which.call_args_list == available_calls IF_UP_DOWN_BRING_UP_CALL_LIST = [ - ((['ifup', 'eth0'], ), {}), - ((['ifup', 'eth1'], ), {}), + ((["ifup", "eth0"],), {}), + ((["ifup", "eth1"],), {}), ] NETWORK_MANAGER_BRING_UP_CALL_LIST = [ - ((['nmcli', 'connection', 'up', 'ifname', 'eth0'], ), {}), - ((['nmcli', 'connection', 'up', 'ifname', 'eth1'], ), {}), + ((["nmcli", "connection", "up", "ifname", "eth0"],), {}), + ((["nmcli", "connection", "up", "ifname", "eth1"],), {}), ] NETWORKD_BRING_UP_CALL_LIST = [ - ((['ip', 'link', 'set', 'up', 'eth0'], ), {}), - ((['ip', 'link', 'set', 'up', 'eth1'], ), {}), - ((['systemctl', 'restart', 'systemd-networkd', 'systemd-resolved'], ), {}), + ((["ip", "link", "set", "up", "eth0"],), {}), + ((["ip", "link", "set", "up", "eth1"],), {}), + ((["systemctl", "restart", "systemd-networkd", "systemd-resolved"],), {}), ] -@pytest.mark.parametrize('activator, expected_call_list', [ - (IfUpDownActivator, IF_UP_DOWN_BRING_UP_CALL_LIST), - (NetplanActivator, NETPLAN_CALL_LIST), - (NetworkManagerActivator, NETWORK_MANAGER_BRING_UP_CALL_LIST), - (NetworkdActivator, NETWORKD_BRING_UP_CALL_LIST), -]) +@pytest.mark.parametrize( + "activator, expected_call_list", + [ + (IfUpDownActivator, IF_UP_DOWN_BRING_UP_CALL_LIST), + (NetplanActivator, NETPLAN_CALL_LIST), + (NetworkManagerActivator, NETWORK_MANAGER_BRING_UP_CALL_LIST), + (NetworkdActivator, NETWORKD_BRING_UP_CALL_LIST), + ], +) class TestActivatorsBringUp: - @patch('cloudinit.subp.subp', return_value=('', '')) + @patch("cloudinit.subp.subp", return_value=("", "")) def test_bring_up_interface( self, m_subp, activator, expected_call_list, available_mocks ): - activator.bring_up_interface('eth0') + activator.bring_up_interface("eth0") assert len(m_subp.call_args_list) == 1 assert m_subp.call_args_list[0] == expected_call_list[0] - @patch('cloudinit.subp.subp', return_value=('', '')) + @patch("cloudinit.subp.subp", return_value=("", "")) def test_bring_up_interfaces( self, m_subp, activator, expected_call_list, available_mocks ): index = 0 - activator.bring_up_interfaces(['eth0', 'eth1']) + activator.bring_up_interfaces(["eth0", "eth1"]) for call in m_subp.call_args_list: assert call == expected_call_list[index] index += 1 - @patch('cloudinit.subp.subp', return_value=('', '')) + @patch("cloudinit.subp.subp", return_value=("", "")) def test_bring_up_all_interfaces_v1( self, m_subp, activator, expected_call_list, available_mocks ): @@ -189,7 +192,7 @@ class TestActivatorsBringUp: for call in m_subp.call_args_list: assert call in expected_call_list - @patch('cloudinit.subp.subp', return_value=('', '')) + @patch("cloudinit.subp.subp", return_value=("", "")) def test_bring_up_all_interfaces_v2( self, m_subp, activator, 
expected_call_list, available_mocks ): @@ -200,44 +203,47 @@ class TestActivatorsBringUp: IF_UP_DOWN_BRING_DOWN_CALL_LIST = [ - ((['ifdown', 'eth0'], ), {}), - ((['ifdown', 'eth1'], ), {}), + ((["ifdown", "eth0"],), {}), + ((["ifdown", "eth1"],), {}), ] NETWORK_MANAGER_BRING_DOWN_CALL_LIST = [ - ((['nmcli', 'connection', 'down', 'eth0'], ), {}), - ((['nmcli', 'connection', 'down', 'eth1'], ), {}), + ((["nmcli", "connection", "down", "eth0"],), {}), + ((["nmcli", "connection", "down", "eth1"],), {}), ] NETWORKD_BRING_DOWN_CALL_LIST = [ - ((['ip', 'link', 'set', 'down', 'eth0'], ), {}), - ((['ip', 'link', 'set', 'down', 'eth1'], ), {}), + ((["ip", "link", "set", "down", "eth0"],), {}), + ((["ip", "link", "set", "down", "eth1"],), {}), ] -@pytest.mark.parametrize('activator, expected_call_list', [ - (IfUpDownActivator, IF_UP_DOWN_BRING_DOWN_CALL_LIST), - (NetplanActivator, NETPLAN_CALL_LIST), - (NetworkManagerActivator, NETWORK_MANAGER_BRING_DOWN_CALL_LIST), - (NetworkdActivator, NETWORKD_BRING_DOWN_CALL_LIST), -]) +@pytest.mark.parametrize( + "activator, expected_call_list", + [ + (IfUpDownActivator, IF_UP_DOWN_BRING_DOWN_CALL_LIST), + (NetplanActivator, NETPLAN_CALL_LIST), + (NetworkManagerActivator, NETWORK_MANAGER_BRING_DOWN_CALL_LIST), + (NetworkdActivator, NETWORKD_BRING_DOWN_CALL_LIST), + ], +) class TestActivatorsBringDown: - @patch('cloudinit.subp.subp', return_value=('', '')) + @patch("cloudinit.subp.subp", return_value=("", "")) def test_bring_down_interface( self, m_subp, activator, expected_call_list, available_mocks ): - activator.bring_down_interface('eth0') + activator.bring_down_interface("eth0") assert len(m_subp.call_args_list) == 1 assert m_subp.call_args_list[0] == expected_call_list[0] - @patch('cloudinit.subp.subp', return_value=('', '')) + @patch("cloudinit.subp.subp", return_value=("", "")) def test_bring_down_interfaces( self, m_subp, activator, expected_call_list, available_mocks ): - activator.bring_down_interfaces(['eth0', 'eth1']) + activator.bring_down_interfaces(["eth0", "eth1"]) assert expected_call_list == m_subp.call_args_list - @patch('cloudinit.subp.subp', return_value=('', '')) + @patch("cloudinit.subp.subp", return_value=("", "")) def test_bring_down_all_interfaces_v1( self, m_subp, activator, expected_call_list, available_mocks ): @@ -246,7 +252,7 @@ class TestActivatorsBringDown: for call in m_subp.call_args_list: assert call in expected_call_list - @patch('cloudinit.subp.subp', return_value=('', '')) + @patch("cloudinit.subp.subp", return_value=("", "")) def test_bring_down_all_interfaces_v2( self, m_subp, activator, expected_call_list, available_mocks ): diff --git a/tests/unittests/test_net_freebsd.py b/tests/unittests/test_net_freebsd.py index f0dde097..3facb2bb 100644 --- a/tests/unittests/test_net_freebsd.py +++ b/tests/unittests/test_net_freebsd.py @@ -3,8 +3,7 @@ import os import cloudinit.net import cloudinit.net.network_state from cloudinit import safeyaml -from tests.unittests.helpers import (CiTestCase, mock, readResource, dir2dict) - +from tests.unittests.helpers import CiTestCase, dir2dict, mock, readResource SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output") V1 = """ @@ -22,34 +21,36 @@ version: 1 class TestInterfacesByMac(CiTestCase): - - @mock.patch('cloudinit.subp.subp') - @mock.patch('cloudinit.util.is_FreeBSD') + @mock.patch("cloudinit.subp.subp") + @mock.patch("cloudinit.util.is_FreeBSD") def test_get_interfaces_by_mac(self, mock_is_FreeBSD, mock_subp): mock_is_FreeBSD.return_value = True 
mock_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, 0) a = cloudinit.net.get_interfaces_by_mac() - assert a == {'52:54:00:50:b7:0d': 'vtnet0', - '80:00:73:63:5c:48': 're0.33', - '02:14:39:0e:25:00': 'bridge0', - '02:ff:60:8c:f3:72': 'vnet0:11'} + assert a == { + "52:54:00:50:b7:0d": "vtnet0", + "80:00:73:63:5c:48": "re0.33", + "02:14:39:0e:25:00": "bridge0", + "02:ff:60:8c:f3:72": "vnet0:11", + } class TestFreeBSDRoundTrip(CiTestCase): - - def _render_and_read(self, network_config=None, state=None, - netplan_path=None, target=None): + def _render_and_read( + self, network_config=None, state=None, netplan_path=None, target=None + ): if target is None: target = self.tmp_dir() os.mkdir("%s/etc" % target) - with open("%s/etc/rc.conf" % target, 'a') as fd: + with open("%s/etc/rc.conf" % target, "a") as fd: fd.write("# dummy rc.conf\n") - with open("%s/etc/resolv.conf" % target, 'a') as fd: + with open("%s/etc/resolv.conf" % target, "a") as fd: fd.write("# dummy resolv.conf\n") if network_config: ns = cloudinit.net.network_state.parse_net_config_data( - network_config) + network_config + ) elif state: ns = state else: @@ -59,18 +60,20 @@ class TestFreeBSDRoundTrip(CiTestCase): renderer.render_network_state(ns, target=target) return dir2dict(target) - @mock.patch('cloudinit.subp.subp') + @mock.patch("cloudinit.subp.subp") def test_render_output_has_yaml(self, mock_subp): entry = { - 'yaml': V1, + "yaml": V1, } - network_config = safeyaml.load(entry['yaml']) + network_config = safeyaml.load(entry["yaml"]) ns = cloudinit.net.network_state.parse_net_config_data(network_config) files = self._render_and_read(state=ns) assert files == { - '/etc/resolv.conf': '# dummy resolv.conf\n', - '/etc/rc.conf': ( + "/etc/resolv.conf": "# dummy resolv.conf\n", + "/etc/rc.conf": ( "# dummy rc.conf\n" "ifconfig_eno1=" - "'172.20.80.129 netmask 255.255.255.128 mtu 1470'\n")} + "'172.20.80.129 netmask 255.255.255.128 mtu 1470'\n" + ), + } diff --git a/tests/unittests/test_netinfo.py b/tests/unittests/test_netinfo.py index 238f7b0a..5ed15729 100644 --- a/tests/unittests/test_netinfo.py +++ b/tests/unittests/test_netinfo.py @@ -7,7 +7,6 @@ from copy import copy from cloudinit.netinfo import netdev_info, netdev_pformat, route_pformat from tests.unittests.helpers import CiTestCase, mock, readResource - # Example ifconfig and route output SAMPLE_OLD_IFCONFIG_OUT = readResource("netinfo/old-ifconfig-output") SAMPLE_NEW_IFCONFIG_OUT = readResource("netinfo/new-ifconfig-output") @@ -27,155 +26,199 @@ class TestNetInfo(CiTestCase): maxDiff = None with_logs = True - @mock.patch('cloudinit.netinfo.subp.which') - @mock.patch('cloudinit.netinfo.subp.subp') + @mock.patch("cloudinit.netinfo.subp.which") + @mock.patch("cloudinit.netinfo.subp.subp") def test_netdev_old_nettools_pformat(self, m_subp, m_which): """netdev_pformat properly rendering old nettools info.""" - m_subp.return_value = (SAMPLE_OLD_IFCONFIG_OUT, '') - m_which.side_effect = lambda x: x if x == 'ifconfig' else None + m_subp.return_value = (SAMPLE_OLD_IFCONFIG_OUT, "") + m_which.side_effect = lambda x: x if x == "ifconfig" else None content = netdev_pformat() self.assertEqual(NETDEV_FORMATTED_OUT, content) - @mock.patch('cloudinit.netinfo.subp.which') - @mock.patch('cloudinit.netinfo.subp.subp') + @mock.patch("cloudinit.netinfo.subp.which") + @mock.patch("cloudinit.netinfo.subp.subp") def test_netdev_new_nettools_pformat(self, m_subp, m_which): """netdev_pformat properly rendering netdev new nettools info.""" - m_subp.return_value = (SAMPLE_NEW_IFCONFIG_OUT, '') - 
m_which.side_effect = lambda x: x if x == 'ifconfig' else None + m_subp.return_value = (SAMPLE_NEW_IFCONFIG_OUT, "") + m_which.side_effect = lambda x: x if x == "ifconfig" else None content = netdev_pformat() self.assertEqual(NETDEV_FORMATTED_OUT, content) - @mock.patch('cloudinit.netinfo.subp.which') - @mock.patch('cloudinit.netinfo.subp.subp') + @mock.patch("cloudinit.netinfo.subp.which") + @mock.patch("cloudinit.netinfo.subp.subp") def test_netdev_freebsd_nettools_pformat(self, m_subp, m_which): """netdev_pformat properly rendering netdev new nettools info.""" - m_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, '') - m_which.side_effect = lambda x: x if x == 'ifconfig' else None + m_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, "") + m_which.side_effect = lambda x: x if x == "ifconfig" else None content = netdev_pformat() print() print(content) print() self.assertEqual(FREEBSD_NETDEV_OUT, content) - @mock.patch('cloudinit.netinfo.subp.which') - @mock.patch('cloudinit.netinfo.subp.subp') + @mock.patch("cloudinit.netinfo.subp.which") + @mock.patch("cloudinit.netinfo.subp.subp") def test_netdev_iproute_pformat(self, m_subp, m_which): """netdev_pformat properly rendering ip route info.""" - m_subp.return_value = (SAMPLE_IPADDRSHOW_OUT, '') - m_which.side_effect = lambda x: x if x == 'ip' else None + m_subp.return_value = (SAMPLE_IPADDRSHOW_OUT, "") + m_which.side_effect = lambda x: x if x == "ip" else None content = netdev_pformat() new_output = copy(NETDEV_FORMATTED_OUT) # ip route show describes global scopes on ipv4 addresses # whereas ifconfig does not. Add proper global/host scope to output. - new_output = new_output.replace('| . | 50:7b', '| global | 50:7b') + new_output = new_output.replace("| . | 50:7b", "| global | 50:7b") new_output = new_output.replace( - '255.0.0.0 | . |', '255.0.0.0 | host |') + "255.0.0.0 | . 
|", "255.0.0.0 | host |" + ) self.assertEqual(new_output, content) - @mock.patch('cloudinit.netinfo.subp.which') - @mock.patch('cloudinit.netinfo.subp.subp') + @mock.patch("cloudinit.netinfo.subp.which") + @mock.patch("cloudinit.netinfo.subp.subp") def test_netdev_warn_on_missing_commands(self, m_subp, m_which): """netdev_pformat warns when missing both ip and 'netstat'.""" m_which.return_value = None # Niether ip nor netstat found content = netdev_pformat() - self.assertEqual('\n', content) + self.assertEqual("\n", content) self.assertEqual( "WARNING: Could not print networks: missing 'ip' and 'ifconfig'" " commands\n", - self.logs.getvalue()) + self.logs.getvalue(), + ) m_subp.assert_not_called() - @mock.patch('cloudinit.netinfo.subp.which') - @mock.patch('cloudinit.netinfo.subp.subp') + @mock.patch("cloudinit.netinfo.subp.which") + @mock.patch("cloudinit.netinfo.subp.subp") def test_netdev_info_nettools_down(self, m_subp, m_which): """test netdev_info using nettools and down interfaces.""" m_subp.return_value = ( - readResource("netinfo/new-ifconfig-output-down"), "") - m_which.side_effect = lambda x: x if x == 'ifconfig' else None + readResource("netinfo/new-ifconfig-output-down"), + "", + ) + m_which.side_effect = lambda x: x if x == "ifconfig" else None self.assertEqual( - {'eth0': {'ipv4': [], 'ipv6': [], - 'hwaddr': '00:16:3e:de:51:a6', 'up': False}, - 'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0'}], - 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}], - 'hwaddr': '.', 'up': True}}, - netdev_info(".")) - - @mock.patch('cloudinit.netinfo.subp.which') - @mock.patch('cloudinit.netinfo.subp.subp') + { + "eth0": { + "ipv4": [], + "ipv6": [], + "hwaddr": "00:16:3e:de:51:a6", + "up": False, + }, + "lo": { + "ipv4": [{"ip": "127.0.0.1", "mask": "255.0.0.0"}], + "ipv6": [{"ip": "::1/128", "scope6": "host"}], + "hwaddr": ".", + "up": True, + }, + }, + netdev_info("."), + ) + + @mock.patch("cloudinit.netinfo.subp.which") + @mock.patch("cloudinit.netinfo.subp.subp") def test_netdev_info_iproute_down(self, m_subp, m_which): """Test netdev_info with ip and down interfaces.""" m_subp.return_value = ( - readResource("netinfo/sample-ipaddrshow-output-down"), "") - m_which.side_effect = lambda x: x if x == 'ip' else None + readResource("netinfo/sample-ipaddrshow-output-down"), + "", + ) + m_which.side_effect = lambda x: x if x == "ip" else None self.assertEqual( - {'lo': {'ipv4': [{'ip': '127.0.0.1', 'bcast': '.', - 'mask': '255.0.0.0', 'scope': 'host'}], - 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}], - 'hwaddr': '.', 'up': True}, - 'eth0': {'ipv4': [], 'ipv6': [], - 'hwaddr': '00:16:3e:de:51:a6', 'up': False}}, - netdev_info(".")) - - @mock.patch('cloudinit.netinfo.netdev_info') + { + "lo": { + "ipv4": [ + { + "ip": "127.0.0.1", + "bcast": ".", + "mask": "255.0.0.0", + "scope": "host", + } + ], + "ipv6": [{"ip": "::1/128", "scope6": "host"}], + "hwaddr": ".", + "up": True, + }, + "eth0": { + "ipv4": [], + "ipv6": [], + "hwaddr": "00:16:3e:de:51:a6", + "up": False, + }, + }, + netdev_info("."), + ) + + @mock.patch("cloudinit.netinfo.netdev_info") def test_netdev_pformat_with_down(self, m_netdev_info): """test netdev_pformat when netdev_info returns 'down' interfaces.""" - m_netdev_info.return_value = ( - {'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0', - 'scope': 'host'}], - 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}], - 'hwaddr': '.', 'up': True}, - 'eth0': {'ipv4': [], 'ipv6': [], - 'hwaddr': '00:16:3e:de:51:a6', 'up': False}}) + m_netdev_info.return_value = { + "lo": { + 
"ipv4": [ + {"ip": "127.0.0.1", "mask": "255.0.0.0", "scope": "host"} + ], + "ipv6": [{"ip": "::1/128", "scope6": "host"}], + "hwaddr": ".", + "up": True, + }, + "eth0": { + "ipv4": [], + "ipv6": [], + "hwaddr": "00:16:3e:de:51:a6", + "up": False, + }, + } self.assertEqual( readResource("netinfo/netdev-formatted-output-down"), - netdev_pformat()) + netdev_pformat(), + ) - @mock.patch('cloudinit.netinfo.subp.which') - @mock.patch('cloudinit.netinfo.subp.subp') + @mock.patch("cloudinit.netinfo.subp.which") + @mock.patch("cloudinit.netinfo.subp.subp") def test_route_nettools_pformat(self, m_subp, m_which): """route_pformat properly rendering nettools route info.""" def subp_netstat_route_selector(*args, **kwargs): - if args[0] == ['netstat', '--route', '--numeric', '--extend']: - return (SAMPLE_ROUTE_OUT_V4, '') - if args[0] == ['netstat', '-A', 'inet6', '--route', '--numeric']: - return (SAMPLE_ROUTE_OUT_V6, '') - raise Exception('Unexpected subp call %s' % args[0]) + if args[0] == ["netstat", "--route", "--numeric", "--extend"]: + return (SAMPLE_ROUTE_OUT_V4, "") + if args[0] == ["netstat", "-A", "inet6", "--route", "--numeric"]: + return (SAMPLE_ROUTE_OUT_V6, "") + raise Exception("Unexpected subp call %s" % args[0]) m_subp.side_effect = subp_netstat_route_selector - m_which.side_effect = lambda x: x if x == 'netstat' else None + m_which.side_effect = lambda x: x if x == "netstat" else None content = route_pformat() self.assertEqual(ROUTE_FORMATTED_OUT, content) - @mock.patch('cloudinit.netinfo.subp.which') - @mock.patch('cloudinit.netinfo.subp.subp') + @mock.patch("cloudinit.netinfo.subp.which") + @mock.patch("cloudinit.netinfo.subp.subp") def test_route_iproute_pformat(self, m_subp, m_which): """route_pformat properly rendering ip route info.""" def subp_iproute_selector(*args, **kwargs): - if ['ip', '-o', 'route', 'list'] == args[0]: - return (SAMPLE_IPROUTE_OUT_V4, '') - v6cmd = ['ip', '--oneline', '-6', 'route', 'list', 'table', 'all'] + if ["ip", "-o", "route", "list"] == args[0]: + return (SAMPLE_IPROUTE_OUT_V4, "") + v6cmd = ["ip", "--oneline", "-6", "route", "list", "table", "all"] if v6cmd == args[0]: - return (SAMPLE_IPROUTE_OUT_V6, '') - raise Exception('Unexpected subp call %s' % args[0]) + return (SAMPLE_IPROUTE_OUT_V6, "") + raise Exception("Unexpected subp call %s" % args[0]) m_subp.side_effect = subp_iproute_selector - m_which.side_effect = lambda x: x if x == 'ip' else None + m_which.side_effect = lambda x: x if x == "ip" else None content = route_pformat() self.assertEqual(ROUTE_FORMATTED_OUT, content) - @mock.patch('cloudinit.netinfo.subp.which') - @mock.patch('cloudinit.netinfo.subp.subp') + @mock.patch("cloudinit.netinfo.subp.which") + @mock.patch("cloudinit.netinfo.subp.subp") def test_route_warn_on_missing_commands(self, m_subp, m_which): """route_pformat warns when missing both ip and 'netstat'.""" m_which.return_value = None # Niether ip nor netstat found content = route_pformat() - self.assertEqual('\n', content) + self.assertEqual("\n", content) self.assertEqual( "WARNING: Could not print routes: missing 'ip' and 'netstat'" " commands\n", - self.logs.getvalue()) + self.logs.getvalue(), + ) m_subp.assert_not_called() + # vi: ts=4 expandtab diff --git a/tests/unittests/test_pathprefix2dict.py b/tests/unittests/test_pathprefix2dict.py index 4e737ad7..83141263 100644 --- a/tests/unittests/test_pathprefix2dict.py +++ b/tests/unittests/test_pathprefix2dict.py @@ -1,46 +1,46 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-from cloudinit import util - -from tests.unittests.helpers import TestCase, populate_dir - import shutil import tempfile +from cloudinit import util +from tests.unittests.helpers import TestCase, populate_dir -class TestPathPrefix2Dict(TestCase): +class TestPathPrefix2Dict(TestCase): def setUp(self): super(TestPathPrefix2Dict, self).setUp() self.tmp = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.tmp) def test_required_only(self): - dirdata = {'f1': b'f1content', 'f2': b'f2content'} + dirdata = {"f1": b"f1content", "f2": b"f2content"} populate_dir(self.tmp, dirdata) - ret = util.pathprefix2dict(self.tmp, required=['f1', 'f2']) + ret = util.pathprefix2dict(self.tmp, required=["f1", "f2"]) self.assertEqual(dirdata, ret) def test_required_missing(self): - dirdata = {'f1': b'f1content'} + dirdata = {"f1": b"f1content"} populate_dir(self.tmp, dirdata) - kwargs = {'required': ['f1', 'f2']} + kwargs = {"required": ["f1", "f2"]} self.assertRaises(ValueError, util.pathprefix2dict, self.tmp, **kwargs) def test_no_required_and_optional(self): - dirdata = {'f1': b'f1c', 'f2': b'f2c'} + dirdata = {"f1": b"f1c", "f2": b"f2c"} populate_dir(self.tmp, dirdata) - ret = util.pathprefix2dict(self.tmp, required=None, - optional=['f1', 'f2']) + ret = util.pathprefix2dict( + self.tmp, required=None, optional=["f1", "f2"] + ) self.assertEqual(dirdata, ret) def test_required_and_optional(self): - dirdata = {'f1': b'f1c', 'f2': b'f2c'} + dirdata = {"f1": b"f1c", "f2": b"f2c"} populate_dir(self.tmp, dirdata) - ret = util.pathprefix2dict(self.tmp, required=['f1'], optional=['f2']) + ret = util.pathprefix2dict(self.tmp, required=["f1"], optional=["f2"]) self.assertEqual(dirdata, ret) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_registry.py b/tests/unittests/test_registry.py index 4c7df186..28ee04ec 100644 --- a/tests/unittests/test_registry.py +++ b/tests/unittests/test_registry.py @@ -1,32 +1,33 @@ # This file is part of cloud-init. See LICENSE file for license information. 
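# ---------------------------------------------------------------------------
# Sketch of the contract the tests below pin down (an assumed stand-in, not
# cloudinit.registry's code): items register once, keys cannot be replaced,
# and lookups hand back a copy so callers cannot mutate the registry.
class DictRegistrySketch:
    def __init__(self):
        self._items = {}

    def register_item(self, key, item):
        if key in self._items:
            raise ValueError("%s is already registered" % key)
        self._items[key] = item

    @property
    def registered_items(self):
        # a copy: writes by callers are not exposed to other callers
        return dict(self._items)
# ---------------------------------------------------------------------------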
from cloudinit.registry import DictRegistry - -from tests.unittests.helpers import (mock, TestCase) +from tests.unittests.helpers import TestCase, mock class TestDictRegistry(TestCase): - def test_added_item_included_in_output(self): registry = DictRegistry() - item_key, item_to_register = 'test_key', mock.Mock() + item_key, item_to_register = "test_key", mock.Mock() registry.register_item(item_key, item_to_register) - self.assertEqual({item_key: item_to_register}, - registry.registered_items) + self.assertEqual( + {item_key: item_to_register}, registry.registered_items + ) def test_registry_starts_out_empty(self): self.assertEqual({}, DictRegistry().registered_items) def test_modifying_registered_items_isnt_exposed_to_other_callers(self): registry = DictRegistry() - registry.registered_items['test_item'] = mock.Mock() + registry.registered_items["test_item"] = mock.Mock() self.assertEqual({}, registry.registered_items) def test_keys_cannot_be_replaced(self): registry = DictRegistry() - item_key = 'test_key' + item_key = "test_key" registry.register_item(item_key, mock.Mock()) - self.assertRaises(ValueError, - registry.register_item, item_key, mock.Mock()) + self.assertRaises( + ValueError, registry.register_item, item_key, mock.Mock() + ) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_render_cloudcfg.py b/tests/unittests/test_render_cloudcfg.py index b2222747..81110e61 100644 --- a/tests/unittests/test_render_cloudcfg.py +++ b/tests/unittests/test_render_cloudcfg.py @@ -4,57 +4,74 @@ import sys import pytest -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util from tests.unittests.helpers import cloud_init_project_dir # TODO(Look to align with tools.render-cloudcfg or cloudinit.distos.OSFAMILIES) -DISTRO_VARIANTS = ["amazon", "arch", "centos", "debian", "eurolinux", "fedora", - "freebsd", "netbsd", "openbsd", "photon", "rhel", "suse", - "ubuntu", "unknown"] +DISTRO_VARIANTS = [ + "amazon", + "arch", + "centos", + "debian", + "eurolinux", + "fedora", + "freebsd", + "netbsd", + "openbsd", + "photon", + "rhel", + "suse", + "ubuntu", + "unknown", +] @pytest.mark.allow_subp_for(sys.executable) class TestRenderCloudCfg: - cmd = [sys.executable, cloud_init_project_dir('tools/render-cloudcfg')] - tmpl_path = cloud_init_project_dir('config/cloud.cfg.tmpl') + cmd = [sys.executable, cloud_init_project_dir("tools/render-cloudcfg")] + tmpl_path = cloud_init_project_dir("config/cloud.cfg.tmpl") - @pytest.mark.parametrize('variant', (DISTRO_VARIANTS)) + @pytest.mark.parametrize("variant", (DISTRO_VARIANTS)) def test_variant_sets_distro_in_cloud_cfg(self, variant, tmpdir): - outfile = tmpdir.join('outcfg').strpath - subp.subp( - self.cmd + ['--variant', variant, self.tmpl_path, outfile]) + outfile = tmpdir.join("outcfg").strpath + subp.subp(self.cmd + ["--variant", variant, self.tmpl_path, outfile]) with open(outfile) as stream: system_cfg = util.load_yaml(stream.read()) - if variant == 'unknown': - variant = 'ubuntu' # Unknown is defaulted to ubuntu - assert system_cfg['system_info']['distro'] == variant + if variant == "unknown": + variant = "ubuntu" # Unknown is defaulted to ubuntu + assert system_cfg["system_info"]["distro"] == variant - @pytest.mark.parametrize('variant', (DISTRO_VARIANTS)) + @pytest.mark.parametrize("variant", (DISTRO_VARIANTS)) def test_variant_sets_default_user_in_cloud_cfg(self, variant, tmpdir): - outfile = tmpdir.join('outcfg').strpath - subp.subp( - self.cmd + ['--variant', variant, self.tmpl_path, outfile]) + outfile = 
tmpdir.join("outcfg").strpath + subp.subp(self.cmd + ["--variant", variant, self.tmpl_path, outfile]) with open(outfile) as stream: system_cfg = util.load_yaml(stream.read()) default_user_exceptions = { - 'amazon': 'ec2-user', 'debian': 'ubuntu', 'unknown': 'ubuntu'} - default_user = system_cfg['system_info']['default_user']['name'] + "amazon": "ec2-user", + "debian": "ubuntu", + "unknown": "ubuntu", + } + default_user = system_cfg["system_info"]["default_user"]["name"] assert default_user == default_user_exceptions.get(variant, variant) - @pytest.mark.parametrize('variant,renderers', ( - ('freebsd', ['freebsd']), ('netbsd', ['netbsd']), - ('openbsd', ['openbsd']), ('ubuntu', ['netplan', 'eni', 'sysconfig'])) + @pytest.mark.parametrize( + "variant,renderers", + ( + ("freebsd", ["freebsd"]), + ("netbsd", ["netbsd"]), + ("openbsd", ["openbsd"]), + ("ubuntu", ["netplan", "eni", "sysconfig"]), + ), ) def test_variant_sets_network_renderer_priority_in_cloud_cfg( self, variant, renderers, tmpdir ): - outfile = tmpdir.join('outcfg').strpath - subp.subp( - self.cmd + ['--variant', variant, self.tmpl_path, outfile]) + outfile = tmpdir.join("outcfg").strpath + subp.subp(self.cmd + ["--variant", variant, self.tmpl_path, outfile]) with open(outfile) as stream: system_cfg = util.load_yaml(stream.read()) - assert renderers == system_cfg['system_info']['network']['renderers'] + assert renderers == system_cfg["system_info"]["network"]["renderers"] diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py index 3aaeea43..f6dd96e0 100644 --- a/tests/unittests/test_reporting.py +++ b/tests/unittests/test_reporting.py @@ -5,273 +5,324 @@ from unittest import mock from cloudinit import reporting -from cloudinit.reporting import events -from cloudinit.reporting import handlers - +from cloudinit.reporting import events, handlers from tests.unittests.helpers import TestCase def _fake_registry(): - return mock.Mock(registered_items={'a': mock.MagicMock(), - 'b': mock.MagicMock()}) + return mock.Mock( + registered_items={"a": mock.MagicMock(), "b": mock.MagicMock()} + ) class TestReportStartEvent(TestCase): - - @mock.patch('cloudinit.reporting.events.instantiated_handler_registry', - new_callable=_fake_registry) + @mock.patch( + "cloudinit.reporting.events.instantiated_handler_registry", + new_callable=_fake_registry, + ) def test_report_start_event_passes_something_with_as_string_to_handlers( - self, instantiated_handler_registry): - event_name, event_description = 'my_test_event', 'my description' + self, instantiated_handler_registry + ): + event_name, event_description = "my_test_event", "my description" events.report_start_event(event_name, event_description) - expected_string_representation = ': '.join( - ['start', event_name, event_description]) - for _, handler in ( - instantiated_handler_registry.registered_items.items()): + expected_string_representation = ": ".join( + ["start", event_name, event_description] + ) + for ( + _, + handler, + ) in instantiated_handler_registry.registered_items.items(): self.assertEqual(1, handler.publish_event.call_count) event = handler.publish_event.call_args[0][0] self.assertEqual(expected_string_representation, event.as_string()) class TestReportFinishEvent(TestCase): - def _report_finish_event(self, result=events.status.SUCCESS): - event_name, event_description = 'my_test_event', 'my description' + event_name, event_description = "my_test_event", "my description" events.report_finish_event( - event_name, event_description, result=result) + 
event_name, event_description, result=result + ) return event_name, event_description def assertHandlersPassedObjectWithAsString( - self, handlers, expected_as_string): + self, handlers, expected_as_string + ): for _, handler in handlers.items(): self.assertEqual(1, handler.publish_event.call_count) event = handler.publish_event.call_args[0][0] self.assertEqual(expected_as_string, event.as_string()) - @mock.patch('cloudinit.reporting.events.instantiated_handler_registry', - new_callable=_fake_registry) + @mock.patch( + "cloudinit.reporting.events.instantiated_handler_registry", + new_callable=_fake_registry, + ) def test_report_finish_event_passes_something_with_as_string_to_handlers( - self, instantiated_handler_registry): + self, instantiated_handler_registry + ): event_name, event_description = self._report_finish_event() - expected_string_representation = ': '.join( - ['finish', event_name, events.status.SUCCESS, - event_description]) + expected_string_representation = ": ".join( + ["finish", event_name, events.status.SUCCESS, event_description] + ) self.assertHandlersPassedObjectWithAsString( instantiated_handler_registry.registered_items, - expected_string_representation) + expected_string_representation, + ) - @mock.patch('cloudinit.reporting.events.instantiated_handler_registry', - new_callable=_fake_registry) + @mock.patch( + "cloudinit.reporting.events.instantiated_handler_registry", + new_callable=_fake_registry, + ) def test_reporting_successful_finish_has_sensible_string_repr( - self, instantiated_handler_registry): + self, instantiated_handler_registry + ): event_name, event_description = self._report_finish_event( - result=events.status.SUCCESS) - expected_string_representation = ': '.join( - ['finish', event_name, events.status.SUCCESS, - event_description]) + result=events.status.SUCCESS + ) + expected_string_representation = ": ".join( + ["finish", event_name, events.status.SUCCESS, event_description] + ) self.assertHandlersPassedObjectWithAsString( instantiated_handler_registry.registered_items, - expected_string_representation) + expected_string_representation, + ) - @mock.patch('cloudinit.reporting.events.instantiated_handler_registry', - new_callable=_fake_registry) + @mock.patch( + "cloudinit.reporting.events.instantiated_handler_registry", + new_callable=_fake_registry, + ) def test_reporting_unsuccessful_finish_has_sensible_string_repr( - self, instantiated_handler_registry): + self, instantiated_handler_registry + ): event_name, event_description = self._report_finish_event( - result=events.status.FAIL) - expected_string_representation = ': '.join( - ['finish', event_name, events.status.FAIL, event_description]) + result=events.status.FAIL + ) + expected_string_representation = ": ".join( + ["finish", event_name, events.status.FAIL, event_description] + ) self.assertHandlersPassedObjectWithAsString( instantiated_handler_registry.registered_items, - expected_string_representation) + expected_string_representation, + ) def test_invalid_result_raises_attribute_error(self): self.assertRaises(ValueError, self._report_finish_event, ("BOGUS",)) class TestReportingEvent(TestCase): - def test_as_string(self): - event_type, name, description = 'test_type', 'test_name', 'test_desc' + event_type, name, description = "test_type", "test_name", "test_desc" event = events.ReportingEvent(event_type, name, description) - expected_string_representation = ': '.join( - [event_type, name, description]) + expected_string_representation = ": ".join( + [event_type, name, description] + ) 
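# Annotation (an assumption reconstructed from the assertions in this class,
# not quoted from cloudinit/reporting): events serialize as the colon-joined
# form "<event_type>: <name>: <description>", with finish events splicing the
# result status in as "finish: <name>: <SUCCESS|FAIL>: <description>".
# A minimal stand-in satisfying that string contract:
#
#     def as_string(event_type, name, description):
#         return ": ".join([event_type, name, description])
#
#     as_string("start", "my_test_event", "my description")
#     # -> 'start: my_test_event: my description'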
self.assertEqual(expected_string_representation, event.as_string()) def test_as_dict(self): - event_type, name, desc = 'test_type', 'test_name', 'test_desc' + event_type, name, desc = "test_type", "test_name", "test_desc" event = events.ReportingEvent(event_type, name, desc) - expected = {'event_type': event_type, 'name': name, - 'description': desc, 'origin': 'cloudinit'} + expected = { + "event_type": event_type, + "name": name, + "description": desc, + "origin": "cloudinit", + } # allow for timestamp to differ, but must be present as_dict = event.as_dict() - self.assertIn('timestamp', as_dict) - del as_dict['timestamp'] + self.assertIn("timestamp", as_dict) + del as_dict["timestamp"] self.assertEqual(expected, as_dict) class TestFinishReportingEvent(TestCase): - def test_as_has_result(self): result = events.status.SUCCESS - name, desc = 'test_name', 'test_desc' + name, desc = "test_name", "test_desc" event = events.FinishReportingEvent(name, desc, result) ret = event.as_dict() - self.assertTrue('result' in ret) - self.assertEqual(ret['result'], result) + self.assertTrue("result" in ret) + self.assertEqual(ret["result"], result) def test_has_result_with_optional_post_files(self): result = events.status.SUCCESS - name, desc, files = 'test_name', 'test_desc', [ - '/really/fake/path/install.log'] + name, desc, files = ( + "test_name", + "test_desc", + ["/really/fake/path/install.log"], + ) event = events.FinishReportingEvent( - name, desc, result, post_files=files) + name, desc, result, post_files=files + ) ret = event.as_dict() - self.assertTrue('result' in ret) - self.assertTrue('files' in ret) - self.assertEqual(ret['result'], result) - posted_install_log = ret['files'][0] - self.assertTrue('path' in posted_install_log) - self.assertTrue('content' in posted_install_log) - self.assertTrue('encoding' in posted_install_log) - self.assertEqual(posted_install_log['path'], files[0]) - self.assertEqual(posted_install_log['encoding'], 'base64') + self.assertTrue("result" in ret) + self.assertTrue("files" in ret) + self.assertEqual(ret["result"], result) + posted_install_log = ret["files"][0] + self.assertTrue("path" in posted_install_log) + self.assertTrue("content" in posted_install_log) + self.assertTrue("encoding" in posted_install_log) + self.assertEqual(posted_install_log["path"], files[0]) + self.assertEqual(posted_install_log["encoding"], "base64") class TestBaseReportingHandler(TestCase): - def test_base_reporting_handler_is_abstract(self): regexp = r".*abstract.*publish_event.*" self.assertRaisesRegex(TypeError, regexp, handlers.ReportingHandler) class TestLogHandler(TestCase): - - @mock.patch.object(reporting.handlers.logging, 'getLogger') + @mock.patch.object(reporting.handlers.logging, "getLogger") def test_appropriate_logger_used(self, getLogger): - event_type, event_name = 'test_type', 'test_name' - event = events.ReportingEvent(event_type, event_name, 'description') + event_type, event_name = "test_type", "test_name" + event = events.ReportingEvent(event_type, event_name, "description") reporting.handlers.LogHandler().publish_event(event) self.assertEqual( - [mock.call( - 'cloudinit.reporting.{0}.{1}'.format(event_type, event_name))], - getLogger.call_args_list) - - @mock.patch.object(reporting.handlers.logging, 'getLogger') + [ + mock.call( + "cloudinit.reporting.{0}.{1}".format( + event_type, event_name + ) + ) + ], + getLogger.call_args_list, + ) + + @mock.patch.object(reporting.handlers.logging, "getLogger") def test_single_log_message_at_info_published(self, getLogger): - 
event = events.ReportingEvent('type', 'name', 'description') + event = events.ReportingEvent("type", "name", "description") reporting.handlers.LogHandler().publish_event(event) self.assertEqual(1, getLogger.return_value.log.call_count) - @mock.patch.object(reporting.handlers.logging, 'getLogger') + @mock.patch.object(reporting.handlers.logging, "getLogger") def test_log_message_uses_event_as_string(self, getLogger): - event = events.ReportingEvent('type', 'name', 'description') + event = events.ReportingEvent("type", "name", "description") reporting.handlers.LogHandler(level="INFO").publish_event(event) - self.assertIn(event.as_string(), - getLogger.return_value.log.call_args[0][1]) + self.assertIn( + event.as_string(), getLogger.return_value.log.call_args[0][1] + ) class TestDefaultRegisteredHandler(TestCase): - def test_log_handler_registered_by_default(self): registered_items = ( - reporting.instantiated_handler_registry.registered_items) + reporting.instantiated_handler_registry.registered_items + ) for _, item in registered_items.items(): if isinstance(item, reporting.handlers.LogHandler): break else: - self.fail('No reporting LogHandler registered by default.') + self.fail("No reporting LogHandler registered by default.") class TestReportingConfiguration(TestCase): - - @mock.patch.object(reporting, 'instantiated_handler_registry') + @mock.patch.object(reporting, "instantiated_handler_registry") def test_empty_configuration_doesnt_add_handlers( - self, instantiated_handler_registry): + self, instantiated_handler_registry + ): reporting.update_configuration({}) self.assertEqual( - 0, instantiated_handler_registry.register_item.call_count) + 0, instantiated_handler_registry.register_item.call_count + ) @mock.patch.object( - reporting, 'instantiated_handler_registry', reporting.DictRegistry()) - @mock.patch.object(reporting, 'available_handlers') + reporting, "instantiated_handler_registry", reporting.DictRegistry() + ) + @mock.patch.object(reporting, "available_handlers") def test_looks_up_handler_by_type_and_adds_it(self, available_handlers): - handler_type_name = 'test_handler' + handler_type_name = "test_handler" handler_cls = mock.Mock() available_handlers.registered_items = {handler_type_name: handler_cls} - handler_name = 'my_test_handler' + handler_name = "my_test_handler" reporting.update_configuration( - {handler_name: {'type': handler_type_name}}) + {handler_name: {"type": handler_type_name}} + ) self.assertEqual( {handler_name: handler_cls.return_value}, - reporting.instantiated_handler_registry.registered_items) + reporting.instantiated_handler_registry.registered_items, + ) @mock.patch.object( - reporting, 'instantiated_handler_registry', reporting.DictRegistry()) - @mock.patch.object(reporting, 'available_handlers') + reporting, "instantiated_handler_registry", reporting.DictRegistry() + ) + @mock.patch.object(reporting, "available_handlers") def test_uses_non_type_parts_of_config_dict_as_kwargs( - self, available_handlers): - handler_type_name = 'test_handler' + self, available_handlers + ): + handler_type_name = "test_handler" handler_cls = mock.Mock() available_handlers.registered_items = {handler_type_name: handler_cls} - extra_kwargs = {'foo': 'bar', 'bar': 'baz'} + extra_kwargs = {"foo": "bar", "bar": "baz"} handler_config = extra_kwargs.copy() - handler_config.update({'type': handler_type_name}) - handler_name = 'my_test_handler' + handler_config.update({"type": handler_type_name}) + handler_name = "my_test_handler" reporting.update_configuration({handler_name: 
handler_config}) self.assertEqual( handler_cls.return_value, reporting.instantiated_handler_registry.registered_items[ - handler_name]) - self.assertEqual([mock.call(**extra_kwargs)], - handler_cls.call_args_list) + handler_name + ], + ) + self.assertEqual( + [mock.call(**extra_kwargs)], handler_cls.call_args_list + ) @mock.patch.object( - reporting, 'instantiated_handler_registry', reporting.DictRegistry()) - @mock.patch.object(reporting, 'available_handlers') + reporting, "instantiated_handler_registry", reporting.DictRegistry() + ) + @mock.patch.object(reporting, "available_handlers") def test_handler_config_not_modified(self, available_handlers): - handler_type_name = 'test_handler' + handler_type_name = "test_handler" handler_cls = mock.Mock() available_handlers.registered_items = {handler_type_name: handler_cls} - handler_config = {'type': handler_type_name, 'foo': 'bar'} + handler_config = {"type": handler_type_name, "foo": "bar"} expected_handler_config = handler_config.copy() - reporting.update_configuration({'my_test_handler': handler_config}) + reporting.update_configuration({"my_test_handler": handler_config}) self.assertEqual(expected_handler_config, handler_config) @mock.patch.object( - reporting, 'instantiated_handler_registry', reporting.DictRegistry()) - @mock.patch.object(reporting, 'available_handlers') + reporting, "instantiated_handler_registry", reporting.DictRegistry() + ) + @mock.patch.object(reporting, "available_handlers") def test_handlers_removed_if_falseish_specified(self, available_handlers): - handler_type_name = 'test_handler' + handler_type_name = "test_handler" handler_cls = mock.Mock() available_handlers.registered_items = {handler_type_name: handler_cls} - handler_name = 'my_test_handler' + handler_name = "my_test_handler" reporting.update_configuration( - {handler_name: {'type': handler_type_name}}) + {handler_name: {"type": handler_type_name}} + ) self.assertEqual( - 1, len(reporting.instantiated_handler_registry.registered_items)) + 1, len(reporting.instantiated_handler_registry.registered_items) + ) reporting.update_configuration({handler_name: None}) self.assertEqual( - 0, len(reporting.instantiated_handler_registry.registered_items)) + 0, len(reporting.instantiated_handler_registry.registered_items) + ) class TestReportingEventStack(TestCase): - @mock.patch('cloudinit.reporting.events.report_finish_event') - @mock.patch('cloudinit.reporting.events.report_start_event') + @mock.patch("cloudinit.reporting.events.report_finish_event") + @mock.patch("cloudinit.reporting.events.report_start_event") def test_start_and_finish_success(self, report_start, report_finish): with events.ReportEventStack(name="myname", description="mydesc"): pass self.assertEqual( - [mock.call('myname', 'mydesc')], report_start.call_args_list) + [mock.call("myname", "mydesc")], report_start.call_args_list + ) self.assertEqual( - [mock.call('myname', 'mydesc', events.status.SUCCESS, - post_files=[])], - report_finish.call_args_list) - - @mock.patch('cloudinit.reporting.events.report_finish_event') - @mock.patch('cloudinit.reporting.events.report_start_event') + [ + mock.call( + "myname", "mydesc", events.status.SUCCESS, post_files=[] + ) + ], + report_finish.call_args_list, + ) + + @mock.patch("cloudinit.reporting.events.report_finish_event") + @mock.patch("cloudinit.reporting.events.report_start_event") def test_finish_exception_defaults_fail(self, report_start, report_finish): name = "myname" desc = "mydesc" @@ -283,31 +334,34 @@ class TestReportingEventStack(TestCase): 
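# ReportEventStack is exercised below as a context manager: entering the
# block reports a start event, exiting reports a finish event, an unhandled
# exception flips the result to FAIL, and result_on_exception overrides
# that default. A sketch of the usage pattern under test (names taken
# directly from the tests):
from cloudinit.reporting import events

with events.ReportEventStack(
    "myname", "mydesc", result_on_exception=events.status.WARN
):
    pass  # if this body raised, the finish event would carry WARN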
self.assertEqual([mock.call(name, desc)], report_start.call_args_list) self.assertEqual( [mock.call(name, desc, events.status.FAIL, post_files=[])], - report_finish.call_args_list) + report_finish.call_args_list, + ) - @mock.patch('cloudinit.reporting.events.report_finish_event') - @mock.patch('cloudinit.reporting.events.report_start_event') + @mock.patch("cloudinit.reporting.events.report_finish_event") + @mock.patch("cloudinit.reporting.events.report_start_event") def test_result_on_exception_used(self, report_start, report_finish): name = "myname" desc = "mydesc" try: with events.ReportEventStack( - name, desc, result_on_exception=events.status.WARN): + name, desc, result_on_exception=events.status.WARN + ): raise ValueError("This didnt work") except ValueError: pass self.assertEqual([mock.call(name, desc)], report_start.call_args_list) self.assertEqual( [mock.call(name, desc, events.status.WARN, post_files=[])], - report_finish.call_args_list) + report_finish.call_args_list, + ) - @mock.patch('cloudinit.reporting.events.report_start_event') + @mock.patch("cloudinit.reporting.events.report_start_event") def test_child_fullname_respects_parent(self, report_start): parent_name = "topname" c1_name = "c1name" c2_name = "c2name" - c2_expected_fullname = '/'.join([parent_name, c1_name, c2_name]) - c1_expected_fullname = '/'.join([parent_name, c1_name]) + c2_expected_fullname = "/".join([parent_name, c1_name, c2_name]) + c1_expected_fullname = "/".join([parent_name, c1_name]) parent = events.ReportEventStack(parent_name, "topdesc") c1 = events.ReportEventStack(c1_name, "c1desc", parent=parent) @@ -317,8 +371,8 @@ class TestReportingEventStack(TestCase): with c2: report_start.assert_called_with(c2_expected_fullname, "c2desc") - @mock.patch('cloudinit.reporting.events.report_finish_event') - @mock.patch('cloudinit.reporting.events.report_start_event') + @mock.patch("cloudinit.reporting.events.report_finish_event") + @mock.patch("cloudinit.reporting.events.report_start_event") def test_child_result_bubbles_up(self, report_start, report_finish): parent = events.ReportEventStack("topname", "topdesc") child = events.ReportEventStack("c_name", "c_desc", parent=parent) @@ -327,42 +381,53 @@ class TestReportingEventStack(TestCase): child.result = events.status.WARN report_finish.assert_called_with( - "topname", "topdesc", events.status.WARN, post_files=[]) + "topname", "topdesc", events.status.WARN, post_files=[] + ) - @mock.patch('cloudinit.reporting.events.report_finish_event') + @mock.patch("cloudinit.reporting.events.report_finish_event") def test_message_used_in_finish(self, report_finish): - with events.ReportEventStack("myname", "mydesc", - message="mymessage"): + with events.ReportEventStack("myname", "mydesc", message="mymessage"): pass self.assertEqual( - [mock.call("myname", "mymessage", events.status.SUCCESS, - post_files=[])], - report_finish.call_args_list) - - @mock.patch('cloudinit.reporting.events.report_finish_event') + [ + mock.call( + "myname", "mymessage", events.status.SUCCESS, post_files=[] + ) + ], + report_finish.call_args_list, + ) + + @mock.patch("cloudinit.reporting.events.report_finish_event") def test_message_updatable(self, report_finish): with events.ReportEventStack("myname", "mydesc") as c: c.message = "all good" self.assertEqual( - [mock.call("myname", "all good", events.status.SUCCESS, - post_files=[])], - report_finish.call_args_list) - - @mock.patch('cloudinit.reporting.events.report_start_event') - @mock.patch('cloudinit.reporting.events.report_finish_event') + [ 
+ mock.call( + "myname", "all good", events.status.SUCCESS, post_files=[] + ) + ], + report_finish.call_args_list, + ) + + @mock.patch("cloudinit.reporting.events.report_start_event") + @mock.patch("cloudinit.reporting.events.report_finish_event") def test_reporting_disabled_does_not_report_events( - self, report_start, report_finish): + self, report_start, report_finish + ): with events.ReportEventStack("a", "b", reporting_enabled=False): pass self.assertEqual(report_start.call_count, 0) self.assertEqual(report_finish.call_count, 0) - @mock.patch('cloudinit.reporting.events.report_start_event') - @mock.patch('cloudinit.reporting.events.report_finish_event') + @mock.patch("cloudinit.reporting.events.report_start_event") + @mock.patch("cloudinit.reporting.events.report_finish_event") def test_reporting_child_default_to_parent( - self, report_start, report_finish): + self, report_start, report_finish + ): parent = events.ReportEventStack( - "pname", "pdesc", reporting_enabled=False) + "pname", "pdesc", reporting_enabled=False + ) child = events.ReportEventStack("cname", "cdesc", parent=parent) with parent: with child: @@ -371,8 +436,9 @@ class TestReportingEventStack(TestCase): self.assertEqual(report_finish.call_count, 0) def test_reporting_event_has_sane_repr(self): - myrep = events.ReportEventStack("fooname", "foodesc", - reporting_enabled=True).__repr__() + myrep = events.ReportEventStack( + "fooname", "foodesc", reporting_enabled=True + ).__repr__() self.assertIn("fooname", myrep) self.assertIn("foodesc", myrep) self.assertIn("True", myrep) @@ -386,4 +452,5 @@ class TestStatusAccess(TestCase): def test_invalid_status_access_raises_value_error(self): self.assertRaises(AttributeError, getattr, events.status, "BOGUS") + # vi: ts=4 expandtab diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py index 24a1dcc7..35ab0c58 100644 --- a/tests/unittests/test_reporting_hyperv.py +++ b/tests/unittests/test_reporting_hyperv.py @@ -1,27 +1,25 @@ # This file is part of cloud-init. See LICENSE file for license information. 
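# The Hyper-V tests below drive HyperVKvpReportingHandler, which writes
# events into the KVP pool file as fixed-size key/value records. A sketch
# of that record layout, using the same struct format string the tests
# themselves use (size constants come from HyperVKvpReportingHandler;
# the key/value bytes here are illustrative):
import struct

from cloudinit.reporting.handlers import HyperVKvpReportingHandler

record = struct.pack(
    "%ds%ds"
    % (
        HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
        HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE,
    ),
    b"key1",  # key, zero-padded to the fixed key size
    b"value1",  # value, zero-padded to the fixed value size
)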
import base64 -import zlib - -from cloudinit.reporting import events, instantiated_handler_registry -from cloudinit.reporting.handlers import HyperVKvpReportingHandler, LogHandler - import json import os +import re import struct import time -import re +import zlib from unittest import mock from cloudinit import util -from tests.unittests.helpers import CiTestCase +from cloudinit.reporting import events, instantiated_handler_registry +from cloudinit.reporting.handlers import HyperVKvpReportingHandler, LogHandler from cloudinit.sources.helpers import azure +from tests.unittests.helpers import CiTestCase class TestKvpEncoding(CiTestCase): def test_encode_decode(self): - kvp = {'key': 'key1', 'value': 'value1'} + kvp = {"key": "key1", "value": "value1"} kvp_reporting = HyperVKvpReportingHandler() - data = kvp_reporting._encode_kvp_item(kvp['key'], kvp['value']) + data = kvp_reporting._encode_kvp_item(kvp["key"], kvp["value"]) self.assertEqual(len(data), kvp_reporting.HV_KVP_RECORD_SIZE) decoded_kvp = kvp_reporting._decode_kvp_item(data) self.assertEqual(kvp, decoded_kvp) @@ -30,71 +28,72 @@ class TestKvpEncoding(CiTestCase): class TextKvpReporter(CiTestCase): def setUp(self): super(TextKvpReporter, self).setUp() - self.tmp_file_path = self.tmp_path('kvp_pool_file') + self.tmp_file_path = self.tmp_path("kvp_pool_file") util.ensure_file(self.tmp_file_path) def test_events_with_higher_incarnation_not_over_written(self): - reporter = HyperVKvpReportingHandler( - kvp_file_path=self.tmp_file_path) + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) self.assertEqual(0, len(list(reporter._iterate_kvps(0)))) reporter.publish_event( - events.ReportingEvent('foo', 'name1', 'description')) + events.ReportingEvent("foo", "name1", "description") + ) reporter.publish_event( - events.ReportingEvent('foo', 'name2', 'description')) + events.ReportingEvent("foo", "name2", "description") + ) reporter.q.join() self.assertEqual(2, len(list(reporter._iterate_kvps(0)))) - reporter3 = HyperVKvpReportingHandler( - kvp_file_path=self.tmp_file_path) + reporter3 = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) reporter3.incarnation_no = reporter.incarnation_no - 1 reporter3.publish_event( - events.ReportingEvent('foo', 'name3', 'description')) + events.ReportingEvent("foo", "name3", "description") + ) reporter3.q.join() self.assertEqual(3, len(list(reporter3._iterate_kvps(0)))) def test_finish_event_result_is_logged(self): - reporter = HyperVKvpReportingHandler( - kvp_file_path=self.tmp_file_path) + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) reporter.publish_event( - events.FinishReportingEvent('name2', 'description1', - result=events.status.FAIL)) + events.FinishReportingEvent( + "name2", "description1", result=events.status.FAIL + ) + ) reporter.q.join() - self.assertIn('FAIL', list(reporter._iterate_kvps(0))[0]['value']) + self.assertIn("FAIL", list(reporter._iterate_kvps(0))[0]["value"]) def test_file_operation_issue(self): os.remove(self.tmp_file_path) - reporter = HyperVKvpReportingHandler( - kvp_file_path=self.tmp_file_path) + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) reporter.publish_event( - events.FinishReportingEvent('name2', 'description1', - result=events.status.FAIL)) + events.FinishReportingEvent( + "name2", "description1", result=events.status.FAIL + ) + ) reporter.q.join() def test_event_very_long(self): - reporter = HyperVKvpReportingHandler( - kvp_file_path=self.tmp_file_path) - description = 'ab' * 
reporter.HV_KVP_AZURE_MAX_VALUE_SIZE + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + description = "ab" * reporter.HV_KVP_AZURE_MAX_VALUE_SIZE long_event = events.FinishReportingEvent( - 'event_name', - description, - result=events.status.FAIL) + "event_name", description, result=events.status.FAIL + ) reporter.publish_event(long_event) reporter.q.join() kvps = list(reporter._iterate_kvps(0)) self.assertEqual(3, len(kvps)) # restore from the kvp to see the content are all there - full_description = '' + full_description = "" for i in range(len(kvps)): - msg_slice = json.loads(kvps[i]['value']) - self.assertEqual(msg_slice['msg_i'], i) - full_description += msg_slice['msg'] + msg_slice = json.loads(kvps[i]["value"]) + self.assertEqual(msg_slice["msg_i"], i) + full_description += msg_slice["msg"] self.assertEqual(description, full_description) def test_not_truncate_kvp_file_modified_after_boot(self): with open(self.tmp_file_path, "wb+") as f: - kvp = {'key': 'key1', 'value': 'value1'} + kvp = {"key": "key1", "value": "value1"} data = struct.pack( "%ds%ds" % ( @@ -118,11 +117,16 @@ class TextKvpReporter(CiTestCase): def test_truncate_stale_kvp_file(self): with open(self.tmp_file_path, "wb+") as f: - kvp = {'key': 'key1', 'value': 'value1'} - data = (struct.pack("%ds%ds" % ( - HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE, - HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE), - kvp['key'].encode('utf-8'), kvp['value'].encode('utf-8'))) + kvp = {"key": "key1", "value": "value1"} + data = struct.pack( + "%ds%ds" + % ( + HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE, + HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE, + ), + kvp["key"].encode("utf-8"), + kvp["value"].encode("utf-8"), + ) f.write(data) # set the time ways back to make it look like @@ -137,8 +141,8 @@ class TextKvpReporter(CiTestCase): kvps = list(reporter._iterate_kvps(0)) self.assertEqual(0, len(kvps)) - @mock.patch('cloudinit.distros.uses_systemd') - @mock.patch('cloudinit.subp.subp') + @mock.patch("cloudinit.distros.uses_systemd") + @mock.patch("cloudinit.subp.subp") def test_get_boot_telemetry(self, m_subp, m_sysd): reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) datetime_pattern = ( @@ -149,8 +153,9 @@ class TextKvpReporter(CiTestCase): # get_boot_telemetry makes two subp calls to systemctl. 
We provide # a list of values that the subp calls should return m_subp.side_effect = [ - ('UserspaceTimestampMonotonic=1844838', ''), - ('InactiveExitTimestampMonotonic=3068203', '')] + ("UserspaceTimestampMonotonic=1844838", ""), + ("InactiveExitTimestampMonotonic=3068203", ""), + ] m_sysd.return_value = True reporter.publish_event(azure.get_boot_telemetry()) @@ -158,15 +163,13 @@ class TextKvpReporter(CiTestCase): kvps = list(reporter._iterate_kvps(0)) self.assertEqual(1, len(kvps)) - evt_msg = kvps[0]['value'] + evt_msg = kvps[0]["value"] if not re.search("kernel_start=" + datetime_pattern, evt_msg): raise AssertionError("missing kernel_start timestamp") if not re.search("user_start=" + datetime_pattern, evt_msg): raise AssertionError("missing user_start timestamp") - if not re.search("cloudinit_activation=" + datetime_pattern, - evt_msg): - raise AssertionError( - "missing cloudinit_activation timestamp") + if not re.search("cloudinit_activation=" + datetime_pattern, evt_msg): + raise AssertionError("missing cloudinit_activation timestamp") def test_get_system_info(self): reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) @@ -176,7 +179,7 @@ class TextKvpReporter(CiTestCase): reporter.q.join() kvps = list(reporter._iterate_kvps(0)) self.assertEqual(1, len(kvps)) - evt_msg = kvps[0]['value'] + evt_msg = kvps[0]["value"] # the most important information is cloudinit version, # kernel_version, and the distro variant. It is ok if @@ -191,12 +194,11 @@ class TextKvpReporter(CiTestCase): def test_report_diagnostic_event_without_logger_func(self): reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) diagnostic_msg = "test_diagnostic" - reporter.publish_event( - azure.report_diagnostic_event(diagnostic_msg)) + reporter.publish_event(azure.report_diagnostic_event(diagnostic_msg)) reporter.q.join() kvps = list(reporter._iterate_kvps(0)) self.assertEqual(1, len(kvps)) - evt_msg = kvps[0]['value'] + evt_msg = kvps[0]["value"] if diagnostic_msg not in evt_msg: raise AssertionError("missing expected diagnostic message") @@ -206,12 +208,14 @@ class TextKvpReporter(CiTestCase): logger_func = mock.MagicMock() diagnostic_msg = "test_diagnostic" reporter.publish_event( - azure.report_diagnostic_event(diagnostic_msg, - logger_func=logger_func)) + azure.report_diagnostic_event( + diagnostic_msg, logger_func=logger_func + ) + ) reporter.q.join() kvps = list(reporter._iterate_kvps(0)) self.assertEqual(1, len(kvps)) - evt_msg = kvps[0]['value'] + evt_msg = kvps[0]["value"] if diagnostic_msg not in evt_msg: raise AssertionError("missing expected diagnostic message") @@ -221,18 +225,18 @@ class TextKvpReporter(CiTestCase): reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) try: instantiated_handler_registry.register_item("telemetry", reporter) - event_desc = b'test_compressed' - azure.report_compressed_event( - "compressed event", event_desc) + event_desc = b"test_compressed" + azure.report_compressed_event("compressed event", event_desc) self.validate_compressed_kvps(reporter, 1, [event_desc]) finally: - instantiated_handler_registry.unregister_item("telemetry", - force=False) + instantiated_handler_registry.unregister_item( + "telemetry", force=False + ) - @mock.patch('cloudinit.sources.helpers.azure.report_compressed_event') - @mock.patch('cloudinit.sources.helpers.azure.report_diagnostic_event') - @mock.patch('cloudinit.subp.subp') + @mock.patch("cloudinit.sources.helpers.azure.report_compressed_event") + 
@mock.patch("cloudinit.sources.helpers.azure.report_diagnostic_event") + @mock.patch("cloudinit.subp.subp") def test_push_log_to_kvp_exception_handling(self, m_subp, m_diag, m_com): reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) try: @@ -240,7 +244,8 @@ class TextKvpReporter(CiTestCase): log_file = self.tmp_path("cloud-init.log") azure.MAX_LOG_TO_KVP_LENGTH = 100 azure.LOG_PUSHED_TO_KVP_INDEX_FILE = self.tmp_path( - 'log_pushed_to_kvp') + "log_pushed_to_kvp" + ) with open(log_file, "w") as f: log_content = "A" * 50 + "B" * 100 f.write(log_content) @@ -251,11 +256,12 @@ class TextKvpReporter(CiTestCase): # exceptions will trigger diagnostic reporting calls self.assertEqual(m_diag.call_count, 3) finally: - instantiated_handler_registry.unregister_item("telemetry", - force=False) + instantiated_handler_registry.unregister_item( + "telemetry", force=False + ) - @mock.patch('cloudinit.subp.subp') - @mock.patch.object(LogHandler, 'publish_event') + @mock.patch("cloudinit.subp.subp") + @mock.patch.object(LogHandler, "publish_event") def test_push_log_to_kvp(self, publish_event, m_subp): reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) try: @@ -263,7 +269,8 @@ class TextKvpReporter(CiTestCase): log_file = self.tmp_path("cloud-init.log") azure.MAX_LOG_TO_KVP_LENGTH = 100 azure.LOG_PUSHED_TO_KVP_INDEX_FILE = self.tmp_path( - 'log_pushed_to_kvp') + "log_pushed_to_kvp" + ) with open(log_file, "w") as f: log_content = "A" * 50 + "B" * 100 f.write(log_content) @@ -275,20 +282,25 @@ class TextKvpReporter(CiTestCase): azure.push_log_to_kvp(log_file) # make sure dmesg is called every time - m_subp.assert_called_with( - ['dmesg'], capture=True, decode=False) + m_subp.assert_called_with(["dmesg"], capture=True, decode=False) for call_arg in publish_event.call_args_list: event = call_arg[0][0] self.assertNotEqual( - event.event_type, azure.COMPRESSED_EVENT_TYPE) + event.event_type, azure.COMPRESSED_EVENT_TYPE + ) self.validate_compressed_kvps( - reporter, 2, - [log_content[-azure.MAX_LOG_TO_KVP_LENGTH:].encode(), - extra_content.encode()]) + reporter, + 2, + [ + log_content[-azure.MAX_LOG_TO_KVP_LENGTH :].encode(), + extra_content.encode(), + ], + ) finally: - instantiated_handler_registry.unregister_item("telemetry", - force=False) + instantiated_handler_registry.unregister_item( + "telemetry", force=False + ) def validate_compressed_kvps(self, reporter, count, values): reporter.q.join() @@ -296,7 +308,7 @@ class TextKvpReporter(CiTestCase): compressed_count = 0 for i in range(len(kvps)): kvp = kvps[i] - kvp_value = kvp['value'] + kvp_value = kvp["value"] kvp_value_json = json.loads(kvp_value) evt_msg = kvp_value_json["msg"] evt_type = kvp_value_json["type"] @@ -305,7 +317,8 @@ class TextKvpReporter(CiTestCase): evt_msg_json = json.loads(evt_msg) evt_encoding = evt_msg_json["encoding"] evt_data = zlib.decompress( - base64.decodebytes(evt_msg_json["data"].encode("ascii"))) + base64.decodebytes(evt_msg_json["data"].encode("ascii")) + ) self.assertLess(compressed_count, len(values)) self.assertEqual(evt_data, values[compressed_count]) @@ -316,17 +329,21 @@ class TextKvpReporter(CiTestCase): def test_unique_kvp_key(self): reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) evt1 = events.ReportingEvent( - "event_type", 'event_message', - "event_description") + "event_type", "event_message", "event_description" + ) reporter.publish_event(evt1) evt2 = events.ReportingEvent( - "event_type", 'event_message', - "event_description", 
timestamp=evt1.timestamp + 1) + "event_type", + "event_message", + "event_description", + timestamp=evt1.timestamp + 1, + ) reporter.publish_event(evt2) reporter.q.join() kvps = list(reporter._iterate_kvps(0)) self.assertEqual(2, len(kvps)) - self.assertNotEqual(kvps[0]["key"], kvps[1]["key"], - "duplicate keys for KVP entries") + self.assertNotEqual( + kvps[0]["key"], kvps[1]["key"], "duplicate keys for KVP entries" + ) diff --git a/tests/unittests/test_simpletable.py b/tests/unittests/test_simpletable.py index 69b30f0e..ee7eb0b4 100644 --- a/tests/unittests/test_simpletable.py +++ b/tests/unittests/test_simpletable.py @@ -13,14 +13,19 @@ from cloudinit.simpletable import SimpleTable from tests.unittests.helpers import CiTestCase # Examples rendered by cloud-init using PrettyTable -NET_DEVICE_FIELDS = ( - 'Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address') +NET_DEVICE_FIELDS = ("Device", "Up", "Address", "Mask", "Scope", "Hw-Address") NET_DEVICE_ROWS = ( - ('ens3', True, '172.31.4.203', '255.255.240.0', '.', '0a:1f:07:15:98:70'), - ('ens3', True, 'fe80::81f:7ff:fe15:9870/64', '.', 'link', - '0a:1f:07:15:98:70'), - ('lo', True, '127.0.0.1', '255.0.0.0', '.', '.'), - ('lo', True, '::1/128', '.', 'host', '.'), + ("ens3", True, "172.31.4.203", "255.255.240.0", ".", "0a:1f:07:15:98:70"), + ( + "ens3", + True, + "fe80::81f:7ff:fe15:9870/64", + ".", + "link", + "0a:1f:07:15:98:70", + ), + ("lo", True, "127.0.0.1", "255.0.0.0", ".", "."), + ("lo", True, "::1/128", ".", "host", "."), ) NET_DEVICE_TABLE = """\ +--------+------+----------------------------+---------------+-------+-------------------+ @@ -32,11 +37,17 @@ NET_DEVICE_TABLE = """\ | lo | True | ::1/128 | . | host | . | +--------+------+----------------------------+---------------+-------+-------------------+""" # noqa: E501 ROUTE_IPV4_FIELDS = ( - 'Route', 'Destination', 'Gateway', 'Genmask', 'Interface', 'Flags') + "Route", + "Destination", + "Gateway", + "Genmask", + "Interface", + "Flags", +) ROUTE_IPV4_ROWS = ( - ('0', '0.0.0.0', '172.31.0.1', '0.0.0.0', 'ens3', 'UG'), - ('1', '169.254.0.0', '0.0.0.0', '255.255.0.0', 'ens3', 'U'), - ('2', '172.31.0.0', '0.0.0.0', '255.255.240.0', 'ens3', 'U'), + ("0", "0.0.0.0", "172.31.0.1", "0.0.0.0", "ens3", "UG"), + ("1", "169.254.0.0", "0.0.0.0", "255.255.0.0", "ens3", "U"), + ("2", "172.31.0.0", "0.0.0.0", "255.255.240.0", "ens3", "U"), ) ROUTE_IPV4_TABLE = """\ +-------+-------------+------------+---------------+-----------+-------+ @@ -47,11 +58,14 @@ ROUTE_IPV4_TABLE = """\ | 2 | 172.31.0.0 | 0.0.0.0 | 255.255.240.0 | ens3 | U | +-------+-------------+------------+---------------+-----------+-------+""" -AUTHORIZED_KEYS_FIELDS = ( - 'Keytype', 'Fingerprint (md5)', 'Options', 'Comment') +AUTHORIZED_KEYS_FIELDS = ("Keytype", "Fingerprint (md5)", "Options", "Comment") AUTHORIZED_KEYS_ROWS = ( - ('ssh-rsa', '24:c7:41:49:47:12:31:a0:de:6f:62:79:9b:13:06:36', '-', - 'ajorgens'), + ( + "ssh-rsa", + "24:c7:41:49:47:12:31:a0:de:6f:62:79:9b:13:06:36", + "-", + "ajorgens", + ), ) AUTHORIZED_KEYS_TABLE = """\ +---------+-------------------------------------------------+---------+----------+ @@ -63,7 +77,7 @@ AUTHORIZED_KEYS_TABLE = """\ # from prettytable import PrettyTable # pt = PrettyTable(('HEADER',)) # print(pt) -NO_ROWS_FIELDS = ('HEADER',) +NO_ROWS_FIELDS = ("HEADER",) NO_ROWS_TABLE = """\ +--------+ | HEADER | @@ -72,7 +86,6 @@ NO_ROWS_TABLE = """\ class TestSimpleTable(CiTestCase): - def test_no_rows(self): """An empty table is rendered as PrettyTable would have done it.""" table = 
SimpleTable(NO_ROWS_FIELDS) diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index b210bd3b..d614350e 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -1,27 +1,29 @@ # This file is part of cloud-init. See LICENSE file for license information. import os - from collections import namedtuple from functools import partial from unittest.mock import patch -from cloudinit import ssh_util +from cloudinit import ssh_util, util from tests.unittests import helpers as test_helpers -from cloudinit import util # https://stackoverflow.com/questions/11351032/ -FakePwEnt = namedtuple('FakePwEnt', [ - 'pw_name', - 'pw_passwd', - 'pw_uid', - 'pw_gid', - 'pw_gecos', - 'pw_dir', - 'pw_shell', -]) +FakePwEnt = namedtuple( + "FakePwEnt", + [ + "pw_name", + "pw_passwd", + "pw_uid", + "pw_gid", + "pw_gecos", + "pw_dir", + "pw_shell", + ], +) FakePwEnt.__new__.__defaults__ = tuple( - "UNSET_%s" % n for n in FakePwEnt._fields) + "UNSET_%s" % n for n in FakePwEnt._fields +) def mock_get_owner(updated_permissions, value): @@ -57,7 +59,7 @@ def mock_getpwnam(users, username): # the testdata for OpenSSH, and their private keys are available # https://github.com/openssh/openssh-portable/tree/master/regress/unittests/sshkey/testdata VALID_CONTENT = { - 'dsa': ( + "dsa": ( "AAAAB3NzaC1kc3MAAACBAIrjOQSlSea19bExXBMBKBvcLhBoVvNBjCppNzllipF" "W4jgIOMcNanULRrZGjkOKat6MWJNetSbV1E6IOFDQ16rQgsh/OvYU9XhzM8seLa" "A21VszZuhIV7/2DE3vxu7B54zVzueG1O1Deq6goQCRGWBUnqO2yluJiG4HzrnDa" @@ -69,12 +71,12 @@ VALID_CONTENT = { "JNDnIqDHxTkc6LY2vu8Y2pQ3/bVnllZZOda2oD5HQ7ovygQa6CH+fbaZHbdDUX/" "5z7u2rVAlDw==" ), - 'ecdsa': ( + "ecdsa": ( "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBITrGBB3cgJ" "J7fPxvtMW9H3oRisNpJ3OAslxZeyP7I0A9BPAW0RQIwHVtVnM7zrp4nI+JLZov/" "Ql7lc2leWL7CY=" ), - 'rsa': ( + "rsa": ( "AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5oz" "emNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbD" "c1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q" @@ -82,11 +84,10 @@ VALID_CONTENT = { "YWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07" "/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw==" ), - 'ed25519': ( - "AAAAC3NzaC1lZDI1NTE5AAAAIA1J77+CrJ8p6/vWCEzuylqJNMHUP/XmeYyGVWb" - "8lnDd" + "ed25519": ( + "AAAAC3NzaC1lZDI1NTE5AAAAIA1J77+CrJ8p6/vWCEzuylqJNMHUP/XmeYyGVWb8lnDd" ), - 'ecdsa-sha2-nistp256-cert-v01@openssh.com': ( + "ecdsa-sha2-nistp256-cert-v01@openssh.com": ( "AAAAKGVjZHNhLXNoYTItbmlzdHAyNTYtY2VydC12MDFAb3BlbnNzaC5jb20AAAA" "gQIfwT/+UX68/hlKsdKuaOuAVB6ftTg03SlP/uH4OBEwAAAAIbmlzdHAyNTYAAA" "BBBEjA0gjJmPM6La3sXyfNlnjilvvGY6I2M8SvJj4o3X/46wcUbPWTaj4RF3EXw" @@ -101,12 +102,12 @@ VALID_CONTENT = { "2tM3QXkDcwdP0SxSEW5yy4XV5oAAAAhANNMm1cdVlAt3hmycQgdD82zPlg5YvVO" "iN0SQTbgVD8i" ), - 'ecdsa-sha2-nistp256': ( + "ecdsa-sha2-nistp256": ( "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEjA0gjJmPM" "6La3sXyfNlnjilvvGY6I2M8SvJj4o3X/46wcUbPWTaj4RF3EXwHvNxplYBwdPlk" "2zEecvf9Cs2BM=" ), - 'ecdsa-sha2-nistp384-cert-v01@openssh.com': ( + "ecdsa-sha2-nistp384-cert-v01@openssh.com": ( "AAAAKGVjZHNhLXNoYTItbmlzdHAzODQtY2VydC12MDFAb3BlbnNzaC5jb20AAAA" "grnSvDsK1EnCZndO1IyGWcGkVgVSkPWi/XO2ybPFyLVUAAAAIbmlzdHAzODQAAA" "BhBAaYSQs+8TT0Tzciy0dorwhur6yzOGUrYQ6ueUQYWbE7eNdHmhsVrlpGPgSaY" @@ -123,12 +124,12 @@ VALID_CONTENT = { "RVYqYQgAAADAiit0UCMDAUbjD+R2x4LvU3x/t8G3sdqDLRNfMRpjZpvcS8AwC+Y" "VFVSQNn0AyzW0=" ), - 'ecdsa-sha2-nistp384': ( + "ecdsa-sha2-nistp384": ( 
"AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBAaYSQs+8TT" "0Tzciy0dorwhur6yzOGUrYQ6ueUQYWbE7eNdHmhsVrlpGPgSaYByhXtAJiPOMqL" "U5h0eb3sCtM3ek4NvjXFTGTqPrrxJI6q0OsgrtkGE7UM9ZsfMm7q6BOA==" ), - 'ecdsa-sha2-nistp521-cert-v01@openssh.com': ( + "ecdsa-sha2-nistp521-cert-v01@openssh.com": ( "AAAAKGVjZHNhLXNoYTItbmlzdHA1MjEtY2VydC12MDFAb3BlbnNzaC5jb20AAAA" "gGmRzkkMvRFk1V5U3m3mQ2nfW20SJVXk1NKnT5iZGDcEAAAAIbmlzdHA1MjEAAA" "CFBAHosAOHAI1ZkerbKYQ72S6uit1u77PCj/OalZtXgsxv0TTAZB273puG2X94C" @@ -147,13 +148,13 @@ VALID_CONTENT = { "AAAQgEzkIpX3yKXPaPcK17mNx40ujEDitm4ARmbhAge0sFhZtf7YIgI55b6vkI8" "JvMJkzQCBF1cpNOaIpVh1nFZNBphMQ==" ), - 'ecdsa-sha2-nistp521': ( + "ecdsa-sha2-nistp521": ( "AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBAHosAOHAI1" "ZkerbKYQ72S6uit1u77PCj/OalZtXgsxv0TTAZB273puG2X94CQ8yyNHcby87zF" "ZHdv5BSKyZ/cyREAAeiAcSakop9VS3+bUfZpEIqwBZXarwUjnRnxprkcQ0rfCCd" "agkGZr/OA7DemK2D8tKLTHsKoEEWNImo6/pXDkFxA==" ), - 'sk-ecdsa-sha2-nistp256-cert-v01@openssh.com': ( + "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com": ( "AAAAIHNzaC1lZDI1NTE5LWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIIxzuxl4z3u" "wAIslne8Huft+1n1IhHAlNbWZkQyyECCGAAAAIFOG6kY7Rf4UtCFvPwKgo/BztX" "ck2xC4a2WyA34XtIwZAAAAAAAAAAgAAAACAAAABmp1bGl1cwAAABIAAAAFaG9zd" @@ -162,12 +163,12 @@ VALID_CONTENT = { "AAFMAAAALc3NoLWVkMjU1MTkAAABABGTn+Bmz86Ajk+iqKCSdP5NClsYzn4alJd" "0V5bizhP0Kumc/HbqQfSt684J1WdSzih+EjvnTgBhK9jTBKb90AQ==" ), - 'sk-ecdsa-sha2-nistp256@openssh.com': ( + "sk-ecdsa-sha2-nistp256@openssh.com": ( "AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHA" "yNTYAAABBBIELQJ2DgvaX1yQlKFokfWM2suuaCFI2qp0eJodHyg6O4ifxc3XpRK" "d1OS8dNYQtE/YjdXSrA+AOnMF5ns2Nkx4AAAAEc3NoOg==" ), - 'sk-ssh-ed25519-cert-v01@openssh.com': ( + "sk-ssh-ed25519-cert-v01@openssh.com": ( "AAAAIHNzaC1lZDI1NTE5LWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIIxzuxl4z3u" "wAIslne8Huft+1n1IhHAlNbWZkQyyECCGAAAAIFOG6kY7Rf4UtCFvPwKgo/BztX" "ck2xC4a2WyA34XtIwZAAAAAAAAAAgAAAACAAAABmp1bGl1cwAAABIAAAAFaG9zd" @@ -176,11 +177,11 @@ VALID_CONTENT = { "AAFMAAAALc3NoLWVkMjU1MTkAAABABGTn+Bmz86Ajk+iqKCSdP5NClsYzn4alJd" "0V5bizhP0Kumc/HbqQfSt684J1WdSzih+EjvnTgBhK9jTBKb90AQ==" ), - 'sk-ssh-ed25519@openssh.com': ( + "sk-ssh-ed25519@openssh.com": ( "AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAICFo/k5LU8863u66YC9" "eUO2170QduohPURkQnbLa/dczAAAABHNzaDo=" ), - 'ssh-dss-cert-v01@openssh.com': ( + "ssh-dss-cert-v01@openssh.com": ( "AAAAHHNzaC1kc3MtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgdTlbNU9Hn9Qng3F" "HxwH971bxCIoq1ern/QWFFDWXgmYAAACBAPqS600VGwdPAQC/p3f0uGyrLVql0c" "Fn1zYd/JGvtabKnIYjLaYprje/NcjwI3CZFJiz4Dp3S8kLs+X5/1DMn/Tg1Y4D4" @@ -197,7 +198,7 @@ VALID_CONTENT = { "+F7SMGQAAAFMAAAALc3NoLWVkMjU1MTkAAABAh/z1LIdNL1b66tQ8t9DY9BTB3B" "QKpTKmc7ezyFKLwl96yaIniZwD9Ticdbe/8i/Li3uCFE3EAt8NAIv9zff8Bg==" ), - 'ssh-dss': ( + "ssh-dss": ( "AAAAB3NzaC1kc3MAAACBAPqS600VGwdPAQC/p3f0uGyrLVql0cFn1zYd/JGvtab" "KnIYjLaYprje/NcjwI3CZFJiz4Dp3S8kLs+X5/1DMn/Tg1Y4D4yLB+6vCtHcJF7" "rVBFhvw/KZwc7G54ez3khyOtsg82fzpyOc8/mq+/+C5TMKO7DDjMF0k5emWKCsa" @@ -209,7 +210,7 @@ VALID_CONTENT = { "GIf95LiLSgaXMjko7joot+LK84ltLymwZ4QMnYjnZSSclf1UuyQMcUtb34+I0u9" "Ycnyhp2mSFsQt" ), - 'ssh-ed25519-cert-v01@openssh.com': ( + "ssh-ed25519-cert-v01@openssh.com": ( "AAAAIHNzaC1lZDI1NTE5LWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIIxzuxl4z3u" "wAIslne8Huft+1n1IhHAlNbWZkQyyECCGAAAAIFOG6kY7Rf4UtCFvPwKgo/BztX" "ck2xC4a2WyA34XtIwZAAAAAAAAAAgAAAACAAAABmp1bGl1cwAAABIAAAAFaG9zd" @@ -218,11 +219,10 @@ VALID_CONTENT = { "AAFMAAAALc3NoLWVkMjU1MTkAAABABGTn+Bmz86Ajk+iqKCSdP5NClsYzn4alJd" "0V5bizhP0Kumc/HbqQfSt684J1WdSzih+EjvnTgBhK9jTBKb90AQ==" ), - 'ssh-ed25519': ( - 
"AAAAC3NzaC1lZDI1NTE5AAAAIFOG6kY7Rf4UtCFvPwKgo/BztXck2xC4a2WyA34" - "XtIwZ" + "ssh-ed25519": ( + "AAAAC3NzaC1lZDI1NTE5AAAAIFOG6kY7Rf4UtCFvPwKgo/BztXck2xC4a2WyA34XtIwZ" ), - 'ssh-rsa-cert-v01@openssh.com': ( + "ssh-rsa-cert-v01@openssh.com": ( "AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAg98LhS2EHxLOWCLo" "pZPwHdg/RJXusnkOqQXSc9R7aITkAAAADAQABAAAAgQDLV5lUTt7FrADseB/CGh" "EZzpoojjEW5y8+ePvLppmK3MmMI18ud6vxzpK3bwZLYkVSyfJYI0HmIuGhdu7yM" @@ -233,13 +233,13 @@ VALID_CONTENT = { "he0jBkAAABTAAAAC3NzaC1lZDI1NTE5AAAAQI3QGlUCzC07KorupxpDkkGy6tni" "aZ8EvBflzvv+itXWNchGvfUeHmVT6aX0sRqehdz/lR+GmXRoZBhofwh0qAM=" ), - 'ssh-rsa': ( + "ssh-rsa": ( "AAAAB3NzaC1yc2EAAAADAQABAAAAgQDLV5lUTt7FrADseB/CGhEZzpoojjEW5y8" "+ePvLppmK3MmMI18ud6vxzpK3bwZLYkVSyfJYI0HmIuGhdu7yMrW6wb84gbq8C3" "1Xoe9EORcIUuGSvDKdNSM1SjlhDquRblDFB8kToqXyx1lqrXecXylxIUOL0jE+u" "0rU1967pDJx+w==" ), - 'ssh-xmss-cert-v01@openssh.com': ( + "ssh-xmss-cert-v01@openssh.com": ( "AAAAHXNzaC14bXNzLWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIM2UD0IH+Igsekq" "xjTO5f36exX4WGRMCtDGPjwfbXblxAAAAFVhNU1NfU0hBMi0yNTZfVzE2X0gxMA" "AAAEDI83/K5JMOy0BMJgQypRdz35ApAnoQinMJ8ZMoZPaEJF8Z4rANQlfzaAXum" @@ -305,7 +305,7 @@ VALID_CONTENT = { "rNYClh8fQEQ8XuOCDpomMWu58YOTfbZNMDWs/Ou7RfCjX+VNwjPShDK9joMwWKc" "Jy3QalZbaoWtcyyvXxR2sqhVR9F7Cmasq4=" ), - 'ssh-xmss@openssh.com': ( + "ssh-xmss@openssh.com": ( "AAAAFHNzaC14bXNzQG9wZW5zc2guY29tAAAAFVhNU1NfU0hBMi0yNTZfVzE2X0g" "xMAAAAECqptWnK94d+Sj2xcdTu8gz+75lawZoLSZFqC5IhbYuT/Z3oBZCim6yt+" "HAmk6MKldl3Fg+74v4sR/SII0I0Jv/" @@ -316,19 +316,25 @@ KEY_TYPES = list(VALID_CONTENT.keys()) TEST_OPTIONS = ( "no-port-forwarding,no-agent-forwarding,no-X11-forwarding," - 'command="echo \'Please login as the user \"ubuntu\" rather than the' - 'user \"root\".\';echo;sleep 10"') + 'command="echo \'Please login as the user "ubuntu" rather than the' + 'user "root".\';echo;sleep 10"' +) class TestAuthKeyLineParser(test_helpers.CiTestCase): - def test_simple_parse(self): # test key line with common 3 fields (keytype, base64, comment) parser = ssh_util.AuthKeyLineParser() for ktype in KEY_TYPES: content = VALID_CONTENT[ktype] - comment = 'user-%s@host' % ktype - line = ' '.join((ktype, content, comment,)) + comment = "user-%s@host" % ktype + line = " ".join( + ( + ktype, + content, + comment, + ) + ) key = parser.parse(line) self.assertEqual(key.base64, content) @@ -341,7 +347,12 @@ class TestAuthKeyLineParser(test_helpers.CiTestCase): parser = ssh_util.AuthKeyLineParser() for ktype in KEY_TYPES: content = VALID_CONTENT[ktype] - line = ' '.join((ktype, content,)) + line = " ".join( + ( + ktype, + content, + ) + ) key = parser.parse(line) self.assertEqual(key.base64, content) @@ -355,8 +366,15 @@ class TestAuthKeyLineParser(test_helpers.CiTestCase): options = TEST_OPTIONS for ktype in KEY_TYPES: content = VALID_CONTENT[ktype] - comment = 'user-%s@host' % ktype - line = ' '.join((options, ktype, content, comment,)) + comment = "user-%s@host" % ktype + line = " ".join( + ( + options, + ktype, + content, + comment, + ) + ) key = parser.parse(line) self.assertEqual(key.base64, content) @@ -368,7 +386,7 @@ class TestAuthKeyLineParser(test_helpers.CiTestCase): # test key line with key type and base64 only parser = ssh_util.AuthKeyLineParser() - baseline = ' '.join(("rsa", VALID_CONTENT['rsa'], "user@host")) + baseline = " ".join(("rsa", VALID_CONTENT["rsa"], "user@host")) myopts = "no-port-forwarding,no-agent-forwarding" key = parser.parse("allowedopt" + " " + baseline) @@ -379,59 +397,62 @@ class TestAuthKeyLineParser(test_helpers.CiTestCase): def 
test_parse_invalid_keytype(self): parser = ssh_util.AuthKeyLineParser() - key = parser.parse(' '.join(["badkeytype", VALID_CONTENT['rsa']])) + key = parser.parse(" ".join(["badkeytype", VALID_CONTENT["rsa"]])) self.assertFalse(key.valid()) class TestUpdateAuthorizedKeys(test_helpers.CiTestCase): - def test_new_keys_replace(self): """new entries with the same base64 should replace old.""" orig_entries = [ - ' '.join(('rsa', VALID_CONTENT['rsa'], 'orig_comment1')), - ' '.join(('dsa', VALID_CONTENT['dsa'], 'orig_comment2'))] + " ".join(("rsa", VALID_CONTENT["rsa"], "orig_comment1")), + " ".join(("dsa", VALID_CONTENT["dsa"], "orig_comment2")), + ] new_entries = [ - ' '.join(('rsa', VALID_CONTENT['rsa'], 'new_comment1')), ] + " ".join(("rsa", VALID_CONTENT["rsa"], "new_comment1")), + ] - expected = '\n'.join([new_entries[0], orig_entries[1]]) + '\n' + expected = "\n".join([new_entries[0], orig_entries[1]]) + "\n" parser = ssh_util.AuthKeyLineParser() found = ssh_util.update_authorized_keys( [parser.parse(p) for p in orig_entries], - [parser.parse(p) for p in new_entries]) + [parser.parse(p) for p in new_entries], + ) self.assertEqual(expected, found) def test_new_invalid_keys_are_ignored(self): """new entries that are invalid should be skipped.""" orig_entries = [ - ' '.join(('rsa', VALID_CONTENT['rsa'], 'orig_comment1')), - ' '.join(('dsa', VALID_CONTENT['dsa'], 'orig_comment2'))] + " ".join(("rsa", VALID_CONTENT["rsa"], "orig_comment1")), + " ".join(("dsa", VALID_CONTENT["dsa"], "orig_comment2")), + ] new_entries = [ - ' '.join(('rsa', VALID_CONTENT['rsa'], 'new_comment1')), - 'xxx-invalid-thing1', - 'xxx-invalid-blob2' + " ".join(("rsa", VALID_CONTENT["rsa"], "new_comment1")), + "xxx-invalid-thing1", + "xxx-invalid-blob2", ] - expected = '\n'.join([new_entries[0], orig_entries[1]]) + '\n' + expected = "\n".join([new_entries[0], orig_entries[1]]) + "\n" parser = ssh_util.AuthKeyLineParser() found = ssh_util.update_authorized_keys( [parser.parse(p) for p in orig_entries], - [parser.parse(p) for p in new_entries]) + [parser.parse(p) for p in new_entries], + ) self.assertEqual(expected, found) class TestParseSSHConfig(test_helpers.CiTestCase): - def setUp(self): - self.load_file_patch = patch('cloudinit.ssh_util.util.load_file') + self.load_file_patch = patch("cloudinit.ssh_util.util.load_file") self.load_file = self.load_file_patch.start() - self.isfile_patch = patch('cloudinit.ssh_util.os.path.isfile') + self.isfile_patch = patch("cloudinit.ssh_util.os.path.isfile") self.isfile = self.isfile_patch.start() self.isfile.return_value = True @@ -442,60 +463,61 @@ class TestParseSSHConfig(test_helpers.CiTestCase): def test_not_a_file(self): self.isfile.return_value = False self.load_file.side_effect = IOError - ret = ssh_util.parse_ssh_config('not a real file') + ret = ssh_util.parse_ssh_config("not a real file") self.assertEqual([], ret) def test_empty_file(self): - self.load_file.return_value = '' - ret = ssh_util.parse_ssh_config('some real file') + self.load_file.return_value = "" + ret = ssh_util.parse_ssh_config("some real file") self.assertEqual([], ret) def test_comment_line(self): - comment_line = '# This is a comment' + comment_line = "# This is a comment" self.load_file.return_value = comment_line - ret = ssh_util.parse_ssh_config('some real file') + ret = ssh_util.parse_ssh_config("some real file") self.assertEqual(1, len(ret)) self.assertEqual(comment_line, ret[0].line) def test_blank_lines(self): - lines = ['', '\t', ' '] - self.load_file.return_value = '\n'.join(lines) - ret = 
ssh_util.parse_ssh_config('some real file') + lines = ["", "\t", " "] + self.load_file.return_value = "\n".join(lines) + ret = ssh_util.parse_ssh_config("some real file") self.assertEqual(len(lines), len(ret)) for line in ret: - self.assertEqual('', line.line) + self.assertEqual("", line.line) def test_lower_case_config(self): - self.load_file.return_value = 'foo bar' - ret = ssh_util.parse_ssh_config('some real file') + self.load_file.return_value = "foo bar" + ret = ssh_util.parse_ssh_config("some real file") self.assertEqual(1, len(ret)) - self.assertEqual('foo', ret[0].key) - self.assertEqual('bar', ret[0].value) + self.assertEqual("foo", ret[0].key) + self.assertEqual("bar", ret[0].value) def test_upper_case_config(self): - self.load_file.return_value = 'Foo Bar' - ret = ssh_util.parse_ssh_config('some real file') + self.load_file.return_value = "Foo Bar" + ret = ssh_util.parse_ssh_config("some real file") self.assertEqual(1, len(ret)) - self.assertEqual('foo', ret[0].key) - self.assertEqual('Bar', ret[0].value) + self.assertEqual("foo", ret[0].key) + self.assertEqual("Bar", ret[0].value) def test_lower_case_with_equals(self): - self.load_file.return_value = 'foo=bar' - ret = ssh_util.parse_ssh_config('some real file') + self.load_file.return_value = "foo=bar" + ret = ssh_util.parse_ssh_config("some real file") self.assertEqual(1, len(ret)) - self.assertEqual('foo', ret[0].key) - self.assertEqual('bar', ret[0].value) + self.assertEqual("foo", ret[0].key) + self.assertEqual("bar", ret[0].value) def test_upper_case_with_equals(self): - self.load_file.return_value = 'Foo=bar' - ret = ssh_util.parse_ssh_config('some real file') + self.load_file.return_value = "Foo=bar" + ret = ssh_util.parse_ssh_config("some real file") self.assertEqual(1, len(ret)) - self.assertEqual('foo', ret[0].key) - self.assertEqual('bar', ret[0].value) + self.assertEqual("foo", ret[0].key) + self.assertEqual("bar", ret[0].value) class TestUpdateSshConfigLines(test_helpers.CiTestCase): """Test the update_ssh_config_lines method.""" + exlines = [ "#PasswordAuthentication yes", "UsePAM yes", @@ -514,8 +536,8 @@ class TestUpdateSshConfigLines(test_helpers.CiTestCase): def test_new_option_added(self): """A single update of non-existing option.""" lines = ssh_util.parse_ssh_config_lines(list(self.exlines)) - result = ssh_util.update_ssh_config_lines(lines, {'MyKey': 'MyVal'}) - self.assertEqual(['MyKey'], result) + result = ssh_util.update_ssh_config_lines(lines, {"MyKey": "MyVal"}) + self.assertEqual(["MyKey"], result) self.check_line(lines[-1], "MyKey", "MyVal") def test_commented_out_not_updated_but_appended(self): @@ -543,8 +565,12 @@ class TestUpdateSshConfigLines(test_helpers.CiTestCase): def test_multiple_updates_with_add(self): """Verify multiple updates some added some changed, some not.""" - updates = {"UsePAM": "no", "X11Forwarding": "no", "NewOpt": "newval", - "AcceptEnv": "LANG ADD LC_*"} + updates = { + "UsePAM": "no", + "X11Forwarding": "no", + "NewOpt": "newval", + "AcceptEnv": "LANG ADD LC_*", + } lines = ssh_util.parse_ssh_config_lines(list(self.exlines)) result = ssh_util.update_ssh_config_lines(lines, updates) self.assertEqual(set(["UsePAM", "NewOpt", "AcceptEnv"]), set(result)) @@ -569,7 +595,7 @@ class TestUpdateSshConfigLines(test_helpers.CiTestCase): class TestUpdateSshConfig(test_helpers.CiTestCase): - cfgdata = '\n'.join(["#Option val", "MyKey ORIG_VAL", ""]) + cfgdata = "\n".join(["#Option val", "MyKey ORIG_VAL", ""]) def test_modified(self): mycfg = self.tmp_path("ssh_config_1") @@ -579,7 
+605,7 @@ class TestUpdateSshConfig(test_helpers.CiTestCase): found = util.load_file(mycfg) self.assertEqual(self.cfgdata.replace("ORIG_VAL", "NEW_VAL"), found) # assert there is a newline at end of file (LP: #1677205) - self.assertEqual('\n', found[-1]) + self.assertEqual("\n", found[-1]) def test_not_modified(self): mycfg = self.tmp_path("ssh_config_2") @@ -596,72 +622,100 @@ class TestBasicAuthorizedKeyParse(test_helpers.CiTestCase): self.assertEqual( ["/opt/bobby/keys"], ssh_util.render_authorizedkeysfile_paths( - "/opt/%u/keys", "/home/bobby", "bobby")) + "/opt/%u/keys", "/home/bobby", "bobby" + ), + ) def test_user_file(self): self.assertEqual( ["/opt/bobby"], ssh_util.render_authorizedkeysfile_paths( - "/opt/%u", "/home/bobby", "bobby")) + "/opt/%u", "/home/bobby", "bobby" + ), + ) def test_user_file2(self): self.assertEqual( ["/opt/bobby/bobby"], ssh_util.render_authorizedkeysfile_paths( - "/opt/%u/%u", "/home/bobby", "bobby")) + "/opt/%u/%u", "/home/bobby", "bobby" + ), + ) def test_multiple(self): self.assertEqual( ["/keys/path1", "/keys/path2"], ssh_util.render_authorizedkeysfile_paths( - "/keys/path1 /keys/path2", "/home/bobby", "bobby")) + "/keys/path1 /keys/path2", "/home/bobby", "bobby" + ), + ) def test_multiple2(self): self.assertEqual( ["/keys/path1", "/keys/bobby"], ssh_util.render_authorizedkeysfile_paths( - "/keys/path1 /keys/%u", "/home/bobby", "bobby")) + "/keys/path1 /keys/%u", "/home/bobby", "bobby" + ), + ) def test_relative(self): self.assertEqual( ["/home/bobby/.secret/keys"], ssh_util.render_authorizedkeysfile_paths( - ".secret/keys", "/home/bobby", "bobby")) + ".secret/keys", "/home/bobby", "bobby" + ), + ) def test_home(self): self.assertEqual( ["/homedirs/bobby/.keys"], ssh_util.render_authorizedkeysfile_paths( - "%h/.keys", "/homedirs/bobby", "bobby")) + "%h/.keys", "/homedirs/bobby", "bobby" + ), + ) def test_all(self): self.assertEqual( - ["/homedirs/bobby/.keys", "/homedirs/bobby/.secret/keys", - "/keys/path1", "/opt/bobby/keys"], + [ + "/homedirs/bobby/.keys", + "/homedirs/bobby/.secret/keys", + "/keys/path1", + "/opt/bobby/keys", + ], ssh_util.render_authorizedkeysfile_paths( "%h/.keys .secret/keys /keys/path1 /opt/%u/keys", - "/homedirs/bobby", "bobby")) + "/homedirs/bobby", + "bobby", + ), + ) class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): - - def create_fake_users(self, names, mock_permissions, - m_get_group, m_get_owner, m_get_permissions, - m_getpwnam, users): + def create_fake_users( + self, + names, + mock_permissions, + m_get_group, + m_get_owner, + m_get_permissions, + m_getpwnam, + users, + ): homes = [] - root = '/tmp/root' + root = "/tmp/root" fpw = FakePwEnt(pw_name="root", pw_dir=root) users["root"] = fpw for name in names: - home = '/tmp/home/' + name + home = "/tmp/home/" + name fpw = FakePwEnt(pw_name=name, pw_dir=home) users[name] = fpw homes.append(home) m_get_permissions.side_effect = partial( - mock_get_permissions, mock_permissions) + mock_get_permissions, mock_permissions + ) m_get_owner.side_effect = partial(mock_get_owner, mock_permissions) m_get_group.side_effect = partial(mock_get_group, mock_permissions) m_getpwnam.side_effect = partial(mock_getpwnam, users) @@ -676,23 +730,24 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): return authorized_keys def create_global_authorized_file(self, filename, content_key, keys): - authorized_keys = self.tmp_path(filename, dir='/tmp') + authorized_keys = self.tmp_path(filename, dir="/tmp") util.write_file(authorized_keys, VALID_CONTENT[content_key]) 
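# The TestBasicAuthorizedKeyParse cases above fix the token-expansion rules
# for sshd's AuthorizedKeysFile value: %u expands to the username, %h to the
# home directory, relative paths are joined onto the home directory, and
# whitespace separates multiple entries. For example (arguments and expected
# result taken from test_all):
from cloudinit import ssh_util

paths = ssh_util.render_authorizedkeysfile_paths(
    "%h/.keys .secret/keys /keys/path1 /opt/%u/keys", "/homedirs/bobby", "bobby"
)
# -> ["/homedirs/bobby/.keys", "/homedirs/bobby/.secret/keys",
#     "/keys/path1", "/opt/bobby/keys"]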
keys[authorized_keys] = content_key return authorized_keys def create_sshd_config(self, authorized_keys_files): - sshd_config = self.tmp_path('sshd_config', dir="/tmp") + sshd_config = self.tmp_path("sshd_config", dir="/tmp") util.write_file( - sshd_config, - "AuthorizedKeysFile " + authorized_keys_files + sshd_config, "AuthorizedKeysFile " + authorized_keys_files ) return sshd_config - def execute_and_check(self, user, sshd_config, solution, keys, - delete_keys=True): + def execute_and_check( + self, user, sshd_config, solution, keys, delete_keys=True + ): (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( - user, sshd_config) + user, sshd_config + ) content = ssh_util.update_authorized_keys(auth_key_entries, []) self.assertEqual(auth_key_fn, solution) @@ -712,30 +767,35 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): def test_single_user_two_local_files( self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam ): - user_bobby = 'bobby' + user_bobby = "bobby" keys = {} users = {} mock_permissions = { - '/tmp/home/bobby': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh/user_keys': ('bobby', 'bobby', 0o600), - '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), + "/tmp/home/bobby": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh/user_keys": ("bobby", "bobby", 0o600), + "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600), } homes = self.create_fake_users( - [user_bobby], mock_permissions, m_get_group, m_get_owner, - m_get_permissions, m_getpwnam, users + [user_bobby], + mock_permissions, + m_get_group, + m_get_owner, + m_get_permissions, + m_getpwnam, + users, ) home = homes[0] # /tmp/home/bobby/.ssh/authorized_keys = rsa authorized_keys = self.create_user_authorized_file( - home, 'authorized_keys', 'rsa', keys + home, "authorized_keys", "rsa", keys ) # /tmp/home/bobby/.ssh/user_keys = dsa user_keys = self.create_user_authorized_file( - home, 'user_keys', 'dsa', keys + home, "user_keys", "dsa", keys ) # /tmp/sshd_config @@ -751,30 +811,35 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): def test_single_user_two_local_files_inverted( self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam ): - user_bobby = 'bobby' + user_bobby = "bobby" keys = {} users = {} mock_permissions = { - '/tmp/home/bobby': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh/user_keys': ('bobby', 'bobby', 0o600), - '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), + "/tmp/home/bobby": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh/user_keys": ("bobby", "bobby", 0o600), + "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600), } homes = self.create_fake_users( - [user_bobby], mock_permissions, m_get_group, m_get_owner, - m_get_permissions, m_getpwnam, users + [user_bobby], + mock_permissions, + m_get_group, + m_get_owner, + m_get_permissions, + m_getpwnam, + users, ) home = homes[0] # /tmp/home/bobby/.ssh/authorized_keys = rsa authorized_keys = self.create_user_authorized_file( - home, 'authorized_keys', 'rsa', keys + home, "authorized_keys", "rsa", keys ) # /tmp/home/bobby/.ssh/user_keys = dsa user_keys = self.create_user_authorized_file( - home, 'user_keys', 'dsa', keys + home, "user_keys", "dsa", keys ) # /tmp/sshd_config @@ -790,38 +855,46 @@ class 
TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): def test_single_user_local_global_files( self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam ): - user_bobby = 'bobby' + user_bobby = "bobby" keys = {} users = {} mock_permissions = { - '/tmp/home/bobby': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh/user_keys': ('bobby', 'bobby', 0o600), - '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), + "/tmp/home/bobby": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh/user_keys": ("bobby", "bobby", 0o600), + "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600), } homes = self.create_fake_users( - [user_bobby], mock_permissions, m_get_group, m_get_owner, - m_get_permissions, m_getpwnam, users + [user_bobby], + mock_permissions, + m_get_group, + m_get_owner, + m_get_permissions, + m_getpwnam, + users, ) home = homes[0] # /tmp/home/bobby/.ssh/authorized_keys = rsa authorized_keys = self.create_user_authorized_file( - home, 'authorized_keys', 'rsa', keys + home, "authorized_keys", "rsa", keys ) # /tmp/home/bobby/.ssh/user_keys = dsa user_keys = self.create_user_authorized_file( - home, 'user_keys', 'dsa', keys + home, "user_keys", "dsa", keys ) authorized_keys_global = self.create_global_authorized_file( - 'etc/ssh/authorized_keys', 'ecdsa', keys + "etc/ssh/authorized_keys", "ecdsa", keys ) - options = "%s %s %s" % (authorized_keys_global, user_keys, - authorized_keys) + options = "%s %s %s" % ( + authorized_keys_global, + user_keys, + authorized_keys, + ) sshd_config = self.create_sshd_config(options) self.execute_and_check(user_bobby, sshd_config, user_keys, keys) @@ -833,38 +906,46 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): def test_single_user_local_global_files_inverted( self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam ): - user_bobby = 'bobby' + user_bobby = "bobby" keys = {} users = {} mock_permissions = { - '/tmp/home/bobby': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh/user_keys3': ('bobby', 'bobby', 0o600), - '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600), + "/tmp/home/bobby": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh/user_keys3": ("bobby", "bobby", 0o600), + "/tmp/home/bobby/.ssh/authorized_keys2": ("bobby", "bobby", 0o600), } homes = self.create_fake_users( - [user_bobby], mock_permissions, m_get_group, m_get_owner, - m_get_permissions, m_getpwnam, users + [user_bobby], + mock_permissions, + m_get_group, + m_get_owner, + m_get_permissions, + m_getpwnam, + users, ) home = homes[0] # /tmp/home/bobby/.ssh/authorized_keys = rsa authorized_keys = self.create_user_authorized_file( - home, 'authorized_keys2', 'rsa', keys + home, "authorized_keys2", "rsa", keys ) # /tmp/home/bobby/.ssh/user_keys = dsa user_keys = self.create_user_authorized_file( - home, 'user_keys3', 'dsa', keys + home, "user_keys3", "dsa", keys ) authorized_keys_global = self.create_global_authorized_file( - 'etc/ssh/authorized_keys', 'ecdsa', keys + "etc/ssh/authorized_keys", "ecdsa", keys ) - options = "%s %s %s" % (authorized_keys_global, authorized_keys, - user_keys) + options = "%s %s %s" % ( + authorized_keys_global, + authorized_keys, + user_keys, + ) sshd_config = self.create_sshd_config(options) self.execute_and_check(user_bobby, sshd_config, authorized_keys, keys) @@ -876,24 +957,29 @@ 
class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): def test_single_user_global_file( self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam ): - user_bobby = 'bobby' + user_bobby = "bobby" keys = {} users = {} mock_permissions = { - '/tmp/home/bobby': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), + "/tmp/home/bobby": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600), } homes = self.create_fake_users( - [user_bobby], mock_permissions, m_get_group, m_get_owner, - m_get_permissions, m_getpwnam, users + [user_bobby], + mock_permissions, + m_get_group, + m_get_owner, + m_get_permissions, + m_getpwnam, + users, ) home = homes[0] # /tmp/etc/ssh/authorized_keys = rsa authorized_keys_global = self.create_global_authorized_file( - 'etc/ssh/authorized_keys', 'rsa', keys + "etc/ssh/authorized_keys", "rsa", keys ) options = "%s" % authorized_keys_global @@ -912,31 +998,36 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): keys = {} users = {} mock_permissions = { - '/tmp/home/bobby': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), - '/tmp/home/suzie': ('suzie', 'suzie', 0o700), - '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), - '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600), + "/tmp/home/bobby": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600), + "/tmp/home/suzie": ("suzie", "suzie", 0o700), + "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700), + "/tmp/home/suzie/.ssh/authorized_keys": ("suzie", "suzie", 0o600), } - user_bobby = 'bobby' - user_suzie = 'suzie' + user_bobby = "bobby" + user_suzie = "suzie" homes = self.create_fake_users( - [user_bobby, user_suzie], mock_permissions, m_get_group, - m_get_owner, m_get_permissions, m_getpwnam, users + [user_bobby, user_suzie], + mock_permissions, + m_get_group, + m_get_owner, + m_get_permissions, + m_getpwnam, + users, ) home_bobby = homes[0] home_suzie = homes[1] # /tmp/home/bobby/.ssh/authorized_keys = rsa authorized_keys = self.create_user_authorized_file( - home_bobby, 'authorized_keys', 'rsa', keys + home_bobby, "authorized_keys", "rsa", keys ) # /tmp/home/suzie/.ssh/authorized_keys = rsa authorized_keys2 = self.create_user_authorized_file( - home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys + home_suzie, "authorized_keys", "ssh-xmss@openssh.com", keys ) options = ".ssh/authorized_keys" @@ -957,31 +1048,36 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): keys = {} users = {} mock_permissions = { - '/tmp/home/bobby': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600), - '/tmp/home/suzie': ('suzie', 'suzie', 0o700), - '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), - '/tmp/home/suzie/.ssh/authorized_keys2': ('suzie', 'suzie', 0o600), + "/tmp/home/bobby": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh/authorized_keys2": ("bobby", "bobby", 0o600), + "/tmp/home/suzie": ("suzie", "suzie", 0o700), + "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700), + "/tmp/home/suzie/.ssh/authorized_keys2": ("suzie", "suzie", 0o600), } 
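The mock_permissions dicts above drive a common fixture pattern: cloudinit.util.get_owner, get_group, and get_permissions are patched so each fake path reports a fixed (owner, group, mode) triple without touching the real filesystem. A minimal sketch of the idea, assuming cloud-init is importable (the real wiring lives in create_fake_users):

    from unittest import mock

    # path -> (owner, group, mode), as in the tests' mock_permissions dicts
    perms = {
        "/tmp/home/bobby": ("bobby", "bobby", 0o700),
        "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700),
    }

    with mock.patch(
        "cloudinit.util.get_owner", side_effect=lambda p: perms[p][0]
    ), mock.patch(
        "cloudinit.util.get_group", side_effect=lambda p: perms[p][1]
    ), mock.patch(
        "cloudinit.util.get_permissions", side_effect=lambda p: perms[p][2]
    ):
        pass  # code under test now sees the fake ownership and modes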
- user_bobby = 'bobby' - user_suzie = 'suzie' + user_bobby = "bobby" + user_suzie = "suzie" homes = self.create_fake_users( - [user_bobby, user_suzie], mock_permissions, m_get_group, - m_get_owner, m_get_permissions, m_getpwnam, users + [user_bobby, user_suzie], + mock_permissions, + m_get_group, + m_get_owner, + m_get_permissions, + m_getpwnam, + users, ) home_bobby = homes[0] home_suzie = homes[1] # /tmp/home/bobby/.ssh/authorized_keys2 = rsa authorized_keys = self.create_user_authorized_file( - home_bobby, 'authorized_keys2', 'rsa', keys + home_bobby, "authorized_keys2", "rsa", keys ) # /tmp/home/suzie/.ssh/authorized_keys2 = rsa authorized_keys2 = self.create_user_authorized_file( - home_suzie, 'authorized_keys2', 'ssh-xmss@openssh.com', keys + home_suzie, "authorized_keys2", "ssh-xmss@openssh.com", keys ) options = ".ssh/authorized_keys2" @@ -1002,46 +1098,53 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): keys = {} users = {} mock_permissions = { - '/tmp/home/bobby': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600), - '/tmp/home/bobby/.ssh/user_keys3': ('bobby', 'bobby', 0o600), - '/tmp/home/suzie': ('suzie', 'suzie', 0o700), - '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), - '/tmp/home/suzie/.ssh/authorized_keys2': ('suzie', 'suzie', 0o600), - '/tmp/home/suzie/.ssh/user_keys3': ('suzie', 'suzie', 0o600), + "/tmp/home/bobby": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh/authorized_keys2": ("bobby", "bobby", 0o600), + "/tmp/home/bobby/.ssh/user_keys3": ("bobby", "bobby", 0o600), + "/tmp/home/suzie": ("suzie", "suzie", 0o700), + "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700), + "/tmp/home/suzie/.ssh/authorized_keys2": ("suzie", "suzie", 0o600), + "/tmp/home/suzie/.ssh/user_keys3": ("suzie", "suzie", 0o600), } - user_bobby = 'bobby' - user_suzie = 'suzie' + user_bobby = "bobby" + user_suzie = "suzie" homes = self.create_fake_users( - [user_bobby, user_suzie], mock_permissions, m_get_group, - m_get_owner, m_get_permissions, m_getpwnam, users + [user_bobby, user_suzie], + mock_permissions, + m_get_group, + m_get_owner, + m_get_permissions, + m_getpwnam, + users, ) home_bobby = homes[0] home_suzie = homes[1] # /tmp/home/bobby/.ssh/authorized_keys2 = rsa self.create_user_authorized_file( - home_bobby, 'authorized_keys2', 'rsa', keys + home_bobby, "authorized_keys2", "rsa", keys ) # /tmp/home/bobby/.ssh/user_keys3 = dsa user_keys = self.create_user_authorized_file( - home_bobby, 'user_keys3', 'dsa', keys + home_bobby, "user_keys3", "dsa", keys ) # /tmp/home/suzie/.ssh/authorized_keys2 = rsa authorized_keys2 = self.create_user_authorized_file( - home_suzie, 'authorized_keys2', 'ssh-xmss@openssh.com', keys + home_suzie, "authorized_keys2", "ssh-xmss@openssh.com", keys ) # /tmp/etc/ssh/authorized_keys = ecdsa authorized_keys_global = self.create_global_authorized_file( - 'etc/ssh/authorized_keys2', 'ecdsa', keys + "etc/ssh/authorized_keys2", "ecdsa", keys ) - options = "%s %s %%h/.ssh/authorized_keys2" % \ - (authorized_keys_global, user_keys) + options = "%s %s %%h/.ssh/authorized_keys2" % ( + authorized_keys_global, + user_keys, + ) sshd_config = self.create_sshd_config(options) self.execute_and_check( @@ -1055,50 +1158,62 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): @patch("cloudinit.util.get_owner") @patch("cloudinit.util.get_group") def test_two_users_local_global_files_badguy( - 
self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, - m_get_user_groups + self, + m_get_group, + m_get_owner, + m_get_permissions, + m_getpwnam, + m_get_user_groups, ): keys = {} users = {} mock_permissions = { - '/tmp/home/bobby': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600), - '/tmp/home/bobby/.ssh/user_keys3': ('bobby', 'bobby', 0o600), - '/tmp/home/badguy': ('root', 'root', 0o755), - '/tmp/home/badguy/home': ('root', 'root', 0o755), - '/tmp/home/badguy/home/bobby': ('root', 'root', 0o655), + "/tmp/home/bobby": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh/authorized_keys2": ("bobby", "bobby", 0o600), + "/tmp/home/bobby/.ssh/user_keys3": ("bobby", "bobby", 0o600), + "/tmp/home/badguy": ("root", "root", 0o755), + "/tmp/home/badguy/home": ("root", "root", 0o755), + "/tmp/home/badguy/home/bobby": ("root", "root", 0o655), } - user_bobby = 'bobby' - user_badguy = 'badguy' + user_bobby = "bobby" + user_badguy = "badguy" home_bobby, *_ = self.create_fake_users( - [user_bobby, user_badguy], mock_permissions, m_get_group, - m_get_owner, m_get_permissions, m_getpwnam, users + [user_bobby, user_badguy], + mock_permissions, + m_get_group, + m_get_owner, + m_get_permissions, + m_getpwnam, + users, ) m_get_user_groups.side_effect = mock_get_user_groups # /tmp/home/bobby/.ssh/authorized_keys2 = rsa authorized_keys = self.create_user_authorized_file( - home_bobby, 'authorized_keys2', 'rsa', keys + home_bobby, "authorized_keys2", "rsa", keys ) # /tmp/home/bobby/.ssh/user_keys3 = dsa user_keys = self.create_user_authorized_file( - home_bobby, 'user_keys3', 'dsa', keys + home_bobby, "user_keys3", "dsa", keys ) # /tmp/home/badguy/home/bobby = "" - authorized_keys2 = self.tmp_path('home/bobby', dir="/tmp/home/badguy") - util.write_file(authorized_keys2, '') + authorized_keys2 = self.tmp_path("home/bobby", dir="/tmp/home/badguy") + util.write_file(authorized_keys2, "") # /tmp/etc/ssh/authorized_keys = ecdsa authorized_keys_global = self.create_global_authorized_file( - 'etc/ssh/authorized_keys2', 'ecdsa', keys + "etc/ssh/authorized_keys2", "ecdsa", keys ) # /tmp/sshd_config - options = "%s %%h/.ssh/authorized_keys2 %s %s" % \ - (authorized_keys2, authorized_keys_global, user_keys) + options = "%s %%h/.ssh/authorized_keys2 %s %s" % ( + authorized_keys2, + authorized_keys_global, + user_keys, + ) sshd_config = self.create_sshd_config(options) self.execute_and_check( @@ -1114,33 +1229,43 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): @patch("cloudinit.util.get_owner") @patch("cloudinit.util.get_group") def test_two_users_unaccessible_file( - self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, - m_get_user_groups + self, + m_get_group, + m_get_owner, + m_get_permissions, + m_getpwnam, + m_get_user_groups, ): keys = {} users = {} mock_permissions = { - '/tmp/home/bobby': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), - - '/tmp/etc': ('root', 'root', 0o755), - '/tmp/etc/ssh': ('root', 'root', 0o755), - '/tmp/etc/ssh/userkeys': ('root', 'root', 0o700), - '/tmp/etc/ssh/userkeys/bobby': ('bobby', 'bobby', 0o600), - '/tmp/etc/ssh/userkeys/badguy': ('badguy', 'badguy', 0o600), - - '/tmp/home/badguy': ('badguy', 'badguy', 0o700), - '/tmp/home/badguy/.ssh': ('badguy', 'badguy', 0o700), - 
'/tmp/home/badguy/.ssh/authorized_keys': - ('badguy', 'badguy', 0o600), + "/tmp/home/bobby": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600), + "/tmp/etc": ("root", "root", 0o755), + "/tmp/etc/ssh": ("root", "root", 0o755), + "/tmp/etc/ssh/userkeys": ("root", "root", 0o700), + "/tmp/etc/ssh/userkeys/bobby": ("bobby", "bobby", 0o600), + "/tmp/etc/ssh/userkeys/badguy": ("badguy", "badguy", 0o600), + "/tmp/home/badguy": ("badguy", "badguy", 0o700), + "/tmp/home/badguy/.ssh": ("badguy", "badguy", 0o700), + "/tmp/home/badguy/.ssh/authorized_keys": ( + "badguy", + "badguy", + 0o600, + ), } - user_bobby = 'bobby' - user_badguy = 'badguy' + user_bobby = "bobby" + user_badguy = "badguy" homes = self.create_fake_users( - [user_bobby, user_badguy], mock_permissions, m_get_group, - m_get_owner, m_get_permissions, m_getpwnam, users + [user_bobby, user_badguy], + mock_permissions, + m_get_group, + m_get_owner, + m_get_permissions, + m_getpwnam, + users, ) m_get_user_groups.side_effect = mock_get_user_groups home_bobby = homes[0] @@ -1148,22 +1273,22 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): # /tmp/home/bobby/.ssh/authorized_keys = rsa authorized_keys = self.create_user_authorized_file( - home_bobby, 'authorized_keys', 'rsa', keys + home_bobby, "authorized_keys", "rsa", keys ) # /tmp/etc/ssh/userkeys/bobby = dsa # assume here that we can bypass userkeys, despite permissions self.create_global_authorized_file( - 'etc/ssh/userkeys/bobby', 'dsa', keys + "etc/ssh/userkeys/bobby", "dsa", keys ) # /tmp/home/badguy/.ssh/authorized_keys = ssh-xmss@openssh.com authorized_keys2 = self.create_user_authorized_file( - home_badguy, 'authorized_keys', 'ssh-xmss@openssh.com', keys + home_badguy, "authorized_keys", "ssh-xmss@openssh.com", keys ) # /tmp/etc/ssh/userkeys/badguy = ecdsa self.create_global_authorized_file( - 'etc/ssh/userkeys/badguy', 'ecdsa', keys + "etc/ssh/userkeys/badguy", "ecdsa", keys ) # /tmp/sshd_config @@ -1183,33 +1308,43 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): @patch("cloudinit.util.get_owner") @patch("cloudinit.util.get_group") def test_two_users_accessible_file( - self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, - m_get_user_groups + self, + m_get_group, + m_get_owner, + m_get_permissions, + m_getpwnam, + m_get_user_groups, ): keys = {} users = {} mock_permissions = { - '/tmp/home/bobby': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), - - '/tmp/etc': ('root', 'root', 0o755), - '/tmp/etc/ssh': ('root', 'root', 0o755), - '/tmp/etc/ssh/userkeys': ('root', 'root', 0o755), - '/tmp/etc/ssh/userkeys/bobby': ('bobby', 'bobby', 0o600), - '/tmp/etc/ssh/userkeys/badguy': ('badguy', 'badguy', 0o600), - - '/tmp/home/badguy': ('badguy', 'badguy', 0o700), - '/tmp/home/badguy/.ssh': ('badguy', 'badguy', 0o700), - '/tmp/home/badguy/.ssh/authorized_keys': - ('badguy', 'badguy', 0o600), + "/tmp/home/bobby": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600), + "/tmp/etc": ("root", "root", 0o755), + "/tmp/etc/ssh": ("root", "root", 0o755), + "/tmp/etc/ssh/userkeys": ("root", "root", 0o755), + "/tmp/etc/ssh/userkeys/bobby": ("bobby", "bobby", 0o600), + "/tmp/etc/ssh/userkeys/badguy": ("badguy", "badguy", 0o600), + "/tmp/home/badguy": ("badguy", "badguy", 
0o700), + "/tmp/home/badguy/.ssh": ("badguy", "badguy", 0o700), + "/tmp/home/badguy/.ssh/authorized_keys": ( + "badguy", + "badguy", + 0o600, + ), } - user_bobby = 'bobby' - user_badguy = 'badguy' + user_bobby = "bobby" + user_badguy = "badguy" homes = self.create_fake_users( - [user_bobby, user_badguy], mock_permissions, m_get_group, - m_get_owner, m_get_permissions, m_getpwnam, users + [user_bobby, user_badguy], + mock_permissions, + m_get_group, + m_get_owner, + m_get_permissions, + m_getpwnam, + users, ) m_get_user_groups.side_effect = mock_get_user_groups home_bobby = homes[0] @@ -1217,22 +1352,22 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): # /tmp/home/bobby/.ssh/authorized_keys = rsa self.create_user_authorized_file( - home_bobby, 'authorized_keys', 'rsa', keys + home_bobby, "authorized_keys", "rsa", keys ) # /tmp/etc/ssh/userkeys/bobby = dsa # assume here that we can bypass userkeys, despite permissions authorized_keys = self.create_global_authorized_file( - 'etc/ssh/userkeys/bobby', 'dsa', keys + "etc/ssh/userkeys/bobby", "dsa", keys ) # /tmp/home/badguy/.ssh/authorized_keys = ssh-xmss@openssh.com self.create_user_authorized_file( - home_badguy, 'authorized_keys', 'ssh-xmss@openssh.com', keys + home_badguy, "authorized_keys", "ssh-xmss@openssh.com", keys ) # /tmp/etc/ssh/userkeys/badguy = ecdsa authorized_keys2 = self.create_global_authorized_file( - 'etc/ssh/userkeys/badguy', 'ecdsa', keys + "etc/ssh/userkeys/badguy", "ecdsa", keys ) # /tmp/sshd_config @@ -1252,26 +1387,34 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): @patch("cloudinit.util.get_owner") @patch("cloudinit.util.get_group") def test_two_users_hardcoded_single_user_file( - self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, - m_get_user_groups + self, + m_get_group, + m_get_owner, + m_get_permissions, + m_getpwnam, + m_get_user_groups, ): keys = {} users = {} mock_permissions = { - '/tmp/home/bobby': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), - - '/tmp/home/suzie': ('suzie', 'suzie', 0o700), - '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), - '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600), + "/tmp/home/bobby": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600), + "/tmp/home/suzie": ("suzie", "suzie", 0o700), + "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700), + "/tmp/home/suzie/.ssh/authorized_keys": ("suzie", "suzie", 0o600), } - user_bobby = 'bobby' - user_suzie = 'suzie' + user_bobby = "bobby" + user_suzie = "suzie" homes = self.create_fake_users( - [user_bobby, user_suzie], mock_permissions, m_get_group, - m_get_owner, m_get_permissions, m_getpwnam, users + [user_bobby, user_suzie], + mock_permissions, + m_get_group, + m_get_owner, + m_get_permissions, + m_getpwnam, + users, ) home_bobby = homes[0] home_suzie = homes[1] @@ -1279,12 +1422,12 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): # /tmp/home/bobby/.ssh/authorized_keys = rsa authorized_keys = self.create_user_authorized_file( - home_bobby, 'authorized_keys', 'rsa', keys + home_bobby, "authorized_keys", "rsa", keys ) # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com self.create_user_authorized_file( - home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys + home_suzie, "authorized_keys", "ssh-xmss@openssh.com", keys ) # /tmp/sshd_config @@ 
-1303,26 +1446,34 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): @patch("cloudinit.util.get_owner") @patch("cloudinit.util.get_group") def test_two_users_hardcoded_single_user_file_inverted( - self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, - m_get_user_groups + self, + m_get_group, + m_get_owner, + m_get_permissions, + m_getpwnam, + m_get_user_groups, ): keys = {} users = {} mock_permissions = { - '/tmp/home/bobby': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), - - '/tmp/home/suzie': ('suzie', 'suzie', 0o700), - '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), - '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600), + "/tmp/home/bobby": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600), + "/tmp/home/suzie": ("suzie", "suzie", 0o700), + "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700), + "/tmp/home/suzie/.ssh/authorized_keys": ("suzie", "suzie", 0o600), } - user_bobby = 'bobby' - user_suzie = 'suzie' + user_bobby = "bobby" + user_suzie = "suzie" homes = self.create_fake_users( - [user_bobby, user_suzie], mock_permissions, m_get_group, - m_get_owner, m_get_permissions, m_getpwnam, users + [user_bobby, user_suzie], + mock_permissions, + m_get_group, + m_get_owner, + m_get_permissions, + m_getpwnam, + users, ) home_bobby = homes[0] home_suzie = homes[1] @@ -1330,12 +1481,12 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): # /tmp/home/bobby/.ssh/authorized_keys = rsa self.create_user_authorized_file( - home_bobby, 'authorized_keys', 'rsa', keys + home_bobby, "authorized_keys", "rsa", keys ) # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com authorized_keys2 = self.create_user_authorized_file( - home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys + home_suzie, "authorized_keys", "ssh-xmss@openssh.com", keys ) # /tmp/sshd_config @@ -1354,26 +1505,34 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): @patch("cloudinit.util.get_owner") @patch("cloudinit.util.get_group") def test_two_users_hardcoded_user_files( - self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, - m_get_user_groups + self, + m_get_group, + m_get_owner, + m_get_permissions, + m_getpwnam, + m_get_user_groups, ): keys = {} users = {} mock_permissions = { - '/tmp/home/bobby': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), - '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), - - '/tmp/home/suzie': ('suzie', 'suzie', 0o700), - '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), - '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600), + "/tmp/home/bobby": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh": ("bobby", "bobby", 0o700), + "/tmp/home/bobby/.ssh/authorized_keys": ("bobby", "bobby", 0o600), + "/tmp/home/suzie": ("suzie", "suzie", 0o700), + "/tmp/home/suzie/.ssh": ("suzie", "suzie", 0o700), + "/tmp/home/suzie/.ssh/authorized_keys": ("suzie", "suzie", 0o600), } - user_bobby = 'bobby' - user_suzie = 'suzie' + user_bobby = "bobby" + user_suzie = "suzie" homes = self.create_fake_users( - [user_bobby, user_suzie], mock_permissions, m_get_group, - m_get_owner, m_get_permissions, m_getpwnam, users + [user_bobby, user_suzie], + mock_permissions, + m_get_group, + m_get_owner, + m_get_permissions, + m_getpwnam, + users, ) home_bobby = homes[0] home_suzie = 
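The "hardcoded" cases in this stretch rely on an sshd detail: an absolute AuthorizedKeysFile entry carries no %h or %u token, so every user is checked against the same file, while tokenised or relative entries resolve per user. An illustrative expansion (cloud-init's real logic lives in ssh_util; this function is hypothetical):

    def keyfiles_for(user, home, option):
        # expand sshd-style tokens; relative entries are home-relative
        out = []
        for tok in option.split():
            tok = tok.replace("%h", home).replace("%u", user)
            if not tok.startswith("/"):
                tok = home + "/" + tok
            out.append(tok)
        return out

    # hardcoded absolute path: suzie is checked against bobby's file
    print(keyfiles_for("suzie", "/tmp/home/suzie",
                       "/tmp/home/bobby/.ssh/authorized_keys"))
    # tokenised path: resolves per user
    print(keyfiles_for("suzie", "/tmp/home/suzie", "%h/.ssh/authorized_keys2"))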
homes[1] @@ -1381,22 +1540,25 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): # /tmp/home/bobby/.ssh/authorized_keys = rsa authorized_keys = self.create_user_authorized_file( - home_bobby, 'authorized_keys', 'rsa', keys + home_bobby, "authorized_keys", "rsa", keys ) # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com authorized_keys2 = self.create_user_authorized_file( - home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys + home_suzie, "authorized_keys", "ssh-xmss@openssh.com", keys ) # /tmp/etc/ssh/authorized_keys = ecdsa authorized_keys_global = self.create_global_authorized_file( - 'etc/ssh/authorized_keys', 'ecdsa', keys + "etc/ssh/authorized_keys", "ecdsa", keys ) # /tmp/sshd_config - options = "%s %s %s" % \ - (authorized_keys_global, authorized_keys, authorized_keys2) + options = "%s %s %s" % ( + authorized_keys_global, + authorized_keys, + authorized_keys2, + ) sshd_config = self.create_sshd_config(options) self.execute_and_check( @@ -1404,4 +1566,5 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): ) self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_stages.py b/tests/unittests/test_stages.py index a722f03f..be1a0787 100644 --- a/tests/unittests/test_stages.py +++ b/tests/unittests/test_stages.py @@ -6,28 +6,25 @@ import stat import pytest -from cloudinit import stages -from cloudinit import sources -from cloudinit.sources import NetworkConfigSource - +from cloudinit import sources, stages from cloudinit.event import EventScope, EventType +from cloudinit.sources import NetworkConfigSource from cloudinit.util import write_file - from tests.unittests.helpers import CiTestCase, mock -TEST_INSTANCE_ID = 'i-testing' +TEST_INSTANCE_ID = "i-testing" class FakeDataSource(sources.DataSource): - - def __init__(self, paths=None, userdata=None, vendordata=None, - network_config=''): + def __init__( + self, paths=None, userdata=None, vendordata=None, network_config="" + ): super(FakeDataSource, self).__init__({}, None, paths=paths) - self.metadata = {'instance-id': TEST_INSTANCE_ID} + self.metadata = {"instance-id": TEST_INSTANCE_ID} self.userdata_raw = userdata self.vendordata_raw = vendordata self._network_config = None - if network_config: # Permit for None value to setup attribute + if network_config: # Permit for None value to setup attribute self._network_config = network_config @property @@ -47,9 +44,12 @@ class TestInit(CiTestCase): self.tmpdir = self.tmp_dir() self.init = stages.Init() # Setup fake Paths for Init to reference - self.init._cfg = {'system_info': { - 'distro': 'ubuntu', 'paths': {'cloud_dir': self.tmpdir, - 'run_dir': self.tmpdir}}} + self.init._cfg = { + "system_info": { + "distro": "ubuntu", + "paths": {"cloud_dir": self.tmpdir, "run_dir": self.tmpdir}, + } + } self.init.datasource = FakeDataSource(paths=self.init.paths) self._real_is_new_instance = self.init.is_new_instance self.init.is_new_instance = mock.Mock(return_value=True) @@ -57,199 +57,255 @@ class TestInit(CiTestCase): def test_wb__find_networking_config_disabled(self): """find_networking_config returns no config when disabled.""" disable_file = os.path.join( - self.init.paths.get_cpath('data'), 'upgraded-network') - write_file(disable_file, '') + self.init.paths.get_cpath("data"), "upgraded-network" + ) + write_file(disable_file, "") self.assertEqual( - (None, disable_file), - self.init._find_networking_config()) + (None, disable_file), self.init._find_networking_config() + 
) - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + @mock.patch("cloudinit.stages.cmdline.read_initramfs_config") + @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config") def test_wb__find_networking_config_disabled_by_kernel( - self, m_cmdline, m_initramfs): + self, m_cmdline, m_initramfs + ): """find_networking_config returns when disabled by kernel cmdline.""" - m_cmdline.return_value = {'config': 'disabled'} - m_initramfs.return_value = {'config': ['fake_initrd']} + m_cmdline.return_value = {"config": "disabled"} + m_initramfs.return_value = {"config": ["fake_initrd"]} self.assertEqual( (None, NetworkConfigSource.cmdline), - self.init._find_networking_config()) - self.assertEqual('DEBUG: network config disabled by cmdline\n', - self.logs.getvalue()) + self.init._find_networking_config(), + ) + self.assertEqual( + "DEBUG: network config disabled by cmdline\n", self.logs.getvalue() + ) - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + @mock.patch("cloudinit.stages.cmdline.read_initramfs_config") + @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config") def test_wb__find_networking_config_disabled_by_initrd( - self, m_cmdline, m_initramfs): + self, m_cmdline, m_initramfs + ): """find_networking_config returns when disabled by kernel cmdline.""" m_cmdline.return_value = {} - m_initramfs.return_value = {'config': 'disabled'} + m_initramfs.return_value = {"config": "disabled"} self.assertEqual( (None, NetworkConfigSource.initramfs), - self.init._find_networking_config()) - self.assertEqual('DEBUG: network config disabled by initramfs\n', - self.logs.getvalue()) + self.init._find_networking_config(), + ) + self.assertEqual( + "DEBUG: network config disabled by initramfs\n", + self.logs.getvalue(), + ) - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + @mock.patch("cloudinit.stages.cmdline.read_initramfs_config") + @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config") def test_wb__find_networking_config_disabled_by_datasrc( - self, m_cmdline, m_initramfs): + self, m_cmdline, m_initramfs + ): """find_networking_config returns when disabled by datasource cfg.""" m_cmdline.return_value = {} # Kernel doesn't disable networking m_initramfs.return_value = {} # initramfs doesn't disable networking - self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, - 'network': {}} # system config doesn't disable + self.init._cfg = { + "system_info": {"paths": {"cloud_dir": self.tmpdir}}, + "network": {}, + } # system config doesn't disable self.init.datasource = FakeDataSource( - network_config={'config': 'disabled'}) + network_config={"config": "disabled"} + ) + self.assertEqual( + (None, NetworkConfigSource.ds), self.init._find_networking_config() + ) self.assertEqual( - (None, NetworkConfigSource.ds), - self.init._find_networking_config()) - self.assertEqual('DEBUG: network config disabled by ds\n', - self.logs.getvalue()) + "DEBUG: network config disabled by ds\n", self.logs.getvalue() + ) - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + @mock.patch("cloudinit.stages.cmdline.read_initramfs_config") + @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config") def 
test_wb__find_networking_config_disabled_by_sysconfig( - self, m_cmdline, m_initramfs): + self, m_cmdline, m_initramfs + ): """find_networking_config returns when disabled by system config.""" m_cmdline.return_value = {} # Kernel doesn't disable networking m_initramfs.return_value = {} # initramfs doesn't disable networking - self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, - 'network': {'config': 'disabled'}} + self.init._cfg = { + "system_info": {"paths": {"cloud_dir": self.tmpdir}}, + "network": {"config": "disabled"}, + } self.assertEqual( (None, NetworkConfigSource.system_cfg), - self.init._find_networking_config()) - self.assertEqual('DEBUG: network config disabled by system_cfg\n', - self.logs.getvalue()) + self.init._find_networking_config(), + ) + self.assertEqual( + "DEBUG: network config disabled by system_cfg\n", + self.logs.getvalue(), + ) - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + @mock.patch("cloudinit.stages.cmdline.read_initramfs_config") + @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config") def test__find_networking_config_uses_datasrc_order( - self, m_cmdline, m_initramfs): + self, m_cmdline, m_initramfs + ): """find_networking_config should check sources in DS defined order""" # cmdline and initramfs, which would normally be preferred over other # sources, disable networking; in this case, though, the DS moves them # later so its own config is preferred - m_cmdline.return_value = {'config': 'disabled'} - m_initramfs.return_value = {'config': 'disabled'} + m_cmdline.return_value = {"config": "disabled"} + m_initramfs.return_value = {"config": "disabled"} - ds_net_cfg = {'config': {'needle': True}} + ds_net_cfg = {"config": {"needle": True}} self.init.datasource = FakeDataSource(network_config=ds_net_cfg) self.init.datasource.network_config_sources = [ - NetworkConfigSource.ds, NetworkConfigSource.system_cfg, - NetworkConfigSource.cmdline, NetworkConfigSource.initramfs] + NetworkConfigSource.ds, + NetworkConfigSource.system_cfg, + NetworkConfigSource.cmdline, + NetworkConfigSource.initramfs, + ] self.assertEqual( (ds_net_cfg, NetworkConfigSource.ds), - self.init._find_networking_config()) + self.init._find_networking_config(), + ) - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + @mock.patch("cloudinit.stages.cmdline.read_initramfs_config") + @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config") def test__find_networking_config_warns_if_datasrc_uses_invalid_src( - self, m_cmdline, m_initramfs): + self, m_cmdline, m_initramfs + ): """find_networking_config should check sources in DS defined order""" - ds_net_cfg = {'config': {'needle': True}} + ds_net_cfg = {"config": {"needle": True}} self.init.datasource = FakeDataSource(network_config=ds_net_cfg) self.init.datasource.network_config_sources = [ - 'invalid_src', NetworkConfigSource.ds] + "invalid_src", + NetworkConfigSource.ds, + ] self.assertEqual( (ds_net_cfg, NetworkConfigSource.ds), - self.init._find_networking_config()) - self.assertIn('WARNING: data source specifies an invalid network' - ' cfg_source: invalid_src', - self.logs.getvalue()) + self.init._find_networking_config(), + ) + self.assertIn( + "WARNING: data source specifies an invalid network" + " cfg_source: invalid_src", + self.logs.getvalue(), + ) - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - 
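The ordering tests here pin down _find_networking_config's contract: candidate sources are consulted in a defined order, a source returning {"config": "disabled"} short-circuits as "no config", and a datasource may publish its own preferred order. A behavioural sketch under those assumptions (not cloud-init's actual implementation):

    DEFAULT_ORDER = ["cmdline", "initramfs", "system_cfg", "ds", "fallback"]

    def find_networking_config(readers, order=DEFAULT_ORDER):
        # readers: source name -> zero-arg callable returning a config dict
        for name in order:
            cfg = readers.get(name, dict)()
            if cfg.get("config") == "disabled":
                return None, name  # disabled short-circuits the search
            if cfg:
                return cfg, name  # first source with any config wins
        return None, None

    print(find_networking_config({"cmdline": lambda: {"config": "disabled"}}))
    # -> (None, 'cmdline')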
@mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + @mock.patch("cloudinit.stages.cmdline.read_initramfs_config") + @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config") def test__find_networking_config_warns_if_datasrc_uses_unavailable_src( - self, m_cmdline, m_initramfs): + self, m_cmdline, m_initramfs + ): """find_networking_config should check sources in DS defined order""" - ds_net_cfg = {'config': {'needle': True}} + ds_net_cfg = {"config": {"needle": True}} self.init.datasource = FakeDataSource(network_config=ds_net_cfg) self.init.datasource.network_config_sources = [ - NetworkConfigSource.fallback, NetworkConfigSource.ds] + NetworkConfigSource.fallback, + NetworkConfigSource.ds, + ] self.assertEqual( (ds_net_cfg, NetworkConfigSource.ds), - self.init._find_networking_config()) - self.assertIn('WARNING: data source specifies an unavailable network' - ' cfg_source: fallback', - self.logs.getvalue()) + self.init._find_networking_config(), + ) + self.assertIn( + "WARNING: data source specifies an unavailable network" + " cfg_source: fallback", + self.logs.getvalue(), + ) - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + @mock.patch("cloudinit.stages.cmdline.read_initramfs_config") + @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config") def test_wb__find_networking_config_returns_kernel( - self, m_cmdline, m_initramfs): + self, m_cmdline, m_initramfs + ): """find_networking_config returns kernel cmdline config if present.""" - expected_cfg = {'config': ['fakekernel']} + expected_cfg = {"config": ["fakekernel"]} m_cmdline.return_value = expected_cfg - m_initramfs.return_value = {'config': ['fake_initrd']} - self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, - 'network': {'config': ['fakesys_config']}} + m_initramfs.return_value = {"config": ["fake_initrd"]} + self.init._cfg = { + "system_info": {"paths": {"cloud_dir": self.tmpdir}}, + "network": {"config": ["fakesys_config"]}, + } self.init.datasource = FakeDataSource( - network_config={'config': ['fakedatasource']}) + network_config={"config": ["fakedatasource"]} + ) self.assertEqual( (expected_cfg, NetworkConfigSource.cmdline), - self.init._find_networking_config()) + self.init._find_networking_config(), + ) - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + @mock.patch("cloudinit.stages.cmdline.read_initramfs_config") + @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config") def test_wb__find_networking_config_returns_initramfs( - self, m_cmdline, m_initramfs): + self, m_cmdline, m_initramfs + ): """find_networking_config returns kernel cmdline config if present.""" - expected_cfg = {'config': ['fake_initrd']} + expected_cfg = {"config": ["fake_initrd"]} m_cmdline.return_value = {} m_initramfs.return_value = expected_cfg - self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, - 'network': {'config': ['fakesys_config']}} + self.init._cfg = { + "system_info": {"paths": {"cloud_dir": self.tmpdir}}, + "network": {"config": ["fakesys_config"]}, + } self.init.datasource = FakeDataSource( - network_config={'config': ['fakedatasource']}) + network_config={"config": ["fakedatasource"]} + ) self.assertEqual( (expected_cfg, NetworkConfigSource.initramfs), - self.init._find_networking_config()) + self.init._find_networking_config(), + ) - 
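A datasource-supplied order is validated before use: unknown source names are skipped with a warning, as are sources that cannot be requested this way (fallback is computed, never requested). A sketch of that filtering, mirroring the warning strings asserted in these tests:

    KNOWN = {"cmdline", "initramfs", "system_cfg", "ds", "fallback"}
    REQUESTABLE = KNOWN - {"fallback"}

    def usable_sources(ds_order):
        for src in ds_order:
            if src not in KNOWN:
                print("WARNING: data source specifies an invalid network"
                      " cfg_source: %s" % src)
            elif src not in REQUESTABLE:
                print("WARNING: data source specifies an unavailable network"
                      " cfg_source: %s" % src)
            else:
                yield src

    print(list(usable_sources(["invalid_src", "fallback", "ds"])))  # ['ds']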
@mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + @mock.patch("cloudinit.stages.cmdline.read_initramfs_config") + @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config") def test_wb__find_networking_config_returns_system_cfg( - self, m_cmdline, m_initramfs): + self, m_cmdline, m_initramfs + ): """find_networking_config returns system config when present.""" m_cmdline.return_value = {} # No kernel network config m_initramfs.return_value = {} # no initramfs network config - expected_cfg = {'config': ['fakesys_config']} - self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, - 'network': expected_cfg} + expected_cfg = {"config": ["fakesys_config"]} + self.init._cfg = { + "system_info": {"paths": {"cloud_dir": self.tmpdir}}, + "network": expected_cfg, + } self.init.datasource = FakeDataSource( - network_config={'config': ['fakedatasource']}) + network_config={"config": ["fakedatasource"]} + ) self.assertEqual( (expected_cfg, NetworkConfigSource.system_cfg), - self.init._find_networking_config()) + self.init._find_networking_config(), + ) - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + @mock.patch("cloudinit.stages.cmdline.read_initramfs_config") + @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config") def test_wb__find_networking_config_returns_datasrc_cfg( - self, m_cmdline, m_initramfs): + self, m_cmdline, m_initramfs + ): """find_networking_config returns datasource net config if present.""" m_cmdline.return_value = {} # No kernel network config m_initramfs.return_value = {} # no initramfs network config # No system config for network in setUp - expected_cfg = {'config': ['fakedatasource']} + expected_cfg = {"config": ["fakedatasource"]} self.init.datasource = FakeDataSource(network_config=expected_cfg) self.assertEqual( (expected_cfg, NetworkConfigSource.ds), - self.init._find_networking_config()) + self.init._find_networking_config(), + ) - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + @mock.patch("cloudinit.stages.cmdline.read_initramfs_config") + @mock.patch("cloudinit.stages.cmdline.read_kernel_cmdline_config") def test_wb__find_networking_config_returns_fallback( - self, m_cmdline, m_initramfs): + self, m_cmdline, m_initramfs + ): """find_networking_config returns fallback config if not defined.""" m_cmdline.return_value = {} # Kernel doesn't disable networking m_initramfs.return_value = {} # no initramfs network config # Neither datasource nor system_info disable or provide network - fake_cfg = {'config': [{'type': 'physical', 'name': 'eth9'}], - 'version': 1} + fake_cfg = { + "config": [{"type": "physical", "name": "eth9"}], + "version": 1, + } def fake_generate_fallback(): return fake_cfg @@ -259,13 +315,15 @@ class TestInit(CiTestCase): distro.generate_fallback_config = fake_generate_fallback self.assertEqual( (fake_cfg, NetworkConfigSource.fallback), - self.init._find_networking_config()) - self.assertNotIn('network config disabled', self.logs.getvalue()) + self.init._find_networking_config(), + ) + self.assertNotIn("network config disabled", self.logs.getvalue()) def test_apply_network_config_disabled(self): """Log when network is disabled by upgraded-network.""" disable_file = os.path.join( - self.init.paths.get_cpath('data'), 'upgraded-network') + self.init.paths.get_cpath("data"), 
"upgraded-network" + ) def fake_network_config(): return (None, disable_file) @@ -274,41 +332,58 @@ class TestInit(CiTestCase): self.init.apply_network_config(True) self.assertIn( - 'INFO: network config is disabled by %s' % disable_file, - self.logs.getvalue()) + "INFO: network config is disabled by %s" % disable_file, + self.logs.getvalue(), + ) - @mock.patch('cloudinit.net.get_interfaces_by_mac') - @mock.patch('cloudinit.distros.ubuntu.Distro') + @mock.patch("cloudinit.net.get_interfaces_by_mac") + @mock.patch("cloudinit.distros.ubuntu.Distro") def test_apply_network_on_new_instance(self, m_ubuntu, m_macs): """Call distro apply_network_config methods on is_new_instance.""" net_cfg = { - 'version': 1, 'config': [ - {'subnets': [{'type': 'dhcp'}], 'type': 'physical', - 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} + "version": 1, + "config": [ + { + "subnets": [{"type": "dhcp"}], + "type": "physical", + "name": "eth9", + "mac_address": "42:42:42:42:42:42", + } + ], + } def fake_network_config(): return net_cfg, NetworkConfigSource.fallback - m_macs.return_value = {'42:42:42:42:42:42': 'eth9'} + m_macs.return_value = {"42:42:42:42:42:42": "eth9"} self.init._find_networking_config = fake_network_config self.init.apply_network_config(True) self.init.distro.apply_network_config_names.assert_called_with(net_cfg) self.init.distro.apply_network_config.assert_called_with( - net_cfg, bring_up=True) + net_cfg, bring_up=True + ) - @mock.patch('cloudinit.distros.ubuntu.Distro') + @mock.patch("cloudinit.distros.ubuntu.Distro") def test_apply_network_on_same_instance_id(self, m_ubuntu): """Only call distro.apply_network_config_names on same instance id.""" self.init.is_new_instance = self._real_is_new_instance old_instance_id = os.path.join( - self.init.paths.get_cpath('data'), 'instance-id') + self.init.paths.get_cpath("data"), "instance-id" + ) write_file(old_instance_id, TEST_INSTANCE_ID) net_cfg = { - 'version': 1, 'config': [ - {'subnets': [{'type': 'dhcp'}], 'type': 'physical', - 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} + "version": 1, + "config": [ + { + "subnets": [{"type": "dhcp"}], + "type": "physical", + "name": "eth9", + "mac_address": "42:42:42:42:42:42", + } + ], + } def fake_network_config(): return net_cfg, NetworkConfigSource.fallback @@ -320,53 +395,65 @@ class TestInit(CiTestCase): self.init.distro.apply_network_config.assert_not_called() assert ( "No network config applied. 
Neither a new instance nor datasource " - "network update allowed" - ) in self.logs.getvalue() + "network update allowed" in self.logs.getvalue() + ) # CiTestCase doesn't work with pytest.mark.parametrize, and moving this # functionality to a separate class is more cumbersome than it'd be worth # at the moment, so use this as a simple setup def _apply_network_setup(self, m_macs): old_instance_id = os.path.join( - self.init.paths.get_cpath('data'), 'instance-id') + self.init.paths.get_cpath("data"), "instance-id" + ) write_file(old_instance_id, TEST_INSTANCE_ID) net_cfg = { - 'version': 1, 'config': [ - {'subnets': [{'type': 'dhcp'}], 'type': 'physical', - 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} + "version": 1, + "config": [ + { + "subnets": [{"type": "dhcp"}], + "type": "physical", + "name": "eth9", + "mac_address": "42:42:42:42:42:42", + } + ], + } def fake_network_config(): return net_cfg, NetworkConfigSource.fallback - m_macs.return_value = {'42:42:42:42:42:42': 'eth9'} + m_macs.return_value = {"42:42:42:42:42:42": "eth9"} self.init._find_networking_config = fake_network_config self.init.datasource = FakeDataSource(paths=self.init.paths) self.init.is_new_instance = mock.Mock(return_value=False) return net_cfg - @mock.patch('cloudinit.net.get_interfaces_by_mac') - @mock.patch('cloudinit.distros.ubuntu.Distro') - @mock.patch.dict(sources.DataSource.default_update_events, { - EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}}) - def test_apply_network_allowed_when_default_boot( - self, m_ubuntu, m_macs - ): + @mock.patch("cloudinit.net.get_interfaces_by_mac") + @mock.patch("cloudinit.distros.ubuntu.Distro") + @mock.patch.dict( + sources.DataSource.default_update_events, + {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}}, + ) + def test_apply_network_allowed_when_default_boot(self, m_ubuntu, m_macs): """Apply network if datasource permits BOOT event.""" net_cfg = self._apply_network_setup(m_macs) self.init.apply_network_config(True) - assert mock.call( - net_cfg - ) == self.init.distro.apply_network_config_names.call_args_list[-1] - assert mock.call( - net_cfg, bring_up=True - ) == self.init.distro.apply_network_config.call_args_list[-1] - - @mock.patch('cloudinit.net.get_interfaces_by_mac') - @mock.patch('cloudinit.distros.ubuntu.Distro') - @mock.patch.dict(sources.DataSource.default_update_events, { - EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + assert ( + mock.call(net_cfg) + == self.init.distro.apply_network_config_names.call_args_list[-1] + ) + assert ( + mock.call(net_cfg, bring_up=True) + == self.init.distro.apply_network_config.call_args_list[-1] + ) + + @mock.patch("cloudinit.net.get_interfaces_by_mac") + @mock.patch("cloudinit.distros.ubuntu.Distro") + @mock.patch.dict( + sources.DataSource.default_update_events, + {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}, + ) def test_apply_network_disabled_when_no_default_boot( self, m_ubuntu, m_macs ): @@ -376,45 +463,47 @@ class TestInit(CiTestCase): self.init.distro.apply_network_config.assert_not_called() assert ( "No network config applied. 
Neither a new instance nor datasource " - "network update allowed" - ) in self.logs.getvalue() - - @mock.patch('cloudinit.net.get_interfaces_by_mac') - @mock.patch('cloudinit.distros.ubuntu.Distro') - @mock.patch.dict(sources.DataSource.default_update_events, { - EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + "network update allowed" in self.logs.getvalue() + ) + + @mock.patch("cloudinit.net.get_interfaces_by_mac") + @mock.patch("cloudinit.distros.ubuntu.Distro") + @mock.patch.dict( + sources.DataSource.default_update_events, + {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}, + ) def test_apply_network_allowed_with_userdata_overrides( self, m_ubuntu, m_macs ): """Apply network if userdata overrides default config""" net_cfg = self._apply_network_setup(m_macs) - self.init._cfg = {'updates': {'network': {'when': ['boot']}}} + self.init._cfg = {"updates": {"network": {"when": ["boot"]}}} self.init.apply_network_config(True) - self.init.distro.apply_network_config_names.assert_called_with( - net_cfg) + self.init.distro.apply_network_config_names.assert_called_with(net_cfg) self.init.distro.apply_network_config.assert_called_with( - net_cfg, bring_up=True) - - @mock.patch('cloudinit.net.get_interfaces_by_mac') - @mock.patch('cloudinit.distros.ubuntu.Distro') - @mock.patch.dict(sources.DataSource.supported_update_events, { - EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) - def test_apply_network_disabled_when_unsupported( - self, m_ubuntu, m_macs - ): + net_cfg, bring_up=True + ) + + @mock.patch("cloudinit.net.get_interfaces_by_mac") + @mock.patch("cloudinit.distros.ubuntu.Distro") + @mock.patch.dict( + sources.DataSource.supported_update_events, + {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}, + ) + def test_apply_network_disabled_when_unsupported(self, m_ubuntu, m_macs): """Don't apply network config if unsupported. Shouldn't work even when specified as userdata """ self._apply_network_setup(m_macs) - self.init._cfg = {'updates': {'network': {'when': ['boot']}}} + self.init._cfg = {"updates": {"network": {"when": ["boot"]}}} self.init.apply_network_config(True) self.init.distro.apply_network_config.assert_not_called() assert ( "No network config applied. 
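The default_update_events / supported_update_events patches in this stretch encode a two-gate rule: a network config is re-applied on an event only if the datasource supports the event at all and the event is enabled, by defaults or by a user-data "updates" stanza; user-data cannot enable an unsupported event. A minimal sketch of the gate:

    def network_update_allowed(event, supported, enabled):
        # both gates must pass; user-data can widen 'enabled', never 'supported'
        return event in supported and event in enabled

    supported = {"boot-new-instance"}        # datasource capability
    enabled = {"boot-new-instance", "boot"}  # user-data also asked for boot
    print(network_update_allowed("boot", supported, enabled))  # False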
Neither a new instance nor datasource " - "network update allowed" - ) in self.logs.getvalue() + "network update allowed" in self.logs.getvalue() + ) class TestInit_InitializeFilesystem: @@ -475,4 +564,5 @@ class TestInit_InitializeFilesystem: assert mode == stat.S_IMODE(log_file.stat().mode) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_subp.py b/tests/unittests/test_subp.py index 572510d7..7cd1339b 100644 --- a/tests/unittests/test_subp.py +++ b/tests/unittests/test_subp.py @@ -4,17 +4,15 @@ import json import os -import sys import stat - +import sys from unittest import mock from cloudinit import subp, util from tests.unittests.helpers import CiTestCase, get_top_level_dir - -BASH = subp.which('bash') -BOGUS_COMMAND = 'this-is-not-expected-to-be-a-program-name' +BASH = subp.which("bash") +BOGUS_COMMAND = "this-is-not-expected-to-be-a-program-name" class TestPrependBaseCommands(CiTestCase): @@ -23,177 +21,231 @@ class TestPrependBaseCommands(CiTestCase): def test_prepend_base_command_errors_on_neither_string_nor_list(self): """Raise an error for each command which is not a string or list.""" - orig_commands = ['ls', 1, {'not': 'gonna work'}, ['basecmd', 'list']] + orig_commands = ["ls", 1, {"not": "gonna work"}, ["basecmd", "list"]] with self.assertRaises(TypeError) as context_manager: subp.prepend_base_command( - base_command='basecmd', commands=orig_commands) + base_command="basecmd", commands=orig_commands + ) self.assertEqual( "Invalid basecmd config. These commands are not a string or" " list:\n1\n{'not': 'gonna work'}", - str(context_manager.exception)) + str(context_manager.exception), + ) def test_prepend_base_command_warns_on_non_base_string_commands(self): """Warn on each non-base for commands of type string.""" orig_commands = [ - 'ls', 'basecmd list', 'touch /blah', 'basecmd install x'] + "ls", + "basecmd list", + "touch /blah", + "basecmd install x", + ] fixed_commands = subp.prepend_base_command( - base_command='basecmd', commands=orig_commands) + base_command="basecmd", commands=orig_commands + ) self.assertEqual( - 'WARNING: Non-basecmd commands in basecmd config:\n' - 'ls\ntouch /blah\n', - self.logs.getvalue()) + "WARNING: Non-basecmd commands in basecmd config:\n" + "ls\ntouch /blah\n", + self.logs.getvalue(), + ) self.assertEqual(orig_commands, fixed_commands) def test_prepend_base_command_prepends_on_non_base_list_commands(self): """Prepend 'basecmd' for each non-basecmd command of type list.""" - orig_commands = [['ls'], ['basecmd', 'list'], ['basecmda', '/blah'], - ['basecmd', 'install', 'x']] - expected = [['basecmd', 'ls'], ['basecmd', 'list'], - ['basecmd', 'basecmda', '/blah'], - ['basecmd', 'install', 'x']] + orig_commands = [ + ["ls"], + ["basecmd", "list"], + ["basecmda", "/blah"], + ["basecmd", "install", "x"], + ] + expected = [ + ["basecmd", "ls"], + ["basecmd", "list"], + ["basecmd", "basecmda", "/blah"], + ["basecmd", "install", "x"], + ] fixed_commands = subp.prepend_base_command( - base_command='basecmd', commands=orig_commands) - self.assertEqual('', self.logs.getvalue()) + base_command="basecmd", commands=orig_commands + ) + self.assertEqual("", self.logs.getvalue()) self.assertEqual(expected, fixed_commands) def test_prepend_base_command_removes_first_item_when_none(self): """Remove the first element of a non-basecmd when it is None.""" - orig_commands = [[None, 'ls'], ['basecmd', 'list'], - [None, 'touch', '/blah'], - ['basecmd', 'install', 'x']] - expected = [['ls'], ['basecmd', 'list'], - ['touch', '/blah'], - ['basecmd', 'install', 
'x']] + orig_commands = [ + [None, "ls"], + ["basecmd", "list"], + [None, "touch", "/blah"], + ["basecmd", "install", "x"], + ] + expected = [ + ["ls"], + ["basecmd", "list"], + ["touch", "/blah"], + ["basecmd", "install", "x"], + ] fixed_commands = subp.prepend_base_command( - base_command='basecmd', commands=orig_commands) - self.assertEqual('', self.logs.getvalue()) + base_command="basecmd", commands=orig_commands + ) + self.assertEqual("", self.logs.getvalue()) self.assertEqual(expected, fixed_commands) class TestSubp(CiTestCase): - allowed_subp = [BASH, 'cat', CiTestCase.SUBP_SHELL_TRUE, - BOGUS_COMMAND, sys.executable] - - stdin2err = [BASH, '-c', 'cat >&2'] - stdin2out = ['cat'] - utf8_invalid = b'ab\xaadef' - utf8_valid = b'start \xc3\xa9 end' - utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7' - printenv = [BASH, '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--'] + allowed_subp = [ + BASH, + "cat", + CiTestCase.SUBP_SHELL_TRUE, + BOGUS_COMMAND, + sys.executable, + ] + + stdin2err = [BASH, "-c", "cat >&2"] + stdin2out = ["cat"] + utf8_invalid = b"ab\xaadef" + utf8_valid = b"start \xc3\xa9 end" + utf8_valid_2 = b"d\xc3\xa9j\xc8\xa7" + printenv = [BASH, "-c", 'for n in "$@"; do echo "$n=${!n}"; done', "--"] def printf_cmd(self, *args): # bash's printf supports \xaa. So does /usr/bin/printf # but by using bash, we remove dependency on another program. - return([BASH, '-c', 'printf "$@"', 'printf'] + list(args)) + return [BASH, "-c", 'printf "$@"', "printf"] + list(args) def test_subp_handles_bytestrings(self): """subp can run a bytestring command if shell is True.""" - tmp_file = self.tmp_path('test.out') - cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file) - (out, _err) = subp.subp(cmd.encode('utf-8'), shell=True) - self.assertEqual('', out) - self.assertEqual('', _err) - self.assertEqual('HI MOM\n', util.load_file(tmp_file)) + tmp_file = self.tmp_path("test.out") + cmd = "echo HI MOM >> {tmp_file}".format(tmp_file=tmp_file) + (out, _err) = subp.subp(cmd.encode("utf-8"), shell=True) + self.assertEqual("", out) + self.assertEqual("", _err) + self.assertEqual("HI MOM\n", util.load_file(tmp_file)) def test_subp_handles_strings(self): """subp can run a string command if shell is True.""" - tmp_file = self.tmp_path('test.out') - cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file) + tmp_file = self.tmp_path("test.out") + cmd = "echo HI MOM >> {tmp_file}".format(tmp_file=tmp_file) (out, _err) = subp.subp(cmd, shell=True) - self.assertEqual('', out) - self.assertEqual('', _err) - self.assertEqual('HI MOM\n', util.load_file(tmp_file)) + self.assertEqual("", out) + self.assertEqual("", _err) + self.assertEqual("HI MOM\n", util.load_file(tmp_file)) def test_subp_handles_utf8(self): # The given bytes contain utf-8 accented characters as seen in e.g. # the "deja dup" package in Ubuntu. 
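The decode assertions above map directly onto Python's built-in UTF-8 error handlers, which is what subp's decode= values select between. A self-contained illustration:

    raw = b"ab\xaadef"                     # utf8_invalid from these tests
    print(raw.decode("utf-8", "replace"))  # default: bad byte -> U+FFFD
    print(raw.decode("utf-8", "ignore"))   # decode='ignore' -> 'abdef'
    try:
        raw.decode("utf-8", "strict")      # decode='strict' raises
    except UnicodeDecodeError as err:
        print("strict raised:", err.reason)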
cmd = self.printf_cmd(self.utf8_valid_2) (out, _err) = subp.subp(cmd, capture=True) - self.assertEqual(out, self.utf8_valid_2.decode('utf-8')) + self.assertEqual(out, self.utf8_valid_2.decode("utf-8")) def test_subp_respects_decode_false(self): - (out, err) = subp.subp(self.stdin2out, capture=True, decode=False, - data=self.utf8_valid) + (out, err) = subp.subp( + self.stdin2out, capture=True, decode=False, data=self.utf8_valid + ) self.assertTrue(isinstance(out, bytes)) self.assertTrue(isinstance(err, bytes)) self.assertEqual(out, self.utf8_valid) def test_subp_decode_ignore(self): # this executes a string that writes invalid utf-8 to stdout - (out, _err) = subp.subp(self.printf_cmd('abc\\xaadef'), - capture=True, decode='ignore') - self.assertEqual(out, 'abcdef') + (out, _err) = subp.subp( + self.printf_cmd("abc\\xaadef"), capture=True, decode="ignore" + ) + self.assertEqual(out, "abcdef") def test_subp_decode_strict_valid_utf8(self): - (out, _err) = subp.subp(self.stdin2out, capture=True, - decode='strict', data=self.utf8_valid) - self.assertEqual(out, self.utf8_valid.decode('utf-8')) + (out, _err) = subp.subp( + self.stdin2out, capture=True, decode="strict", data=self.utf8_valid + ) + self.assertEqual(out, self.utf8_valid.decode("utf-8")) def test_subp_decode_invalid_utf8_replaces(self): - (out, _err) = subp.subp(self.stdin2out, capture=True, - data=self.utf8_invalid) - expected = self.utf8_invalid.decode('utf-8', 'replace') + (out, _err) = subp.subp( + self.stdin2out, capture=True, data=self.utf8_invalid + ) + expected = self.utf8_invalid.decode("utf-8", "replace") self.assertEqual(out, expected) def test_subp_decode_strict_raises(self): args = [] - kwargs = {'args': self.stdin2out, 'capture': True, - 'decode': 'strict', 'data': self.utf8_invalid} + kwargs = { + "args": self.stdin2out, + "capture": True, + "decode": "strict", + "data": self.utf8_invalid, + } self.assertRaises(UnicodeDecodeError, subp.subp, *args, **kwargs) def test_subp_capture_stderr(self): - data = b'hello world' - (out, err) = subp.subp(self.stdin2err, capture=True, - decode=False, data=data, - update_env={'LC_ALL': 'C'}) + data = b"hello world" + (out, err) = subp.subp( + self.stdin2err, + capture=True, + decode=False, + data=data, + update_env={"LC_ALL": "C"}, + ) self.assertEqual(err, data) - self.assertEqual(out, b'') + self.assertEqual(out, b"") def test_subp_reads_env(self): - with mock.patch.dict("os.environ", values={'FOO': 'BAR'}): - out, _err = subp.subp(self.printenv + ['FOO'], capture=True) - self.assertEqual('FOO=BAR', out.splitlines()[0]) + with mock.patch.dict("os.environ", values={"FOO": "BAR"}): + out, _err = subp.subp(self.printenv + ["FOO"], capture=True) + self.assertEqual("FOO=BAR", out.splitlines()[0]) def test_subp_env_and_update_env(self): out, _err = subp.subp( - self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True, - env={'FOO': 'BAR'}, - update_env={'HOME': '/myhome', 'K2': 'V2'}) + self.printenv + ["FOO", "HOME", "K1", "K2"], + capture=True, + env={"FOO": "BAR"}, + update_env={"HOME": "/myhome", "K2": "V2"}, + ) self.assertEqual( - ['FOO=BAR', 'HOME=/myhome', 'K1=', 'K2=V2'], out.splitlines()) + ["FOO=BAR", "HOME=/myhome", "K1=", "K2=V2"], out.splitlines() + ) def test_subp_update_env(self): - extra = {'FOO': 'BAR', 'HOME': '/root', 'K1': 'V1'} + extra = {"FOO": "BAR", "HOME": "/root", "K1": "V1"} with mock.patch.dict("os.environ", values=extra): out, _err = subp.subp( - self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True, - update_env={'HOME': '/myhome', 'K2': 'V2'}) + 
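The env/update_env tests around this point distinguish two knobs: env= replaces the child's environment wholesale, while update_env= overlays the inherited one. The same distinction in plain subprocess terms (illustrative, not subp's implementation):

    import os
    import subprocess

    # env= semantics: the child sees only these variables
    subprocess.run(["env"], env={"FOO": "BAR"})

    # update_env= semantics: inherit os.environ, then apply overrides
    merged = {**os.environ, "HOME": "/myhome", "K2": "V2"}
    subprocess.run(["env"], env=merged)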
self.printenv + ["FOO", "HOME", "K1", "K2"], + capture=True, + update_env={"HOME": "/myhome", "K2": "V2"}, + ) self.assertEqual( - ['FOO=BAR', 'HOME=/myhome', 'K1=V1', 'K2=V2'], out.splitlines()) + ["FOO=BAR", "HOME=/myhome", "K1=V1", "K2=V2"], out.splitlines() + ) def test_subp_warn_missing_shebang(self): """Warn on no #! in script""" - noshebang = self.tmp_path('noshebang') - util.write_file(noshebang, 'true\n') + noshebang = self.tmp_path("noshebang") + util.write_file(noshebang, "true\n") print("os is %s" % os) os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC) with self.allow_subp([noshebang]): - self.assertRaisesRegex(subp.ProcessExecutionError, - r'Missing #! in script\?', - subp.subp, (noshebang,)) + self.assertRaisesRegex( + subp.ProcessExecutionError, + r"Missing #! in script\?", + subp.subp, + (noshebang,), + ) def test_subp_combined_stderr_stdout(self): """Providing combine_capture as True redirects stderr to stdout.""" - data = b'hello world' - (out, err) = subp.subp(self.stdin2err, capture=True, - combine_capture=True, decode=False, data=data) - self.assertEqual(b'', err) + data = b"hello world" + (out, err) = subp.subp( + self.stdin2err, + capture=True, + combine_capture=True, + decode=False, + data=data, + ) + self.assertEqual(b"", err) self.assertEqual(data, out) def test_returns_none_if_no_capture(self): - (out, err) = subp.subp(self.stdin2out, data=b'', capture=False) + (out, err) = subp.subp(self.stdin2out, data=b"", capture=False) self.assertIsNone(err) self.assertIsNone(out) @@ -212,10 +264,12 @@ class TestSubp(CiTestCase): self.assertTrue(isinstance(cm.exception.stderr, str)) def test_bunch_of_slashes_in_path(self): - self.assertEqual("/target/my/path/", - subp.target_path("/target/", "//my/path/")) - self.assertEqual("/target/my/path/", - subp.target_path("/target/", "///my/path/")) + self.assertEqual( + "/target/my/path/", subp.target_path("/target/", "//my/path/") + ) + self.assertEqual( + "/target/my/path/", subp.target_path("/target/", "///my/path/") + ) def test_c_lang_can_take_utf8_args(self): """Independent of system LC_CTYPE, args can contain utf-8 strings. @@ -232,25 +286,32 @@ class TestSubp(CiTestCase): the default encoding will be set to ascii. In such an environment Popen(['command', 'non-ascii-arg']) would cause a UnicodeDecodeError. 
""" - python_prog = '\n'.join( + python_prog = "\n".join( [ - 'import json, sys', + "import json, sys", 'sys.path.insert(0, "{}")'.format(get_top_level_dir()), - 'from cloudinit.subp import subp', - 'data = sys.stdin.read()', - 'cmd = json.loads(data)', - 'subp(cmd, capture=False)', - '', + "from cloudinit.subp import subp", + "data = sys.stdin.read()", + "cmd = json.loads(data)", + "subp(cmd, capture=False)", + "", ] ) - cmd = [BASH, '-c', 'echo -n "$@"', '--', - self.utf8_valid.decode("utf-8")] - python_subp = [sys.executable, '-c', python_prog] + cmd = [ + BASH, + "-c", + 'echo -n "$@"', + "--", + self.utf8_valid.decode("utf-8"), + ] + python_subp = [sys.executable, "-c", python_prog] out, _err = subp.subp( - python_subp, update_env={'LC_CTYPE': 'C'}, + python_subp, + update_env={"LC_CTYPE": "C"}, data=json.dumps(cmd).encode("utf-8"), - decode=False) + decode=False, + ) self.assertEqual(self.utf8_valid, out) def test_bogus_command_logs_status_messages(self): @@ -264,8 +325,9 @@ class TestSubp(CiTestCase): subp.subp([BOGUS_COMMAND], status_cb=status_cb) expected = [ - 'Begin run command: {cmd}\n'.format(cmd=BOGUS_COMMAND), - 'ERROR: End run command: invalid command provided\n'] + "Begin run command: {cmd}\n".format(cmd=BOGUS_COMMAND), + "ERROR: End run command: invalid command provided\n", + ] self.assertEqual(expected, logs) def test_command_logs_exit_codes_to_status_cb(self): @@ -276,14 +338,15 @@ class TestSubp(CiTestCase): logs.append(log) with self.assertRaises(subp.ProcessExecutionError): - subp.subp([BASH, '-c', 'exit 2'], status_cb=status_cb) - subp.subp([BASH, '-c', 'exit 0'], status_cb=status_cb) + subp.subp([BASH, "-c", "exit 2"], status_cb=status_cb) + subp.subp([BASH, "-c", "exit 0"], status_cb=status_cb) expected = [ - 'Begin run command: %s -c exit 2\n' % BASH, - 'ERROR: End run command: exit(2)\n', - 'Begin run command: %s -c exit 0\n' % BASH, - 'End run command: exit(0)\n'] + "Begin run command: %s -c exit 2\n" % BASH, + "ERROR: End run command: exit(2)\n", + "Begin run command: %s -c exit 0\n" % BASH, + "End run command: exit(0)\n", + ] self.assertEqual(expected, logs) diff --git a/tests/unittests/test_temp_utils.py b/tests/unittests/test_temp_utils.py index 9d56d0d0..e91f389b 100644 --- a/tests/unittests/test_temp_utils.py +++ b/tests/unittests/test_temp_utils.py @@ -2,30 +2,33 @@ """Tests for cloudinit.temp_utils""" +import os + from cloudinit.temp_utils import mkdtemp, mkstemp, tempdir from tests.unittests.helpers import CiTestCase, wrap_and_call -import os class TestTempUtils(CiTestCase): - def test_mkdtemp_default_non_root(self): """mkdtemp creates a dir under /tmp for the unprivileged.""" calls = [] def fake_mkdtemp(*args, **kwargs): calls.append(kwargs) - return '/fake/return/path' + return "/fake/return/path" retval = wrap_and_call( - 'cloudinit.temp_utils', - {'os.getuid': 1000, - 'tempfile.mkdtemp': {'side_effect': fake_mkdtemp}, - '_TMPDIR': {'new': None}, - 'os.path.isdir': True}, - mkdtemp) - self.assertEqual('/fake/return/path', retval) - self.assertEqual([{'dir': '/tmp'}], calls) + "cloudinit.temp_utils", + { + "os.getuid": 1000, + "tempfile.mkdtemp": {"side_effect": fake_mkdtemp}, + "_TMPDIR": {"new": None}, + "os.path.isdir": True, + }, + mkdtemp, + ) + self.assertEqual("/fake/return/path", retval) + self.assertEqual([{"dir": "/tmp"}], calls) def test_mkdtemp_default_non_root_needs_exe(self): """mkdtemp creates a dir under /var/tmp/cloud-init when needs_exe.""" @@ -33,17 +36,21 @@ class TestTempUtils(CiTestCase): def fake_mkdtemp(*args, **kwargs): 
diff --git a/tests/unittests/test_temp_utils.py b/tests/unittests/test_temp_utils.py
index 9d56d0d0..e91f389b 100644
--- a/tests/unittests/test_temp_utils.py
+++ b/tests/unittests/test_temp_utils.py
@@ -2,30 +2,33 @@
"""Tests for cloudinit.temp_utils"""

+import os
+
from cloudinit.temp_utils import mkdtemp, mkstemp, tempdir
from tests.unittests.helpers import CiTestCase, wrap_and_call
-import os


class TestTempUtils(CiTestCase):
-
def test_mkdtemp_default_non_root(self):
"""mkdtemp creates a dir under /tmp for the unprivileged."""
calls = []

def fake_mkdtemp(*args, **kwargs):
calls.append(kwargs)
- return '/fake/return/path'
+ return "/fake/return/path"

retval = wrap_and_call(
- 'cloudinit.temp_utils',
- {'os.getuid': 1000,
- 'tempfile.mkdtemp': {'side_effect': fake_mkdtemp},
- '_TMPDIR': {'new': None},
- 'os.path.isdir': True},
- mkdtemp)
- self.assertEqual('/fake/return/path', retval)
- self.assertEqual([{'dir': '/tmp'}], calls)
+ "cloudinit.temp_utils",
+ {
+ "os.getuid": 1000,
+ "tempfile.mkdtemp": {"side_effect": fake_mkdtemp},
+ "_TMPDIR": {"new": None},
+ "os.path.isdir": True,
+ },
+ mkdtemp,
+ )
+ self.assertEqual("/fake/return/path", retval)
+ self.assertEqual([{"dir": "/tmp"}], calls)

def test_mkdtemp_default_non_root_needs_exe(self):
"""mkdtemp creates a dir under /var/tmp/cloud-init when needs_exe."""
@@ -33,17 +36,21 @@ class TestTempUtils(CiTestCase):

def fake_mkdtemp(*args, **kwargs):
calls.append(kwargs)
- return '/fake/return/path'
+ return "/fake/return/path"

retval = wrap_and_call(
- 'cloudinit.temp_utils',
- {'os.getuid': 1000,
- 'tempfile.mkdtemp': {'side_effect': fake_mkdtemp},
- '_TMPDIR': {'new': None},
- 'os.path.isdir': True},
- mkdtemp, needs_exe=True)
- self.assertEqual('/fake/return/path', retval)
- self.assertEqual([{'dir': '/var/tmp/cloud-init'}], calls)
+ "cloudinit.temp_utils",
+ {
+ "os.getuid": 1000,
+ "tempfile.mkdtemp": {"side_effect": fake_mkdtemp},
+ "_TMPDIR": {"new": None},
+ "os.path.isdir": True,
+ },
+ mkdtemp,
+ needs_exe=True,
+ )
+ self.assertEqual("/fake/return/path", retval)
+ self.assertEqual([{"dir": "/var/tmp/cloud-init"}], calls)

def test_mkdtemp_default_root(self):
"""mkdtemp creates a dir under /run/cloud-init for the privileged."""
@@ -51,17 +58,20 @@ class TestTempUtils(CiTestCase):

def fake_mkdtemp(*args, **kwargs):
calls.append(kwargs)
- return '/fake/return/path'
+ return "/fake/return/path"

retval = wrap_and_call(
- 'cloudinit.temp_utils',
- {'os.getuid': 0,
- 'tempfile.mkdtemp': {'side_effect': fake_mkdtemp},
- '_TMPDIR': {'new': None},
- 'os.path.isdir': True},
- mkdtemp)
- self.assertEqual('/fake/return/path', retval)
- self.assertEqual([{'dir': '/run/cloud-init/tmp'}], calls)
+ "cloudinit.temp_utils",
+ {
+ "os.getuid": 0,
+ "tempfile.mkdtemp": {"side_effect": fake_mkdtemp},
+ "_TMPDIR": {"new": None},
+ "os.path.isdir": True,
+ },
+ mkdtemp,
+ )
+ self.assertEqual("/fake/return/path", retval)
+ self.assertEqual([{"dir": "/run/cloud-init/tmp"}], calls)

def test_mkstemp_default_non_root(self):
"""mkstemp creates secure tempfile under /tmp for the unprivileged."""
@@ -69,17 +79,20 @@ class TestTempUtils(CiTestCase):

def fake_mkstemp(*args, **kwargs):
calls.append(kwargs)
- return '/fake/return/path'
+ return "/fake/return/path"

retval = wrap_and_call(
- 'cloudinit.temp_utils',
- {'os.getuid': 1000,
- 'tempfile.mkstemp': {'side_effect': fake_mkstemp},
- '_TMPDIR': {'new': None},
- 'os.path.isdir': True},
- mkstemp)
- self.assertEqual('/fake/return/path', retval)
- self.assertEqual([{'dir': '/tmp'}], calls)
+ "cloudinit.temp_utils",
+ {
+ "os.getuid": 1000,
+ "tempfile.mkstemp": {"side_effect": fake_mkstemp},
+ "_TMPDIR": {"new": None},
+ "os.path.isdir": True,
+ },
+ mkstemp,
+ )
+ self.assertEqual("/fake/return/path", retval)
+ self.assertEqual([{"dir": "/tmp"}], calls)

def test_mkstemp_default_root(self):
"""mkstemp creates a secure tempfile in /run/cloud-init for root."""
@@ -87,31 +100,36 @@ class TestTempUtils(CiTestCase):

def fake_mkstemp(*args, **kwargs):
calls.append(kwargs)
- return '/fake/return/path'
+ return "/fake/return/path"

retval = wrap_and_call(
- 'cloudinit.temp_utils',
- {'os.getuid': 0,
- 'tempfile.mkstemp': {'side_effect': fake_mkstemp},
- '_TMPDIR': {'new': None},
- 'os.path.isdir': True},
- mkstemp)
- self.assertEqual('/fake/return/path', retval)
- self.assertEqual([{'dir': '/run/cloud-init/tmp'}], calls)
+ "cloudinit.temp_utils",
+ {
+ "os.getuid": 0,
+ "tempfile.mkstemp": {"side_effect": fake_mkstemp},
+ "_TMPDIR": {"new": None},
+ "os.path.isdir": True,
+ },
+ mkstemp,
+ )
+ self.assertEqual("/fake/return/path", retval)
+ self.assertEqual([{"dir": "/run/cloud-init/tmp"}], calls)

def test_tempdir_error_suppression(self):
"""test tempdir suppresses errors during directory removal."""
with self.assertRaises(OSError):
- with tempdir(prefix='cloud-init-dhcp-') as tdir:
os.rmdir(tdir)
# As a result, the directory is already gone,
# so shutil.rmtree should raise OSError

- with tempdir(rmtree_ignore_errors=True,
- prefix='cloud-init-dhcp-') as tdir:
+ with tempdir(
+ rmtree_ignore_errors=True, prefix="cloud-init-dhcp-"
+ ) as tdir:
os.rmdir(tdir)
# Since the directory is already gone, shutil.rmtree would raise
# OSError, but we suppress that
+ # vi: ts=4 expandtab
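Only formatting changes in the temp_utils hunks as well. As the docstrings above state, directory selection depends on privilege and on needs_exe; a short sketch of the API under the same sys.path assumption (the prefix is arbitrary):

from cloudinit.temp_utils import mkdtemp, tempdir

# Unprivileged callers get a directory under /tmp; needs_exe=True selects
# /var/tmp/cloud-init; root gets /run/cloud-init/tmp, per the tests above.
workdir = mkdtemp()
# tempdir() is a context manager that removes the directory on exit;
# rmtree_ignore_errors=True suppresses errors during that removal.
with tempdir(rmtree_ignore_errors=True, prefix="cloud-init-dhcp-") as tdir:
    print(tdir)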
%s\nc=d\n" % hn - out_data = templater.basic_render(in_data, {'hostname': hn}) + out_data = templater.basic_render(in_data, {"hostname": hn}) self.assertEqual(expected_data, out_data) def test_render_basic2(self): mirror = "mymirror" codename = "zany" in_data = "deb $mirror $codename-updates main contrib non-free" - ex_data = "deb %s %s-updates main contrib non-free" % (mirror, - codename) - - out_data = templater.basic_render(in_data, - {'mirror': mirror, - 'codename': codename}) + ex_data = "deb %s %s-updates main contrib non-free" % ( + mirror, + codename, + ) + + out_data = templater.basic_render( + in_data, {"mirror": mirror, "codename": codename} + ) self.assertEqual(ex_data, out_data) def test_jinja_nonascii_render_to_string(self): """Test jinja render_to_string with non-ascii content.""" self.assertEqual( templater.render_string( - self.add_header("jinja", self.jinja_utf8), {"name": "bob"}), - self.jinja_utf8_rbob) + self.add_header("jinja", self.jinja_utf8), {"name": "bob"} + ), + self.jinja_utf8_rbob, + ) def test_jinja_nonascii_render_undefined_variables_to_default_py3(self): """Test py3 jinja render_to_string with undefined variable default.""" self.assertEqual( templater.render_string( - self.add_header("jinja", self.jinja_utf8), {}), - self.jinja_utf8_rbob.replace('bob', 'CI_MISSING_JINJA_VAR/name')) + self.add_header("jinja", self.jinja_utf8), {} + ), + self.jinja_utf8_rbob.replace("bob", "CI_MISSING_JINJA_VAR/name"), + ) def test_jinja_nonascii_render_to_file(self): """Test jinja render_to_file of a filename with non-ascii content.""" tmpl_fn = self.tmp_path("j-render-to-file.template") out_fn = self.tmp_path("j-render-to-file.out") - write_file(filename=tmpl_fn, omode="wb", - content=self.add_header( - "jinja", self.jinja_utf8).encode('utf-8')) + write_file( + filename=tmpl_fn, + omode="wb", + content=self.add_header("jinja", self.jinja_utf8).encode("utf-8"), + ) templater.render_to_file(tmpl_fn, out_fn, {"name": "bob"}) - result = load_file(out_fn, decode=False).decode('utf-8') + result = load_file(out_fn, decode=False).decode("utf-8") self.assertEqual(result, self.jinja_utf8_rbob) def test_jinja_nonascii_render_from_file(self): """Test jinja render_from_file with non-ascii content.""" tmpl_fn = self.tmp_path("j-render-from-file.template") - write_file(tmpl_fn, omode="wb", - content=self.add_header( - "jinja", self.jinja_utf8).encode('utf-8')) + write_file( + tmpl_fn, + omode="wb", + content=self.add_header("jinja", self.jinja_utf8).encode("utf-8"), + ) result = templater.render_from_file(tmpl_fn, {"name": "bob"}) self.assertEqual(result, self.jinja_utf8_rbob) @@ -156,14 +171,18 @@ $a,$b''' def test_jinja_warns_on_missing_dep_and_uses_basic_renderer(self): """Test jinja render_from_file will fallback to basic renderer.""" tmpl_fn = self.tmp_path("j-render-from-file.template") - write_file(tmpl_fn, omode="wb", - content=self.add_header( - "jinja", self.jinja_utf8).encode('utf-8')) + write_file( + tmpl_fn, + omode="wb", + content=self.add_header("jinja", self.jinja_utf8).encode("utf-8"), + ) result = templater.render_from_file(tmpl_fn, {"name": "bob"}) self.assertEqual(result, self.jinja_utf8.decode()) self.assertIn( - 'WARNING: Jinja not available as the selected renderer for desired' - ' template, reverting to the basic renderer.', - self.logs.getvalue()) + "WARNING: Jinja not available as the selected renderer for desired" + " template, reverting to the basic renderer.", + self.logs.getvalue(), + ) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_url_helper.py 
diff --git a/tests/unittests/test_url_helper.py b/tests/unittests/test_url_helper.py
index 501d9533..85810e00 100644
--- a/tests/unittests/test_url_helper.py
+++ b/tests/unittests/test_url_helper.py
@@ -1,54 +1,63 @@
# This file is part of cloud-init. See LICENSE file for license information.

-from cloudinit.url_helper import (
- NOT_FOUND, UrlError, REDACTED, oauth_headers, read_file_or_url,
- retry_on_url_exc)
-from tests.unittests.helpers import CiTestCase, mock, skipIf
-from cloudinit import util
-from cloudinit import version
+import logging

import httpretty
-import logging
import requests

+from cloudinit import util, version
+from cloudinit.url_helper import (
+ NOT_FOUND,
+ REDACTED,
+ UrlError,
+ oauth_headers,
+ read_file_or_url,
+ retry_on_url_exc,
+)
+from tests.unittests.helpers import CiTestCase, mock, skipIf

try:
import oauthlib
+
assert oauthlib # avoid pyflakes error F401: import unused
_missing_oauthlib_dep = False
except ImportError:
_missing_oauthlib_dep = True

-M_PATH = 'cloudinit.url_helper.'
+M_PATH = "cloudinit.url_helper."


class TestOAuthHeaders(CiTestCase):
-
def test_oauth_headers_raises_not_implemented_when_oathlib_missing(self):
"""oauth_headers raises a NotImplemented error when oauth absent."""
- with mock.patch.dict('sys.modules', {'oauthlib': None}):
+ with mock.patch.dict("sys.modules", {"oauthlib": None}):
with self.assertRaises(NotImplementedError) as context_manager:
oauth_headers(1, 2, 3, 4, 5)
self.assertEqual(
- 'oauth support is not available',
- str(context_manager.exception))
+ "oauth support is not available", str(context_manager.exception)
+ )

@skipIf(_missing_oauthlib_dep, "No python-oauthlib dependency")
- @mock.patch('oauthlib.oauth1.Client')
+ @mock.patch("oauthlib.oauth1.Client")
def test_oauth_headers_calls_oathlibclient_when_available(self, m_client):
"""oauth_headers calls oauth1.Client.sign with the provided url."""
+
class fakeclient(object):
def sign(self, url):
# The first and 3rd item of the client.sign tuple are ignored
- return ('junk', url, 'junk2')
+ return ("junk", url, "junk2")

m_client.return_value = fakeclient()

return_value = oauth_headers(
- 'url', 'consumer_key', 'token_key', 'token_secret',
- 'consumer_secret')
- self.assertEqual('url', return_value)
+ "url",
+ "consumer_key",
+ "token_key",
+ "token_secret",
+ "consumer_secret",
+ )
+ self.assertEqual("url", return_value)


class TestReadFileOrUrl(CiTestCase):
@@ -59,45 +68,45 @@
"""Test that str(result.contents) on file is text version of contents.
It should not be "b'data'", but just "'data'" """
tmpf = self.tmp_path("myfile1")
- data = b'This is my file content\n'
+ data = b"This is my file content\n"
util.write_file(tmpf, data, omode="wb")
result = read_file_or_url("file://%s" % tmpf)
self.assertEqual(result.contents, data)
- self.assertEqual(str(result), data.decode('utf-8'))
+ self.assertEqual(str(result), data.decode("utf-8"))

@httpretty.activate
def test_read_file_or_url_str_from_url(self):
"""Test that str(result.contents) on url is text version of contents.
It should not be "b'data'", but just "'data'" """
- url = 'http://hostname/path'
- data = b'This is my url content\n'
+ url = "http://hostname/path"
+ data = b"This is my url content\n"
httpretty.register_uri(httpretty.GET, url, data)
result = read_file_or_url(url)
self.assertEqual(result.contents, data)
- self.assertEqual(str(result), data.decode('utf-8'))
+ self.assertEqual(str(result), data.decode("utf-8"))

@httpretty.activate
def test_read_file_or_url_str_from_url_redacting_headers_from_logs(self):
"""Headers are redacted from logs but unredacted in requests."""
- url = 'http://hostname/path'
- headers = {'sensitive': 'sekret', 'server': 'blah'}
+ url = "http://hostname/path"
+ headers = {"sensitive": "sekret", "server": "blah"}
httpretty.register_uri(httpretty.GET, url)
# By default, httpretty will log our request along with the header,
# so if we don't change this the secret will show up in the logs
- logging.getLogger('httpretty.core').setLevel(logging.CRITICAL)
+ logging.getLogger("httpretty.core").setLevel(logging.CRITICAL)

- read_file_or_url(url, headers=headers, headers_redact=['sensitive'])
+ read_file_or_url(url, headers=headers, headers_redact=["sensitive"])
logs = self.logs.getvalue()
for k in headers.keys():
self.assertEqual(headers[k], httpretty.last_request().headers[k])
self.assertIn(REDACTED, logs)
- self.assertNotIn('sekret', logs)
+ self.assertNotIn("sekret", logs)

@httpretty.activate
def test_read_file_or_url_str_from_url_redacts_noheaders(self):
"""When no headers_redact, header values are in logs and requests."""
- url = 'http://hostname/path'
- headers = {'sensitive': 'sekret', 'server': 'blah'}
+ url = "http://hostname/path"
+ headers = {"sensitive": "sekret", "server": "blah"}
httpretty.register_uri(httpretty.GET, url)

read_file_or_url(url, headers=headers)
@@ -105,21 +114,27 @@ class TestReadFileOrUrl(CiTestCase):
self.assertEqual(headers[k], httpretty.last_request().headers[k])
logs = self.logs.getvalue()
self.assertNotIn(REDACTED, logs)
- self.assertIn('sekret', logs)
+ self.assertIn("sekret", logs)

- @mock.patch(M_PATH + 'readurl')
+ @mock.patch(M_PATH + "readurl")
def test_read_file_or_url_passes_params_to_readurl(self, m_readurl):
"""read_file_or_url passes all params through to readurl."""
- url = 'http://hostname/path'
- response = 'This is my url content\n'
+ url = "http://hostname/path"
+ response = "This is my url content\n"
m_readurl.return_value = response
- params = {'url': url, 'timeout': 1, 'retries': 2,
- 'headers': {'somehdr': 'val'},
- 'data': 'data', 'sec_between': 1,
- 'ssl_details': {'cert_file': '/path/cert.pem'},
- 'headers_cb': 'headers_cb', 'exception_cb': 'exception_cb'}
+ params = {
+ "url": url,
+ "timeout": 1,
+ "retries": 2,
+ "headers": {"somehdr": "val"},
+ "data": "data",
+ "sec_between": 1,
+ "ssl_details": {"cert_file": "/path/cert.pem"},
+ "headers_cb": "headers_cb",
+ "exception_cb": "exception_cb",
+ }
self.assertEqual(response, read_file_or_url(**params))
- params.pop('url') # url is passed in as a positional arg
+ params.pop("url") # url is passed in as a positional arg
self.assertEqual([mock.call(url, **params)], m_readurl.call_args_list)

def test_wb_read_url_defaults_honored_by_read_file_or_url_callers(self):
@@ -129,7 +144,7 @@ class TestReadFileOrUrl(CiTestCase):
retries: 0, additional headers None beyond default, method: GET,
data: None, check_status: True and allow_redirects: True
"""
- url = 'http://hostname/path'
+ url = "http://hostname/path"

m_response = mock.MagicMock()

@@ -137,20 +152,26 @@ class TestReadFileOrUrl(CiTestCase):
@classmethod
def request(cls, **kwargs):
self.assertEqual(
- {'url': url, 'allow_redirects': True, 'method': 'GET',
- 'headers': {
- 'User-Agent': 'Cloud-Init/%s' % (
- version.version_string())}},
- kwargs)
+ {
+ "url": url,
+ "allow_redirects": True,
+ "method": "GET",
+ "headers": {
+ "User-Agent": "Cloud-Init/%s"
+ % (version.version_string())
+ },
+ },
+ kwargs,
+ )
return m_response

- with mock.patch(M_PATH + 'requests.Session') as m_session:
- error = requests.exceptions.HTTPError('broke')
+ with mock.patch(M_PATH + "requests.Session") as m_session:
+ error = requests.exceptions.HTTPError("broke")
m_session.side_effect = [error, FakeSession()]
# assert no retries and check_status == True
with self.assertRaises(UrlError) as context_manager:
response = read_file_or_url(url)
- self.assertEqual('broke', str(context_manager.exception))
+ self.assertEqual("broke", str(context_manager.exception))
# assert default headers, method, url and allow_redirects True
# Success on 2nd call with FakeSession
response = read_file_or_url(url)
@@ -158,21 +179,22 @@ class TestReadFileOrUrl(CiTestCase):


class TestRetryOnUrlExc(CiTestCase):
-
def test_do_not_retry_non_urlerror(self):
"""When exception is not UrlError return False."""
- myerror = IOError('something unexpected')
- self.assertFalse(retry_on_url_exc(msg='', exc=myerror))
+ myerror = IOError("something unexpected")
+ self.assertFalse(retry_on_url_exc(msg="", exc=myerror))

def test_perform_retries_on_not_found(self):
"""When exception is UrlError with a 404 status code return True."""
- myerror = UrlError(cause=RuntimeError(
- 'something was not found'), code=NOT_FOUND)
- self.assertTrue(retry_on_url_exc(msg='', exc=myerror))
+ myerror = UrlError(
+ cause=RuntimeError("something was not found"), code=NOT_FOUND
+ )
+ self.assertTrue(retry_on_url_exc(msg="", exc=myerror))

def test_perform_retries_on_timeout(self):
"""When exception is a requests.Timeout return True."""
- myerror = UrlError(cause=requests.Timeout('something timed out'))
- self.assertTrue(retry_on_url_exc(msg='', exc=myerror))
+ myerror = UrlError(cause=requests.Timeout("something timed out"))
+ self.assertTrue(retry_on_url_exc(msg="", exc=myerror))
+ # vi: ts=4 expandtab
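The url_helper hunks are also formatting-only. As the tests above show, read_file_or_url serves file:// paths and HTTP(S) URLs through one interface, and headers_redact keeps the named header values out of cloud-init's logs while still sending them on the wire. A sketch with a placeholder URL, as in the tests:

from cloudinit.url_helper import read_file_or_url

result = read_file_or_url(
    "http://hostname/path",  # placeholder URL
    headers={"sensitive": "sekret"},
    headers_redact=["sensitive"],
)
print(result.contents)  # raw bytes
print(str(result))  # decoded text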
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 61b9e303..e2bfe9d2 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -3,33 +3,30 @@
"""Tests for cloudinit.util"""

import base64
-import logging
-import json
-import platform
-import pytest
-
import io
+import json
+import logging
import os
+import platform
import re
import shutil
import stat
import tempfile
-import yaml
+from textwrap import dedent
from unittest import mock

-from cloudinit import subp
-from cloudinit import importer, util
-from tests.unittests import helpers
-
+import pytest
+import yaml
+from cloudinit import importer, subp, util
+from tests.unittests import helpers
from tests.unittests.helpers import CiTestCase
-from textwrap import dedent

LOG = logging.getLogger(__name__)

MOUNT_INFO = [
- '68 0 8:3 / / ro,relatime shared:1 - btrfs /dev/sda1 ro,attr2,inode64',
- '153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2',
+ "68 0 8:3 / / ro,relatime shared:1 - btrfs /dev/sda1 ro,attr2,inode64",
+ "153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2",
]

OS_RELEASE_SLES = dedent(
@@ -329,9 +326,9 @@ class FakeCloud(object):
def get_hostname(self, fqdn=None, metadata_only=None):
myargs = {}
if fqdn is not None:
- myargs['fqdn'] = fqdn
+ myargs["fqdn"] = fqdn
if metadata_only is not None:
- myargs['metadata_only'] = metadata_only
+ myargs["metadata_only"] = metadata_only
self.calls.append(myargs)
if fqdn:
return self.fqdn
@@ -340,34 +337,34 @@ class FakeCloud(object):


class TestUtil(CiTestCase):
def test_parse_mount_info_no_opts_no_arg(self):
- result = util.parse_mount_info('/home', MOUNT_INFO, LOG)
- self.assertEqual(('/dev/sda2', 'xfs', '/home'), result)
+ result = util.parse_mount_info("/home", MOUNT_INFO, LOG)
+ self.assertEqual(("/dev/sda2", "xfs", "/home"), result)

def test_parse_mount_info_no_opts_arg(self):
- result = util.parse_mount_info('/home', MOUNT_INFO, LOG, False)
- self.assertEqual(('/dev/sda2', 'xfs', '/home'), result)
+ result = util.parse_mount_info("/home", MOUNT_INFO, LOG, False)
+ self.assertEqual(("/dev/sda2", "xfs", "/home"), result)

def test_parse_mount_info_with_opts(self):
- result = util.parse_mount_info('/', MOUNT_INFO, LOG, True)
- self.assertEqual(('/dev/sda1', 'btrfs', '/', 'ro,relatime'), result)
+ result = util.parse_mount_info("/", MOUNT_INFO, LOG, True)
+ self.assertEqual(("/dev/sda1", "btrfs", "/", "ro,relatime"), result)

- @mock.patch('cloudinit.util.get_mount_info')
+ @mock.patch("cloudinit.util.get_mount_info")
def test_mount_is_rw(self, m_mount_info):
- m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'rw,relatime')
- is_rw = util.mount_is_read_write('/')
+ m_mount_info.return_value = ("/dev/sda1", "btrfs", "/", "rw,relatime")
+ is_rw = util.mount_is_read_write("/")
self.assertEqual(is_rw, True)

- @mock.patch('cloudinit.util.get_mount_info')
+ @mock.patch("cloudinit.util.get_mount_info")
def test_mount_is_ro(self, m_mount_info):
- m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'ro,relatime')
- is_rw = util.mount_is_read_write('/')
+ m_mount_info.return_value = ("/dev/sda1", "btrfs", "/", "ro,relatime")
+ is_rw = util.mount_is_read_write("/")
self.assertEqual(is_rw, False)


class TestUptime(CiTestCase):
- @mock.patch('cloudinit.util.boottime')
- @mock.patch('cloudinit.util.os.path.exists')
- @mock.patch('cloudinit.util.time.time')
+ @mock.patch("cloudinit.util.boottime")
+ @mock.patch("cloudinit.util.os.path.exists")
+ @mock.patch("cloudinit.util.time.time")
def test_uptime_non_linux_path(self, m_time, m_exists, m_boottime):
boottime = 1000.0
uptime = 10.0
@@ -382,24 +379,24 @@ class TestShellify(CiTestCase):
def test_input_dict_raises_type_error(self):
self.assertRaisesRegex(
TypeError,
- 'Input.*was.*dict.*xpected',
+ "Input.*was.*dict.*xpected",
util.shellify,
- {'mykey': 'myval'},
+ {"mykey": "myval"},
)

def test_input_str_raises_type_error(self):
self.assertRaisesRegex(
- TypeError, 'Input.*was.*str.*xpected', util.shellify, "foobar"
+ TypeError, "Input.*was.*str.*xpected", util.shellify, "foobar"
)

def test_value_with_int_raises_type_error(self):
self.assertRaisesRegex(
- TypeError, 'shellify.*int', util.shellify, ["foo", 1]
+ TypeError, "shellify.*int", util.shellify, ["foo", 1]
)

def test_supports_strings_and_lists(self):
self.assertEqual(
- '\n'.join(
+ "\n".join(
[
"#!/bin/sh",
"echo hi mom",
@@ -409,13 +406,13 @@ class TestShellify(CiTestCase):
]
),
util.shellify(
- ["echo hi mom", ["echo", "hi dad"], ('echo', 'hi', 'sis')]
+ ["echo hi mom", ["echo", "hi dad"], ("echo", "hi", "sis")]
),
)

def test_supports_comments(self):
self.assertEqual(
- '\n'.join(["#!/bin/sh", "echo start", "echo end", ""]),
+ "\n".join(["#!/bin/sh", "echo start", "echo end", ""]),
util.shellify(["echo start", None, "echo end"]),
)
@@ -424,58 +421,58 @@ class TestGetHostnameFqdn(CiTestCase):
def test_get_hostname_fqdn_from_only_cfg_fqdn(self):
"""When cfg only has the fqdn key, derive hostname and fqdn from it."""
hostname, fqdn = util.get_hostname_fqdn(
- cfg={'fqdn': 'myhost.domain.com'}, cloud=None
+ cfg={"fqdn": "myhost.domain.com"}, cloud=None
)
- self.assertEqual('myhost', hostname)
- self.assertEqual('myhost.domain.com', fqdn)
+ self.assertEqual("myhost", hostname)
+ self.assertEqual("myhost.domain.com", fqdn)

def test_get_hostname_fqdn_from_cfg_fqdn_and_hostname(self):
"""When cfg has both fqdn and hostname keys, return them."""
hostname, fqdn = util.get_hostname_fqdn(
- cfg={'fqdn': 'myhost.domain.com', 'hostname': 'other'}, cloud=None
+ cfg={"fqdn": "myhost.domain.com", "hostname": "other"}, cloud=None
)
- self.assertEqual('other', hostname)
- self.assertEqual('myhost.domain.com', fqdn)
+ self.assertEqual("other", hostname)
+ self.assertEqual("myhost.domain.com", fqdn)

def test_get_hostname_fqdn_from_cfg_hostname_with_domain(self):
"""When cfg has only hostname key which represents a fqdn, use that."""
hostname, fqdn = util.get_hostname_fqdn(
- cfg={'hostname': 'myhost.domain.com'}, cloud=None
+ cfg={"hostname": "myhost.domain.com"}, cloud=None
)
- self.assertEqual('myhost', hostname)
- self.assertEqual('myhost.domain.com', fqdn)
+ self.assertEqual("myhost", hostname)
+ self.assertEqual("myhost.domain.com", fqdn)

def test_get_hostname_fqdn_from_cfg_hostname_without_domain(self):
"""When cfg has a hostname without a '.' query cloud.get_hostname."""
- mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com')
+ mycloud = FakeCloud("cloudhost", "cloudhost.mycloud.com")
hostname, fqdn = util.get_hostname_fqdn(
- cfg={'hostname': 'myhost'}, cloud=mycloud
+ cfg={"hostname": "myhost"}, cloud=mycloud
)
- self.assertEqual('myhost', hostname)
- self.assertEqual('cloudhost.mycloud.com', fqdn)
+ self.assertEqual("myhost", hostname)
+ self.assertEqual("cloudhost.mycloud.com", fqdn)
self.assertEqual(
- [{'fqdn': True, 'metadata_only': False}], mycloud.calls
+ [{"fqdn": True, "metadata_only": False}], mycloud.calls
)

def test_get_hostname_fqdn_from_without_fqdn_or_hostname(self):
"""When cfg has neither hostname nor fqdn cloud.get_hostname."""
- mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com')
+ mycloud = FakeCloud("cloudhost", "cloudhost.mycloud.com")
hostname, fqdn = util.get_hostname_fqdn(cfg={}, cloud=mycloud)
- self.assertEqual('cloudhost', hostname)
- self.assertEqual('cloudhost.mycloud.com', fqdn)
+ self.assertEqual("cloudhost", hostname)
+ self.assertEqual("cloudhost.mycloud.com", fqdn)
self.assertEqual(
- [{'fqdn': True, 'metadata_only': False}, {'metadata_only': False}],
+ [{"fqdn": True, "metadata_only": False}, {"metadata_only": False}],
mycloud.calls,
)

def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self):
"""Calls to cloud.get_hostname pass the metadata_only parameter."""
- mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com')
+ mycloud = FakeCloud("cloudhost", "cloudhost.mycloud.com")
_hn, _fqdn = util.get_hostname_fqdn(
cfg={}, cloud=mycloud, metadata_only=True
)
self.assertEqual(
- [{'fqdn': True, 'metadata_only': True}, {'metadata_only': True}],
+ [{"fqdn": True, "metadata_only": True}, {"metadata_only": True}],
mycloud.calls,
)
@@ -565,19 +562,19 @@ class TestBlkid(CiTestCase):
)


-@mock.patch('cloudinit.subp.subp')
+@mock.patch("cloudinit.subp.subp")
class TestUdevadmSettle(CiTestCase):
def test_with_no_params(self, m_subp):
"""called with no parameters."""
util.udevadm_settle()
- m_subp.called_once_with(mock.call(['udevadm', 'settle']))
+ m_subp.called_once_with(mock.call(["udevadm", "settle"]))

def test_with_exists_and_not_exists(self, m_subp):
"""with exists=file where file does not exist should invoke subp."""
mydev = self.tmp_path("mydev")
util.udevadm_settle(exists=mydev)
m_subp.called_once_with(
- ['udevadm', 'settle', '--exit-if-exists=%s' % mydev]
+ ["udevadm", "settle", "--exit-if-exists=%s" % mydev]
)

def test_with_exists_and_file_exists(self, m_subp):
@@ -592,7 +589,7 @@ class TestUdevadmSettle(CiTestCase):
timeout = 9
util.udevadm_settle(timeout=timeout)
m_subp.called_once_with(
- ['udevadm', 'settle', '--timeout=%s' % timeout]
+ ["udevadm", "settle", "--timeout=%s" % timeout]
)

def test_with_timeout_string(self, m_subp):
@@ -600,7 +597,7 @@ class TestUdevadmSettle(CiTestCase):
timeout = "555"
util.udevadm_settle(timeout=timeout)
m_subp.assert_called_once_with(
- ['udevadm', 'settle', '--timeout=%s' % timeout]
+ ["udevadm", "settle", "--timeout=%s" % timeout]
)

def test_with_exists_and_timeout(self, m_subp):
@@ -610,10 +607,10 @@ class TestUdevadmSettle(CiTestCase):
util.udevadm_settle(exists=mydev)
m_subp.called_once_with(
[
- 'udevadm',
- 'settle',
- '--exit-if-exists=%s' % mydev,
- '--timeout=%s' % timeout,
+ "udevadm",
+ "settle",
+ "--exit-if-exists=%s" % mydev,
+ "--timeout=%s" % timeout,
]
)
@@ -622,7 +619,7 @@ class TestUdevadmSettle(CiTestCase):
self.assertRaises(subp.ProcessExecutionError, util.udevadm_settle)


-@mock.patch('os.path.exists')
+@mock.patch("os.path.exists")
class TestGetLinuxDistro(CiTestCase):
def setUp(self):
# python2 has no lru_cache, and therefore, no cache_clear()
@@ -632,36 +629,36 @@ class TestGetLinuxDistro(CiTestCase):
@classmethod
def os_release_exists(self, path):
"""Side effect function"""
- if path == '/etc/os-release':
+ if path == "/etc/os-release":
return 1

@classmethod
def redhat_release_exists(self, path):
"""Side effect function"""
- if path == '/etc/redhat-release':
+ if path == "/etc/redhat-release":
return 1

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists):
"""Verify we get the correct name if the os-release file has
the distro name in quotes"""
m_os_release.return_value = OS_RELEASE_SLES
m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('sles', '12.3', platform.machine()), dist)
+ self.assertEqual(("sles", "12.3", platform.machine()), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_distro_bare_name(self, m_os_release, m_path_exists):
"""Verify we get the correct name if the os-release file does not
have the distro name in quotes"""
m_os_release.return_value = OS_RELEASE_UBUNTU
m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('ubuntu', '16.04', 'xenial'), dist)
+ self.assertEqual(("ubuntu", "16.04", "xenial"), dist)

- @mock.patch('platform.system')
- @mock.patch('platform.release')
- @mock.patch('cloudinit.util._parse_redhat_release')
+ @mock.patch("platform.system")
+ @mock.patch("platform.release")
+ @mock.patch("cloudinit.util._parse_redhat_release")
def test_get_linux_freebsd(
self,
m_parse_redhat_release,
@@ -671,192 +668,194 @@ class TestGetLinuxDistro(CiTestCase):
):
"""Verify we get the correct name and release name on FreeBSD."""
m_path_exists.return_value = False
- m_platform_release.return_value = '12.0-RELEASE-p10'
- m_platform_system.return_value = 'FreeBSD'
+ m_platform_release.return_value = "12.0-RELEASE-p10"
+ m_platform_system.return_value = "FreeBSD"
m_parse_redhat_release.return_value = {}
util.is_BSD.cache_clear()
dist = util.get_linux_distro()
- self.assertEqual(('freebsd', '12.0-RELEASE-p10', ''), dist)
+ self.assertEqual(("freebsd", "12.0-RELEASE-p10", ""), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_centos6(self, m_os_release, m_path_exists):
"""Verify we get the correct name and release name on CentOS 6."""
m_os_release.return_value = REDHAT_RELEASE_CENTOS_6
m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('centos', '6.10', 'Final'), dist)
+ self.assertEqual(("centos", "6.10", "Final"), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_centos7_redhat_release(self, m_os_release, m_exists):
"""Verify the correct release info on CentOS 7 without os-release."""
m_os_release.return_value = REDHAT_RELEASE_CENTOS_7
m_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('centos', '7.5.1804', 'Core'), dist)
+ self.assertEqual(("centos", "7.5.1804", "Core"), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_redhat7_osrelease(self, m_os_release, m_path_exists):
"""Verify redhat 7 read from os-release."""
m_os_release.return_value = OS_RELEASE_REDHAT_7
m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('redhat', '7.5', 'Maipo'), dist)
+ self.assertEqual(("redhat", "7.5", "Maipo"), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_redhat7_rhrelease(self, m_os_release, m_path_exists):
"""Verify redhat 7 read from redhat-release."""
m_os_release.return_value = REDHAT_RELEASE_REDHAT_7
m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('redhat', '7.5', 'Maipo'), dist)
+ self.assertEqual(("redhat", "7.5", "Maipo"), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_redhat6_rhrelease(self, m_os_release, m_path_exists):
"""Verify redhat 6 read from redhat-release."""
m_os_release.return_value = REDHAT_RELEASE_REDHAT_6
m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('redhat', '6.10', 'Santiago'), dist)
+ self.assertEqual(("redhat", "6.10", "Santiago"), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_copr_centos(self, m_os_release, m_path_exists):
"""Verify we get the correct name and release name on COPR CentOS."""
m_os_release.return_value = OS_RELEASE_CENTOS
m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('centos', '7', 'Core'), dist)
+ self.assertEqual(("centos", "7", "Core"), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_almalinux8_rhrelease(self, m_os_release, m_path_exists):
"""Verify almalinux 8 read from redhat-release."""
m_os_release.return_value = REDHAT_RELEASE_ALMALINUX_8
m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist)
+ self.assertEqual(("almalinux", "8.3", "Purple Manul"), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_almalinux8_osrelease(self, m_os_release, m_path_exists):
"""Verify almalinux 8 read from os-release."""
m_os_release.return_value = OS_RELEASE_ALMALINUX_8
m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist)
+ self.assertEqual(("almalinux", "8.3", "Purple Manul"), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_eurolinux7_rhrelease(self, m_os_release, m_path_exists):
"""Verify eurolinux 7 read from redhat-release."""
m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_7
m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('eurolinux', '7.9', 'Minsk'), dist)
+ self.assertEqual(("eurolinux", "7.9", "Minsk"), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_eurolinux7_osrelease(self, m_os_release, m_path_exists):
"""Verify eurolinux 7 read from os-release."""
m_os_release.return_value = OS_RELEASE_EUROLINUX_7
m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('eurolinux', '7.9', 'Minsk'), dist)
+ self.assertEqual(("eurolinux", "7.9", "Minsk"), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_eurolinux8_rhrelease(self, m_os_release, m_path_exists):
"""Verify eurolinux 8 read from redhat-release."""
m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_8
m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist)
+ self.assertEqual(("eurolinux", "8.4", "Vaduz"), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_eurolinux8_osrelease(self, m_os_release, m_path_exists):
"""Verify eurolinux 8 read from os-release."""
m_os_release.return_value = OS_RELEASE_EUROLINUX_8
m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist)
+ self.assertEqual(("eurolinux", "8.4", "Vaduz"), dist)

- @mock.patch('cloudinit.util.load_file')
- def test_get_linux_miraclelinux8_rhrelease(self, m_os_release,
- m_path_exists):
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_miraclelinux8_rhrelease(
+ self, m_os_release, m_path_exists
+ ):
"""Verify miraclelinux 8 read from redhat-release."""
m_os_release.return_value = REDHAT_RELEASE_MIRACLELINUX_8
m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('miracle', '8.4', 'Peony'), dist)
+ self.assertEqual(("miracle", "8.4", "Peony"), dist)

- @mock.patch('cloudinit.util.load_file')
- def test_get_linux_miraclelinux8_osrelease(self, m_os_release,
- m_path_exists):
+ @mock.patch("cloudinit.util.load_file")
+ def test_get_linux_miraclelinux8_osrelease(
+ self, m_os_release, m_path_exists
+ ):
"""Verify miraclelinux 8 read from os-release."""
m_os_release.return_value = OS_RELEASE_MIRACLELINUX_8
m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('miraclelinux', '8', 'Peony'), dist)
+ self.assertEqual(("miraclelinux", "8", "Peony"), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists):
"""Verify rocky linux 8 read from redhat-release."""
m_os_release.return_value = REDHAT_RELEASE_ROCKY_8
m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('rocky', '8.3', 'Green Obsidian'), dist)
+ self.assertEqual(("rocky", "8.3", "Green Obsidian"), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_rocky8_osrelease(self, m_os_release, m_path_exists):
"""Verify rocky linux 8 read from os-release."""
m_os_release.return_value = OS_RELEASE_ROCKY_8
m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('rocky', '8.3', 'Green Obsidian'), dist)
+ self.assertEqual(("rocky", "8.3", "Green Obsidian"), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_virtuozzo8_rhrelease(self, m_os_release, m_path_exists):
"""Verify virtuozzo linux 8 read from redhat-release."""
m_os_release.return_value = REDHAT_RELEASE_VIRTUOZZO_8
m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('virtuozzo', '8', 'Virtuozzo Linux'), dist)
+ self.assertEqual(("virtuozzo", "8", "Virtuozzo Linux"), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_virtuozzo8_osrelease(self, m_os_release, m_path_exists):
"""Verify virtuozzo linux 8 read from os-release."""
m_os_release.return_value = OS_RELEASE_VIRTUOZZO_8
m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('virtuozzo', '8', 'Virtuozzo Linux'), dist)
+ self.assertEqual(("virtuozzo", "8", "Virtuozzo Linux"), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_cloud8_rhrelease(self, m_os_release, m_path_exists):
"""Verify cloudlinux 8 read from redhat-release."""
m_os_release.return_value = REDHAT_RELEASE_CLOUDLINUX_8
m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('cloudlinux', '8.4', 'Valery Rozhdestvensky'), dist)
+ self.assertEqual(("cloudlinux", "8.4", "Valery Rozhdestvensky"), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_cloud8_osrelease(self, m_os_release, m_path_exists):
"""Verify cloudlinux 8 read from os-release."""
m_os_release.return_value = OS_RELEASE_CLOUDLINUX_8
m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('cloudlinux', '8.4', 'Valery Rozhdestvensky'), dist)
+ self.assertEqual(("cloudlinux", "8.4", "Valery Rozhdestvensky"), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_debian(self, m_os_release, m_path_exists):
"""Verify we get the correct name and release name on Debian."""
m_os_release.return_value = OS_RELEASE_DEBIAN
m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('debian', '9', 'stretch'), dist)
+ self.assertEqual(("debian", "9", "stretch"), dist)
- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_openeuler(self, m_os_release, m_path_exists):
"""Verify get the correct name and release name on Openeuler."""
m_os_release.return_value = OS_RELEASE_OPENEULER_20
m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('openEuler', '20.03', 'LTS-SP2'), dist)
+ self.assertEqual(("openEuler", "20.03", "LTS-SP2"), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_opensuse(self, m_os_release, m_path_exists):
"""Verify we get the correct name and machine arch on openSUSE
prior to openSUSE Leap 15.
@@ -864,9 +863,9 @@ class TestGetLinuxDistro(CiTestCase):
m_os_release.return_value = OS_RELEASE_OPENSUSE
m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('opensuse', '42.3', platform.machine()), dist)
+ self.assertEqual(("opensuse", "42.3", platform.machine()), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_opensuse_l15(self, m_os_release, m_path_exists):
"""Verify we get the correct name and machine arch on openSUSE
for openSUSE Leap 15.0 and later.
@@ -874,9 +873,9 @@ class TestGetLinuxDistro(CiTestCase):
m_os_release.return_value = OS_RELEASE_OPENSUSE_L15
m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('opensuse-leap', '15.0', platform.machine()), dist)
+ self.assertEqual(("opensuse-leap", "15.0", platform.machine()), dist)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_opensuse_tw(self, m_os_release, m_path_exists):
"""Verify we get the correct name and machine arch on openSUSE
for openSUSE Tumbleweed
@@ -885,31 +884,31 @@ class TestGetLinuxDistro(CiTestCase):
m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
dist = util.get_linux_distro()
self.assertEqual(
- ('opensuse-tumbleweed', '20180920', platform.machine()), dist
+ ("opensuse-tumbleweed", "20180920", platform.machine()), dist
)

- @mock.patch('cloudinit.util.load_file')
+ @mock.patch("cloudinit.util.load_file")
def test_get_linux_photon_os_release(self, m_os_release, m_path_exists):
"""Verify we get the correct name and machine arch on PhotonOS"""
m_os_release.return_value = OS_RELEASE_PHOTON
m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
dist = util.get_linux_distro()
- self.assertEqual(('photon', '4.0', 'VMware Photon OS/Linux'), dist)
+ self.assertEqual(("photon", "4.0", "VMware Photon OS/Linux"), dist)

- @mock.patch('platform.system')
- @mock.patch('platform.dist', create=True)
+ @mock.patch("platform.system")
+ @mock.patch("platform.dist", create=True)
def test_get_linux_distro_no_data(
self, m_platform_dist, m_platform_system, m_path_exists
):
"""Verify we get no information if os-release does not exist"""
- m_platform_dist.return_value = ('', '', '')
+ m_platform_dist.return_value = ("", "", "")
m_platform_system.return_value = "Linux"
m_path_exists.return_value = 0
dist = util.get_linux_distro()
- self.assertEqual(('', '', ''), dist)
+ self.assertEqual(("", "", ""), dist)

- @mock.patch('platform.system')
- @mock.patch('platform.dist', create=True)
+ @mock.patch("platform.system")
+ @mock.patch("platform.dist", create=True)
def test_get_linux_distro_no_impl(
self, m_platform_dist, m_platform_system, m_path_exists
):
@@ -919,55 +918,55 @@ class TestGetLinuxDistro(CiTestCase):
m_platform_system.return_value = "Linux"
m_path_exists.return_value = 0
dist = util.get_linux_distro()
- self.assertEqual(('', '', ''), dist)
+ self.assertEqual(("", "", ""), dist)

- @mock.patch('platform.system')
- @mock.patch('platform.dist', create=True)
+ @mock.patch("platform.system")
+ @mock.patch("platform.dist", create=True)
def test_get_linux_distro_plat_data(
self, m_platform_dist, m_platform_system, m_path_exists
):
"""Verify we get the correct platform information"""
- m_platform_dist.return_value = ('foo', '1.1', 'aarch64')
+ m_platform_dist.return_value = ("foo", "1.1", "aarch64")
m_platform_system.return_value = "Linux"
m_path_exists.return_value = 0
dist = util.get_linux_distro()
- self.assertEqual(('foo', '1.1', 'aarch64'), dist)
+ self.assertEqual(("foo", "1.1", "aarch64"), dist)


class TestGetVariant:
@pytest.mark.parametrize(
- 'info, expected_variant',
+ "info, expected_variant",
[
- ({'system': 'Linux', 'dist': ('almalinux',)}, 'almalinux'),
- ({'system': 'linux', 'dist': ('alpine',)}, 'alpine'),
- ({'system': 'linux', 'dist': ('arch',)}, 'arch'),
- ({'system': 'linux', 'dist': ('centos',)}, 'centos'),
- ({'system': 'linux', 'dist': ('cloudlinux',)}, 'cloudlinux'),
- ({'system': 'linux', 'dist': ('debian',)}, 'debian'),
- ({'system': 'linux', 'dist': ('eurolinux',)}, 'eurolinux'),
- ({'system': 'linux', 'dist': ('fedora',)}, 'fedora'),
- ({'system': 'linux', 'dist': ('openEuler',)}, 'openeuler'),
- ({'system': 'linux', 'dist': ('photon',)}, 'photon'),
- ({'system': 'linux', 'dist': ('rhel',)}, 'rhel'),
- ({'system': 'linux', 'dist': ('rocky',)}, 'rocky'),
- ({'system': 'linux', 'dist': ('suse',)}, 'suse'),
- ({'system': 'linux', 'dist': ('virtuozzo',)}, 'virtuozzo'),
- ({'system': 'linux', 'dist': ('ubuntu',)}, 'ubuntu'),
- ({'system': 'linux', 'dist': ('linuxmint',)}, 'ubuntu'),
- ({'system': 'linux', 'dist': ('mint',)}, 'ubuntu'),
- ({'system': 'linux', 'dist': ('redhat',)}, 'rhel'),
- ({'system': 'linux', 'dist': ('opensuse',)}, 'suse'),
- ({'system': 'linux', 'dist': ('opensuse-tumbleweed',)}, 'suse'),
- ({'system': 'linux', 'dist': ('opensuse-leap',)}, 'suse'),
- ({'system': 'linux', 'dist': ('sles',)}, 'suse'),
- ({'system': 'linux', 'dist': ('sle_hpc',)}, 'suse'),
- ({'system': 'linux', 'dist': ('my_distro',)}, 'linux'),
- ({'system': 'Windows', 'dist': ('dontcare',)}, 'windows'),
- ({'system': 'Darwin', 'dist': ('dontcare',)}, 'darwin'),
- ({'system': 'Freebsd', 'dist': ('dontcare',)}, 'freebsd'),
- ({'system': 'Netbsd', 'dist': ('dontcare',)}, 'netbsd'),
- ({'system': 'Openbsd', 'dist': ('dontcare',)}, 'openbsd'),
- ({'system': 'Dragonfly', 'dist': ('dontcare',)}, 'dragonfly'),
+ ({"system": "Linux", "dist": ("almalinux",)}, "almalinux"),
+ ({"system": "linux", "dist": ("alpine",)}, "alpine"),
+ ({"system": "linux", "dist": ("arch",)}, "arch"),
+ ({"system": "linux", "dist": ("centos",)}, "centos"),
+ ({"system": "linux", "dist": ("cloudlinux",)}, "cloudlinux"),
+ ({"system": "linux", "dist": ("debian",)}, "debian"),
+ ({"system": "linux", "dist": ("eurolinux",)}, "eurolinux"),
+ ({"system": "linux", "dist": ("fedora",)}, "fedora"),
+ ({"system": "linux", "dist": ("openEuler",)}, "openeuler"),
+ ({"system": "linux", "dist": ("photon",)}, "photon"),
+ ({"system": "linux", "dist": ("rhel",)}, "rhel"),
+ ({"system": "linux", "dist": ("rocky",)}, "rocky"),
+ ({"system": "linux", "dist": ("suse",)}, "suse"),
+ ({"system": "linux", "dist": ("virtuozzo",)}, "virtuozzo"),
+ ({"system": "linux", "dist": ("ubuntu",)}, "ubuntu"),
+ ({"system": "linux", "dist": ("linuxmint",)}, "ubuntu"),
+ ({"system": "linux", "dist": ("mint",)}, "ubuntu"),
+ ({"system": "linux", "dist": ("redhat",)}, "rhel"),
+ ({"system": "linux", "dist": ("opensuse",)}, "suse"),
+ ({"system": "linux", "dist": ("opensuse-tumbleweed",)}, "suse"),
+ ({"system": "linux", "dist": ("opensuse-leap",)}, "suse"),
+ ({"system": "linux", "dist": ("sles",)}, "suse"),
+ ({"system": "linux", "dist": ("sle_hpc",)}, "suse"),
+ ({"system": "linux", "dist": ("my_distro",)}, "linux"),
+ ({"system": "Windows", "dist": ("dontcare",)}, "windows"),
+ ({"system": "Darwin", "dist": ("dontcare",)}, "darwin"),
+ ({"system": "Freebsd", "dist": ("dontcare",)}, "freebsd"),
+ ({"system": "Netbsd", "dist": ("dontcare",)}, "netbsd"),
+ ({"system": "Openbsd", "dist": ("dontcare",)}, "openbsd"),
+ ({"system": "Dragonfly", "dist": ("dontcare",)}, "dragonfly"),
],
)
def test_get_variant(self, info, expected_variant):
@@ -978,41 +977,42 @@ class TestGetVariant:


class TestJsonDumps(CiTestCase):
def test_is_str(self):
"""json_dumps should return a string."""
- self.assertTrue(isinstance(util.json_dumps({'abc': '123'}), str))
+ self.assertTrue(isinstance(util.json_dumps({"abc": "123"}), str))

def test_utf8(self):
- smiley = '\\ud83d\\ude03'
+ smiley = "\\ud83d\\ude03"
self.assertEqual(
- {'smiley': smiley}, json.loads(util.json_dumps({'smiley': smiley}))
+ {"smiley": smiley}, json.loads(util.json_dumps({"smiley": smiley}))
)

def test_non_utf8(self):
- blob = b'\xba\x03Qx-#y\xea'
+ blob = b"\xba\x03Qx-#y\xea"
self.assertEqual(
- {'blob': 'ci-b64:' + base64.b64encode(blob).decode('utf-8')},
- json.loads(util.json_dumps({'blob': blob})),
+ {"blob": "ci-b64:" + base64.b64encode(blob).decode("utf-8")},
+ json.loads(util.json_dumps({"blob": blob})),
)


-@mock.patch('os.path.exists')
+@mock.patch("os.path.exists")
class TestIsLXD(CiTestCase):
def test_is_lxd_true_on_sock_device(self, m_exists):
"""When lxd's /dev/lxd/sock exists, is_lxd returns true."""
m_exists.return_value = True
self.assertTrue(util.is_lxd())
- m_exists.assert_called_once_with('/dev/lxd/sock')
+ m_exists.assert_called_once_with("/dev/lxd/sock")

def test_is_lxd_false_when_sock_device_absent(self, m_exists):
"""When lxd's /dev/lxd/sock is absent, is_lxd returns false."""
m_exists.return_value = False
self.assertFalse(util.is_lxd())
- m_exists.assert_called_once_with('/dev/lxd/sock')
+ m_exists.assert_called_once_with("/dev/lxd/sock")


class TestReadCcFromCmdline:
if hasattr(pytest, "param"):
random_string = pytest.param(
- CiTestCase.random_string(), None, id="random_string")
+ CiTestCase.random_string(), None, id="random_string"
+ )
else:
random_string = (CiTestCase.random_string(), None)
@@ -1022,55 +1022,51 @@ class TestReadCcFromCmdline:
# Return None if cmdline has no cc:<YAML>end_cc content.
random_string,
# Return None if YAML content is empty string.
- ('foo cc: end_cc bar', None),
+ ("foo cc: end_cc bar", None),
# Return expected dictionary without trailing end_cc marker.
- ('foo cc: ssh_pwauth: true', {'ssh_pwauth': True}),
+ ("foo cc: ssh_pwauth: true", {"ssh_pwauth": True}),
# Return expected dictionary w escaped newline and no end_cc.
- ('foo cc: ssh_pwauth: true\\n', {'ssh_pwauth': True}),
+ ("foo cc: ssh_pwauth: true\\n", {"ssh_pwauth": True}),
# Return expected dictionary of yaml between cc: and end_cc.
- ('foo cc: ssh_pwauth: true end_cc bar', {'ssh_pwauth': True}),
+ ("foo cc: ssh_pwauth: true end_cc bar", {"ssh_pwauth": True}),
# Return dict with list value w escaped newline, no end_cc.
( - 'cc: ssh_import_id: [smoser, kirkland]\\n', - {'ssh_import_id': ['smoser', 'kirkland']}, + "cc: ssh_import_id: [smoser, kirkland]\\n", + {"ssh_import_id": ["smoser", "kirkland"]}, ), # Parse urlencoded brackets in yaml content. ( - 'cc: ssh_import_id: %5Bsmoser, kirkland%5D end_cc', - {'ssh_import_id': ['smoser', 'kirkland']}, + "cc: ssh_import_id: %5Bsmoser, kirkland%5D end_cc", + {"ssh_import_id": ["smoser", "kirkland"]}, ), # Parse complete urlencoded yaml content. ( - 'cc: ssh_import_id%3A%20%5Buser1%2C%20user2%5D end_cc', - {'ssh_import_id': ['user1', 'user2']}, + "cc: ssh_import_id%3A%20%5Buser1%2C%20user2%5D end_cc", + {"ssh_import_id": ["user1", "user2"]}, ), # Parse nested dictionary in yaml content. ( - 'cc: ntp: {enabled: true, ntp_client: myclient} end_cc', - {'ntp': {'enabled': True, 'ntp_client': 'myclient'}}, + "cc: ntp: {enabled: true, ntp_client: myclient} end_cc", + {"ntp": {"enabled": True, "ntp_client": "myclient"}}, ), # Parse single mapping value in yaml content. - ('cc: ssh_import_id: smoser end_cc', {'ssh_import_id': 'smoser'}), + ("cc: ssh_import_id: smoser end_cc", {"ssh_import_id": "smoser"}), # Parse multiline content with multiple mapping and nested lists. ( - ( - 'cc: ssh_import_id: [smoser, bob]\\n' - 'runcmd: [ [ ls, -l ], echo hi ] end_cc' - ), + "cc: ssh_import_id: [smoser, bob]\\n" + "runcmd: [ [ ls, -l ], echo hi ] end_cc", { - 'ssh_import_id': ['smoser', 'bob'], - 'runcmd': [['ls', '-l'], 'echo hi'], + "ssh_import_id": ["smoser", "bob"], + "runcmd": [["ls", "-l"], "echo hi"], }, ), # Parse multiline encoded content w/ mappings and nested lists. ( - ( - 'cc: ssh_import_id: %5Bsmoser, bob%5D\\n' - 'runcmd: [ [ ls, -l ], echo hi ] end_cc' - ), + "cc: ssh_import_id: %5Bsmoser, bob%5D\\n" + "runcmd: [ [ ls, -l ], echo hi ] end_cc", { - 'ssh_import_id': ['smoser', 'bob'], - 'runcmd': [['ls', '-l'], 'echo hi'], + "ssh_import_id": ["smoser", "bob"], + "runcmd": [["ls", "-l"], "echo hi"], }, ), # test encoded escaped newlines work. @@ -1079,17 +1075,13 @@ class TestReadCcFromCmdline: # 'ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ]' ( ( - 'cc: ' - + ( - 'ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%5Cn' - 'runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C' - '%20echo%20hi%20%5D' - ) - + ' end_cc' + "cc: " + "ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%5Cn" + "runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C" + "%20echo%20hi%20%5D" + " end_cc" ), { - 'ssh_import_id': ['smoser', 'bob'], - 'runcmd': [['ls', '-l'], 'echo hi'], + "ssh_import_id": ["smoser", "bob"], + "runcmd": [["ls", "-l"], "echo hi"], }, ), # test encoded newlines work. @@ -1098,34 +1090,26 @@ class TestReadCcFromCmdline: # 'ssh_import_id: [smoser, bob]\nruncmd: [ [ ls, -l ], echo hi ]' ( ( - "cc: " - + ( - 'ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%0A' - 'runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C' - '%20echo%20hi%20%5D' - ) - + ' end_cc' + "cc: " + "ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%0A" + "runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C" + "%20echo%20hi%20%5D" + " end_cc" ), { - 'ssh_import_id': ['smoser', 'bob'], - 'runcmd': [['ls', '-l'], 'echo hi'], + "ssh_import_id": ["smoser", "bob"], + "runcmd": [["ls", "-l"], "echo hi"], }, ), # Parse and merge multiple yaml content sections. 
( - ( - 'cc:ssh_import_id: [smoser, bob] end_cc ' - 'cc: runcmd: [ [ ls, -l ] ] end_cc' - ), - {'ssh_import_id': ['smoser', 'bob'], 'runcmd': [['ls', '-l']]}, + "cc:ssh_import_id: [smoser, bob] end_cc " + "cc: runcmd: [ [ ls, -l ] ] end_cc", + {"ssh_import_id": ["smoser", "bob"], "runcmd": [["ls", "-l"]]}, ), # Parse and merge multiple encoded yaml content sections. ( - ( - 'cc:ssh_import_id%3A%20%5Bsmoser%5D end_cc ' - 'cc:runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%20%5D end_cc' - ), - {'ssh_import_id': ['smoser'], 'runcmd': [['ls', '-l']]}, + "cc:ssh_import_id%3A%20%5Bsmoser%5D end_cc " + "cc:runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%20%5D end_cc", + {"ssh_import_id": ["smoser"], "runcmd": [["ls", "-l"]]}, ), ], ) @@ -1189,7 +1173,7 @@ class TestMountCb: ) callback = mock.Mock(autospec=True) - util.mount_cb('/dev/fake0', callback, mtype=mtype) + util.mount_cb("/dev/fake0", callback, mtype=mtype) assert ( mock.call( [ @@ -1473,7 +1457,7 @@ class TestWriteFile(helpers.TestCase): path = os.path.join(self.tmp, "NewFile.txt") contents = "Hey there" - open(path, 'w').close() + open(path, "w").close() os.chmod(path, 0o666) util.write_file(path, contents, preserve_mode=True) @@ -1508,7 +1492,7 @@ class TestWriteFile(helpers.TestCase): fake_se = FakeSelinux(my_file) with mock.patch.object( - importer, 'import_module', return_value=fake_se + importer, "import_module", return_value=fake_se ) as mockobj: with util.SeLinuxGuard(my_file) as is_on: self.assertTrue(is_on) @@ -1516,7 +1500,7 @@ class TestWriteFile(helpers.TestCase): self.assertEqual(1, len(fake_se.restored)) self.assertEqual(my_file, fake_se.restored[0]) - mockobj.assert_called_once_with('selinux') + mockobj.assert_called_once_with("selinux") class TestDeleteDirContents(helpers.TestCase): @@ -1587,7 +1571,7 @@ class TestDeleteDirContents(helpers.TestCase): class TestKeyValStrings(helpers.TestCase): def test_keyval_str_to_dict(self): - expected = {'1': 'one', '2': 'one+one', 'ro': True} + expected = {"1": "one", "2": "one+one", "ro": True} cmdline = "1=one ro 2=one+one" self.assertEqual(expected, util.keyval_str_to_dict(cmdline)) @@ -1595,7 +1579,7 @@ class TestKeyValStrings(helpers.TestCase): class TestGetCmdline(helpers.TestCase): def test_cmdline_reads_debug_env(self): with mock.patch.dict( - "os.environ", values={'DEBUG_PROC_CMDLINE': 'abcd 123'} + "os.environ", values={"DEBUG_PROC_CMDLINE": "abcd 123"} ): ret = util.get_cmdline() self.assertEqual("abcd 123", ret) @@ -1606,13 +1590,13 @@ class TestLoadYaml(helpers.CiTestCase): with_logs = True def test_simple(self): - mydata = {'1': "one", '2': "two"} + mydata = {"1": "one", "2": "two"} self.assertEqual(util.load_yaml(yaml.dump(mydata)), mydata) def test_nonallowed_returns_default(self): - '''Any unallowed types result in returning default; log the issue.''' + """Any unallowed types result in returning default; log the issue.""" # for now, anything not in the allowed list just returns the default. 
-        myyaml = yaml.dump({'1': "one"})
+        myyaml = yaml.dump({"1": "one"})
         self.assertEqual(
             util.load_yaml(
                 blob=myyaml, default=self.mydefault, allowed=(str,)
@@ -1620,37 +1604,37 @@ class TestLoadYaml(helpers.CiTestCase):
             ),
             self.mydefault,
         )
         regex = re.compile(
-            r'Yaml load allows \(<(class|type) \'str\'>,\) root types, but'
-            r' got dict'
+            r"Yaml load allows \(<(class|type) \'str\'>,\) root types, but"
+            r" got dict"
         )
         self.assertTrue(
             regex.search(self.logs.getvalue()),
-            msg='Missing expected yaml load error',
+            msg="Missing expected yaml load error",
         )

     def test_bogus_scan_error_returns_default(self):
-        '''On Yaml scan error, load_yaml returns the default and logs issue.'''
+        """On Yaml scan error, load_yaml returns the default and logs issue."""
         badyaml = "1\n 2:"
         self.assertEqual(
             util.load_yaml(blob=badyaml, default=self.mydefault),
             self.mydefault,
         )
         self.assertIn(
-            'Failed loading yaml blob. Invalid format at line 2 column 3:'
+            "Failed loading yaml blob. Invalid format at line 2 column 3:"
             ' "mapping values are not allowed here',
             self.logs.getvalue(),
         )

     def test_bogus_parse_error_returns_default(self):
-        '''On Yaml parse error, load_yaml returns default and logs issue.'''
+        """On Yaml parse error, load_yaml returns default and logs issue."""
         badyaml = "{}}"
         self.assertEqual(
             util.load_yaml(blob=badyaml, default=self.mydefault),
             self.mydefault,
         )
         self.assertIn(
-            'Failed loading yaml blob. Invalid format at line 1 column 3:'
-            " \"expected \'<document start>\', but found \'}\'",
+            "Failed loading yaml blob. Invalid format at line 1 column 3:"
+            " \"expected '<document start>', but found '}'",
             self.logs.getvalue(),
         )

@@ -1670,7 +1654,7 @@ class TestLoadYaml(helpers.CiTestCase):

     def test_python_unicode(self):
         # complex type of python/unicode is explicitly allowed
-        myobj = {'1': "FOOBAR"}
+        myobj = {"1": "FOOBAR"}
         safe_yaml = yaml.dump(myobj)
         self.assertEqual(
             util.load_yaml(blob=safe_yaml, default=self.mydefault), myobj
@@ -1694,144 +1678,144 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
         )
         elements = line.split()
         for i in range(len(elements) + 1):
-            lines = [' '.join(elements[0:i])]
+            lines = [" ".join(elements[0:i])]
             if i < 10:
                 expected = None
             else:
-                expected = ('/dev/mapper/vg0-root', 'ext4', '/')
-            self.assertEqual(expected, util.parse_mount_info('/', lines))
+                expected = ("/dev/mapper/vg0-root", "ext4", "/")
+            self.assertEqual(expected, util.parse_mount_info("/", lines))

     def test_precise_ext4_root(self):
-        lines = helpers.readResource('mountinfo_precise_ext4.txt').splitlines()
+        lines = helpers.readResource("mountinfo_precise_ext4.txt").splitlines()

-        expected = ('/dev/mapper/vg0-root', 'ext4', '/')
-        self.assertEqual(expected, util.parse_mount_info('/', lines))
-        self.assertEqual(expected, util.parse_mount_info('/usr', lines))
-        self.assertEqual(expected, util.parse_mount_info('/usr/bin', lines))
+        expected = ("/dev/mapper/vg0-root", "ext4", "/")
+        self.assertEqual(expected, util.parse_mount_info("/", lines))
+        self.assertEqual(expected, util.parse_mount_info("/usr", lines))
+        self.assertEqual(expected, util.parse_mount_info("/usr/bin", lines))

-        expected = ('/dev/md0', 'ext4', '/boot')
-        self.assertEqual(expected, util.parse_mount_info('/boot', lines))
-        self.assertEqual(expected, util.parse_mount_info('/boot/grub', lines))
+        expected = ("/dev/md0", "ext4", "/boot")
+        self.assertEqual(expected, util.parse_mount_info("/boot", lines))
+        self.assertEqual(expected, util.parse_mount_info("/boot/grub", lines))

-        expected = ('/dev/mapper/vg0-root', 'ext4', '/')
-        self.assertEqual(expected, util.parse_mount_info('/home', lines))
-        self.assertEqual(expected, util.parse_mount_info('/home/me', lines))
+        expected = ("/dev/mapper/vg0-root", "ext4", "/")
+        self.assertEqual(expected, util.parse_mount_info("/home", lines))
+        self.assertEqual(expected, util.parse_mount_info("/home/me", lines))

-        expected = ('tmpfs', 'tmpfs', '/run')
-        self.assertEqual(expected, util.parse_mount_info('/run', lines))
+        expected = ("tmpfs", "tmpfs", "/run")
+        self.assertEqual(expected, util.parse_mount_info("/run", lines))

-        expected = ('none', 'tmpfs', '/run/lock')
-        self.assertEqual(expected, util.parse_mount_info('/run/lock', lines))
+        expected = ("none", "tmpfs", "/run/lock")
+        self.assertEqual(expected, util.parse_mount_info("/run/lock", lines))

     def test_raring_btrfs_root(self):
-        lines = helpers.readResource('mountinfo_raring_btrfs.txt').splitlines()
+        lines = helpers.readResource("mountinfo_raring_btrfs.txt").splitlines()

-        expected = ('/dev/vda1', 'btrfs', '/')
-        self.assertEqual(expected, util.parse_mount_info('/', lines))
-        self.assertEqual(expected, util.parse_mount_info('/usr', lines))
-        self.assertEqual(expected, util.parse_mount_info('/usr/bin', lines))
-        self.assertEqual(expected, util.parse_mount_info('/boot', lines))
-        self.assertEqual(expected, util.parse_mount_info('/boot/grub', lines))
+        expected = ("/dev/vda1", "btrfs", "/")
+        self.assertEqual(expected, util.parse_mount_info("/", lines))
+        self.assertEqual(expected, util.parse_mount_info("/usr", lines))
+        self.assertEqual(expected, util.parse_mount_info("/usr/bin", lines))
+        self.assertEqual(expected, util.parse_mount_info("/boot", lines))
+        self.assertEqual(expected, util.parse_mount_info("/boot/grub", lines))

-        expected = ('/dev/vda1', 'btrfs', '/home')
-        self.assertEqual(expected, util.parse_mount_info('/home', lines))
-        self.assertEqual(expected, util.parse_mount_info('/home/me', lines))
+        expected = ("/dev/vda1", "btrfs", "/home")
+        self.assertEqual(expected, util.parse_mount_info("/home", lines))
+        self.assertEqual(expected, util.parse_mount_info("/home/me", lines))

-        expected = ('tmpfs', 'tmpfs', '/run')
-        self.assertEqual(expected, util.parse_mount_info('/run', lines))
+        expected = ("tmpfs", "tmpfs", "/run")
+        self.assertEqual(expected, util.parse_mount_info("/run", lines))

-        expected = ('none', 'tmpfs', '/run/lock')
-        self.assertEqual(expected, util.parse_mount_info('/run/lock', lines))
+        expected = ("none", "tmpfs", "/run/lock")
+        self.assertEqual(expected, util.parse_mount_info("/run/lock", lines))

-    @mock.patch('cloudinit.util.os')
-    @mock.patch('cloudinit.subp.subp')
+    @mock.patch("cloudinit.util.os")
+    @mock.patch("cloudinit.subp.subp")
     def test_get_device_info_from_zpool(self, zpool_output, m_os):
         # mock /dev/zfs exists
         m_os.path.exists.return_value = True
         # mock subp command from util.get_mount_info_fs_on_zpool
         zpool_output.return_value = (
-            helpers.readResource('zpool_status_simple.txt'),
-            '',
+            helpers.readResource("zpool_status_simple.txt"),
+            "",
         )
         # save function return values and do asserts
-        ret = util.get_device_info_from_zpool('vmzroot')
-        self.assertEqual('gpt/system', ret)
+        ret = util.get_device_info_from_zpool("vmzroot")
+        self.assertEqual("gpt/system", ret)
         self.assertIsNotNone(ret)
-        m_os.path.exists.assert_called_with('/dev/zfs')
+        m_os.path.exists.assert_called_with("/dev/zfs")

-    @mock.patch('cloudinit.util.os')
+    @mock.patch("cloudinit.util.os")
     def test_get_device_info_from_zpool_no_dev_zfs(self, m_os):
         # mock /dev/zfs missing
         m_os.path.exists.return_value = False
         # save function return values and do asserts
-        ret = util.get_device_info_from_zpool('vmzroot')
+        ret = util.get_device_info_from_zpool("vmzroot")
         self.assertIsNone(ret)

-    @mock.patch('cloudinit.util.os')
-    @mock.patch('cloudinit.subp.subp')
+    @mock.patch("cloudinit.util.os")
+    @mock.patch("cloudinit.subp.subp")
     def test_get_device_info_from_zpool_handles_no_zpool(self, m_sub, m_os):
         """Handle case where there is no zpool command"""
         # mock /dev/zfs exists
         m_os.path.exists.return_value = True
         m_sub.side_effect = subp.ProcessExecutionError("No zpool cmd")
-        ret = util.get_device_info_from_zpool('vmzroot')
+        ret = util.get_device_info_from_zpool("vmzroot")
         self.assertIsNone(ret)

-    @mock.patch('cloudinit.util.os')
-    @mock.patch('cloudinit.subp.subp')
+    @mock.patch("cloudinit.util.os")
+    @mock.patch("cloudinit.subp.subp")
     def test_get_device_info_from_zpool_on_error(self, zpool_output, m_os):
         # mock /dev/zfs exists
         m_os.path.exists.return_value = True
         # mock subp command from util.get_mount_info_fs_on_zpool
         zpool_output.return_value = (
-            helpers.readResource('zpool_status_simple.txt'),
-            'error',
+            helpers.readResource("zpool_status_simple.txt"),
+            "error",
         )
         # save function return values and do asserts
-        ret = util.get_device_info_from_zpool('vmzroot')
+        ret = util.get_device_info_from_zpool("vmzroot")
         self.assertIsNone(ret)

-    @mock.patch('cloudinit.subp.subp')
+    @mock.patch("cloudinit.subp.subp")
     def test_parse_mount_with_ext(self, mount_out):
         mount_out.return_value = (
-            helpers.readResource('mount_parse_ext.txt'),
-            '',
+            helpers.readResource("mount_parse_ext.txt"),
+            "",
         )
         # this one is valid and exists in mount_parse_ext.txt
-        ret = util.parse_mount('/var')
-        self.assertEqual(('/dev/mapper/vg00-lv_var', 'ext4', '/var'), ret)
+        ret = util.parse_mount("/var")
+        self.assertEqual(("/dev/mapper/vg00-lv_var", "ext4", "/var"), ret)
         # another one that is valid and exists
-        ret = util.parse_mount('/')
-        self.assertEqual(('/dev/mapper/vg00-lv_root', 'ext4', '/'), ret)
+        ret = util.parse_mount("/")
+        self.assertEqual(("/dev/mapper/vg00-lv_root", "ext4", "/"), ret)
         # this one exists in mount_parse_ext.txt
-        ret = util.parse_mount('/sys/kernel/debug')
+        ret = util.parse_mount("/sys/kernel/debug")
         self.assertIsNone(ret)
         # this one does not even exist in mount_parse_ext.txt
-        ret = util.parse_mount('/not/existing/mount')
+        ret = util.parse_mount("/not/existing/mount")
         self.assertIsNone(ret)

-    @mock.patch('cloudinit.subp.subp')
+    @mock.patch("cloudinit.subp.subp")
     def test_parse_mount_with_zfs(self, mount_out):
         mount_out.return_value = (
-            helpers.readResource('mount_parse_zfs.txt'),
-            '',
+            helpers.readResource("mount_parse_zfs.txt"),
+            "",
         )
         # this one is valid and exists in mount_parse_zfs.txt
-        ret = util.parse_mount('/var')
-        self.assertEqual(('vmzroot/ROOT/freebsd/var', 'zfs', '/var'), ret)
+        ret = util.parse_mount("/var")
+        self.assertEqual(("vmzroot/ROOT/freebsd/var", "zfs", "/var"), ret)
         # this one is the root, valid and also exists in mount_parse_zfs.txt
-        ret = util.parse_mount('/')
-        self.assertEqual(('vmzroot/ROOT/freebsd', 'zfs', '/'), ret)
+        ret = util.parse_mount("/")
+        self.assertEqual(("vmzroot/ROOT/freebsd", "zfs", "/"), ret)
         # this one does not even exist in mount_parse_ext.txt
-        ret = util.parse_mount('/not/existing/mount')
+        ret = util.parse_mount("/not/existing/mount")
         self.assertIsNone(ret)


 class TestIsX86(helpers.CiTestCase):
     def test_is_x86_matches_x86_types(self):
         """is_x86 returns True if CPU architecture matches."""
-        matched_arches = ['x86_64', 'i386', 'i586', 'i686']
+        matched_arches = ["x86_64", "i386", "i586", "i686"]
         for arch in matched_arches:
             self.assertTrue(
                 util.is_x86(arch), 'Expected is_x86 for arch "%s"' % arch
@@ -1839,16 +1823,16 @@ class TestIsX86(helpers.CiTestCase):

     def test_is_x86_unmatched_types(self):
         """is_x86 returns False on non-intel x86 architectures."""
-        unmatched_arches = ['ia64', '9000/800', 'arm64v71']
+        unmatched_arches = ["ia64", "9000/800", "arm64v71"]
         for arch in unmatched_arches:
             self.assertFalse(
                 util.is_x86(arch), 'Expected not is_x86 for arch "%s"' % arch
             )

-    @mock.patch('cloudinit.util.os.uname')
+    @mock.patch("cloudinit.util.os.uname")
     def test_is_x86_calls_uname_for_architecture(self, m_uname):
         """is_x86 returns True if platform from uname matches."""
-        m_uname.return_value = [0, 1, 2, 3, 'x86_64']
+        m_uname.return_value = [0, 1, 2, 3, "x86_64"]
         self.assertTrue(util.is_x86())

@@ -1861,18 +1845,18 @@ class TestGetConfigLogfiles(helpers.CiTestCase):
     def test_default_log_file_present(self):
         """When default_log_file is set get_config_logfiles finds it."""
         self.assertEqual(
-            ['/my.log'], util.get_config_logfiles({'def_log_file': '/my.log'})
+            ["/my.log"], util.get_config_logfiles({"def_log_file": "/my.log"})
         )

     def test_output_logs_parsed_when_teeing_files(self):
         """When output configuration is parsed when teeing files."""
         self.assertEqual(
-            ['/himom.log', '/my.log'],
+            ["/himom.log", "/my.log"],
             sorted(
                 util.get_config_logfiles(
                     {
-                        'def_log_file': '/my.log',
-                        'output': {'all': '|tee -a /himom.log'},
+                        "def_log_file": "/my.log",
+                        "output": {"all": "|tee -a /himom.log"},
                     }
                 )
             ),
@@ -1881,12 +1865,12 @@ class TestGetConfigLogfiles(helpers.CiTestCase):
     def test_output_logs_parsed_when_redirecting(self):
         """When output configuration is parsed when redirecting to a file."""
         self.assertEqual(
-            ['/my.log', '/test.log'],
+            ["/my.log", "/test.log"],
             sorted(
                 util.get_config_logfiles(
                     {
-                        'def_log_file': '/my.log',
-                        'output': {'all': '>/test.log'},
+                        "def_log_file": "/my.log",
+                        "output": {"all": ">/test.log"},
                     }
                 )
             ),
@@ -1895,12 +1879,12 @@ class TestGetConfigLogfiles(helpers.CiTestCase):
     def test_output_logs_parsed_when_appending(self):
         """When output configuration is parsed when appending to a file."""
         self.assertEqual(
-            ['/my.log', '/test.log'],
+            ["/my.log", "/test.log"],
             sorted(
                 util.get_config_logfiles(
                     {
-                        'def_log_file': '/my.log',
-                        'output': {'all': '>> /test.log'},
+                        "def_log_file": "/my.log",
+                        "output": {"all": ">> /test.log"},
                     }
                 )
             ),
@@ -1909,8 +1893,8 @@ class TestGetConfigLogfiles(helpers.CiTestCase):

 class TestMultiLog(helpers.FilesystemMockingTestCase):
     def _createConsole(self, root):
-        os.mkdir(os.path.join(root, 'dev'))
-        open(os.path.join(root, 'dev', 'console'), 'a').close()
+        os.mkdir(os.path.join(root, "dev"))
+        open(os.path.join(root, "dev", "console"), "a").close()

     def setUp(self):
         super(TestMultiLog, self).setUp()
@@ -1924,37 +1908,37 @@ class TestMultiLog(helpers.FilesystemMockingTestCase):
         self.patchStdoutAndStderr(self.stdout, self.stderr)

     def test_stderr_used_by_default(self):
-        logged_string = 'test stderr output'
+        logged_string = "test stderr output"
         util.multi_log(logged_string)
         self.assertEqual(logged_string, self.stderr.getvalue())

     def test_stderr_not_used_if_false(self):
-        util.multi_log('should not see this', stderr=False)
-        self.assertEqual('', self.stderr.getvalue())
+        util.multi_log("should not see this", stderr=False)
+        self.assertEqual("", self.stderr.getvalue())

     def test_logs_go_to_console_by_default(self):
         self._createConsole(self.root)
-        logged_string = 'something very important'
+ logged_string = "something very important" util.multi_log(logged_string) - self.assertEqual(logged_string, open('/dev/console').read()) + self.assertEqual(logged_string, open("/dev/console").read()) def test_logs_dont_go_to_stdout_if_console_exists(self): self._createConsole(self.root) - util.multi_log('something') - self.assertEqual('', self.stdout.getvalue()) + util.multi_log("something") + self.assertEqual("", self.stdout.getvalue()) def test_logs_go_to_stdout_if_console_does_not_exist(self): - logged_string = 'something very important' + logged_string = "something very important" util.multi_log(logged_string) self.assertEqual(logged_string, self.stdout.getvalue()) def test_logs_dont_go_to_stdout_if_fallback_to_stdout_is_false(self): - util.multi_log('something', fallback_to_stdout=False) - self.assertEqual('', self.stdout.getvalue()) + util.multi_log("something", fallback_to_stdout=False) + self.assertEqual("", self.stdout.getvalue()) def test_logs_go_to_log_if_given(self): log = mock.MagicMock() - logged_string = 'something very important' + logged_string = "something very important" util.multi_log(logged_string, log=log) self.assertEqual( [((mock.ANY, logged_string), {})], log.log.call_args_list @@ -1962,26 +1946,26 @@ class TestMultiLog(helpers.FilesystemMockingTestCase): def test_newlines_stripped_from_log_call(self): log = mock.MagicMock() - expected_string = 'something very important' - util.multi_log('{0}\n'.format(expected_string), log=log) + expected_string = "something very important" + util.multi_log("{0}\n".format(expected_string), log=log) self.assertEqual((mock.ANY, expected_string), log.log.call_args[0]) def test_log_level_defaults_to_debug(self): log = mock.MagicMock() - util.multi_log('message', log=log) + util.multi_log("message", log=log) self.assertEqual((logging.DEBUG, mock.ANY), log.log.call_args[0]) def test_given_log_level_used(self): log = mock.MagicMock() log_level = mock.Mock() - util.multi_log('message', log=log, log_level=log_level) + util.multi_log("message", log=log, log_level=log_level) self.assertEqual((log_level, mock.ANY), log.log.call_args[0]) class TestMessageFromString(helpers.TestCase): def test_unicode_not_messed_up(self): - roundtripped = util.message_from_string('\n').as_string() - self.assertNotIn('\x00', roundtripped) + roundtripped = util.message_from_string("\n").as_string() + self.assertNotIn("\x00", roundtripped) class TestReadSeeded(helpers.TestCase): @@ -1995,12 +1979,12 @@ class TestReadSeeded(helpers.TestCase): vd = b"vendordatablob" helpers.populate_dir( self.tmp, - {'meta-data': "key1: val1", 'user-data': ud, 'vendor-data': vd}, + {"meta-data": "key1: val1", "user-data": ud, "vendor-data": vd}, ) sdir = self.tmp + os.path.sep (found_md, found_ud, found_vd) = util.read_seeded(sdir) - self.assertEqual(found_md, {'key1': 'val1'}) + self.assertEqual(found_md, {"key1": "val1"}) self.assertEqual(found_ud, ud) self.assertEqual(found_vd, vd) @@ -2015,12 +1999,12 @@ class TestReadSeededWithoutVendorData(helpers.TestCase): ud = b"userdatablob" vd = None helpers.populate_dir( - self.tmp, {'meta-data': "key1: val1", 'user-data': ud} + self.tmp, {"meta-data": "key1: val1", "user-data": ud} ) sdir = self.tmp + os.path.sep (found_md, found_ud, found_vd) = util.read_seeded(sdir) - self.assertEqual(found_md, {'key1': 'val1'}) + self.assertEqual(found_md, {"key1": "val1"}) self.assertEqual(found_ud, ud) self.assertEqual(found_vd, vd) @@ -2029,7 +2013,7 @@ class TestEncode(helpers.TestCase): """Test the encoding functions""" def 
test_decode_binary_plain_text_with_hex(self): - blob = 'BOOTABLE_FLAG=\x80init=/bin/systemd' + blob = "BOOTABLE_FLAG=\x80init=/bin/systemd" text = util.decode_binary(blob) self.assertEqual(text, blob) @@ -2037,20 +2021,20 @@ class TestEncode(helpers.TestCase): class TestProcessExecutionError(helpers.TestCase): template = ( - '{description}\n' - 'Command: {cmd}\n' - 'Exit code: {exit_code}\n' - 'Reason: {reason}\n' - 'Stdout: {stdout}\n' - 'Stderr: {stderr}' + "{description}\n" + "Command: {cmd}\n" + "Exit code: {exit_code}\n" + "Reason: {reason}\n" + "Stdout: {stdout}\n" + "Stderr: {stderr}" ) - empty_attr = '-' - empty_description = 'Unexpected error while running command.' + empty_attr = "-" + empty_description = "Unexpected error while running command." def test_pexec_error_indent_text(self): error = subp.ProcessExecutionError() - msg = 'abc\ndef' - formatted = 'abc\n{0}def'.format(' ' * 4) + msg = "abc\ndef" + formatted = "abc\n{0}def".format(" " * 4) self.assertEqual(error._indent_text(msg, indent_level=4), formatted) self.assertEqual( error._indent_text(msg.encode(), indent_level=4), @@ -2085,9 +2069,9 @@ class TestProcessExecutionError(helpers.TestCase): ) def test_pexec_error_single_line_msgs(self): - stdout_msg = 'out out' - stderr_msg = 'error error' - cmd = 'test command' + stdout_msg = "out out" + stderr_msg = "error error" + cmd = "test command" exit_code = 3 error = subp.ProcessExecutionError( stdout=stdout_msg, stderr=stderr_msg, exit_code=3, cmd=cmd @@ -2106,25 +2090,25 @@ class TestProcessExecutionError(helpers.TestCase): def test_pexec_error_multi_line_msgs(self): # make sure bytes is converted handled properly when formatting - stdout_msg = 'multi\nline\noutput message'.encode() - stderr_msg = 'multi\nline\nerror message\n\n\n' + stdout_msg = "multi\nline\noutput message".encode() + stderr_msg = "multi\nline\nerror message\n\n\n" error = subp.ProcessExecutionError( stdout=stdout_msg, stderr=stderr_msg ) self.assertEqual( str(error), - '\n'.join( + "\n".join( ( - '{description}', - 'Command: {empty_attr}', - 'Exit code: {empty_attr}', - 'Reason: {empty_attr}', - 'Stdout: multi', - ' line', - ' output message', - 'Stderr: multi', - ' line', - ' error message', + "{description}", + "Command: {empty_attr}", + "Exit code: {empty_attr}", + "Reason: {empty_attr}", + "Stdout: multi", + " line", + " output message", + "Stderr: multi", + " line", + " error message", ) ).format( description=self.empty_description, empty_attr=self.empty_attr @@ -2135,31 +2119,31 @@ class TestProcessExecutionError(helpers.TestCase): class TestSystemIsSnappy(helpers.FilesystemMockingTestCase): def test_id_in_os_release_quoted(self): """os-release containing ID="ubuntu-core" is snappy.""" - orcontent = '\n'.join(['ID="ubuntu-core"', '']) + orcontent = "\n".join(['ID="ubuntu-core"', ""]) root_d = self.tmp_dir() - helpers.populate_dir(root_d, {'etc/os-release': orcontent}) + helpers.populate_dir(root_d, {"etc/os-release": orcontent}) self.reRoot(root_d) self.assertTrue(util.system_is_snappy()) def test_id_in_os_release(self): """os-release containing ID=ubuntu-core is snappy.""" - orcontent = '\n'.join(['ID=ubuntu-core', '']) + orcontent = "\n".join(["ID=ubuntu-core", ""]) root_d = self.tmp_dir() - helpers.populate_dir(root_d, {'etc/os-release': orcontent}) + helpers.populate_dir(root_d, {"etc/os-release": orcontent}) self.reRoot(root_d) self.assertTrue(util.system_is_snappy()) - @mock.patch('cloudinit.util.get_cmdline') + @mock.patch("cloudinit.util.get_cmdline") def 
test_bad_content_in_os_release_no_effect(self, m_cmdline): """malformed os-release should not raise exception.""" - m_cmdline.return_value = 'root=/dev/sda' - orcontent = '\n'.join(['IDubuntu-core', '']) + m_cmdline.return_value = "root=/dev/sda" + orcontent = "\n".join(["IDubuntu-core", ""]) root_d = self.tmp_dir() - helpers.populate_dir(root_d, {'etc/os-release': orcontent}) + helpers.populate_dir(root_d, {"etc/os-release": orcontent}) self.reRoot() self.assertFalse(util.system_is_snappy()) - @mock.patch('cloudinit.util.get_cmdline') + @mock.patch("cloudinit.util.get_cmdline") def test_snap_core_in_cmdline_is_snappy(self, m_cmdline): """The string snap_core= in kernel cmdline indicates snappy.""" cmdline = ( @@ -2172,31 +2156,31 @@ class TestSystemIsSnappy(helpers.FilesystemMockingTestCase): self.assertTrue(util.system_is_snappy()) self.assertTrue(m_cmdline.call_count > 0) - @mock.patch('cloudinit.util.get_cmdline') + @mock.patch("cloudinit.util.get_cmdline") def test_nothing_found_is_not_snappy(self, m_cmdline): """If no positive identification, then not snappy.""" - m_cmdline.return_value = 'root=/dev/sda' + m_cmdline.return_value = "root=/dev/sda" self.reRoot() self.assertFalse(util.system_is_snappy()) self.assertTrue(m_cmdline.call_count > 0) - @mock.patch('cloudinit.util.get_cmdline') + @mock.patch("cloudinit.util.get_cmdline") def test_channel_ini_with_snappy_is_snappy(self, m_cmdline): """A Channel.ini file with 'ubuntu-core' indicates snappy.""" - m_cmdline.return_value = 'root=/dev/sda' + m_cmdline.return_value = "root=/dev/sda" root_d = self.tmp_dir() - content = '\n'.join(["[Foo]", "source = 'ubuntu-core'", ""]) - helpers.populate_dir(root_d, {'etc/system-image/channel.ini': content}) + content = "\n".join(["[Foo]", "source = 'ubuntu-core'", ""]) + helpers.populate_dir(root_d, {"etc/system-image/channel.ini": content}) self.reRoot(root_d) self.assertTrue(util.system_is_snappy()) - @mock.patch('cloudinit.util.get_cmdline') + @mock.patch("cloudinit.util.get_cmdline") def test_system_image_config_dir_is_snappy(self, m_cmdline): """Existence of /etc/system-image/config.d indicates snappy.""" - m_cmdline.return_value = 'root=/dev/sda' + m_cmdline.return_value = "root=/dev/sda" root_d = self.tmp_dir() helpers.populate_dir( - root_d, {'etc/system-image/config.d/my.file': "_unused"} + root_d, {"etc/system-image/config.d/my.file": "_unused"} ) self.reRoot(root_d) self.assertTrue(util.system_is_snappy()) @@ -2206,16 +2190,16 @@ class TestLoadShellContent(helpers.TestCase): def test_comments_handled_correctly(self): """Shell comments should be allowed in the content.""" self.assertEqual( - {'key1': 'val1', 'key2': 'val2', 'key3': 'val3 #tricky'}, + {"key1": "val1", "key2": "val2", "key3": "val3 #tricky"}, util.load_shell_content( - '\n'.join( + "\n".join( [ "#top of file comment", "key1=val1 #this is a comment", "# second comment", 'key2="val2" # inlin comment#badkey=wark', 'key3="val3 #tricky"', - '', + "", ] ) ), @@ -2225,15 +2209,15 @@ class TestLoadShellContent(helpers.TestCase): class TestGetProcEnv(helpers.TestCase): """test get_proc_env.""" - null = b'\x00' - simple1 = b'HOME=/' - simple2 = b'PATH=/bin:/sbin' - bootflag = b'BOOTABLE_FLAG=\x80' # from LP: #1775371 - mixed = b'MIXED=' + b'ab\xccde' + null = b"\x00" + simple1 = b"HOME=/" + simple2 = b"PATH=/bin:/sbin" + bootflag = b"BOOTABLE_FLAG=\x80" # from LP: #1775371 + mixed = b"MIXED=" + b"ab\xccde" - def _val_decoded(self, blob, encoding='utf-8', errors='replace'): + def _val_decoded(self, blob, encoding="utf-8", 
errors="replace"): # return the value portion of key=val decoded. - return blob.split(b'=', 1)[1].decode(encoding, errors) + return blob.split(b"=", 1)[1].decode(encoding, errors) @mock.patch("cloudinit.util.load_file") def test_non_utf8_in_environment(self, m_load_file): @@ -2245,10 +2229,10 @@ class TestGetProcEnv(helpers.TestCase): self.assertEqual( { - 'BOOTABLE_FLAG': self._val_decoded(self.bootflag), - 'HOME': '/', - 'PATH': '/bin:/sbin', - 'MIXED': self._val_decoded(self.mixed), + "BOOTABLE_FLAG": self._val_decoded(self.bootflag), + "HOME": "/", + "PATH": "/bin:/sbin", + "MIXED": self._val_decoded(self.mixed), }, util.get_proc_env(1), ) @@ -2262,7 +2246,7 @@ class TestGetProcEnv(helpers.TestCase): m_load_file.return_value = content self.assertEqual( - dict([t.split(b'=') for t in lines]), + dict([t.split(b"=") for t in lines]), util.get_proc_env(1, encoding=None), ) self.assertEqual(1, m_load_file.call_count) @@ -2273,7 +2257,7 @@ class TestGetProcEnv(helpers.TestCase): content = self.null.join((self.simple1, self.simple2)) m_load_file.return_value = content self.assertEqual( - {'HOME': '/', 'PATH': '/bin:/sbin'}, util.get_proc_env(1) + {"HOME": "/", "PATH": "/bin:/sbin"}, util.get_proc_env(1) ) self.assertEqual(1, m_load_file.call_count) @@ -2296,13 +2280,13 @@ class TestKernelVersion: """test kernel version function""" params = [ - ('5.6.19-300.fc32.x86_64', (5, 6)), - ('4.15.0-101-generic', (4, 15)), - ('3.10.0-1062.12.1.vz7.131.10', (3, 10)), - ('4.18.0-144.el8.x86_64', (4, 18)), + ("5.6.19-300.fc32.x86_64", (5, 6)), + ("4.15.0-101-generic", (4, 15)), + ("3.10.0-1062.12.1.vz7.131.10", (3, 10)), + ("4.18.0-144.el8.x86_64", (4, 18)), ] - @mock.patch('os.uname') + @mock.patch("os.uname") @pytest.mark.parametrize("uname_release,expected", params) def test_kernel_version(self, m_uname, uname_release, expected): m_uname.return_value.release = uname_release @@ -2310,11 +2294,11 @@ class TestKernelVersion: class TestFindDevs: - @mock.patch('cloudinit.subp.subp') + @mock.patch("cloudinit.subp.subp") def test_find_devs_with(self, m_subp): m_subp.return_value = ( '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"', - '', + "", ) devlist = util.find_devs_with() assert devlist == [ @@ -2326,32 +2310,32 @@ class TestFindDevs: '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"' ] - @mock.patch('cloudinit.subp.subp') + @mock.patch("cloudinit.subp.subp") def test_find_devs_with_openbsd(self, m_subp): - m_subp.return_value = ('cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', '') + m_subp.return_value = ("cd0:,sd0:630d98d32b5d3759,sd1:,fd0:", "") devlist = util.find_devs_with_openbsd() - assert devlist == ['/dev/cd0a', '/dev/sd1a', '/dev/sd1i'] + assert devlist == ["/dev/cd0a", "/dev/sd1a", "/dev/sd1i"] - @mock.patch('cloudinit.subp.subp') + @mock.patch("cloudinit.subp.subp") def test_find_devs_with_openbsd_with_criteria(self, m_subp): - m_subp.return_value = ('cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', '') + m_subp.return_value = ("cd0:,sd0:630d98d32b5d3759,sd1:,fd0:", "") devlist = util.find_devs_with_openbsd(criteria="TYPE=iso9660") - assert devlist == ['/dev/cd0a', '/dev/sd1a', '/dev/sd1i'] + assert devlist == ["/dev/cd0a", "/dev/sd1a", "/dev/sd1i"] # lp: #1841466 devlist = util.find_devs_with_openbsd(criteria="LABEL_FATBOOT=A_LABEL") - assert devlist == ['/dev/cd0a', '/dev/sd1a', '/dev/sd1i'] + assert devlist == ["/dev/cd0a", "/dev/sd1a", "/dev/sd1i"] @pytest.mark.parametrize( - 'criteria,expected_devlist', + "criteria,expected_devlist", ( - (None, ['/dev/msdosfs/EFISYS', 
'/dev/iso9660/config-2']), - ('TYPE=iso9660', ['/dev/iso9660/config-2']), - ('TYPE=vfat', ['/dev/msdosfs/EFISYS']), - ('LABEL_FATBOOT=A_LABEL', []), # lp: #1841466 + (None, ["/dev/msdosfs/EFISYS", "/dev/iso9660/config-2"]), + ("TYPE=iso9660", ["/dev/iso9660/config-2"]), + ("TYPE=vfat", ["/dev/msdosfs/EFISYS"]), + ("LABEL_FATBOOT=A_LABEL", []), # lp: #1841466 ), ) - @mock.patch('glob.glob') + @mock.patch("glob.glob") def test_find_devs_with_freebsd(self, m_glob, criteria, expected_devlist): def fake_glob(pattern): msdos = ["/dev/msdosfs/EFISYS"] @@ -2368,14 +2352,14 @@ class TestFindDevs: assert devlist == expected_devlist @pytest.mark.parametrize( - 'criteria,expected_devlist', + "criteria,expected_devlist", ( - (None, ['/dev/ld0', '/dev/dk0', '/dev/dk1', '/dev/cd0']), - ('TYPE=iso9660', ['/dev/cd0']), - ('TYPE=vfat', ["/dev/ld0", "/dev/dk0", "/dev/dk1"]), + (None, ["/dev/ld0", "/dev/dk0", "/dev/dk1", "/dev/cd0"]), + ("TYPE=iso9660", ["/dev/cd0"]), + ("TYPE=vfat", ["/dev/ld0", "/dev/dk0", "/dev/dk1"]), ( - 'LABEL_FATBOOT=A_LABEL', # lp: #1841466 - ['/dev/ld0', '/dev/dk0', '/dev/dk1', '/dev/cd0'], + "LABEL_FATBOOT=A_LABEL", # lp: #1841466 + ["/dev/ld0", "/dev/dk0", "/dev/dk1", "/dev/cd0"], ), ), ) @@ -2384,39 +2368,31 @@ class TestFindDevs: side_effect_values = [ ("ld0 dk0 dk1 cd0", ""), ( - ( - "mscdlabel: CDIOREADTOCHEADER: " - "Inappropriate ioctl for device\n" - "track (ctl=4) at sector 0\n" - "disklabel not written\n" - ), + "mscdlabel: CDIOREADTOCHEADER: " + "Inappropriate ioctl for device\n" + "track (ctl=4) at sector 0\n" + "disklabel not written\n", "", ), ( - ( - "mscdlabel: CDIOREADTOCHEADER: " - "Inappropriate ioctl for device\n" - "track (ctl=4) at sector 0\n" - "disklabel not written\n" - ), + "mscdlabel: CDIOREADTOCHEADER: " + "Inappropriate ioctl for device\n" + "track (ctl=4) at sector 0\n" + "disklabel not written\n", "", ), ( - ( - "mscdlabel: CDIOREADTOCHEADER: " - "Inappropriate ioctl for device\n" - "track (ctl=4) at sector 0\n" - "disklabel not written\n" - ), + "mscdlabel: CDIOREADTOCHEADER: " + "Inappropriate ioctl for device\n" + "track (ctl=4) at sector 0\n" + "disklabel not written\n", "", ), ( - ( - "track (ctl=4) at sector 0\n" - 'ISO filesystem, label "config-2", ' - "creation time: 2020/03/31 17:29\n" - "adding as 'a'\n" - ), + "track (ctl=4) at sector 0\n" + 'ISO filesystem, label "config-2", ' + "creation time: 2020/03/31 17:29\n" + "adding as 'a'\n", "", ), ] @@ -2425,14 +2401,14 @@ class TestFindDevs: assert devlist == expected_devlist @pytest.mark.parametrize( - 'criteria,expected_devlist', + "criteria,expected_devlist", ( - (None, ['/dev/vbd0', '/dev/cd0', '/dev/acd0']), - ('TYPE=iso9660', ['/dev/cd0', '/dev/acd0']), - ('TYPE=vfat', ['/dev/vbd0']), + (None, ["/dev/vbd0", "/dev/cd0", "/dev/acd0"]), + ("TYPE=iso9660", ["/dev/cd0", "/dev/acd0"]), + ("TYPE=vfat", ["/dev/vbd0"]), ( - 'LABEL_FATBOOT=A_LABEL', # lp: #1841466 - ['/dev/vbd0', '/dev/cd0', '/dev/acd0'], + "LABEL_FATBOOT=A_LABEL", # lp: #1841466 + ["/dev/vbd0", "/dev/cd0", "/dev/acd0"], ), ), ) @@ -2440,7 +2416,7 @@ class TestFindDevs: def test_find_devs_with_dragonflybsd( self, m_subp, criteria, expected_devlist ): - m_subp.return_value = ('md2 md1 cd0 vbd0 acd0 vn3 vn2 vn1 vn0 md0', '') + m_subp.return_value = ("md2 md1 cd0 vbd0 acd0 vn3 vn2 vn1 vn0 md0", "") devlist = util.find_devs_with_dragonflybsd(criteria=criteria) assert devlist == expected_devlist diff --git a/tests/unittests/test_version.py b/tests/unittests/test_version.py index ed66b09f..8ac8aea6 100644 --- 
+++ b/tests/unittests/test_version.py
@@ -2,21 +2,22 @@

 from unittest import mock

-from tests.unittests.helpers import CiTestCase
 from cloudinit import version
+from tests.unittests.helpers import CiTestCase


 class TestExportsFeatures(CiTestCase):
     def test_has_network_config_v1(self):
-        self.assertIn('NETWORK_CONFIG_V1', version.FEATURES)
+        self.assertIn("NETWORK_CONFIG_V1", version.FEATURES)

     def test_has_network_config_v2(self):
-        self.assertIn('NETWORK_CONFIG_V2', version.FEATURES)
+        self.assertIn("NETWORK_CONFIG_V2", version.FEATURES)


 class TestVersionString(CiTestCase):
-    @mock.patch("cloudinit.version._PACKAGED_VERSION",
-                "17.2-3-gb05b9972-0ubuntu1")
+    @mock.patch(
+        "cloudinit.version._PACKAGED_VERSION", "17.2-3-gb05b9972-0ubuntu1"
+    )
     def test_package_version_respected(self):
         """If _PACKAGED_VERSION is filled in, then it should be returned."""
         self.assertEqual("17.2-3-gb05b9972-0ubuntu1", version.version_string())
diff --git a/tests/unittests/util.py b/tests/unittests/util.py
index 2204c28f..79a6e1d0 100644
--- a/tests/unittests/util.py
+++ b/tests/unittests/util.py
@@ -27,15 +27,17 @@ def get_cloud(distro=None, paths=None, sys_cfg=None, metadata=None):

 def abstract_to_concrete(abclass):
     """Takes an abstract class and returns a concrete version of it."""
+
     class concreteCls(abclass):
         pass
+
     concreteCls.__abstractmethods__ = frozenset()
-    return type('DummyConcrete' + abclass.__name__, (concreteCls,), {})
+    return type("DummyConcrete" + abclass.__name__, (concreteCls,), {})


 class DataSourceTesting(DataSourceNone):
     def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
-        return 'hostname'
+        return "hostname"

     def persist_instance_data(self):
         return True
@@ -46,7 +48,7 @@ class DataSourceTesting(DataSourceNone):

     @property
     def cloud_name(self):
-        return 'testing'
+        return "testing"


 class MockDistro(distros.Distro):
@@ -68,7 +70,7 @@ class MockDistro(distros.Distro):
         return True

     def get_primary_arch(self):
-        return 'i386'
+        return "i386"

     def get_package_mirror_info(self, arch=None, data_source=None):
         pass
@@ -110,7 +112,7 @@ class MockDistro(distros.Distro):
         pass

     def add_snap_user(self, name, **kwargs):
-        return 'snap_user'
+        return "snap_user"

     def create_user(self, name, **kwargs):
         return True
@@ -124,7 +126,7 @@ class MockDistro(distros.Distro):
     def set_passwd(self, user, passwd, hashed=False):
         return True

-    def ensure_sudo_dir(self, path, sudo_base='/etc/sudoers'):
+    def ensure_sudo_dir(self, path, sudo_base="/etc/sudoers"):
         pass

     def write_sudo_rules(self, user, rules, sudo_file=None):