Diffstat (limited to 'tests')
-rw-r--r--  tests/cloud_tests/releases.yaml | 16
-rw-r--r--  tests/cloud_tests/testcases/examples/including_user_groups.yaml | 4
-rw-r--r--  tests/cloud_tests/testcases/modules/user_groups.yaml | 4
-rw-r--r--  tests/data/merge_sources/expected10.yaml | 2
-rw-r--r--  tests/data/merge_sources/expected7.yaml | 6
-rw-r--r--  tests/data/merge_sources/source10-1.yaml | 2
-rw-r--r--  tests/data/merge_sources/source7-1.yaml | 4
-rw-r--r--  tests/data/merge_sources/source7-2.yaml | 2
-rw-r--r--  tests/data/old_pickles/focal-20.1-10-g71af48df-0ubuntu5.pkl | bin 0 -> 7135 bytes
-rw-r--r--  tests/data/old_pickles/focal-20.3-2-g371b392c-0ubuntu1~20.04.1.pkl | bin 0 -> 7215 bytes
-rw-r--r--  tests/integration_tests/bugs/test_lp1886531.py | 27
-rw-r--r--  tests/integration_tests/bugs/test_lp1897099.py | 31
-rw-r--r--  tests/integration_tests/bugs/test_lp1900837.py | 28
-rw-r--r--  tests/integration_tests/clouds.py | 215
-rw-r--r--  tests/integration_tests/conftest.py | 182
-rw-r--r--  tests/integration_tests/instances.py | 154
-rw-r--r--  tests/integration_tests/integration_settings.py | 96
-rw-r--r--  tests/integration_tests/modules/test_apt_configure_sources_list.py | 51
-rw-r--r--  tests/integration_tests/modules/test_ntp_servers.py | 58
-rw-r--r--  tests/integration_tests/modules/test_package_update_upgrade_install.py | 74
-rw-r--r--  tests/integration_tests/modules/test_runcmd.py | 25
-rw-r--r--  tests/integration_tests/modules/test_seed_random_data.py | 28
-rw-r--r--  tests/integration_tests/modules/test_set_hostname.py | 47
-rw-r--r--  tests/integration_tests/modules/test_set_password.py | 151
-rw-r--r--  tests/integration_tests/modules/test_snap.py | 29
-rw-r--r--  tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py | 48
-rw-r--r--  tests/integration_tests/modules/test_ssh_generate.py | 51
-rw-r--r--  tests/integration_tests/modules/test_ssh_import_id.py | 29
-rw-r--r--  tests/integration_tests/modules/test_ssh_keys_provided.py | 148
-rw-r--r--  tests/integration_tests/modules/test_timezone.py | 25
-rw-r--r--  tests/integration_tests/modules/test_users_groups.py | 83
-rw-r--r--  tests/integration_tests/modules/test_write_files.py | 66
-rw-r--r--  tests/unittests/test_cli.py | 2
-rw-r--r--  tests/unittests/test_datasource/test_aliyun.py | 6
-rw-r--r--  tests/unittests/test_datasource/test_altcloud.py | 21
-rw-r--r--  tests/unittests/test_datasource/test_azure.py | 1076
-rw-r--r--  tests/unittests/test_datasource/test_azure_helper.py | 804
-rw-r--r--  tests/unittests/test_datasource/test_hetzner.py | 20
-rw-r--r--  tests/unittests/test_datasource/test_nocloud.py | 3
-rw-r--r--  tests/unittests/test_datasource/test_openstack.py | 42
-rw-r--r--  tests/unittests/test_datasource/test_ovf.py | 16
-rw-r--r--  tests/unittests/test_datasource/test_scaleway.py | 8
-rw-r--r--  tests/unittests/test_distros/test_gentoo.py | 26
-rw-r--r--  tests/unittests/test_distros/test_netconfig.py | 83
-rw-r--r--  tests/unittests/test_distros/test_resolv.py | 6
-rw-r--r--  tests/unittests/test_ds_identify.py | 65
-rw-r--r--  tests/unittests/test_handler/test_handler_lxd.py | 2
-rw-r--r--  tests/unittests/test_handler/test_handler_power_state.py | 58
-rw-r--r--  tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py | 109
-rw-r--r--  tests/unittests/test_handler/test_handler_resizefs.py | 55
-rw-r--r--  tests/unittests/test_handler/test_handler_resizefs_vyos.py | 55
-rw-r--r--  tests/unittests/test_handler/test_schema.py | 109
-rw-r--r--  tests/unittests/test_net.py | 285
-rw-r--r--  tests/unittests/test_reporting_hyperv.py | 60
-rw-r--r--  tests/unittests/test_sshutil.py | 6
-rw-r--r--  tests/unittests/test_util.py | 147
56 files changed, 4167 insertions(+), 583 deletions(-)
diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml
index e76a3d35..6249efc5 100644
--- a/tests/cloud_tests/releases.yaml
+++ b/tests/cloud_tests/releases.yaml
@@ -133,6 +133,22 @@ features:
releases:
# UBUNTU =================================================================
+ hirsute:
+ # EOL: Jan 2022
+ default:
+ enabled: true
+ release: hirsute
+ version: "21.04"
+ os: ubuntu
+ feature_groups:
+ - base
+ - debian_base
+ - ubuntu_specific
+ lxd:
+ sstreams_server: https://cloud-images.ubuntu.com/daily
+ alias: hirsute
+ setup_overrides: null
+ override_templates: false
groovy:
# EOL: Jul 2021
default:
diff --git a/tests/cloud_tests/testcases/examples/including_user_groups.yaml b/tests/cloud_tests/testcases/examples/including_user_groups.yaml
index 77528d98..86e392dd 100644
--- a/tests/cloud_tests/testcases/examples/including_user_groups.yaml
+++ b/tests/cloud_tests/testcases/examples/including_user_groups.yaml
@@ -18,7 +18,7 @@ cloud_config: |
gecos: Foo B. Bar
primary_group: foobar
groups: users
- expiredate: 2038-01-19
+ expiredate: '2038-01-19'
lock_passwd: false
passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
- name: barfoo
@@ -28,7 +28,7 @@ cloud_config: |
lock_passwd: true
- name: cloudy
gecos: Magic Cloud App Daemon User
- inactive: true
+ inactive: '5'
system: true
collect_scripts:
group_ubuntu: |
diff --git a/tests/cloud_tests/testcases/modules/user_groups.yaml b/tests/cloud_tests/testcases/modules/user_groups.yaml
index 675dfb8c..91b0e281 100644
--- a/tests/cloud_tests/testcases/modules/user_groups.yaml
+++ b/tests/cloud_tests/testcases/modules/user_groups.yaml
@@ -17,7 +17,7 @@ cloud_config: |
gecos: Foo B. Bar
primary_group: foobar
groups: users
- expiredate: 2038-01-19
+ expiredate: '2038-01-19'
lock_passwd: false
passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
- name: barfoo
@@ -27,7 +27,7 @@ cloud_config: |
lock_passwd: true
- name: cloudy
gecos: Magic Cloud App Daemon User
- inactive: true
+ inactive: '5'
system: true
collect_scripts:
group_ubuntu: |
diff --git a/tests/data/merge_sources/expected10.yaml b/tests/data/merge_sources/expected10.yaml
index b865db16..e9f88f7b 100644
--- a/tests/data/merge_sources/expected10.yaml
+++ b/tests/data/merge_sources/expected10.yaml
@@ -1,7 +1,7 @@
#cloud-config
power_state:
- delay: 30
+ delay: '+30'
mode: poweroff
message: [Bye, Bye, Pew, Pew]
diff --git a/tests/data/merge_sources/expected7.yaml b/tests/data/merge_sources/expected7.yaml
index d32988e8..8186d13a 100644
--- a/tests/data/merge_sources/expected7.yaml
+++ b/tests/data/merge_sources/expected7.yaml
@@ -7,7 +7,7 @@ users:
primary_group: foobar
groups: users
selinux_user: staff_u
- expiredate: 2012-09-01
+ expiredate: '2012-09-01'
ssh_import_id: foobar
lock-passwd: false
passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
@@ -22,7 +22,7 @@ users:
- <ssh pub key 2>
- name: cloudy
gecos: Magic Cloud App Daemon User
- inactive: true
+ inactive: '5'
system: true
- bob
- joe
@@ -32,7 +32,7 @@ users:
primary_group: foobar
groups: users
selinux_user: staff_u
- expiredate: 2012-09-01
+ expiredate: '2012-09-01'
ssh_import_id: foobar
lock-passwd: false
passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
diff --git a/tests/data/merge_sources/source10-1.yaml b/tests/data/merge_sources/source10-1.yaml
index 6ae72a13..36fd336d 100644
--- a/tests/data/merge_sources/source10-1.yaml
+++ b/tests/data/merge_sources/source10-1.yaml
@@ -1,6 +1,6 @@
#cloud-config
power_state:
- delay: 30
+ delay: '+30'
mode: poweroff
message: [Bye, Bye]
diff --git a/tests/data/merge_sources/source7-1.yaml b/tests/data/merge_sources/source7-1.yaml
index 6405fc9b..ec93079f 100644
--- a/tests/data/merge_sources/source7-1.yaml
+++ b/tests/data/merge_sources/source7-1.yaml
@@ -7,7 +7,7 @@ users:
primary_group: foobar
groups: users
selinux_user: staff_u
- expiredate: 2012-09-01
+ expiredate: '2012-09-01'
ssh_import_id: foobar
lock-passwd: false
passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
@@ -22,6 +22,6 @@ users:
- <ssh pub key 2>
- name: cloudy
gecos: Magic Cloud App Daemon User
- inactive: true
+ inactive: '5'
system: true
diff --git a/tests/data/merge_sources/source7-2.yaml b/tests/data/merge_sources/source7-2.yaml
index 0cd28978..0c02abff 100644
--- a/tests/data/merge_sources/source7-2.yaml
+++ b/tests/data/merge_sources/source7-2.yaml
@@ -9,7 +9,7 @@ users:
primary_group: foobar
groups: users
selinux_user: staff_u
- expiredate: 2012-09-01
+ expiredate: '2012-09-01'
ssh_import_id: foobar
lock-passwd: false
passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
diff --git a/tests/data/old_pickles/focal-20.1-10-g71af48df-0ubuntu5.pkl b/tests/data/old_pickles/focal-20.1-10-g71af48df-0ubuntu5.pkl
new file mode 100644
index 00000000..358813b4
--- /dev/null
+++ b/tests/data/old_pickles/focal-20.1-10-g71af48df-0ubuntu5.pkl
Binary files differ
diff --git a/tests/data/old_pickles/focal-20.3-2-g371b392c-0ubuntu1~20.04.1.pkl b/tests/data/old_pickles/focal-20.3-2-g371b392c-0ubuntu1~20.04.1.pkl
new file mode 100644
index 00000000..e26f98d8
--- /dev/null
+++ b/tests/data/old_pickles/focal-20.3-2-g371b392c-0ubuntu1~20.04.1.pkl
Binary files differ
diff --git a/tests/integration_tests/bugs/test_lp1886531.py b/tests/integration_tests/bugs/test_lp1886531.py
new file mode 100644
index 00000000..058ea8bb
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1886531.py
@@ -0,0 +1,27 @@
+"""Integration test for LP: #1886531
+
+This test replicates the failure condition (absent /etc/fstab) on all releases
+by removing it in a bootcmd; this runs well before the part of cloud-init which
+causes the failure.
+
+The only required assertion is that cloud-init does not emit a WARNING to the
+log: this indicates that the fstab parsing code has not failed.
+
+https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1886531
+"""
+import pytest
+
+
+USER_DATA = """\
+#cloud-config
+bootcmd:
+- rm -f /etc/fstab
+"""
+
+
+class TestLp1886531:
+
+ @pytest.mark.user_data(USER_DATA)
+ def test_lp1886531(self, client):
+ log_content = client.read_from_file("/var/log/cloud-init.log")
+ assert "WARNING" not in log_content
diff --git a/tests/integration_tests/bugs/test_lp1897099.py b/tests/integration_tests/bugs/test_lp1897099.py
new file mode 100644
index 00000000..27c8927f
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1897099.py
@@ -0,0 +1,31 @@
+""" Integration test for LP #187099
+
+Ensure that if fallocate fails during swap creation, we fall back to using dd.
+
+https://bugs.launchpad.net/cloud-init/+bug/1897099
+"""
+
+import pytest
+
+
+USER_DATA = """\
+#cloud-config
+bootcmd:
+ - echo 'whoops' > /usr/bin/fallocate
+swap:
+ filename: /swap.img
+ size: 10000000
+ maxsize: 10000000
+"""
+
+
+@pytest.mark.sru_2020_11
+@pytest.mark.user_data(USER_DATA)
+@pytest.mark.no_container('Containers cannot configure swap')
+def test_fallocate_fallback(client):
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert '/swap.img' in client.execute('cat /proc/swaps')
+ assert '/swap.img' in client.execute('cat /etc/fstab')
+ assert 'fallocate swap creation failed, will attempt with dd' in log
+ assert "Running command ['dd', 'if=/dev/zero', 'of=/swap.img'" in log
+ assert 'SUCCESS: config-mounts ran successfully' in log
diff --git a/tests/integration_tests/bugs/test_lp1900837.py b/tests/integration_tests/bugs/test_lp1900837.py
new file mode 100644
index 00000000..3fe7d0d0
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1900837.py
@@ -0,0 +1,28 @@
+"""Integration test for LP: #1900836.
+
+This test mirrors the reproducing steps from the reported bug: it changes the
+permissions on cloud-init.log to 600 and confirms that they remain 600 after a
+reboot.
+"""
+import pytest
+
+
+def _get_log_perms(client):
+ return client.execute("stat -c %a /var/log/cloud-init.log")
+
+
+@pytest.mark.sru_2020_11
+class TestLogPermissionsNotResetOnReboot:
+ def test_permissions_unchanged(self, client):
+ # Confirm that the current permissions aren't 600
+ assert "644" == _get_log_perms(client)
+
+ # Set permissions to 600 and confirm our assertion passes pre-reboot
+ client.execute("chmod 600 /var/log/cloud-init.log")
+ assert "600" == _get_log_perms(client)
+
+ # Reboot
+ client.instance.restart()
+
+ # Check that permissions are not reset on reboot
+ assert "600" == _get_log_perms(client)
diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py
new file mode 100644
index 00000000..88ac4408
--- /dev/null
+++ b/tests/integration_tests/clouds.py
@@ -0,0 +1,215 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+from abc import ABC, abstractmethod
+import logging
+
+from pycloudlib import EC2, GCE, Azure, OCI, LXDContainer, LXDVirtualMachine
+from pycloudlib.lxd.instance import LXDInstance
+
+import cloudinit
+from cloudinit.subp import subp
+from tests.integration_tests import integration_settings
+from tests.integration_tests.instances import (
+ IntegrationEc2Instance,
+ IntegrationGceInstance,
+ IntegrationAzureInstance, IntegrationInstance,
+ IntegrationOciInstance,
+ IntegrationLxdInstance,
+)
+
+try:
+ from typing import Optional
+except ImportError:
+ pass
+
+
+log = logging.getLogger('integration_testing')
+
+
+class IntegrationCloud(ABC):
+ datasource = None # type: Optional[str]
+ integration_instance_cls = IntegrationInstance
+
+ def __init__(self, settings=integration_settings):
+ self.settings = settings
+ self.cloud_instance = self._get_cloud_instance()
+ self.image_id = self._get_initial_image()
+
+ def emit_settings_to_log(self) -> None:
+ log.info(
+ "\n".join(
+ ["Settings:"]
+ + [
+ "{}={}".format(key, getattr(self.settings, key))
+ for key in sorted(self.settings.current_settings)
+ ]
+ )
+ )
+
+ @abstractmethod
+ def _get_cloud_instance(self):
+ raise NotImplementedError
+
+ def _get_initial_image(self):
+ image_id = self.settings.OS_IMAGE
+ try:
+ image_id = self.cloud_instance.released_image(
+ self.settings.OS_IMAGE)
+ except (ValueError, IndexError):
+ pass
+ return image_id
+
+ def _perform_launch(self, launch_kwargs):
+ pycloudlib_instance = self.cloud_instance.launch(**launch_kwargs)
+ pycloudlib_instance.wait(raise_on_cloudinit_failure=False)
+ return pycloudlib_instance
+
+ def launch(self, user_data=None, launch_kwargs=None,
+ settings=integration_settings):
+ if self.settings.EXISTING_INSTANCE_ID:
+ log.info(
+ 'Not launching instance due to EXISTING_INSTANCE_ID. '
+ 'Instance id: %s', self.settings.EXISTING_INSTANCE_ID)
+ self.instance = self.cloud_instance.get_instance(
+ self.settings.EXISTING_INSTANCE_ID
+ )
+ return
+ kwargs = {
+ 'image_id': self.image_id,
+ 'user_data': user_data,
+ 'wait': False,
+ }
+ if launch_kwargs:
+ kwargs.update(launch_kwargs)
+ log.info(
+ "Launching instance with launch_kwargs:\n{}".format(
+ "\n".join("{}={}".format(*item) for item in kwargs.items())
+ )
+ )
+
+ pycloudlib_instance = self._perform_launch(kwargs)
+
+ log.info('Launched instance: %s', pycloudlib_instance)
+ return self.get_instance(pycloudlib_instance, settings)
+
+ def get_instance(self, cloud_instance, settings=integration_settings):
+ return self.integration_instance_cls(self, cloud_instance, settings)
+
+ def destroy(self):
+ pass
+
+ def snapshot(self, instance):
+ return self.cloud_instance.snapshot(instance, clean=True)
+
+
+class Ec2Cloud(IntegrationCloud):
+ datasource = 'ec2'
+ integration_instance_cls = IntegrationEc2Instance
+
+ def _get_cloud_instance(self):
+ return EC2(tag='ec2-integration-test')
+
+
+class GceCloud(IntegrationCloud):
+ datasource = 'gce'
+ integration_instance_cls = IntegrationGceInstance
+
+ def _get_cloud_instance(self):
+ return GCE(
+ tag='gce-integration-test',
+ project=self.settings.GCE_PROJECT,
+ region=self.settings.GCE_REGION,
+ zone=self.settings.GCE_ZONE,
+ )
+
+
+class AzureCloud(IntegrationCloud):
+ datasource = 'azure'
+ integration_instance_cls = IntegrationAzureInstance
+
+ def _get_cloud_instance(self):
+ return Azure(tag='azure-integration-test')
+
+ def destroy(self):
+ self.cloud_instance.delete_resource_group()
+
+
+class OciCloud(IntegrationCloud):
+ datasource = 'oci'
+ integration_instance_cls = IntegrationOciInstance
+
+ def _get_cloud_instance(self):
+ return OCI(
+ tag='oci-integration-test',
+ compartment_id=self.settings.OCI_COMPARTMENT_ID
+ )
+
+
+class _LxdIntegrationCloud(IntegrationCloud):
+ integration_instance_cls = IntegrationLxdInstance
+
+ def _get_cloud_instance(self):
+ return self.pycloudlib_instance_cls(tag=self.instance_tag)
+
+ @staticmethod
+ def _get_or_set_profile_list(release):
+ return None
+
+ @staticmethod
+ def _mount_source(instance: LXDInstance):
+ target_path = '/usr/lib/python3/dist-packages/cloudinit'
+ format_variables = {
+ 'name': instance.name,
+ 'source_path': cloudinit.__path__[0],
+ 'container_path': target_path,
+ }
+ log.info(
+ 'Mounting source {source_path} directly onto LXD container/vm '
+ 'named {name} at {container_path}'.format(**format_variables))
+ command = (
+ 'lxc config device add {name} host-cloud-init disk '
+ 'source={source_path} '
+ 'path={container_path}'
+ ).format(**format_variables)
+ subp(command.split())
+
+ def _perform_launch(self, launch_kwargs):
+ launch_kwargs['inst_type'] = launch_kwargs.pop('instance_type', None)
+ launch_kwargs.pop('wait')
+ release = launch_kwargs.pop('image_id')
+
+ try:
+ profile_list = launch_kwargs['profile_list']
+ except KeyError:
+ profile_list = self._get_or_set_profile_list(release)
+
+ pycloudlib_instance = self.cloud_instance.init(
+ launch_kwargs.pop('name', None),
+ release,
+ profile_list=profile_list,
+ **launch_kwargs
+ )
+ if self.settings.CLOUD_INIT_SOURCE == 'IN_PLACE':
+ self._mount_source(pycloudlib_instance)
+ pycloudlib_instance.start(wait=False)
+ pycloudlib_instance.wait(raise_on_cloudinit_failure=False)
+ return pycloudlib_instance
+
+
+class LxdContainerCloud(_LxdIntegrationCloud):
+ datasource = 'lxd_container'
+ pycloudlib_instance_cls = LXDContainer
+ instance_tag = 'lxd-container-integration-test'
+
+
+class LxdVmCloud(_LxdIntegrationCloud):
+ datasource = 'lxd_vm'
+ pycloudlib_instance_cls = LXDVirtualMachine
+ instance_tag = 'lxd-vm-integration-test'
+ _profile_list = None
+
+ def _get_or_set_profile_list(self, release):
+ if self._profile_list:
+ return self._profile_list
+ self._profile_list = self.cloud_instance.build_necessary_profiles(
+ release)
+ return self._profile_list
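The classes above give every cloud a uniform launch/snapshot/destroy surface over pycloudlib. A minimal standalone sketch of the intended flow (assuming the LXD container platform and default settings; the user-data value is purely illustrative):

    from tests.integration_tests.clouds import LxdContainerCloud

    cloud = LxdContainerCloud()
    # launch() merges image_id/user_data into the pycloudlib launch kwargs
    # and wraps the result in an IntegrationLxdInstance
    instance = cloud.launch(user_data="#cloud-config\nruncmd:\n - echo hi\n")
    try:
        print(instance.execute("cloud-init status"))
    finally:
        instance.destroy()
        cloud.destroy()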
diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py
new file mode 100644
index 00000000..73b44bfc
--- /dev/null
+++ b/tests/integration_tests/conftest.py
@@ -0,0 +1,182 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import os
+import pytest
+import sys
+from contextlib import contextmanager
+
+from tests.integration_tests import integration_settings
+from tests.integration_tests.clouds import (
+ Ec2Cloud,
+ GceCloud,
+ AzureCloud,
+ OciCloud,
+ LxdContainerCloud,
+ LxdVmCloud,
+)
+
+
+log = logging.getLogger('integration_testing')
+log.addHandler(logging.StreamHandler(sys.stdout))
+log.setLevel(logging.INFO)
+
+platforms = {
+ 'ec2': Ec2Cloud,
+ 'gce': GceCloud,
+ 'azure': AzureCloud,
+ 'oci': OciCloud,
+ 'lxd_container': LxdContainerCloud,
+ 'lxd_vm': LxdVmCloud,
+}
+
+
+def pytest_runtest_setup(item):
+ """Skip tests on unsupported clouds.
+
+    A test can take any number of marks to specify the platforms it can
+    run on. If one or more platforms are specified and we're not running
+    on one of them, skip the test. If no platform-specific marks are
+    specified, we assume the test can run anywhere.
+ """
+ all_platforms = platforms.keys()
+ test_marks = [mark.name for mark in item.iter_markers()]
+ supported_platforms = set(all_platforms).intersection(test_marks)
+ current_platform = integration_settings.PLATFORM
+ unsupported_message = 'Cannot run on platform {}'.format(current_platform)
+ if 'no_container' in test_marks:
+ if 'lxd_container' in test_marks:
+ raise Exception(
+ 'lxd_container and no_container marks simultaneously set '
+ 'on test'
+ )
+ if current_platform == 'lxd_container':
+ pytest.skip(unsupported_message)
+ if supported_platforms and current_platform not in supported_platforms:
+ pytest.skip(unsupported_message)
+
+
+# disable_subp_usage is defined at a higher level, but we don't
+# want it applied here
+@pytest.fixture()
+def disable_subp_usage(request):
+ pass
+
+
+@pytest.yield_fixture(scope='session')
+def session_cloud():
+ if integration_settings.PLATFORM not in platforms.keys():
+ raise ValueError(
+ "{} is an invalid PLATFORM specified in settings. "
+ "Must be one of {}".format(
+ integration_settings.PLATFORM, list(platforms.keys())
+ )
+ )
+
+ cloud = platforms[integration_settings.PLATFORM]()
+ cloud.emit_settings_to_log()
+ yield cloud
+ cloud.destroy()
+
+
+@pytest.fixture(scope='session', autouse=True)
+def setup_image(session_cloud):
+ """Setup the target environment with the correct version of cloud-init.
+
+ So we can launch instances / run tests with the correct image
+ """
+ client = None
+ log.info('Setting up environment for %s', session_cloud.datasource)
+ if integration_settings.CLOUD_INIT_SOURCE == 'NONE':
+ pass # that was easy
+ elif integration_settings.CLOUD_INIT_SOURCE == 'IN_PLACE':
+ if session_cloud.datasource not in ['lxd_container', 'lxd_vm']:
+ raise ValueError(
+ 'IN_PLACE as CLOUD_INIT_SOURCE only works for LXD')
+ # The mount needs to happen after the instance is created, so
+ # no further action needed here
+ elif integration_settings.CLOUD_INIT_SOURCE == 'PROPOSED':
+ client = session_cloud.launch()
+ client.install_proposed_image()
+ elif integration_settings.CLOUD_INIT_SOURCE.startswith('ppa:'):
+ client = session_cloud.launch()
+ client.install_ppa(integration_settings.CLOUD_INIT_SOURCE)
+ elif os.path.isfile(str(integration_settings.CLOUD_INIT_SOURCE)):
+ client = session_cloud.launch()
+ client.install_deb()
+ else:
+ raise ValueError(
+ 'Invalid value for CLOUD_INIT_SOURCE setting: {}'.format(
+ integration_settings.CLOUD_INIT_SOURCE))
+ if client:
+ # Even if we're keeping instances, we don't want to keep this
+ # one around as it was just for image creation
+ client.destroy()
+ log.info('Done with environment setup')
+
+
+@contextmanager
+def _client(request, fixture_utils, session_cloud):
+ """Fixture implementation for the client fixtures.
+
+    Launch a dynamic IntegrationInstance using any provided user-data,
+    yield it to the test, then clean up.
+ """
+ user_data = fixture_utils.closest_marker_first_arg_or(
+ request, 'user_data', None)
+ name = fixture_utils.closest_marker_first_arg_or(
+ request, 'instance_name', None
+ )
+ launch_kwargs = {}
+ if name is not None:
+ launch_kwargs = {"name": name}
+ with session_cloud.launch(
+ user_data=user_data, launch_kwargs=launch_kwargs
+ ) as instance:
+ yield instance
+
+
+@pytest.yield_fixture
+def client(request, fixture_utils, session_cloud):
+ """Provide a client that runs for every test."""
+ with _client(request, fixture_utils, session_cloud) as client:
+ yield client
+
+
+@pytest.yield_fixture(scope='module')
+def module_client(request, fixture_utils, session_cloud):
+ """Provide a client that runs once per module."""
+ with _client(request, fixture_utils, session_cloud) as client:
+ yield client
+
+
+@pytest.yield_fixture(scope='class')
+def class_client(request, fixture_utils, session_cloud):
+ """Provide a client that runs once per class."""
+ with _client(request, fixture_utils, session_cloud) as client:
+ yield client
+
+
+def pytest_assertrepr_compare(op, left, right):
+ """Custom integration test assertion explanations.
+
+ See
+ https://docs.pytest.org/en/stable/assert.html#defining-your-own-explanation-for-failed-assertions
+ for pytest's documentation.
+ """
+ if op == "not in" and isinstance(left, str) and isinstance(right, str):
+ # This stanza emits an improved assertion message if we're testing for
+ # the presence of a string within a cloud-init log: it will report only
+ # the specific lines containing the string (instead of the full log,
+ # the default behaviour).
+ potential_log_lines = right.splitlines()
+ first_line = potential_log_lines[0]
+ if "DEBUG" in first_line and "Cloud-init" in first_line:
+ # We are looking at a cloud-init log, so just pick out the relevant
+ # lines
+ found_lines = [
+ line for line in potential_log_lines if left in line
+ ]
+ return [
+ '"{}" not in cloud-init.log string; unexpectedly found on'
+ " these lines:".format(left)
+ ] + found_lines
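Putting the conftest pieces together, a hypothetical test module (not part of this commit) would look like the following: the platform mark is enforced by pytest_runtest_setup above, and the ``user_data`` mark is picked up by the ``client`` fixture when the instance is launched.

    import pytest

    USER_DATA = """\
    #cloud-config
    runcmd:
     - echo hello > /var/tmp/hello
    """


    @pytest.mark.lxd_container  # skipped unless PLATFORM == 'lxd_container'
    @pytest.mark.user_data(USER_DATA)  # consumed by the client fixture
    def test_hello(client):
        assert "hello" == client.read_from_file("/var/tmp/hello").strip()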
diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py
new file mode 100644
index 00000000..9b13288c
--- /dev/null
+++ b/tests/integration_tests/instances.py
@@ -0,0 +1,154 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import os
+import uuid
+from tempfile import NamedTemporaryFile
+
+from pycloudlib.instance import BaseInstance
+from pycloudlib.result import Result
+
+from tests.integration_tests import integration_settings
+
+try:
+ from typing import TYPE_CHECKING
+ if TYPE_CHECKING:
+ from tests.integration_tests.clouds import IntegrationCloud
+except ImportError:
+ pass
+
+
+log = logging.getLogger('integration_testing')
+
+
+def _get_tmp_path():
+ tmp_filename = str(uuid.uuid4())
+ return '/var/tmp/{}.tmp'.format(tmp_filename)
+
+
+class IntegrationInstance:
+ use_sudo = True
+
+ def __init__(self, cloud: 'IntegrationCloud', instance: BaseInstance,
+ settings=integration_settings):
+ self.cloud = cloud
+ self.instance = instance
+ self.settings = settings
+
+ def destroy(self):
+ self.instance.delete()
+
+ def execute(self, command, *, use_sudo=None) -> Result:
+ if self.instance.username == 'root' and use_sudo is False:
+ raise Exception('Root user cannot run unprivileged')
+ if use_sudo is None:
+ use_sudo = self.use_sudo
+ return self.instance.execute(command, use_sudo=use_sudo)
+
+ def pull_file(self, remote_path, local_path):
+ # First copy to a temporary directory because of permissions issues
+ tmp_path = _get_tmp_path()
+ self.instance.execute('cp {} {}'.format(remote_path, tmp_path))
+ self.instance.pull_file(tmp_path, local_path)
+
+ def push_file(self, local_path, remote_path):
+ # First push to a temporary directory because of permissions issues
+ tmp_path = _get_tmp_path()
+ self.instance.push_file(local_path, tmp_path)
+ self.execute('mv {} {}'.format(tmp_path, remote_path))
+
+ def read_from_file(self, remote_path) -> str:
+ result = self.execute('cat {}'.format(remote_path))
+ if result.failed:
+ # TODO: Raise here whatever pycloudlib raises when it has
+ # a consistent error response
+ raise IOError(
+ 'Failed reading remote file via cat: {}\n'
+ 'Return code: {}\n'
+ 'Stderr: {}\n'
+ 'Stdout: {}'.format(
+ remote_path, result.return_code,
+ result.stderr, result.stdout)
+ )
+ return result.stdout
+
+ def write_to_file(self, remote_path, contents: str):
+ # Writes file locally and then pushes it rather
+ # than writing the file directly on the instance
+ with NamedTemporaryFile('w', delete=False) as tmp_file:
+ tmp_file.write(contents)
+
+ try:
+ self.push_file(tmp_file.name, remote_path)
+ finally:
+ os.unlink(tmp_file.name)
+
+ def snapshot(self):
+ return self.cloud.snapshot(self.instance)
+
+ def _install_new_cloud_init(self, remote_script):
+ self.execute(remote_script)
+ version = self.execute('cloud-init -v').split()[-1]
+ log.info('Installed cloud-init version: %s', version)
+ self.instance.clean()
+ image_id = self.snapshot()
+ log.info('Created new image: %s', image_id)
+ self.cloud.image_id = image_id
+
+ def install_proposed_image(self):
+ log.info('Installing proposed image')
+ remote_script = (
+ '{sudo} echo deb "http://archive.ubuntu.com/ubuntu '
+ '$(lsb_release -sc)-proposed main" | '
+ '{sudo} tee /etc/apt/sources.list.d/proposed.list\n'
+ '{sudo} apt-get update -q\n'
+ '{sudo} apt-get install -qy cloud-init'
+ ).format(sudo='sudo' if self.use_sudo else '')
+ self._install_new_cloud_init(remote_script)
+
+ def install_ppa(self, repo):
+ log.info('Installing PPA')
+ remote_script = (
+ '{sudo} add-apt-repository {repo} -y && '
+ '{sudo} apt-get update -q && '
+ '{sudo} apt-get install -qy cloud-init'
+ ).format(sudo='sudo' if self.use_sudo else '', repo=repo)
+ self._install_new_cloud_init(remote_script)
+
+ def install_deb(self):
+ log.info('Installing deb package')
+ deb_path = integration_settings.CLOUD_INIT_SOURCE
+ deb_name = os.path.basename(deb_path)
+ remote_path = '/var/tmp/{}'.format(deb_name)
+ self.push_file(
+ local_path=integration_settings.CLOUD_INIT_SOURCE,
+ remote_path=remote_path)
+ remote_script = '{sudo} dpkg -i {path}'.format(
+ sudo='sudo' if self.use_sudo else '', path=remote_path)
+ self._install_new_cloud_init(remote_script)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ if not self.settings.KEEP_INSTANCE:
+ self.destroy()
+
+
+class IntegrationEc2Instance(IntegrationInstance):
+ pass
+
+
+class IntegrationGceInstance(IntegrationInstance):
+ pass
+
+
+class IntegrationAzureInstance(IntegrationInstance):
+ pass
+
+
+class IntegrationOciInstance(IntegrationInstance):
+ pass
+
+
+class IntegrationLxdInstance(IntegrationInstance):
+ use_sudo = False
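A short sketch of how the file helpers above compose (``client`` here stands for an already-launched instance; the path is illustrative): ``write_to_file`` stages the contents in a local temp file, ``push_file`` routes it through a /var/tmp intermediate path to sidestep destination permissions, and ``read_from_file`` simply cats it back.

    from tests.integration_tests.instances import IntegrationInstance

    def roundtrip(client: IntegrationInstance) -> None:
        client.write_to_file("/var/tmp/example.txt", "hello\n")
        assert client.read_from_file("/var/tmp/example.txt") == "hello\n"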
diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py
new file mode 100644
index 00000000..a0609f7e
--- /dev/null
+++ b/tests/integration_tests/integration_settings.py
@@ -0,0 +1,96 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import os
+
+##################################################################
+# LAUNCH SETTINGS
+##################################################################
+
+# Keep instance (mostly for debugging) when test is finished
+KEEP_INSTANCE = False
+
+# One of:
+# lxd_container
+# azure
+# ec2
+# gce
+# oci
+PLATFORM = 'lxd_container'
+
+# The cloud-specific instance type to run. E.g., a1.medium on AWS
+# If the pycloudlib instance provides a default, this can be left None
+INSTANCE_TYPE = None
+
+# Determines the base image to use or generate new images from.
+# Can be the name of the OS if running a stock image,
+# otherwise the id of the image being used if using a custom image
+OS_IMAGE = 'focal'
+
+# Populate if you want to use a pre-launched instance instead of
+# creating a new one. The exact contents will be platform dependent
+EXISTING_INSTANCE_ID = None
+
+##################################################################
+# IMAGE GENERATION SETTINGS
+##################################################################
+
+# Depending on where we are in the development / test / SRU cycle, we'll want
+# different methods of getting the source code to our SUT. Because of
+# this there are a number of different ways to initialize
+# the target environment.
+
+# Can be any of the following:
+# NONE
+# Don't modify the target environment at all. This will run
+# cloud-init with whatever code was baked into the image
+# IN_PLACE
+# LXD CONTAINER only. Mount the source code as-is directly into
+# the container to override the pre-existing cloudinit module. This
+# won't work for non-local LXD remotes and won't run any installation
+# code.
+# PROPOSED
+# Install from the Ubuntu proposed repo
+# <ppa repo>, e.g., ppa:cloud-init-dev/proposed
+# Install from a PPA. It MUST start with 'ppa:'
+# <file path>
+# A path to a valid package to be uploaded and installed
+CLOUD_INIT_SOURCE = 'NONE'
+
+##################################################################
+# GCE SPECIFIC SETTINGS
+##################################################################
+# Required for GCE
+GCE_PROJECT = None
+
+# You probably want to override these
+GCE_REGION = 'us-central1'
+GCE_ZONE = 'a'
+
+##################################################################
+# OCI SPECIFIC SETTINGS
+##################################################################
+# Compartment-id found at
+# https://console.us-phoenix-1.oraclecloud.com/a/identity/compartments
+# Required for Oracle
+OCI_COMPARTMENT_ID = None
+
+##################################################################
+# USER SETTINGS OVERRIDES
+##################################################################
+# Bring in any user-file defined settings
+try:
+ from tests.integration_tests.user_settings import * # noqa
+except ImportError:
+ pass
+
+##################################################################
+# ENVIRONMENT SETTINGS OVERRIDES
+##################################################################
+# Any of the settings in this file can be overridden with an
+# environment variable of the same name prepended with CLOUD_INIT_
+# E.g., CLOUD_INIT_PLATFORM
+# Perhaps a bit too hacky, but it works :)
+current_settings = [var for var in locals() if var.isupper()]
+for setting in current_settings:
+ globals()[setting] = os.getenv(
+ 'CLOUD_INIT_{}'.format(setting), globals()[setting]
+ )
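Because the loop above rewrites module globals from the environment at import time, any upper-case setting can be overridden without a user_settings.py. A sketch (the override values arrive as strings, so this is most natural for string settings such as PLATFORM):

    import os

    # must run before tests.integration_tests.integration_settings is
    # first imported anywhere in the process
    os.environ["CLOUD_INIT_PLATFORM"] = "lxd_vm"

    from tests.integration_tests import integration_settings
    assert integration_settings.PLATFORM == "lxd_vm"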
diff --git a/tests/integration_tests/modules/test_apt_configure_sources_list.py b/tests/integration_tests/modules/test_apt_configure_sources_list.py
new file mode 100644
index 00000000..d2bcc61a
--- /dev/null
+++ b/tests/integration_tests/modules/test_apt_configure_sources_list.py
@@ -0,0 +1,51 @@
+"""Integration test for the apt module's ``sources_list`` functionality.
+
+This test specifies a ``sources_list`` and then checks that (a) the expected
+number of sources.list entries is present, and (b) that each expected line
+appears in the file.
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml``.)"""
+import re
+
+import pytest
+
+
+USER_DATA = """\
+#cloud-config
+apt:
+ primary:
+ - arches: [default]
+ uri: http://archive.ubuntu.com/ubuntu
+ security:
+ - arches: [default]
+ uri: http://security.ubuntu.com/ubuntu
+ sources_list: |
+ deb $MIRROR $RELEASE main restricted
+ deb-src $MIRROR $RELEASE main restricted
+ deb $PRIMARY $RELEASE universe restricted
+ deb-src $PRIMARY $RELEASE universe restricted
+ deb $SECURITY $RELEASE-security multiverse
+ deb-src $SECURITY $RELEASE-security multiverse
+"""
+
+EXPECTED_REGEXES = [
+ r"deb http://archive.ubuntu.com/ubuntu [a-z].* main restricted",
+ r"deb-src http://archive.ubuntu.com/ubuntu [a-z].* main restricted",
+ r"deb http://archive.ubuntu.com/ubuntu [a-z].* universe restricted",
+ r"deb-src http://archive.ubuntu.com/ubuntu [a-z].* universe restricted",
+ r"deb http://security.ubuntu.com/ubuntu [a-z].*security multiverse",
+ r"deb-src http://security.ubuntu.com/ubuntu [a-z].*security multiverse",
+]
+
+
+@pytest.mark.ci
+class TestAptConfigureSourcesList:
+
+ @pytest.mark.user_data(USER_DATA)
+ def test_sources_list(self, client):
+ sources_list = client.read_from_file("/etc/apt/sources.list")
+ assert 6 == len(sources_list.rstrip().split('\n'))
+
+ for expected_re in EXPECTED_REGEXES:
+ assert re.search(expected_re, sources_list) is not None
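The ``$MIRROR``/``$PRIMARY``/``$SECURITY``/``$RELEASE`` placeholders are expanded by cloud-init's templating before sources.list is written; purely as an illustration of the substitution the regexes above assert, Python's string.Template gives the same $-style behavior (the focal values are assumed):

    from string import Template

    line = Template("deb $MIRROR $RELEASE main restricted").substitute(
        MIRROR="http://archive.ubuntu.com/ubuntu", RELEASE="focal")
    assert line == "deb http://archive.ubuntu.com/ubuntu focal main restricted"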
diff --git a/tests/integration_tests/modules/test_ntp_servers.py b/tests/integration_tests/modules/test_ntp_servers.py
new file mode 100644
index 00000000..e72389c1
--- /dev/null
+++ b/tests/integration_tests/modules/test_ntp_servers.py
@@ -0,0 +1,58 @@
+"""Integration test for the ntp module's ``servers`` functionality with ntp.
+
+This test specifies the use of the `ntp` NTP client, and ensures that the given
+NTP servers are configured as expected.
+
+(This is ported from ``tests/cloud_tests/testcases/modules/ntp_servers.yaml``.)
+"""
+import re
+
+import yaml
+import pytest
+
+USER_DATA = """\
+#cloud-config
+ntp:
+ ntp_client: ntp
+ servers:
+ - 172.16.15.14
+ - 172.16.17.18
+"""
+
+EXPECTED_SERVERS = yaml.safe_load(USER_DATA)["ntp"]["servers"]
+
+
+@pytest.mark.ci
+@pytest.mark.user_data(USER_DATA)
+class TestNtpServers:
+
+ def test_ntp_installed(self, class_client):
+ """Test that `ntpd --version` succeeds, indicating installation."""
+ result = class_client.execute("ntpd --version")
+ assert 0 == result.return_code
+
+ def test_dist_config_file_is_empty(self, class_client):
+ """Test that the distributed config file is empty.
+
+ (This test is skipped on all currently supported Ubuntu releases, so
+ may not actually be needed any longer.)
+ """
+ if class_client.execute("test -e /etc/ntp.conf.dist").failed:
+ pytest.skip("/etc/ntp.conf.dist does not exist")
+ dist_file = class_client.read_from_file("/etc/ntp.conf.dist")
+ assert 0 == len(dist_file.strip().splitlines())
+
+ def test_ntp_entries(self, class_client):
+ ntp_conf = class_client.read_from_file("/etc/ntp.conf")
+ for expected_server in EXPECTED_SERVERS:
+ assert re.search(
+ r"^server {} iburst".format(expected_server),
+ ntp_conf,
+ re.MULTILINE
+ )
+
+ def test_ntpq_servers(self, class_client):
+ result = class_client.execute("ntpq -p -w -n")
+ assert result.ok
+ for expected_server in EXPECTED_SERVERS:
+ assert expected_server in result.stdout
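``test_ntp_entries`` relies on the ntp module rendering one ``server <address> iburst`` line per configured server into /etc/ntp.conf; a self-contained sketch of the same anchored, multiline regex check against a sample line:

    import re

    sample_conf = "driftfile /var/lib/ntp/ntp.drift\nserver 172.16.15.14 iburst\n"
    assert re.search(r"^server 172.16.15.14 iburst", sample_conf, re.MULTILINE)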
diff --git a/tests/integration_tests/modules/test_package_update_upgrade_install.py b/tests/integration_tests/modules/test_package_update_upgrade_install.py
new file mode 100644
index 00000000..8a38ad84
--- /dev/null
+++ b/tests/integration_tests/modules/test_package_update_upgrade_install.py
@@ -0,0 +1,74 @@
+"""Integration test for the package update upgrade install module.
+
+This test module asserts that packages are upgraded/updated during boot
+with the ``package_update_upgrade_install`` module. We are also testing
+if we can install new packages during boot too.
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml``.)
+
+NOTE: the testcase for this looks for the command in history.log as
+    /usr/bin/apt-get..., which is not how it always appears. It should
+    instead look for just apt-get...
+"""
+
+import re
+import pytest
+
+
+USER_DATA = """\
+#cloud-config
+packages:
+ - sl
+ - tree
+package_update: true
+package_upgrade: true
+"""
+
+
+@pytest.mark.user_data(USER_DATA)
+class TestPackageUpdateUpgradeInstall:
+
+ def assert_package_installed(self, pkg_out, name, version=None):
+ """Check dpkg-query --show output for matching package name.
+
+ @param name: package base name
+ @param version: string representing a package version or part of a
+ version.
+ """
+ pkg_match = re.search(
+ "^%s\t(?P<version>.*)$" % name, pkg_out, re.MULTILINE)
+ if pkg_match:
+ installed_version = pkg_match.group("version")
+ if not version:
+ return # Success
+ if installed_version.startswith(version):
+ return # Success
+ raise AssertionError(
+ "Expected package version %s-%s not found. Found %s" %
+ name, version, installed_version)
+ raise AssertionError("Package not installed: %s" % name)
+
+ def test_new_packages_are_installed(self, class_client):
+ pkg_out = class_client.execute("dpkg-query --show")
+
+ self.assert_package_installed(pkg_out, "sl")
+ self.assert_package_installed(pkg_out, "tree")
+
+ def test_packages_were_updated(self, class_client):
+ out = class_client.execute(
+ "grep ^Commandline: /var/log/apt/history.log")
+ assert (
+ "Commandline: /usr/bin/apt-get --option=Dpkg::Options"
+ "::=--force-confold --option=Dpkg::options::=--force-unsafe-io "
+ "--assume-yes --quiet install sl tree") in out
+
+ def test_packages_were_upgraded(self, class_client):
+ """Test cloud-init-output for install & upgrade stuff."""
+ out = class_client.read_from_file("/var/log/cloud-init-output.log")
+ assert "Setting up tree (" in out
+ assert "Setting up sl (" in out
+ assert "Reading package lists..." in out
+ assert "Building dependency tree..." in out
+ assert "Reading state information..." in out
+ assert "Calculating upgrade..." in out
diff --git a/tests/integration_tests/modules/test_runcmd.py b/tests/integration_tests/modules/test_runcmd.py
new file mode 100644
index 00000000..50d1851e
--- /dev/null
+++ b/tests/integration_tests/modules/test_runcmd.py
@@ -0,0 +1,25 @@
+"""Integration test for the runcmd module.
+
+This test specifies a command to be executed by the ``runcmd`` module
+and then checks if that command was executed during boot.
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/runcmd.yaml``.)"""
+
+import pytest
+
+
+USER_DATA = """\
+#cloud-config
+runcmd:
+ - echo cloud-init run cmd test > /var/tmp/run_cmd
+"""
+
+
+@pytest.mark.ci
+class TestRuncmd:
+
+ @pytest.mark.user_data(USER_DATA)
+ def test_runcmd(self, client):
+ runcmd_output = client.read_from_file("/var/tmp/run_cmd")
+ assert runcmd_output.strip() == "cloud-init run cmd test"
diff --git a/tests/integration_tests/modules/test_seed_random_data.py b/tests/integration_tests/modules/test_seed_random_data.py
new file mode 100644
index 00000000..b365fa98
--- /dev/null
+++ b/tests/integration_tests/modules/test_seed_random_data.py
@@ -0,0 +1,28 @@
+"""Integration test for the random seed module.
+
+This test provides seed data to the ``seed_random`` module, along with a
+file for the module to write the seed to. We will then check that the
+seed file contains exactly that data.
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/seed_random_data.yaml``.)"""
+
+import pytest
+
+
+USER_DATA = """\
+#cloud-config
+random_seed:
+ data: 'MYUb34023nD:LFDK10913jk;dfnk:Df'
+ encoding: raw
+ file: /root/seed
+"""
+
+
+@pytest.mark.ci
+class TestSeedRandomData:
+
+ @pytest.mark.user_data(USER_DATA)
+ def test_seed_random_data(self, client):
+ seed_output = client.read_from_file("/root/seed")
+ assert seed_output.strip() == "MYUb34023nD:LFDK10913jk;dfnk:Df"
diff --git a/tests/integration_tests/modules/test_set_hostname.py b/tests/integration_tests/modules/test_set_hostname.py
new file mode 100644
index 00000000..2bfa403d
--- /dev/null
+++ b/tests/integration_tests/modules/test_set_hostname.py
@@ -0,0 +1,47 @@
+"""Integration test for the set_hostname module.
+
+This module specifies two tests: one updates only the hostname, and the
+other updates both the hostname and the FQDN of the system. For both of
+these tests we check that the changes requested by the user data are
+respected after the system boots.
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/set_hostname.yaml`` and
+``tests/cloud_tests/testcases/modules/set_hostname_fqdn.yaml``.)"""
+
+import pytest
+
+
+USER_DATA_HOSTNAME = """\
+#cloud-config
+hostname: cloudinit2
+"""
+
+USER_DATA_FQDN = """\
+#cloud-config
+manage_etc_hosts: true
+hostname: cloudinit1
+fqdn: cloudinit2.i9n.cloud-init.io
+"""
+
+
+@pytest.mark.ci
+class TestHostname:
+
+ @pytest.mark.user_data(USER_DATA_HOSTNAME)
+ def test_hostname(self, client):
+ hostname_output = client.execute("hostname")
+ assert "cloudinit2" in hostname_output.strip()
+
+ @pytest.mark.user_data(USER_DATA_FQDN)
+ def test_hostname_and_fqdn(self, client):
+ hostname_output = client.execute("hostname")
+ assert "cloudinit1" in hostname_output.strip()
+
+ fqdn_output = client.execute("hostname --fqdn")
+ assert "cloudinit2.i9n.cloud-init.io" in fqdn_output.strip()
+
+ host_output = client.execute("grep ^127 /etc/hosts")
+ assert '127.0.1.1 {} {}'.format(
+ fqdn_output, hostname_output) in host_output
+ assert '127.0.0.1 localhost' in host_output
diff --git a/tests/integration_tests/modules/test_set_password.py b/tests/integration_tests/modules/test_set_password.py
new file mode 100644
index 00000000..b13f76fb
--- /dev/null
+++ b/tests/integration_tests/modules/test_set_password.py
@@ -0,0 +1,151 @@
+"""Integration test for the set_password module.
+
+This test specifies a combination of user/password pairs, and ensures that the
+system has the correct passwords set.
+
+There are two tests run here: one tests chpasswd's list being a YAML list, the
+other tests chpasswd's list being a string. Both expect the same results, so
+they use a mixin to share their test definitions, because we can (of course)
+only specify one user-data per instance.
+"""
+import crypt
+
+import pytest
+import yaml
+
+
+COMMON_USER_DATA = """\
+#cloud-config
+ssh_pwauth: yes
+users:
+ - default
+ - name: tom
+ # md5 gotomgo
+ passwd: "$1$S7$tT1BEDIYrczeryDQJfdPe0"
+ lock_passwd: false
+ - name: dick
+ # md5 gocubsgo
+ passwd: "$1$ssisyfpf$YqvuJLfrrW6Cg/l53Pi1n1"
+ lock_passwd: false
+ - name: harry
+ # sha512 goharrygo
+ passwd: "$6$LF$9Z2p6rWK6TNC1DC6393ec0As.18KRAvKDbfsGJEdWN3sRQRwpdfoh37EQ3y\
+Uh69tP4GSrGW5XKHxMLiKowJgm/"
+ lock_passwd: false
+ - name: jane
+ # sha256 gojanego
+ passwd: "$5$iW$XsxmWCdpwIW8Yhv.Jn/R3uk6A4UaicfW5Xp7C9p9pg."
+ lock_passwd: false
+ - name: "mikey"
+ lock_passwd: false
+"""
+
+LIST_USER_DATA = COMMON_USER_DATA + """
+chpasswd:
+ list:
+ - tom:mypassword123!
+ - dick:RANDOM
+ - harry:RANDOM
+ - mikey:$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89
+"""
+
+STRING_USER_DATA = COMMON_USER_DATA + """
+chpasswd:
+ list: |
+ tom:mypassword123!
+ dick:RANDOM
+ harry:RANDOM
+ mikey:$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89
+"""
+
+USERS_DICTS = yaml.safe_load(COMMON_USER_DATA)["users"]
+USERS_PASSWD_VALUES = {
+ user_dict["name"]: user_dict["passwd"]
+ for user_dict in USERS_DICTS
+ if "name" in user_dict and "passwd" in user_dict
+}
+
+
+class Mixin:
+ """Shared test definitions."""
+
+ def _fetch_and_parse_etc_shadow(self, class_client):
+ """Fetch /etc/shadow and parse it into Python data structures
+
+ Returns: ({user: password}, [duplicate, users])
+ """
+ shadow_content = class_client.read_from_file("/etc/shadow")
+ users = {}
+ dupes = []
+ for line in shadow_content.splitlines():
+ user, encpw = line.split(":")[0:2]
+ if user in users:
+ dupes.append(user)
+ users[user] = encpw
+ return users, dupes
+
+ def test_no_duplicate_users_in_shadow(self, class_client):
+ """Confirm that set_passwords has not added duplicate shadow entries"""
+ _, dupes = self._fetch_and_parse_etc_shadow(class_client)
+
+ assert [] == dupes
+
+ def test_password_in_users_dict_set_correctly(self, class_client):
+ """Test that the password specified in the users dict is set."""
+ shadow_users, _ = self._fetch_and_parse_etc_shadow(class_client)
+ assert USERS_PASSWD_VALUES["jane"] == shadow_users["jane"]
+
+ def test_password_in_chpasswd_list_set_correctly(self, class_client):
+ """Test that a chpasswd password overrides one in the users dict."""
+ shadow_users, _ = self._fetch_and_parse_etc_shadow(class_client)
+ mikey_hash = "$5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89"
+ assert mikey_hash == shadow_users["mikey"]
+
+ def test_random_passwords_set_correctly(self, class_client):
+ """Test that RANDOM chpasswd entries replace users dict passwords."""
+ shadow_users, _ = self._fetch_and_parse_etc_shadow(class_client)
+
+ # These should have been changed
+ assert shadow_users["harry"] != USERS_PASSWD_VALUES["harry"]
+ assert shadow_users["dick"] != USERS_PASSWD_VALUES["dick"]
+
+ # To random passwords
+ assert shadow_users["harry"].startswith("$")
+ assert shadow_users["dick"].startswith("$")
+
+ # Which are not the same
+ assert shadow_users["harry"] != shadow_users["dick"]
+
+ def test_explicit_password_set_correctly(self, class_client):
+ """Test that an explicitly-specified password is set correctly."""
+ shadow_users, _ = self._fetch_and_parse_etc_shadow(class_client)
+
+ fmt_and_salt = shadow_users["tom"].rsplit("$", 1)[0]
+ expected_value = crypt.crypt("mypassword123!", fmt_and_salt)
+
+ assert expected_value == shadow_users["tom"]
+
+ def test_shadow_expected_users(self, class_client):
+ """Test that the right set of users is in /etc/shadow."""
+ shadow = class_client.read_from_file("/etc/shadow")
+ for user_dict in USERS_DICTS:
+ if "name" in user_dict:
+ assert "{}:".format(user_dict["name"]) in shadow
+
+ def test_sshd_config(self, class_client):
+ """Test that SSH password auth is enabled."""
+ sshd_config = class_client.read_from_file("/etc/ssh/sshd_config")
+ # We look for the exact line match, to avoid a commented line matching
+ assert "PasswordAuthentication yes" in sshd_config.splitlines()
+
+
+@pytest.mark.ci
+@pytest.mark.user_data(LIST_USER_DATA)
+class TestPasswordList(Mixin):
+ """Launch an instance with LIST_USER_DATA, ensure Mixin tests pass."""
+
+
+@pytest.mark.ci
+@pytest.mark.user_data(STRING_USER_DATA)
+class TestPasswordListString(Mixin):
+ """Launch an instance with STRING_USER_DATA, ensure Mixin tests pass."""
diff --git a/tests/integration_tests/modules/test_snap.py b/tests/integration_tests/modules/test_snap.py
new file mode 100644
index 00000000..b626f6b0
--- /dev/null
+++ b/tests/integration_tests/modules/test_snap.py
@@ -0,0 +1,29 @@
+"""Integration test for the snap module.
+
+This test specifies a command to be executed by the ``snap`` module
+and then checks that the command was executed during boot.
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/snap.yaml``.)"""
+
+import pytest
+
+
+USER_DATA = """\
+#cloud-config
+package_update: true
+snap:
+ squashfuse_in_container: true
+ commands:
+ - snap install hello-world
+"""
+
+
+@pytest.mark.ci
+class TestSnap:
+
+ @pytest.mark.user_data(USER_DATA)
+ def test_snap(self, client):
+ snap_output = client.execute("snap list")
+ assert "core " in snap_output
+ assert "hello-world " in snap_output
diff --git a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py
new file mode 100644
index 00000000..b9b0d85e
--- /dev/null
+++ b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py
@@ -0,0 +1,48 @@
+"""Integration test for the ssh_authkey_fingerprints module.
+
+This module specifies two tests regarding the ``ssh_authkey_fingerprints``
+module. The first verifies that we can disable the module's behavior, while
+the second verifies that the module works as expected when enabled.
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.yaml``,
+``tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_enable.yaml``.
+)"""
+import re
+
+import pytest
+
+
+USER_DATA_SSH_AUTHKEY_DISABLE = """\
+#cloud-config
+no_ssh_fingerprints: true
+"""
+
+USER_DATA_SSH_AUTHKEY_ENABLE = """\
+#cloud-config
+ssh_genkeytypes:
+ - ecdsa
+ - ed25519
+ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXW9Gg5H7ehjdSc6qDzwNtgCy94XYHhEYlXZMO2+FJrH3wfHGiMfCwOHxcOMt2QiXItULthdeQWS9QjBSSjVRXf6731igFrqPFyS9qBlOQ5D29C4HBXFnQggGVpBNJ82IRJv7szbbe/vpgLBP4kttUza9Dr4e1YM1ln4PRnjfXea6T0m+m1ixNb5432pTXlqYOnNOxSIm1gHgMLxPuDrJvQERDKrSiKSjIdyC9Jd8t2e1tkNLY0stmckVRbhShmcJvlyofHWbc2Ca1mmtP7MlS1VQnfLkvU1IrFwkmaQmaggX6WR6coRJ6XFXdWcq/AI2K6GjSnl1dnnCxE8VCEXBlXgFzad+PMSG4yiL5j8Oo1ZVpkTdgBnw4okGqTYCXyZg6X00As9IBNQfZMFlQXlIo4FiWgj3CO5QHQOyOX6FuEumaU13GnERrSSdp9tCs1Qm3/DG2RSCQBWTfcgMcStIvKqvJ3IjFn0vGLvI3Ampnq9q1SHwmmzAPSdzcMA76HyMUA5VWaBvWHlUxzIM6unxZASnwvuCzpywSEB5J2OF+p6H+cStJwQ32XwmOG8pLp1srlVWpqZI58Du/lzrkPqONphoZx0LDV86w7RUz1ksDzAdcm0tvmNRFMN1a0frDs506oA3aWK0oDk4Nmvk8sXGTYYw3iQSkOvDUUlIsqdaO+w==
+""" # noqa
+
+
+@pytest.mark.ci
+class TestSshAuthkeyFingerprints:
+
+ @pytest.mark.user_data(USER_DATA_SSH_AUTHKEY_DISABLE)
+ def test_ssh_authkey_fingerprints_disable(self, client):
+ cloudinit_output = client.read_from_file("/var/log/cloud-init.log")
+ assert (
+ "Skipping module named ssh-authkey-fingerprints, "
+ "logging of SSH fingerprints disabled") in cloudinit_output
+
+ @pytest.mark.user_data(USER_DATA_SSH_AUTHKEY_ENABLE)
+ def test_ssh_authkey_fingerprints_enable(self, client):
+ syslog_output = client.read_from_file("/var/log/syslog")
+
+ assert re.search(r'256 SHA256:.*(ECDSA)', syslog_output) is not None
+ assert re.search(r'256 SHA256:.*(ED25519)', syslog_output) is not None
+ assert re.search(r'1024 SHA256:.*(DSA)', syslog_output) is None
+ assert re.search(r'2048 SHA256:.*(RSA)', syslog_output) is None
diff --git a/tests/integration_tests/modules/test_ssh_generate.py b/tests/integration_tests/modules/test_ssh_generate.py
new file mode 100644
index 00000000..60c36982
--- /dev/null
+++ b/tests/integration_tests/modules/test_ssh_generate.py
@@ -0,0 +1,51 @@
+"""Integration test for the ssh module.
+
+This module has two tests to verify that we can control ssh host key
+generation through the ``ssh`` module. The first test asserts that the
+unrequested key types were not created, while the second verifies that
+the expected key types were created.
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/ssh_keys_generate.yaml``.)"""
+
+import pytest
+
+
+USER_DATA = """\
+#cloud-config
+ssh_genkeytypes:
+ - ecdsa
+ - ed25519
+authkey_hash: sha512
+"""
+
+
+@pytest.mark.ci
+@pytest.mark.user_data(USER_DATA)
+class TestSshKeysGenerate:
+
+ @pytest.mark.parametrize(
+ "ssh_key_path", (
+ "/etc/ssh/ssh_host_dsa_key.pub",
+ "/etc/ssh/ssh_host_dsa_key",
+ "/etc/ssh/ssh_host_rsa_key.pub",
+ "/etc/ssh/ssh_host_rsa_key",
+ )
+ )
+ def test_ssh_keys_not_generated(self, ssh_key_path, class_client):
+ out = class_client.execute(
+ "test -e {}".format(ssh_key_path)
+ )
+ assert out.failed
+
+ @pytest.mark.parametrize(
+ "ssh_key_path", (
+ "/etc/ssh/ssh_host_ecdsa_key.pub",
+ "/etc/ssh/ssh_host_ecdsa_key",
+ "/etc/ssh/ssh_host_ed25519_key.pub",
+ "/etc/ssh/ssh_host_ed25519_key",
+ )
+ )
+ def test_ssh_keys_generated(self, ssh_key_path, class_client):
+ out = class_client.read_from_file(ssh_key_path)
+ assert "" != out.strip()
diff --git a/tests/integration_tests/modules/test_ssh_import_id.py b/tests/integration_tests/modules/test_ssh_import_id.py
new file mode 100644
index 00000000..45d37d6c
--- /dev/null
+++ b/tests/integration_tests/modules/test_ssh_import_id.py
@@ -0,0 +1,29 @@
+"""Integration test for the ssh_import_id module.
+
+This test specifies ssh keys to be imported by the ``ssh_import_id`` module
+and then checks that the ssh keys were successfully imported.
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/ssh_import_id.yaml``.)"""
+
+import pytest
+
+
+USER_DATA = """\
+#cloud-config
+ssh_import_id:
+ - gh:powersj
+ - lp:smoser
+"""
+
+
+@pytest.mark.ci
+class TestSshImportId:
+
+ @pytest.mark.user_data(USER_DATA)
+ def test_ssh_import_id(self, client):
+ ssh_output = client.read_from_file(
+ "/home/ubuntu/.ssh/authorized_keys")
+
+ assert '# ssh-import-id gh:powersj' in ssh_output
+ assert '# ssh-import-id lp:smoser' in ssh_output
diff --git a/tests/integration_tests/modules/test_ssh_keys_provided.py b/tests/integration_tests/modules/test_ssh_keys_provided.py
new file mode 100644
index 00000000..27d193c1
--- /dev/null
+++ b/tests/integration_tests/modules/test_ssh_keys_provided.py
@@ -0,0 +1,148 @@
+"""Integration test for the ssh module.
+
+This test specifies keys to be provided to the system through the ``ssh``
+module and then checks that those keys were successfully added to the
+system.
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/ssh_keys_provided.yaml``.)"""
+
+import pytest
+
+
+USER_DATA = """\
+#cloud-config
+disable_root: false
+ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXW9Gg5H7ehjdSc6qDzwNtgCy94XYHhEYlXZMO2+FJrH3wfHGiMfCwOHxcOMt2QiXItULthdeQWS9QjBSSjVRXf6731igFrqPFyS9qBlOQ5D29C4HBXFnQggGVpBNJ82IRJv7szbbe/vpgLBP4kttUza9Dr4e1YM1ln4PRnjfXea6T0m+m1ixNb5432pTXlqYOnNOxSIm1gHgMLxPuDrJvQERDKrSiKSjIdyC9Jd8t2e1tkNLY0stmckVRbhShmcJvlyofHWbc2Ca1mmtP7MlS1VQnfLkvU1IrFwkmaQmaggX6WR6coRJ6XFXdWcq/AI2K6GjSnl1dnnCxE8VCEXBlXgFzad+PMSG4yiL5j8Oo1ZVpkTdgBnw4okGqTYCXyZg6X00As9IBNQfZMFlQXlIo4FiWgj3CO5QHQOyOX6FuEumaU13GnERrSSdp9tCs1Qm3/DG2RSCQBWTfcgMcStIvKqvJ3IjFn0vGLvI3Ampnq9q1SHwmmzAPSdzcMA76HyMUA5VWaBvWHlUxzIM6unxZASnwvuCzpywSEB5J2OF+p6H+cStJwQ32XwmOG8pLp1srlVWpqZI58Du/lzrkPqONphoZx0LDV86w7RUz1ksDzAdcm0tvmNRFMN1a0frDs506oA3aWK0oDk4Nmvk8sXGTYYw3iQSkOvDUUlIsqdaO+w==
+ssh_keys:
+ rsa_private: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAtPx6PqN3iSEsnTtibyIEy52Tra8T5fn0ryXyg46Di2NBwdnj
+ o8trNv9jenfV/UhmePl58lXjT43wV8OCMl6KsYXyBdegM35NNtono4I4mLLKFMR9
+ 9TOtDn6iYcaNenVhF3ZCj9Z2nNOlTrdc0uchHqKMrxLjCRCUrL91Uf+xioTF901Y
+ RM+ZqC5lT92yAL76F4qPF+Lq1QtUfNfUIwwvOp5ccDZLPxij0YvyBzubYye9hJHu
+ yjbJv78R4JHV+L2WhzSoX3W/6WrxVzeXqFGqH894ccOaC/7tnqSP6V8lIQ6fE2+c
+ DurJcpM3CJRgkndGHjtU55Y71YkcdLksSMvezQIDAQABAoIBAQCrU4IJP8dNeaj5
+ IpkY6NQvR/jfZqfogYi+MKb1IHin/4rlDfUvPcY9pt8ttLlObjYK+OcWn3Vx/sRw
+ 4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2unRQvLZpMRdywBm
+ lq95OrCghnG03aUsFJUZPpi5ydnwbA12ma+KHkG0EzaVlhA7X9N6z0K6U+zue2gl
+ goMLt/MH0rsYawkHrwiwXaIFQeyV4MJP0vmrZLbFk1bycu9X/xPtTYotWyWo4eKA
+ cb05uu04qwexkKHDM0KXtT0JecbTo2rOefFo8Uuab6uJY+fEHNocZ+v1vLA4aOxJ
+ ovp1JuXlAoGBAOWYNgKrlTfy5n0sKsNk+1RuL2jHJZJ3HMd0EIt7/fFQN3Fi08Hu
+ jtntqD30Wj+DJK8b8Lrt66FruxyEJm5VhVmwkukrLR5ige2f6ftZnoFCmdyy+0zP
+ dnPZSUe2H5ZPHa+qthJgHLn+al2P04tGh+1fGHC2PbP+e0Co+/ZRIOxrAoGBAMnN
+ IEen9/FRsqvnDd36I8XnJGskVRTZNjylxBmbKcuMWm+gNhOI7gsCAcqzD4BYZjjW
+ pLhrt/u9p+l4MOJy6OUUdM/okg12SnJEGryysOcVBcXyrvOfklWnANG4EAH5jt1N
+ ftTb1XTxzvWVuR/WJK0B5MZNYM71cumBdUDtPi+nAoGAYmoIXMSnxb+8xNL10aOr
+ h9ljQQp8NHgSQfyiSufvRk0YNuYh1vMnEIsqnsPrG2Zfhx/25GmvoxXGssaCorDN
+ 5FAn6QK06F1ZTD5L0Y3sv4OI6G1gAuC66ZWuL6sFhyyKkQ4f1WiVZ7SCa3CHQSAO
+ i9VDaKz1bf4bXvAQcNj9v9kCgYACSOZCqW4vN0OUmqsXhkt9ZB6Pb/veno70pNPR
+ jmYsvcwQU3oJQpWfXkhy6RAV3epaXmPDCsUsfns2M3wqNC7a2R5xdCqjKGGzZX4A
+ AO3rz9se4J6Gd5oKijeCKFlWDGNHsibrdgm2pz42nZlY+O21X74dWKbt8O16I1MW
+ hxkbJQKBgAXfuen/srVkJgPuqywUYag90VWCpHsuxdn+fZJa50SyZADr+RbiDfH2
+ vek8Uo8ap8AEsv4Rfs9opUcUZevLp3g2741eOaidHVLm0l4iLIVl03otGOqvSzs+
+ A3tFPEOxauXpzCt8f8eXsz0WQXAgIKW2h8zu5QHjomioU3i27mtE
+ -----END RSA PRIVATE KEY-----
+ rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgTLnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4+XnyVeNPjfBXw4IyXoqxhfIF16Azfk022iejgjiYssoUxH31M60OfqJhxo16dWEXdkKP1nac06VOt1zS5yEeooyvEuMJEJSsv3VR/7GKhMX3TVhEz5moLmVP3bIAvvoXio8X4urVC1R819QjDC86nlxwNks/GKPRi/IHO5tjJ72Eke7KNsm/vxHgkdX4vZaHNKhfdb/pavFXN5eoUaofz3hxw5oL/u2epI/pXyUhDp8Tb5wO6slykzcIlGCSd0YeO1TnljvViRx0uSxIy97N root@xenial-lxd
+ rsa_certificate: ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgMpgBP4Phn3L8I7Vqh7lmHKcOfIokEvSEbHDw83Y3JloAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgTLnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4+XnyVeNPjfBXw4IyXoqxhfIF16Azfk022iejgjiYssoUxH31M60OfqJhxo16dWEXdkKP1nac06VOt1zS5yEeooyvEuMJEJSsv3VR/7GKhMX3TVhEz5moLmVP3bIAvvoXio8X4urVC1R819QjDC86nlxwNks/GKPRi/IHO5tjJ72Eke7KNsm/vxHgkdX4vZaHNKhfdb/pavFXN5eoUaofz3hxw5oL/u2epI/pXyUhDp8Tb5wO6slykzcIlGCSd0YeO1TnljvViRx0uSxIy97NAAAAAAAAAAAAAAACAAAACnhlbmlhbC1seGQAAAAAAAAAAF+vVEIAAAAAYY83bgAAAAAAAAAAAAAAAAAAADMAAAALc3NoLWVkMjU1MTkAAAAgz4SlDwbq53ZrRsnS6ISdwxgFDRpnEX44K8jFmLpI9NAAAABTAAAAC3NzaC1lZDI1NTE5AAAAQMWpiRWKNMFvRX0g6OQOELMqDhtNBpkIN92IyO25qiY2oDSd1NyVme6XnGDFt8CS7z5NufV04doP4aacLOBbQww= root@xenial-lxd
+ dsa_private: |
+ -----BEGIN DSA PRIVATE KEY-----
+ MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXrhOVAfzZ6+jklP
+ 55mzvC7jO53PWWC31hq10xBoWdev0WtcNF9Tv+4bAa1263y51Rqo4GI7xx+xic1d
+ mLqqfYijBT9k48J/1tV0cs1Wjs6FP/IJTD/kYVC930JjYQMi722lBnUxsQIVAL7i
+ z3fTGKTvSzvW0wQlwnYpS2QFAoGANp+KdyS9V93HgxGQEN1rlj/TSv/a3EVdCKtE
+ nQf55aPHxDAVDVw5JtRh4pZbbRV4oGRPc9KOdjo5BU28vSM3Lmhkb+UaaDXwHkgI
+ nK193o74DKjADWZxuLyyiKHiMOhxozoxDfjWxs8nz6uqvSW0pr521EwIY6RajbED
+ nZ2a3GkCgYEAyoUomNRB6bmpsIfzt8zdtqLP5umIj2uhr9MVPL8/QdbxmJ72Z7pf
+ Q2z1B7QAdIBGOlqJXtlau7ABhWK29Efe+99ObyTSSdDc6RCDeAwUmBAiPRQhDH2E
+ wExw3doDSCUb28L1B50wBzQ8mC3KXp6C7IkBXWspb16DLHUHFSI8bkICFA5kVUcW
+ nCPOXEQsayANi8+Cb7BH
+ -----END DSA PRIVATE KEY-----
+ dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4RZS8cNM4ZpeuE5UB/Nnr6OSU/nmbO8LuM7nc9ZYLfWGrXTEGhZ16/Ra1w0X1O/7hsBrXbrfLnVGqjgYjvHH7GJzV2Yuqp9iKMFP2Tjwn/W1XRyzVaOzoU/8glMP+RhUL3fQmNhAyLvbaUGdTGxAAAAFQC+4s930xik70s71tMEJcJ2KUtkBQAAAIA2n4p3JL1X3ceDEZAQ3WuWP9NK/9rcRV0Iq0SdB/nlo8fEMBUNXDkm1GHillttFXigZE9z0o52OjkFTby9IzcuaGRv5RpoNfAeSAicrX3ejvgMqMANZnG4vLKIoeIw6HGjOjEN+NbGzyfPq6q9JbSmvnbUTAhjpFqNsQOdnZrcaQAAAIEAyoUomNRB6bmpsIfzt8zdtqLP5umIj2uhr9MVPL8/QdbxmJ72Z7pfQ2z1B7QAdIBGOlqJXtlau7ABhWK29Efe+99ObyTSSdDc6RCDeAwUmBAiPRQhDH2EwExw3doDSCUb28L1B50wBzQ8mC3KXp6C7IkBXWspb16DLHUHFSI8bkI= root@xenial-lxd
+ ed25519_private: |
+ -----BEGIN OPENSSH PRIVATE KEY-----
+ b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+ QyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNTOhteXao0Nl5DVThJ2+QAAAJgwt+lcMLfp
+ XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNTOhteXao0Nl5DVThJ2+Q
+ AAAEDQlFZpz9q8+/YJHS9+jPAqy2ZT6cGEv8HTB6RZtTjd/dudAZSu4vjZpVWzId5pXmZg
+ 1M6G15dqjQ2XkNVOEnb5AAAAD3Jvb3RAeGVuaWFsLWx4ZAECAwQFBg==
+ -----END OPENSSH PRIVATE KEY-----
+ ed25519_public: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6G15dqjQ2XkNVOEnb5 root@xenial-lxd
+ ecdsa_private: |
+ -----BEGIN EC PRIVATE KEY-----
+ MHcCAQEEIDuK+QFc1wmyJY8uDqQVa1qHte30Rk/fdLxGIBkwJAyOoAoGCCqGSM49
+ AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY5mpZqxgX4vcgb
+ 7f/CtXuM6s2svcDJqAeXr6Wk8OJJcMxylA==
+ -----END EC PRIVATE KEY-----
+ ecdsa_public: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFsS5Tvky/IC/dXhE/afxxUG6kdQOvdQJCYGZN42OZqWasYF+L3IG+3/wrV7jOrNrL3AyagHl6+lpPDiSXDMcpQ= root@xenial-lxd
+""" # noqa
+
+
+@pytest.mark.ci
+@pytest.mark.user_data(USER_DATA)
+class TestSshKeysProvided:
+
+ def test_ssh_dsa_keys_provided(self, class_client):
+ """Test dsa public key was imported."""
+ out = class_client.read_from_file("/etc/ssh/ssh_host_dsa_key.pub")
+ assert (
+ "AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4R"
+ "ZS8cNM4ZpeuE5UB/Nnr6OSU/nmbO8LuM") in out
+
+ """Test dsa private key was imported."""
+ out = class_client.read_from_file("/etc/ssh/ssh_host_dsa_key")
+ assert (
+ "MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXr"
+ "hOVAfzZ6+jklP") in out
+
+ def test_ssh_rsa_keys_provided(self, class_client):
+ """Test rsa public key was imported."""
+ out = class_client.read_from_file("/etc/ssh/ssh_host_rsa_key.pub")
+ assert (
+ "AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgT"
+ "LnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4") in out
+
+ """Test rsa private key was imported."""
+ out = class_client.read_from_file("/etc/ssh/ssh_host_rsa_key")
+ assert (
+ "4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2un"
+ "RQvLZpMRdywBm") in out
+
+ def test_ssh_rsa_certificate_provided(self, class_client):
+ """Test rsa certificate was imported."""
+ out = class_client.read_from_file("/etc/ssh/ssh_host_rsa_key-cert.pub")
+ assert (
+ "AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgMpg"
+ "BP4Phn3L8I7Vqh7lmHKcOfIokEvSEbHDw83Y3JloAAAAD") in out
+
+ def test_ssh_certificate_updated_sshd_config(self, class_client):
+ """Test ssh certificate was added to /etc/ssh/sshd_config."""
+ out = class_client.read_from_file("/etc/ssh/sshd_config").strip()
+ assert "HostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub" in out
+
+ def test_ssh_ecdsa_keys_provided(self, class_client):
+ """Test ecdsa public key was imported."""
+ out = class_client.read_from_file("/etc/ssh/ssh_host_ecdsa_key.pub")
+ assert (
+ "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAAB"
+ "BBFsS5Tvky/IC/dXhE/afxxU") in out
+
+ """Test ecdsa private key generated."""
+ out = class_client.read_from_file("/etc/ssh/ssh_host_ecdsa_key")
+ assert (
+ "AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY"
+ "5mpZqxgX4vcgb") in out
+
+ def test_ssh_ed25519_keys_provided(self, class_client):
+ """Test ed25519 public key was imported."""
+ out = class_client.read_from_file("/etc/ssh/ssh_host_ed25519_key.pub")
+ assert (
+ "AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6"
+ "G15dqjQ2XkNVOEnb5") in out
+
+ """Test ed25519 private key was imported."""
+ out = class_client.read_from_file("/etc/ssh/ssh_host_ed25519_key")
+ assert (
+ "XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNT"
+ "OhteXao0Nl5DVThJ2+Q") in out
diff --git a/tests/integration_tests/modules/test_timezone.py b/tests/integration_tests/modules/test_timezone.py
new file mode 100644
index 00000000..111d53f7
--- /dev/null
+++ b/tests/integration_tests/modules/test_timezone.py
@@ -0,0 +1,25 @@
+"""Integration test for the timezone module.
+
+This test specifies a timezone to be used by the ``timezone`` module
+and then checks that the timezone was respected during boot.
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/timezone.yaml``.)"""
+
+import pytest
+
+
+USER_DATA = """\
+#cloud-config
+timezone: US/Aleutian
+"""
+
+
+@pytest.mark.ci
+class TestTimezone:
+
+ @pytest.mark.user_data(USER_DATA)
+ def test_timezone(self, client):
+ timezone_output = client.execute(
+ 'date "+%Z" --date="Thu, 03 Nov 2016 00:47:00 -0400"')
+ assert timezone_output.strip() == "HDT"
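The fixed ``--date`` argument is what makes the assertion deterministic: the test asks what US/Aleutian called one specific instant instead of depending on when the test runs. The same expectation can be reproduced with the standard library, a sketch assuming Python 3.9+ with tz data installed:

    from datetime import datetime, timedelta, timezone
    from zoneinfo import ZoneInfo

    # Thu, 03 Nov 2016 00:47:00 -0400 falls before the 2016-11-06 DST
    # rollback, so US/Aleutian still reports Hawaii-Aleutian Daylight Time.
    when = datetime(2016, 11, 3, 0, 47, tzinfo=timezone(timedelta(hours=-4)))
    assert when.astimezone(ZoneInfo("US/Aleutian")).tzname() == "HDT"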
diff --git a/tests/integration_tests/modules/test_users_groups.py b/tests/integration_tests/modules/test_users_groups.py
new file mode 100644
index 00000000..6a51f5a6
--- /dev/null
+++ b/tests/integration_tests/modules/test_users_groups.py
@@ -0,0 +1,83 @@
+"""Integration test for the user_groups module.
+
+This test specifies a number of users and groups via user-data, and confirms
+that they have been configured correctly in the system under test.
+"""
+import re
+
+import pytest
+
+
+USER_DATA = """\
+#cloud-config
+# Add groups to the system
+groups:
+ - secret: [root]
+ - cloud-users
+
+# Add users to the system. Users are added after groups are added.
+users:
+ - default
+ - name: foobar
+ gecos: Foo B. Bar
+ primary_group: foobar
+ groups: users
+ expiredate: 2038-01-19
+ lock_passwd: false
+ passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYe\
+AHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
+ - name: barfoo
+ gecos: Bar B. Foo
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ groups: [cloud-users, secret]
+ lock_passwd: true
+ - name: cloudy
+ gecos: Magic Cloud App Daemon User
+ inactive: true
+ system: true
+"""
+
+
+@pytest.mark.ci
+@pytest.mark.user_data(USER_DATA)
+class TestUsersGroups:
+ @pytest.mark.parametrize(
+ "getent_args,regex",
+ [
+ # Test the ubuntu group
+ (["group", "ubuntu"], r"ubuntu:x:[0-9]{4}:"),
+ # Test the cloud-users group
+ (["group", "cloud-users"], r"cloud-users:x:[0-9]{4}:barfoo"),
+ # Test the ubuntu user
+ (
+ ["passwd", "ubuntu"],
+ r"ubuntu:x:[0-9]{4}:[0-9]{4}:Ubuntu:/home/ubuntu:/bin/bash",
+ ),
+ # Test the foobar user
+ (
+ ["passwd", "foobar"],
+ r"foobar:x:[0-9]{4}:[0-9]{4}:Foo B. Bar:/home/foobar:",
+ ),
+ # Test the barfoo user
+ (
+ ["passwd", "barfoo"],
+ r"barfoo:x:[0-9]{4}:[0-9]{4}:Bar B. Foo:/home/barfoo:",
+ ),
+ # Test the cloudy user
+ (["passwd", "cloudy"], r"cloudy:x:[0-9]{3,4}:"),
+ ],
+ )
+ def test_users_groups(self, regex, getent_args, class_client):
+ """Use getent to interrogate the various expected outcomes"""
+ result = class_client.execute(["getent"] + getent_args)
+ assert re.search(regex, result.stdout) is not None, (
+ "'getent {}' resulted in '{}', "
+ "but expected to match regex {}".format(
+ ' '.join(getent_args), result.stdout, regex))
+
+ def test_user_root_in_secret(self, class_client):
+ """Test root user is in 'secret' group."""
+ output = class_client.execute("groups root").stdout
+ _, groups_str = output.split(":", maxsplit=1)
+ groups = groups_str.split()
+ assert "secret" in groups
diff --git a/tests/integration_tests/modules/test_write_files.py b/tests/integration_tests/modules/test_write_files.py
new file mode 100644
index 00000000..15832ae3
--- /dev/null
+++ b/tests/integration_tests/modules/test_write_files.py
@@ -0,0 +1,66 @@
+"""Integration test for the write_files module.
+
+This test specifies files to be created by the ``write_files`` module
+and then checks if those files were created during boot.
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/write_files.yaml``.)"""
+
+import base64
+import pytest
+
+
+ASCII_TEXT = "ASCII text"
+B64_CONTENT = base64.b64encode(ASCII_TEXT.encode("utf-8"))
+
+# NOTE: the binary data can be any binary data, not only executables,
+# and can be generated with the base64 command, for example:
+# $ base64 < hello > hello.txt
+# and decoded again by running:
+# $ base64 -d < hello.txt > hello
+#
+USER_DATA = """\
+#cloud-config
+write_files:
+- encoding: b64
+ content: {}
+ owner: root:root
+ path: /root/file_b64
+ permissions: '0644'
+- content: |
+ # My new /root/file_text
+
+ SMBDOPTIONS="-D"
+ path: /root/file_text
+- content: !!binary |
+ /Z/xrHR4WINT0UNoKPQKbuovp6+Js+JK
+ path: /root/file_binary
+ permissions: '0555'
+- encoding: gzip
+ content: !!binary |
+ H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
+ path: /root/file_gzip
+ permissions: '0755'
+""".format(B64_CONTENT.decode("ascii"))
+
+
+@pytest.mark.ci
+@pytest.mark.user_data(USER_DATA)
+class TestWriteFiles:
+
+ @pytest.mark.parametrize(
+ "cmd,expected_out", (
+ ("file /root/file_b64", ASCII_TEXT),
+ ("md5sum </root/file_binary", "3801184b97bb8c6e63fa0e1eae2920d7"),
+ ("sha256sum </root/file_binary", (
+ "2c791c4037ea5bd7e928d6a87380f8ba"
+ "7a803cd83d5e4f269e28f5090f0f2c9a"
+ )),
+ ("file /root/file_gzip",
+ "POSIX shell script, ASCII text executable"),
+ ("file /root/file_text", ASCII_TEXT),
+ )
+ )
+ def test_write_files(self, cmd, expected_out, class_client):
+ out = class_client.execute(cmd)
+ assert expected_out in out
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index dcf0fe5a..74f85959 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -214,7 +214,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
self.assertEqual(1, exit_code)
# Known whitebox output from schema subcommand
self.assertEqual(
- 'Expected either --config-file argument or --docs\n',
+ 'Expected one of --config-file, --system or --docs arguments\n',
self.stderr.getvalue())
def test_wb_devel_schema_subcommand_doc_content(self):
diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py
index b626229e..eb2828d5 100644
--- a/tests/unittests/test_datasource/test_aliyun.py
+++ b/tests/unittests/test_datasource/test_aliyun.py
@@ -188,7 +188,7 @@ class TestIsAliYun(test_helpers.CiTestCase):
ALIYUN_PRODUCT = 'Alibaba Cloud ECS'
read_dmi_data_expected = [mock.call('system-product-name')]
- @mock.patch("cloudinit.sources.DataSourceAliYun.util.read_dmi_data")
+ @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data")
def test_true_on_aliyun_product(self, m_read_dmi_data):
"""Should return true if the dmi product data has expected value."""
m_read_dmi_data.return_value = self.ALIYUN_PRODUCT
@@ -197,7 +197,7 @@ class TestIsAliYun(test_helpers.CiTestCase):
m_read_dmi_data.call_args_list)
self.assertEqual(True, ret)
- @mock.patch("cloudinit.sources.DataSourceAliYun.util.read_dmi_data")
+ @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data")
def test_false_on_empty_string(self, m_read_dmi_data):
"""Should return false on empty value returned."""
m_read_dmi_data.return_value = ""
@@ -206,7 +206,7 @@ class TestIsAliYun(test_helpers.CiTestCase):
m_read_dmi_data.call_args_list)
self.assertEqual(False, ret)
- @mock.patch("cloudinit.sources.DataSourceAliYun.util.read_dmi_data")
+ @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data")
def test_false_on_unknown_string(self, m_read_dmi_data):
"""Should return false on an unrelated string."""
m_read_dmi_data.return_value = "cubs win"
diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py
index fc59d1d5..7a5393ac 100644
--- a/tests/unittests/test_datasource/test_altcloud.py
+++ b/tests/unittests/test_datasource/test_altcloud.py
@@ -14,6 +14,7 @@ import os
import shutil
import tempfile
+from cloudinit import dmi
from cloudinit import helpers
from cloudinit import subp
from cloudinit import util
@@ -88,14 +89,14 @@ class TestGetCloudType(CiTestCase):
super(TestGetCloudType, self).setUp()
self.tmp = self.tmp_dir()
self.paths = helpers.Paths({'cloud_dir': self.tmp})
- self.dmi_data = util.read_dmi_data
+ self.dmi_data = dmi.read_dmi_data
# We have a different code path for arm to deal with LP1243287
# We have to switch arch to x86_64 to avoid test failure
force_arch('x86_64')
def tearDown(self):
# Reset
- util.read_dmi_data = self.dmi_data
+ dmi.read_dmi_data = self.dmi_data
force_arch()
def test_cloud_info_file_ioerror(self):
@@ -123,7 +124,7 @@ class TestGetCloudType(CiTestCase):
Test method get_cloud_type() for RHEVm systems.
Forcing read_dmi_data return to match a RHEVm system: RHEV Hypervisor
'''
- util.read_dmi_data = _dmi_data('RHEV')
+ dmi.read_dmi_data = _dmi_data('RHEV')
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual('RHEV', dsrc.get_cloud_type())
@@ -132,7 +133,7 @@ class TestGetCloudType(CiTestCase):
Test method get_cloud_type() for vSphere systems.
Forcing read_dmi_data return to match a vSphere system: RHEV Hypervisor
'''
- util.read_dmi_data = _dmi_data('VMware Virtual Platform')
+ dmi.read_dmi_data = _dmi_data('VMware Virtual Platform')
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual('VSPHERE', dsrc.get_cloud_type())
@@ -141,7 +142,7 @@ class TestGetCloudType(CiTestCase):
Test method get_cloud_type() for unknown systems.
Forcing read_dmi_data return to match an unrecognized return.
'''
- util.read_dmi_data = _dmi_data('Unrecognized Platform')
+ dmi.read_dmi_data = _dmi_data('Unrecognized Platform')
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual('UNKNOWN', dsrc.get_cloud_type())
@@ -219,7 +220,7 @@ class TestGetDataNoCloudInfoFile(CiTestCase):
self.tmp = self.tmp_dir()
self.paths = helpers.Paths(
{'cloud_dir': self.tmp, 'run_dir': self.tmp})
- self.dmi_data = util.read_dmi_data
+ self.dmi_data = dmi.read_dmi_data
dsac.CLOUD_INFO_FILE = \
'no such file'
# We have a different code path for arm to deal with LP1243287
@@ -230,14 +231,14 @@ class TestGetDataNoCloudInfoFile(CiTestCase):
# Reset
dsac.CLOUD_INFO_FILE = \
'/etc/sysconfig/cloud-info'
- util.read_dmi_data = self.dmi_data
+ dmi.read_dmi_data = self.dmi_data
# Return back to original arch
force_arch()
def test_rhev_no_cloud_file(self):
'''Test No cloud info file module get_data() forcing RHEV.'''
- util.read_dmi_data = _dmi_data('RHEV Hypervisor')
+ dmi.read_dmi_data = _dmi_data('RHEV Hypervisor')
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_rhevm = lambda: True
self.assertEqual(True, dsrc.get_data())
@@ -245,7 +246,7 @@ class TestGetDataNoCloudInfoFile(CiTestCase):
def test_vsphere_no_cloud_file(self):
'''Test No cloud info file module get_data() forcing VSPHERE.'''
- util.read_dmi_data = _dmi_data('VMware Virtual Platform')
+ dmi.read_dmi_data = _dmi_data('VMware Virtual Platform')
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
dsrc.user_data_vsphere = lambda: True
self.assertEqual(True, dsrc.get_data())
@@ -253,7 +254,7 @@ class TestGetDataNoCloudInfoFile(CiTestCase):
def test_failure_no_cloud_file(self):
'''Test No cloud info file module get_data() forcing unrecognized.'''
- util.read_dmi_data = _dmi_data('Unrecognized Platform')
+ dmi.read_dmi_data = _dmi_data('Unrecognized Platform')
dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
self.assertEqual(False, dsrc.get_data())
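The manual save/restore of ``dmi.read_dmi_data`` in setUp/tearDown above predates this refactor; ``mock.patch`` would give the same isolation and undo itself even when a test raises. A sketch, assuming the datasource resolves the function through the ``cloudinit.dmi`` module:

    from unittest import mock

    with mock.patch(
            "cloudinit.dmi.read_dmi_data",
            return_value="RHEV Hypervisor"):
        ...  # exercise dsac.DataSourceAltCloud here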
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 47e03bd1..e363c1f9 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -11,6 +11,7 @@ from cloudinit.version import version_string as vs
from cloudinit.tests.helpers import (
HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call,
ExitStack, resourceLocation)
+from cloudinit.sources.helpers import netlink
import copy
import crypt
@@ -78,6 +79,8 @@ def construct_valid_ovf_env(data=None, pubkeys=None,
if platform_settings:
for k, v in platform_settings.items():
content += "<%s>%s</%s>\n" % (k, v, k)
+ if "PreprovisionedVMType" not in platform_settings:
+ content += """<PreprovisionedVMType i:nil="true" />"""
content += """</PlatformSettings></wa:PlatformSettingsSection>
</Environment>"""
@@ -102,7 +105,13 @@ NETWORK_METADATA = {
"vmId": "ff702a6b-cb6a-4fcd-ad68-b4ce38227642",
"vmScaleSetName": "",
"vmSize": "Standard_DS1_v2",
- "zone": ""
+ "zone": "",
+ "publicKeys": [
+ {
+ "keyData": "key1",
+ "path": "path1"
+ }
+ ]
},
"network": {
"interface": [
@@ -150,14 +159,50 @@ SECONDARY_INTERFACE = {
}
}
+IMDS_NETWORK_METADATA = {
+ "interface": [
+ {
+ "macAddress": "000D3A047598",
+ "ipv6": {
+ "ipAddress": []
+ },
+ "ipv4": {
+ "subnet": [
+ {
+ "prefix": "24",
+ "address": "10.0.0.0"
+ }
+ ],
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.4",
+ "publicIpAddress": "104.46.124.81"
+ }
+ ]
+ }
+ }
+ ]
+}
+
MOCKPATH = 'cloudinit.sources.DataSourceAzure.'
class TestParseNetworkConfig(CiTestCase):
maxDiff = None
+ fallback_config = {
+ 'version': 1,
+ 'config': [{
+ 'type': 'physical', 'name': 'eth0',
+ 'mac_address': '00:11:22:33:44:55',
+ 'params': {'driver': 'hv_netsvc'},
+ 'subnets': [{'type': 'dhcp'}],
+ }]
+ }
- def test_single_ipv4_nic_configuration(self):
+ @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
+ return_value=None)
+ def test_single_ipv4_nic_configuration(self, m_driver):
"""parse_network_config emits dhcp on single nic with ipv4"""
expected = {'ethernets': {
'eth0': {'dhcp4': True,
@@ -167,7 +212,9 @@ class TestParseNetworkConfig(CiTestCase):
'set-name': 'eth0'}}, 'version': 2}
self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA))
- def test_increases_route_metric_for_non_primary_nics(self):
+ @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
+ return_value=None)
+ def test_increases_route_metric_for_non_primary_nics(self, m_driver):
"""parse_network_config increases route-metric for each nic"""
expected = {'ethernets': {
'eth0': {'dhcp4': True,
@@ -194,7 +241,9 @@ class TestParseNetworkConfig(CiTestCase):
imds_data['network']['interface'].append(third_intf)
self.assertEqual(expected, dsaz.parse_network_config(imds_data))
- def test_ipv4_and_ipv6_route_metrics_match_for_nics(self):
+ @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
+ return_value=None)
+ def test_ipv4_and_ipv6_route_metrics_match_for_nics(self, m_driver):
"""parse_network_config emits matching ipv4 and ipv6 route-metrics."""
expected = {'ethernets': {
'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/128'],
@@ -236,7 +285,9 @@ class TestParseNetworkConfig(CiTestCase):
imds_data['network']['interface'].append(third_intf)
self.assertEqual(expected, dsaz.parse_network_config(imds_data))
- def test_ipv4_secondary_ips_will_be_static_addrs(self):
+ @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
+ return_value=None)
+ def test_ipv4_secondary_ips_will_be_static_addrs(self, m_driver):
"""parse_network_config emits primary ipv4 as dhcp others are static"""
expected = {'ethernets': {
'eth0': {'addresses': ['10.0.0.5/24'],
@@ -256,7 +307,9 @@ class TestParseNetworkConfig(CiTestCase):
}
self.assertEqual(expected, dsaz.parse_network_config(imds_data))
- def test_ipv6_secondary_ips_will_be_static_cidrs(self):
+ @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
+ return_value=None)
+ def test_ipv6_secondary_ips_will_be_static_cidrs(self, m_driver):
"""parse_network_config emits primary ipv6 as dhcp others are static"""
expected = {'ethernets': {
'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/10'],
@@ -295,6 +348,42 @@ class TestParseNetworkConfig(CiTestCase):
}}, 'version': 2}
self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA))
+ @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
+ return_value=None)
+ @mock.patch('cloudinit.net.generate_fallback_config')
+ def test_parse_network_config_uses_fallback_cfg_when_no_network_metadata(
+ self, m_fallback_config, m_driver):
+ """parse_network_config generates fallback network config when the
+ IMDS instance metadata is corrupted/invalid, such as when
+ network metadata is not present.
+ """
+ imds_metadata_missing_network_metadata = copy.deepcopy(
+ NETWORK_METADATA)
+ del imds_metadata_missing_network_metadata['network']
+ m_fallback_config.return_value = self.fallback_config
+ self.assertEqual(
+ self.fallback_config,
+ dsaz.parse_network_config(
+ imds_metadata_missing_network_metadata))
+
+ @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
+ return_value=None)
+ @mock.patch('cloudinit.net.generate_fallback_config')
+ def test_parse_network_config_uses_fallback_cfg_when_no_interface_metadata(
+ self, m_fallback_config, m_driver):
+ """parse_network_config generates fallback network config when the
+ IMDS instance metadata is corrupted/invalid, such as when
+ network interface metadata is not present.
+ """
+ imds_metadata_missing_interface_metadata = copy.deepcopy(
+ NETWORK_METADATA)
+ del imds_metadata_missing_interface_metadata['network']['interface']
+ m_fallback_config.return_value = self.fallback_config
+ self.assertEqual(
+ self.fallback_config,
+ dsaz.parse_network_config(
+ imds_metadata_missing_interface_metadata))
+
class TestGetMetadataFromIMDS(HttprettyTestCase):
@@ -302,11 +391,11 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
def setUp(self):
super(TestGetMetadataFromIMDS, self).setUp()
- self.network_md_url = dsaz.IMDS_URL + "instance?api-version=2017-12-01"
+ self.network_md_url = dsaz.IMDS_URL + "instance?api-version=2019-06-01"
@mock.patch(MOCKPATH + 'readurl')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- @mock.patch(MOCKPATH + 'net.is_up')
+ @mock.patch(MOCKPATH + 'EphemeralDHCPv4', autospec=True)
+ @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
def test_get_metadata_does_not_dhcp_if_network_is_up(
self, m_net_is_up, m_dhcp, m_readurl):
"""Do not perform DHCP setup when nic is already up."""
@@ -323,9 +412,66 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
"Crawl of Azure Instance Metadata Service (IMDS) took", # log_time
self.logs.getvalue())
- @mock.patch(MOCKPATH + 'readurl')
- @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting')
+ @mock.patch(MOCKPATH + 'readurl', autospec=True)
+ @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
+ @mock.patch(MOCKPATH + 'net.is_up')
+ def test_get_compute_metadata_uses_compute_url(
+ self, m_net_is_up, m_dhcp, m_readurl):
+ """Make sure readurl is called with the correct url when accessing
+ network metadata"""
+ m_net_is_up.return_value = True
+ m_readurl.return_value = url_helper.StringResponse(
+ json.dumps(IMDS_NETWORK_METADATA).encode('utf-8'))
+
+ dsaz.get_metadata_from_imds(
+ 'eth0', retries=3, md_type=dsaz.metadata_type.compute)
+ m_readurl.assert_called_with(
+ "http://169.254.169.254/metadata/instance?api-version="
+ "2019-06-01", exception_cb=mock.ANY,
+ headers=mock.ANY, retries=mock.ANY,
+ timeout=mock.ANY)
+
+ @mock.patch(MOCKPATH + 'readurl', autospec=True)
+ @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
@mock.patch(MOCKPATH + 'net.is_up')
+ def test_get_network_metadata_uses_network_url(
+ self, m_net_is_up, m_dhcp, m_readurl):
+ """Make sure readurl is called with the correct url when accessing
+ network metadata"""
+ m_net_is_up.return_value = True
+ m_readurl.return_value = url_helper.StringResponse(
+ json.dumps(IMDS_NETWORK_METADATA).encode('utf-8'))
+
+ dsaz.get_metadata_from_imds(
+ 'eth0', retries=3, md_type=dsaz.metadata_type.network)
+ m_readurl.assert_called_with(
+ "http://169.254.169.254/metadata/instance/network?api-version="
+ "2019-06-01", exception_cb=mock.ANY,
+ headers=mock.ANY, retries=mock.ANY,
+ timeout=mock.ANY)
+
+ @mock.patch(MOCKPATH + 'readurl', autospec=True)
+ @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
+ @mock.patch(MOCKPATH + 'net.is_up')
+ def test_get_default_metadata_uses_compute_url(
+ self, m_net_is_up, m_dhcp, m_readurl):
+ """Make sure readurl is called with the correct url when accessing
+ network metadata"""
+ m_net_is_up.return_value = True
+ m_readurl.return_value = url_helper.StringResponse(
+ json.dumps(IMDS_NETWORK_METADATA).encode('utf-8'))
+
+ dsaz.get_metadata_from_imds(
+ 'eth0', retries=3)
+ m_readurl.assert_called_with(
+ "http://169.254.169.254/metadata/instance?api-version="
+ "2019-06-01", exception_cb=mock.ANY,
+ headers=mock.ANY, retries=mock.ANY,
+ timeout=mock.ANY)
+
+ @mock.patch(MOCKPATH + 'readurl', autospec=True)
+ @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting', autospec=True)
+ @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
def test_get_metadata_performs_dhcp_when_network_is_down(
self, m_net_is_up, m_dhcp, m_readurl):
"""Perform DHCP setup when nic is not up."""
@@ -349,7 +495,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS)
@mock.patch('cloudinit.url_helper.time.sleep')
- @mock.patch(MOCKPATH + 'net.is_up')
+ @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
def test_get_metadata_from_imds_empty_when_no_imds_present(
self, m_net_is_up, m_sleep):
"""Return empty dict when IMDS network metadata is absent."""
@@ -370,7 +516,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
@mock.patch('requests.Session.request')
@mock.patch('cloudinit.url_helper.time.sleep')
- @mock.patch(MOCKPATH + 'net.is_up')
+ @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
def test_get_metadata_from_imds_retries_on_timeout(
self, m_net_is_up, m_sleep, m_request):
"""Retry IMDS network metadata on timeout errors."""
@@ -400,6 +546,8 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
class TestAzureDataSource(CiTestCase):
+ with_logs = True
+
def setUp(self):
super(TestAzureDataSource, self).setUp()
self.tmp = self.tmp_dir()
@@ -465,7 +613,7 @@ scbus-1 on xpt0 bus 0
])
return dsaz
- def _get_ds(self, data, agent_command=None, distro=None,
+ def _get_ds(self, data, agent_command=None, distro='ubuntu',
apply_network=None):
def dsdevs():
@@ -488,9 +636,12 @@ scbus-1 on xpt0 bus 0
dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
- self.get_metadata_from_fabric = mock.MagicMock(return_value={
- 'public-keys': [],
- })
+ self.m_is_platform_viable = mock.MagicMock(autospec=True)
+ self.m_get_metadata_from_fabric = mock.MagicMock(
+ return_value={'public-keys': []})
+ self.m_report_failure_to_fabric = mock.MagicMock(autospec=True)
+ self.m_ephemeral_dhcpv4 = mock.MagicMock()
+ self.m_ephemeral_dhcpv4_with_reporting = mock.MagicMock()
self.instance_id = 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8'
@@ -507,15 +658,25 @@ scbus-1 on xpt0 bus 0
(dsaz, 'perform_hostname_bounce', mock.MagicMock()),
(dsaz, 'get_hostname', mock.MagicMock()),
(dsaz, 'set_hostname', mock.MagicMock()),
- (dsaz, 'get_metadata_from_fabric', self.get_metadata_from_fabric),
+ (dsaz, '_is_platform_viable',
+ self.m_is_platform_viable),
+ (dsaz, 'get_metadata_from_fabric',
+ self.m_get_metadata_from_fabric),
+ (dsaz, 'report_failure_to_fabric',
+ self.m_report_failure_to_fabric),
+ (dsaz, 'EphemeralDHCPv4', self.m_ephemeral_dhcpv4),
+ (dsaz, 'EphemeralDHCPv4WithReporting',
+ self.m_ephemeral_dhcpv4_with_reporting),
+ (dsaz, 'get_boot_telemetry', mock.MagicMock()),
+ (dsaz, 'get_system_info', mock.MagicMock()),
(dsaz.subp, 'which', lambda x: True),
- (dsaz.util, 'read_dmi_data', mock.MagicMock(
+ (dsaz.dmi, 'read_dmi_data', mock.MagicMock(
side_effect=_dmi_mocks)),
(dsaz.util, 'wait_for_files', mock.MagicMock(
side_effect=_wait_for_files)),
])
- if distro is not None:
+ if isinstance(distro, str):
distro_cls = distros.fetch(distro)
distro = distro_cls(distro, data.get('sys_cfg', {}), self.paths)
dsrc = dsaz.DataSourceAzure(
@@ -571,15 +732,87 @@ scbus-1 on xpt0 bus 0
dev = ds.get_resource_disk_on_freebsd(1)
self.assertEqual("da1", dev)
- @mock.patch(MOCKPATH + '_is_platform_viable')
- def test_call_is_platform_viable_seed(self, m_is_platform_viable):
+ def test_not_is_platform_viable_seed_should_return_no_datasource(self):
"""Check seed_dir using _is_platform_viable and return False."""
# Return a non-matching asset tag value
- m_is_platform_viable.return_value = False
- dsrc = dsaz.DataSourceAzure(
- {}, distro=None, paths=self.paths)
- self.assertFalse(dsrc.get_data())
- m_is_platform_viable.assert_called_with(dsrc.seed_dir)
+ data = {}
+ dsrc = self._get_ds(data)
+ self.m_is_platform_viable.return_value = False
+ with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
+ mock.patch.object(dsrc, '_report_failure') as m_report_failure:
+ ret = dsrc.get_data()
+ self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
+ self.assertFalse(ret)
+ self.assertNotIn('agent_invoked', data)
+ # Assert that for non viable platforms,
+ # there is no communication with the Azure datasource.
+ self.assertEqual(
+ 0,
+ m_crawl_metadata.call_count)
+ self.assertEqual(
+ 0,
+ m_report_failure.call_count)
+
+ def test_platform_viable_but_no_devs_should_return_no_datasource(self):
+ """For platforms where the Azure platform is viable
+ (which is indicated by the matching asset tag),
+ the absence of any devs at all (devs == candidate sources
+ for crawling Azure datasource) is NOT expected.
+ Report failure to Azure as this is an unexpected fatal error.
+ """
+ data = {}
+ dsrc = self._get_ds(data)
+ with mock.patch.object(dsrc, '_report_failure') as m_report_failure:
+ self.m_is_platform_viable.return_value = True
+ ret = dsrc.get_data()
+ self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
+ self.assertFalse(ret)
+ self.assertNotIn('agent_invoked', data)
+ self.assertEqual(
+ 1,
+ m_report_failure.call_count)
+
+ def test_crawl_metadata_exception_returns_no_datasource(self):
+ data = {}
+ dsrc = self._get_ds(data)
+ self.m_is_platform_viable.return_value = True
+ with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
+ m_crawl_metadata.side_effect = Exception
+ ret = dsrc.get_data()
+ self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
+ self.assertEqual(
+ 1,
+ m_crawl_metadata.call_count)
+ self.assertFalse(ret)
+ self.assertNotIn('agent_invoked', data)
+
+ def test_crawl_metadata_exception_should_report_failure_with_msg(self):
+ data = {}
+ dsrc = self._get_ds(data)
+ self.m_is_platform_viable.return_value = True
+ with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
+ mock.patch.object(dsrc, '_report_failure') as m_report_failure:
+ m_crawl_metadata.side_effect = Exception
+ dsrc.get_data()
+ self.assertEqual(
+ 1,
+ m_crawl_metadata.call_count)
+ m_report_failure.assert_called_once_with(
+ description=dsaz.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE)
+
+ def test_crawl_metadata_exc_should_log_could_not_crawl_msg(self):
+ data = {}
+ dsrc = self._get_ds(data)
+ self.m_is_platform_viable.return_value = True
+ with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
+ m_crawl_metadata.side_effect = Exception
+ dsrc.get_data()
+ self.assertEqual(
+ 1,
+ m_crawl_metadata.call_count)
+ self.assertIn(
+ "Could not crawl Azure metadata",
+ self.logs.getvalue())
def test_basic_seed_dir(self):
odata = {'HostName': "myhost", 'UserName': "myuser"}
@@ -653,6 +886,7 @@ scbus-1 on xpt0 bus 0
'sys_cfg': {}}
dsrc = self._get_ds(data)
expected_cfg = {
+ 'PreprovisionedVMType': None,
'PreprovisionedVm': False,
'datasource': {'Azure': {'agent_command': 'my_command'}},
'system_info': {'default_user': {'name': u'myuser'}}}
@@ -700,7 +934,7 @@ scbus-1 on xpt0 bus 0
'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
@mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
def test_crawl_metadata_on_reprovision_reports_ready(
- self, poll_imds_func, report_ready_func, m_write, m_dhcp
+ self, poll_imds_func, m_report_ready, m_write, m_dhcp
):
"""If reprovisioning, report ready at the end"""
ovfenv = construct_valid_ovf_env(
@@ -714,18 +948,76 @@ scbus-1 on xpt0 bus 0
dsrc = self._get_ds(data)
poll_imds_func.return_value = ovfenv
dsrc.crawl_metadata()
+ self.assertEqual(1, m_report_ready.call_count)
+
+ @mock.patch(
+ 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
+ @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+ @mock.patch(
+ 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
+ @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
+ @mock.patch(
+ 'cloudinit.sources.DataSourceAzure.DataSourceAzure.'
+ '_wait_for_all_nics_ready')
+ def test_crawl_metadata_waits_for_nic_on_savable_vms(
+ self, detect_nics, poll_imds_func, report_ready_func, m_write, m_dhcp
+ ):
+ """If reprovisioning, report ready at the end"""
+ ovfenv = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVMType": "Savable",
+ "PreprovisionedVm": "True"}
+ )
+
+ data = {
+ 'ovfcontent': ovfenv,
+ 'sys_cfg': {}
+ }
+ dsrc = self._get_ds(data)
+ poll_imds_func.return_value = ovfenv
+ dsrc.crawl_metadata()
+ self.assertEqual(1, report_ready_func.call_count)
+ self.assertEqual(1, detect_nics.call_count)
+
+ @mock.patch(
+ 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
+ @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+ @mock.patch(
+ 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
+ @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
+ @mock.patch(
+ 'cloudinit.sources.DataSourceAzure.DataSourceAzure.'
+ '_wait_for_all_nics_ready')
+ @mock.patch('os.path.isfile')
+ def test_detect_nics_when_marker_present(
+ self, is_file, detect_nics, poll_imds_func, report_ready_func, m_write,
+ m_dhcp):
+ """If reprovisioning, wait for nic attach if marker present"""
+
+ def is_file_ret(key):
+ return key == dsaz.REPROVISION_NIC_ATTACH_MARKER_FILE
+
+ is_file.side_effect = is_file_ret
+ ovfenv = construct_valid_ovf_env()
+
+ data = {
+ 'ovfcontent': ovfenv,
+ 'sys_cfg': {}
+ }
+
+ dsrc = self._get_ds(data)
+ poll_imds_func.return_value = ovfenv
+ dsrc.crawl_metadata()
self.assertEqual(1, report_ready_func.call_count)
+ self.assertEqual(1, detect_nics.call_count)
@mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
@mock.patch('cloudinit.sources.helpers.netlink.'
'wait_for_media_disconnect_connect')
@mock.patch(
'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
- @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
- @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
@mock.patch('cloudinit.sources.DataSourceAzure.readurl')
def test_crawl_metadata_on_reprovision_reports_ready_using_lease(
- self, m_readurl, m_dhcp, m_net, report_ready_func,
+ self, m_readurl, m_report_ready,
m_media_switch, m_write
):
"""If reprovisioning, report ready using the obtained lease"""
@@ -739,20 +1031,30 @@ scbus-1 on xpt0 bus 0
}
dsrc = self._get_ds(data)
- lease = {
- 'interface': 'eth9', 'fixed-address': '192.168.2.9',
- 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
- 'unknown-245': '624c3620'}
- m_dhcp.return_value = [lease]
- m_media_switch.return_value = None
+ with mock.patch.object(dsrc.distro.networking, 'is_up') \
+ as m_dsrc_distro_networking_is_up:
- reprovision_ovfenv = construct_valid_ovf_env()
- m_readurl.return_value = url_helper.StringResponse(
- reprovision_ovfenv.encode('utf-8'))
+ # For this mock, net should not be up,
+ # so that cached ephemeral won't be used.
+ # This is so that a NEW ephemeral dhcp lease will be discovered
+ # and used instead.
+ m_dsrc_distro_networking_is_up.return_value = False
- dsrc.crawl_metadata()
- self.assertEqual(2, report_ready_func.call_count)
- report_ready_func.assert_called_with(lease=lease)
+ lease = {
+ 'interface': 'eth9', 'fixed-address': '192.168.2.9',
+ 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
+ 'unknown-245': '624c3620'}
+ self.m_ephemeral_dhcpv4_with_reporting.return_value \
+ .__enter__.return_value = lease
+ m_media_switch.return_value = None
+
+ reprovision_ovfenv = construct_valid_ovf_env()
+ m_readurl.return_value = url_helper.StringResponse(
+ reprovision_ovfenv.encode('utf-8'))
+
+ dsrc.crawl_metadata()
+ self.assertEqual(2, m_report_ready.call_count)
+ m_report_ready.assert_called_with(lease=lease)
def test_waagent_d_has_0700_perms(self):
# we expect /var/lib/waagent to be created 0700
@@ -777,7 +1079,9 @@ scbus-1 on xpt0 bus 0
self.assertTrue(ret)
self.assertEqual(data['agent_invoked'], cfg['agent_command'])
- def test_network_config_set_from_imds(self):
+ @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
+ return_value=None)
+ def test_network_config_set_from_imds(self, m_driver):
"""Datasource.network_config returns IMDS network data."""
sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
odata = {}
@@ -795,7 +1099,10 @@ scbus-1 on xpt0 bus 0
dsrc.get_data()
self.assertEqual(expected_network_config, dsrc.network_config)
- def test_network_config_set_from_imds_route_metric_for_secondary_nic(self):
+ @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
+ return_value=None)
+ def test_network_config_set_from_imds_route_metric_for_secondary_nic(
+ self, m_driver):
"""Datasource.network_config adds route-metric to secondary nics."""
sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
odata = {}
@@ -905,7 +1212,7 @@ scbus-1 on xpt0 bus 0
dsrc = self._get_ds(data)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertTrue('default_user' in dsrc.cfg['system_info'])
+ self.assertIn('default_user', dsrc.cfg['system_info'])
defuser = dsrc.cfg['system_info']['default_user']
# default user should be updated username and should not be locked.
@@ -919,6 +1226,9 @@ scbus-1 on xpt0 bus 0
crypt.crypt(odata['UserPassword'],
defuser['passwd'][0:pos]))
+ # the same hashed value should also be present in cfg['password']
+ self.assertEqual(defuser['passwd'], dsrc.cfg['password'])
+
def test_user_not_locked_if_password_redacted(self):
odata = {'HostName': "myhost", 'UserName': "myuser",
'UserPassword': dsaz.DEF_PASSWD_REDACTION}
@@ -927,7 +1237,7 @@ scbus-1 on xpt0 bus 0
dsrc = self._get_ds(data)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertTrue('default_user' in dsrc.cfg['system_info'])
+ self.assertIn('default_user', dsrc.cfg['system_info'])
defuser = dsrc.cfg['system_info']['default_user']
# default user should be updated username and should not be locked.
@@ -955,14 +1265,6 @@ scbus-1 on xpt0 bus 0
self.assertTrue(ret)
self.assertEqual(dsrc.userdata_raw, mydata.encode('utf-8'))
- def test_no_datasource_expected(self):
- # no source should be found if no seed_dir and no devs
- data = {}
- dsrc = self._get_ds({})
- ret = dsrc.get_data()
- self.assertFalse(ret)
- self.assertFalse('agent_invoked' in data)
-
def test_cfg_has_pubkeys_fingerprint(self):
odata = {'HostName': "myhost", 'UserName': "myuser"}
mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
@@ -1095,18 +1397,178 @@ scbus-1 on xpt0 bus 0
dsrc = self._get_ds({'ovfcontent': xml})
dsrc.get_data()
+ def test_dsaz_report_ready_returns_true_when_report_succeeds(
+ self):
+ dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ dsrc.ds_cfg['agent_command'] = '__builtin__'
+ self.assertTrue(dsrc._report_ready(lease=mock.MagicMock()))
+
+ def test_dsaz_report_ready_returns_false_and_does_not_propagate_exc(
+ self):
+ dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ dsrc.ds_cfg['agent_command'] = '__builtin__'
+ self.m_get_metadata_from_fabric.side_effect = Exception
+ self.assertFalse(dsrc._report_ready(lease=mock.MagicMock()))
+
+ def test_dsaz_report_failure_returns_true_when_report_succeeds(self):
+ dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ dsrc.ds_cfg['agent_command'] = '__builtin__'
+
+ with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
+ # mock crawl metadata failure to cause report failure
+ m_crawl_metadata.side_effect = Exception
+
+ self.assertTrue(dsrc._report_failure())
+ self.assertEqual(
+ 1,
+ self.m_report_failure_to_fabric.call_count)
+
+ def test_dsaz_report_failure_returns_false_and_does_not_propagate_exc(
+ self):
+ dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ dsrc.ds_cfg['agent_command'] = '__builtin__'
+
+ with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
+ mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \
+ as m_ephemeral_dhcp_ctx, \
+ mock.patch.object(dsrc.distro.networking, 'is_up') \
+ as m_dsrc_distro_networking_is_up:
+ # mock crawl metadata failure to cause report failure
+ m_crawl_metadata.side_effect = Exception
+
+ # setup mocks to allow using cached ephemeral dhcp lease
+ m_dsrc_distro_networking_is_up.return_value = True
+ test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245'
+ test_lease = {'unknown-245': test_lease_dhcp_option_245}
+ m_ephemeral_dhcp_ctx.lease = test_lease
+
+ # We expect 3 calls to report_failure_to_fabric,
+ # because we try 3 different methods of calling report failure.
+ # The different methods are attempted in the following order:
+ # 1. Using cached ephemeral dhcp context to report failure to Azure
+ # 2. Using new ephemeral dhcp to report failure to Azure
+ # 3. Using fallback lease to report failure to Azure
+ self.m_report_failure_to_fabric.side_effect = Exception
+ self.assertFalse(dsrc._report_failure())
+ self.assertEqual(
+ 3,
+ self.m_report_failure_to_fabric.call_count)
+
+ def test_dsaz_report_failure_description_msg(self):
+ dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ dsrc.ds_cfg['agent_command'] = '__builtin__'
+
+ with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
+ # mock crawl metadata failure to cause report failure
+ m_crawl_metadata.side_effect = Exception
+
+ test_msg = 'Test report failure description message'
+ self.assertTrue(dsrc._report_failure(description=test_msg))
+ self.m_report_failure_to_fabric.assert_called_once_with(
+ dhcp_opts=mock.ANY, description=test_msg)
+
+ def test_dsaz_report_failure_no_description_msg(self):
+ dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ dsrc.ds_cfg['agent_command'] = '__builtin__'
+
+ with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
+ m_crawl_metadata.side_effect = Exception
+
+ self.assertTrue(dsrc._report_failure()) # no description msg
+ self.m_report_failure_to_fabric.assert_called_once_with(
+ dhcp_opts=mock.ANY, description=None)
+
+ def test_dsaz_report_failure_uses_cached_ephemeral_dhcp_ctx_lease(self):
+ dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ dsrc.ds_cfg['agent_command'] = '__builtin__'
+
+ with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
+ mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \
+ as m_ephemeral_dhcp_ctx, \
+ mock.patch.object(dsrc.distro.networking, 'is_up') \
+ as m_dsrc_distro_networking_is_up:
+ # mock crawl metadata failure to cause report failure
+ m_crawl_metadata.side_effect = Exception
+
+ # setup mocks to allow using cached ephemeral dhcp lease
+ m_dsrc_distro_networking_is_up.return_value = True
+ test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245'
+ test_lease = {'unknown-245': test_lease_dhcp_option_245}
+ m_ephemeral_dhcp_ctx.lease = test_lease
+
+ self.assertTrue(dsrc._report_failure())
+
+ # ensure called with cached ephemeral dhcp lease option 245
+ self.m_report_failure_to_fabric.assert_called_once_with(
+ description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245)
+
+ # ensure cached ephemeral is cleaned
+ self.assertEqual(
+ 1,
+ m_ephemeral_dhcp_ctx.clean_network.call_count)
+
+ def test_dsaz_report_failure_no_net_uses_new_ephemeral_dhcp_lease(self):
+ dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ dsrc.ds_cfg['agent_command'] = '__builtin__'
+
+ with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
+ mock.patch.object(dsrc.distro.networking, 'is_up') \
+ as m_dsrc_distro_networking_is_up:
+ # mock crawl metadata failure to cause report failure
+ m_crawl_metadata.side_effect = Exception
+
+ # net is not up and cannot use cached ephemeral dhcp
+ m_dsrc_distro_networking_is_up.return_value = False
+ # setup ephemeral dhcp lease discovery mock
+ test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245'
+ test_lease = {'unknown-245': test_lease_dhcp_option_245}
+ self.m_ephemeral_dhcpv4_with_reporting.return_value \
+ .__enter__.return_value = test_lease
+
+ self.assertTrue(dsrc._report_failure())
+
+ # ensure called with the newly discovered
+ # ephemeral dhcp lease option 245
+ self.m_report_failure_to_fabric.assert_called_once_with(
+ description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245)
+
+ def test_dsaz_report_failure_no_net_and_no_dhcp_uses_fallback_lease(
+ self):
+ dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ dsrc.ds_cfg['agent_command'] = '__builtin__'
+
+ with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
+ mock.patch.object(dsrc.distro.networking, 'is_up') \
+ as m_dsrc_distro_networking_is_up:
+ # mock crawl metadata failure to cause report failure
+ m_crawl_metadata.side_effect = Exception
+
+ # net is not up and cannot use cached ephemeral dhcp
+ m_dsrc_distro_networking_is_up.return_value = False
+ # ephemeral dhcp discovery failure,
+ # so cannot use a new ephemeral dhcp
+ self.m_ephemeral_dhcpv4_with_reporting.return_value \
+ .__enter__.side_effect = Exception
+
+ self.assertTrue(dsrc._report_failure())
+
+ # ensure called with fallback lease
+ self.m_report_failure_to_fabric.assert_called_once_with(
+ description=mock.ANY,
+ fallback_lease_file=dsrc.dhclient_lease_file)
+
def test_exception_fetching_fabric_data_doesnt_propagate(self):
"""Errors communicating with fabric should warn, but return True."""
dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
dsrc.ds_cfg['agent_command'] = '__builtin__'
- self.get_metadata_from_fabric.side_effect = Exception
+ self.m_get_metadata_from_fabric.side_effect = Exception
ret = self._get_and_setup(dsrc)
self.assertTrue(ret)
def test_fabric_data_included_in_metadata(self):
dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
dsrc.ds_cfg['agent_command'] = '__builtin__'
- self.get_metadata_from_fabric.return_value = {'test': 'value'}
+ self.m_get_metadata_from_fabric.return_value = {'test': 'value'}
ret = self._get_and_setup(dsrc)
self.assertTrue(ret)
self.assertEqual('value', dsrc.metadata['test'])
@@ -1151,8 +1613,10 @@ scbus-1 on xpt0 bus 0
self.assertEqual(
[mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list)
+ @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
+ return_value=None)
@mock.patch('cloudinit.net.generate_fallback_config')
- def test_imds_network_config(self, mock_fallback):
+ def test_imds_network_config(self, mock_fallback, m_driver):
"""Network config is generated from IMDS network data when present."""
sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
odata = {'HostName': "myhost", 'UserName': "myuser"}
@@ -1208,7 +1672,7 @@ scbus-1 on xpt0 bus 0
@mock.patch('cloudinit.net.get_interface_mac')
@mock.patch('cloudinit.net.get_devicelist')
@mock.patch('cloudinit.net.device_driver')
- @mock.patch('cloudinit.net.generate_fallback_config')
+ @mock.patch('cloudinit.net.generate_fallback_config', autospec=True)
def test_fallback_network_config(self, mock_fallback, mock_dd,
mock_devlist, mock_get_mac):
"""On absent IMDS network data, generate network fallback config."""
@@ -1239,71 +1703,81 @@ scbus-1 on xpt0 bus 0
netconfig = dsrc.network_config
self.assertEqual(netconfig, fallback_config)
- mock_fallback.assert_called_with(blacklist_drivers=['mlx4_core'],
- config_driver=True)
+ mock_fallback.assert_called_with(
+ blacklist_drivers=['mlx4_core', 'mlx5_core'],
+ config_driver=True)
- @mock.patch('cloudinit.net.get_interface_mac')
- @mock.patch('cloudinit.net.get_devicelist')
- @mock.patch('cloudinit.net.device_driver')
- @mock.patch('cloudinit.net.generate_fallback_config')
- def test_fallback_network_config_blacklist(self, mock_fallback, mock_dd,
- mock_devlist, mock_get_mac):
- """On absent network metadata, blacklist mlx from fallback config."""
+ @mock.patch(MOCKPATH + 'net.get_interfaces', autospec=True)
+ @mock.patch(MOCKPATH + 'util.is_FreeBSD')
+ def test_blacklist_through_distro(
+ self, m_is_freebsd, m_net_get_interfaces):
+ """Verify Azure DS updates blacklist drivers in the distro's
+ networking object."""
odata = {'HostName': "myhost", 'UserName': "myuser"}
data = {'ovfcontent': construct_valid_ovf_env(data=odata),
'sys_cfg': {}}
- fallback_config = {
- 'version': 1,
- 'config': [{
- 'type': 'physical', 'name': 'eth0',
- 'mac_address': '00:11:22:33:44:55',
- 'params': {'driver': 'hv_netsvc'},
- 'subnets': [{'type': 'dhcp'}],
- }]
- }
- blacklist_config = {
- 'type': 'physical',
- 'name': 'eth1',
- 'mac_address': '00:11:22:33:44:55',
- 'params': {'driver': 'mlx4_core'}
- }
- mock_fallback.return_value = fallback_config
-
- mock_devlist.return_value = ['eth0', 'eth1']
- mock_dd.side_effect = [
- 'hv_netsvc', # list composition, skipped
- 'mlx4_core', # list composition, match
- 'mlx4_core', # config get driver name
- ]
- mock_get_mac.return_value = '00:11:22:33:44:55'
-
- dsrc = self._get_ds(data)
- # Represent empty response from network imds
- self.m_get_metadata_from_imds.return_value = {}
- ret = dsrc.get_data()
- self.assertTrue(ret)
+ distro_cls = distros.fetch('ubuntu')
+ distro = distro_cls('ubuntu', {}, self.paths)
+ dsrc = self._get_ds(data, distro=distro)
+ dsrc.get_data()
+ self.assertEqual(distro.networking.blacklist_drivers,
+ dsaz.BLACKLIST_DRIVERS)
- netconfig = dsrc.network_config
- expected_config = fallback_config
- expected_config['config'].append(blacklist_config)
- self.assertEqual(netconfig, expected_config)
+ m_is_freebsd.return_value = False
+ distro.networking.get_interfaces_by_mac()
+ m_net_get_interfaces.assert_called_with(
+ blacklist_drivers=dsaz.BLACKLIST_DRIVERS)
- @mock.patch(MOCKPATH + 'subp.subp')
+ @mock.patch(MOCKPATH + 'subp.subp', autospec=True)
def test_get_hostname_with_no_args(self, m_subp):
dsaz.get_hostname()
m_subp.assert_called_once_with(("hostname",), capture=True)
- @mock.patch(MOCKPATH + 'subp.subp')
+ @mock.patch(MOCKPATH + 'subp.subp', autospec=True)
def test_get_hostname_with_string_arg(self, m_subp):
dsaz.get_hostname(hostname_command="hostname")
m_subp.assert_called_once_with(("hostname",), capture=True)
- @mock.patch(MOCKPATH + 'subp.subp')
+ @mock.patch(MOCKPATH + 'subp.subp', autospec=True)
def test_get_hostname_with_iterable_arg(self, m_subp):
dsaz.get_hostname(hostname_command=("hostname",))
m_subp.assert_called_once_with(("hostname",), capture=True)
+ @mock.patch(
+ 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates')
+ def test_get_public_ssh_keys_with_imds(self, m_parse_certificates):
+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+ odata = {'HostName': "myhost", 'UserName': "myuser"}
+ data = {
+ 'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': sys_cfg
+ }
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+ dsrc.setup(True)
+ ssh_keys = dsrc.get_public_ssh_keys()
+ self.assertEqual(ssh_keys, ['key1'])
+ self.assertEqual(m_parse_certificates.call_count, 0)
+
+ @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+ def test_get_public_ssh_keys_without_imds(
+ self,
+ m_get_metadata_from_imds):
+ m_get_metadata_from_imds.return_value = dict()
+ sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+ odata = {'HostName': "myhost", 'UserName': "myuser"}
+ data = {
+ 'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': sys_cfg
+ }
+ dsrc = self._get_ds(data)
+ dsaz.get_metadata_from_fabric.return_value = {'public-keys': ['key2']}
+ dsrc.get_data()
+ dsrc.setup(True)
+ ssh_keys = dsrc.get_public_ssh_keys()
+ self.assertEqual(ssh_keys, ['key2'])
+
class TestAzureBounce(CiTestCase):
@@ -1336,7 +1810,7 @@ class TestAzureBounce(CiTestCase):
raise RuntimeError('should not get here')
self.patches.enter_context(
- mock.patch.object(dsaz.util, 'read_dmi_data',
+ mock.patch.object(dsaz.dmi, 'read_dmi_data',
mock.MagicMock(side_effect=_dmi_mocks)))
def setUp(self):
@@ -1365,8 +1839,7 @@ class TestAzureBounce(CiTestCase):
if ovfcontent is not None:
populate_dir(os.path.join(self.paths.seed_dir, "azure"),
{'ovf-env.xml': ovfcontent})
- dsrc = dsaz.DataSourceAzure(
- {}, distro=None, paths=self.paths)
+ dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
if agent_command is not None:
dsrc.ds_cfg['agent_command'] = agent_command
return dsrc
@@ -1850,7 +2323,7 @@ class TestClearCachedData(CiTestCase):
tmp = self.tmp_dir()
paths = helpers.Paths(
{'cloud_dir': tmp, 'run_dir': tmp})
- dsrc = dsaz.DataSourceAzure({}, distro=None, paths=paths)
+ dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=paths)
clean_values = [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds]
dsrc.metadata = 'md'
dsrc.userdata = 'ud'
@@ -1897,6 +2370,29 @@ class TestPreprovisioningReadAzureOvfFlag(CiTestCase):
ret = dsaz.read_azure_ovf(content)
cfg = ret[2]
self.assertFalse(cfg['PreprovisionedVm'])
+ self.assertEqual(None, cfg["PreprovisionedVMType"])
+
+ def test_read_azure_ovf_with_running_type(self):
+ """The read_azure_ovf method should set PreprovisionedVMType
+ cfg flag to Running."""
+ content = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVMType": "Running",
+ "PreprovisionedVm": "True"})
+ ret = dsaz.read_azure_ovf(content)
+ cfg = ret[2]
+ self.assertTrue(cfg['PreprovisionedVm'])
+ self.assertEqual("Running", cfg['PreprovisionedVMType'])
+
+ def test_read_azure_ovf_with_savable_type(self):
+ """The read_azure_ovf method should set PreprovisionedVMType
+ cfg flag to Savable."""
+ content = construct_valid_ovf_env(
+ platform_settings={"PreprovisionedVMType": "Savable",
+ "PreprovisionedVm": "True"})
+ ret = dsaz.read_azure_ovf(content)
+ cfg = ret[2]
+ self.assertTrue(cfg['PreprovisionedVm'])
+ self.assertEqual("Savable", cfg['PreprovisionedVMType'])
@mock.patch('os.path.isfile')
@@ -1914,7 +2410,7 @@ class TestPreprovisioningShouldReprovision(CiTestCase):
"""The _should_reprovision method should return true with config
flag present."""
isfile.return_value = False
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
self.assertTrue(dsa._should_reprovision(
(None, None, {'PreprovisionedVm': True}, None)))
@@ -1922,7 +2418,7 @@ class TestPreprovisioningShouldReprovision(CiTestCase):
"""The _should_reprovision method should return True if the sentinal
exists."""
isfile.return_value = True
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
self.assertTrue(dsa._should_reprovision(
(None, None, {'preprovisionedvm': False}, None)))
@@ -1930,7 +2426,7 @@ class TestPreprovisioningShouldReprovision(CiTestCase):
"""The _should_reprovision method should return False
if config and sentinal are not present."""
isfile.return_value = False
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
self.assertFalse(dsa._should_reprovision((None, None, {}, None)))
@mock.patch(MOCKPATH + 'DataSourceAzure._poll_imds')
@@ -1941,11 +2437,232 @@ class TestPreprovisioningShouldReprovision(CiTestCase):
username = "myuser"
odata = {'HostName': hostname, 'UserName': username}
_poll_imds.return_value = construct_valid_ovf_env(data=odata)
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
dsa._reprovision()
_poll_imds.assert_called_with()
+class TestPreprovisioningHotAttachNics(CiTestCase):
+
+ def setUp(self):
+ super(TestPreprovisioningHotAttachNics, self).setUp()
+ self.tmp = self.tmp_dir()
+ self.waagent_d = self.tmp_path('/var/lib/waagent', self.tmp)
+ self.paths = helpers.Paths({'cloud_dir': self.tmp})
+ dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
+
+ @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_detach_event',
+ autospec=True)
+ @mock.patch(MOCKPATH + 'util.write_file', autospec=True)
+ def test_nic_detach_writes_marker(self, m_writefile, m_detach):
+ """When we detect that a nic gets detached, we write a marker for it"""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ nl_sock = mock.MagicMock()
+ dsa._wait_for_nic_detach(nl_sock)
+ m_detach.assert_called_with(nl_sock)
+ self.assertEqual(1, m_detach.call_count)
+ m_writefile.assert_called_with(
+ dsaz.REPROVISION_NIC_DETACHED_MARKER_FILE, mock.ANY)
+
+ @mock.patch(MOCKPATH + 'util.write_file', autospec=True)
+ @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface')
+ @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting')
+ @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
+ @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
+ def test_detect_nic_attach_reports_ready_and_waits_for_detach(
+ self, m_detach, m_report_ready, m_dhcp, m_fallback_if,
+ m_writefile):
+ """Report ready first and then wait for nic detach"""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ m_fallback_if.return_value = "Dummy interface"
+ dsa._wait_for_all_nics_ready()
+ self.assertEqual(1, m_report_ready.call_count)
+ self.assertEqual(1, m_detach.call_count)
+ self.assertEqual(1, m_writefile.call_count)
+ self.assertEqual(1, m_dhcp.call_count)
+ m_writefile.assert_called_with(dsaz.REPORTED_READY_MARKER_FILE,
+ mock.ANY)
+
+ @mock.patch('os.path.isfile')
+ @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface')
+ @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting')
+ @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
+ @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
+ def test_detect_nic_attach_skips_report_ready_when_marker_present(
+ self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile):
+ """Skip reporting ready if we already have a marker file."""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+
+ def isfile(key):
+ return key == dsaz.REPORTED_READY_MARKER_FILE
+
+ m_isfile.side_effect = isfile
+ m_fallback_if.return_value = "Dummy interface"
+ dsa._wait_for_all_nics_ready()
+ self.assertEqual(0, m_report_ready.call_count)
+ self.assertEqual(0, m_dhcp.call_count)
+ self.assertEqual(1, m_detach.call_count)
+
+ @mock.patch('os.path.isfile')
+ @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface')
+ @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting')
+ @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
+ @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
+ def test_detect_nic_attach_skips_nic_detach_when_marker_present(
+ self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile):
+ """Skip wait for nic detach if it already happened."""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+
+ m_isfile.return_value = True
+ m_fallback_if.return_value = "Dummy interface"
+ dsa._wait_for_all_nics_ready()
+ self.assertEqual(0, m_report_ready.call_count)
+ self.assertEqual(0, m_dhcp.call_count)
+ self.assertEqual(0, m_detach.call_count)
+
+ @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up', autospec=True)
+ @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event')
+ @mock.patch('cloudinit.sources.net.find_fallback_nic')
+ @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+ @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
+ @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
+ @mock.patch('os.path.isfile')
+ def test_wait_for_nic_attach_if_no_fallback_interface(
+ self, m_isfile, m_detach, m_dhcpv4, m_imds, m_fallback_if,
+ m_attach, m_link_up):
+ """Wait for nic attach if we do not have a fallback interface"""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ lease = {
+ 'interface': 'eth9', 'fixed-address': '192.168.2.9',
+ 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
+ 'unknown-245': '624c3620'}
+
+ m_isfile.return_value = True
+ m_attach.return_value = "eth0"
+ dhcp_ctx = mock.MagicMock(lease=lease)
+ dhcp_ctx.obtain_lease.return_value = lease
+ m_dhcpv4.return_value = dhcp_ctx
+ m_imds.return_value = IMDS_NETWORK_METADATA
+ m_fallback_if.return_value = None
+
+ dsa._wait_for_all_nics_ready()
+
+ self.assertEqual(0, m_detach.call_count)
+ self.assertEqual(1, m_attach.call_count)
+ self.assertEqual(1, m_dhcpv4.call_count)
+ self.assertEqual(1, m_imds.call_count)
+ self.assertEqual(1, m_link_up.call_count)
+ m_link_up.assert_called_with(mock.ANY, "eth0")
+
+ @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up')
+ @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event')
+ @mock.patch('cloudinit.sources.net.find_fallback_nic')
+ @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+ @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
+ @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
+ @mock.patch('os.path.isfile')
+ def test_wait_for_nic_attach_multinic_attach(
+ self, m_isfile, m_detach, m_dhcpv4, m_imds, m_fallback_if,
+ m_attach, m_link_up):
+ """Wait for nic attach if we do not have a fallback interface"""
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ lease = {
+ 'interface': 'eth9', 'fixed-address': '192.168.2.9',
+ 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
+ 'unknown-245': '624c3620'}
+ m_attach_call_count = 0
+
+ def nic_attach_ret(nl_sock, nics_found):
+ nonlocal m_attach_call_count
+ if m_attach_call_count == 0:
+ m_attach_call_count = m_attach_call_count + 1
+ return "eth0"
+ return "eth1"
+
+ def network_metadata_ret(ifname, retries, type):
+ # Simulate two NICs by adding the same one twice to a deep copy,
+ # so the shared IMDS_NETWORK_METADATA fixture is not mutated.
+ md = copy.deepcopy(IMDS_NETWORK_METADATA)
+ md['interface'].append(md['interface'][0])
+ if ifname == "eth0":
+ return md
+ raise requests.Timeout('Fake connection timeout')
+
+ m_isfile.return_value = True
+ m_attach.side_effect = nic_attach_ret
+ dhcp_ctx = mock.MagicMock(lease=lease)
+ dhcp_ctx.obtain_lease.return_value = lease
+ m_dhcpv4.return_value = dhcp_ctx
+ m_imds.side_effect = network_metadata_ret
+ m_fallback_if.return_value = None
+
+ dsa._wait_for_all_nics_ready()
+
+ self.assertEqual(0, m_detach.call_count)
+ self.assertEqual(2, m_attach.call_count)
+ # DHCP and network metadata calls will only happen on the primary NIC.
+ self.assertEqual(1, m_dhcpv4.call_count)
+ self.assertEqual(1, m_imds.call_count)
+ self.assertEqual(2, m_link_up.call_count)
+
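
nic_attach_ret above returns a different nic on each call by closing over a counter; when the sequence of return values is fixed in advance, assigning an iterable to side_effect is the lighter-weight form of the same pattern:

    from unittest import mock

    m_attach = mock.MagicMock()
    # Each call consumes the next item: first "eth0", then "eth1".
    m_attach.side_effect = ['eth0', 'eth1']
    assert m_attach() == 'eth0'
    assert m_attach() == 'eth1'
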
+ @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
+ def test_wait_for_link_up_returns_if_already_up(
+ self, m_is_link_up):
+ """Waiting for link to be up should return immediately if the link is
+ already up."""
+
+ distro_cls = distros.fetch('ubuntu')
+ distro = distro_cls('ubuntu', {}, self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+ m_is_link_up.return_value = True
+
+ dsa.wait_for_link_up("eth0")
+ self.assertEqual(1, m_is_link_up.call_count)
+
+ @mock.patch(MOCKPATH + 'util.write_file')
+ @mock.patch('cloudinit.net.read_sys_net')
+ @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
+ def test_wait_for_link_up_writes_to_device_file(
+ self, m_is_link_up, m_read_sys_net, m_writefile):
+ """Waiting for link to be up should return immediately if the link is
+ already up."""
+
+ distro_cls = distros.fetch('ubuntu')
+ distro = distro_cls('ubuntu', {}, self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+
+ callcount = 0
+
+ def linkup(key):
+ nonlocal callcount
+ if callcount == 0:
+ callcount += 1
+ return False
+ return True
+
+ m_is_link_up.side_effect = linkup
+
+ dsa.wait_for_link_up("eth0")
+ self.assertEqual(2, m_is_link_up.call_count)
+ self.assertEqual(1, m_read_sys_net.call_count)
+ self.assertEqual(2, m_writefile.call_count)
+
+ @mock.patch('cloudinit.sources.helpers.netlink.'
+ 'create_bound_netlink_socket')
+ def test_wait_for_all_nics_ready_raises_if_socket_fails(self, m_socket):
+ """Waiting for all nics should raise exception if netlink socket
+ creation fails."""
+
+ m_socket.side_effect = netlink.NetlinkCreateSocketError
+ distro_cls = distros.fetch('ubuntu')
+ distro = distro_cls('ubuntu', {}, self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+
+ self.assertRaises(netlink.NetlinkCreateSocketError,
+ dsa._wait_for_all_nics_ready)
+
+
@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
@mock.patch('cloudinit.sources.helpers.netlink.'
@@ -1963,8 +2680,8 @@ class TestPreprovisioningPollIMDS(CiTestCase):
@mock.patch('time.sleep', mock.MagicMock())
@mock.patch(MOCKPATH + 'EphemeralDHCPv4')
- def test_poll_imds_re_dhcp_on_timeout(self, m_dhcpv4, report_ready_func,
- fake_resp, m_media_switch, m_dhcp,
+ def test_poll_imds_re_dhcp_on_timeout(self, m_dhcpv4, m_report_ready,
+ m_request, m_media_switch, m_dhcp,
m_net):
"""The poll_imds will retry DHCP on IMDS timeout."""
report_file = self.tmp_path('report_marker', self.tmp)
@@ -1993,21 +2710,38 @@ class TestPreprovisioningPollIMDS(CiTestCase):
# Third try should succeed and stop retries or redhcp
return mock.MagicMock(status_code=200, text="good", content="good")
- fake_resp.side_effect = fake_timeout_once
+ m_request.side_effect = fake_timeout_once
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
dsa._poll_imds()
- self.assertEqual(report_ready_func.call_count, 1)
- report_ready_func.assert_called_with(lease=lease)
+ self.assertEqual(m_report_ready.call_count, 1)
+ m_report_ready.assert_called_with(lease=lease)
self.assertEqual(3, m_dhcpv4.call_count, 'Expected 3 DHCP calls')
self.assertEqual(4, self.tries, 'Expected 4 total reads from IMDS')
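
fake_timeout_once above counts attempts in self.tries so the test can assert four IMDS reads; when no such bookkeeping is needed, the same timeout-then-success behaviour can be expressed as a side_effect list, since mock raises any exception instance it finds there:

    import requests
    from unittest import mock

    m_request = mock.MagicMock()
    m_request.side_effect = [
        requests.Timeout('Fake connection timeout'),  # raised on call 1
        mock.MagicMock(status_code=200, text='good', content='good'),
    ]
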
- def test_poll_imds_report_ready_false(self,
- report_ready_func, fake_resp,
- m_media_switch, m_dhcp, m_net):
- """The poll_imds should not call reporting ready
- when flag is false"""
+ @mock.patch('os.path.isfile')
+ def test_poll_imds_skips_dhcp_if_ctx_present(
+ self, m_isfile, m_report_ready, m_request, m_media_switch,
+ m_dhcp, m_net):
+ """The poll_imds function should reuse the dhcp ctx if it is already
+ present. This happens when we wait for nic to be hot-attached before
+ polling for reprovisiondata. Note that if this ctx is set when
+ _poll_imds is called, then it is not expected to be waiting for
+ media_disconnect_connect either."""
+ report_file = self.tmp_path('report_marker', self.tmp)
+ m_isfile.return_value = True
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ dsa._ephemeral_dhcp_ctx = "Dummy dhcp ctx"
+ with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
+ dsa._poll_imds()
+ self.assertEqual(0, m_dhcp.call_count)
+ self.assertEqual(0, m_media_switch.call_count)
+
+ def test_does_not_poll_imds_report_ready_when_marker_file_exists(
+ self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
+ """poll_imds should not call report ready when the reported ready
+ marker file exists"""
report_file = self.tmp_path('report_marker', self.tmp)
write_file(report_file, content='dont run report_ready :)')
m_dhcp.return_value = [{
@@ -2015,18 +2749,56 @@ class TestPreprovisioningPollIMDS(CiTestCase):
'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
'unknown-245': '624c3620'}]
m_media_switch.return_value = None
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
+ dsa._poll_imds()
+ self.assertEqual(m_report_ready.call_count, 0)
+
+ def test_poll_imds_report_ready_success_writes_marker_file(
+ self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
+ """poll_imds should write the report_ready marker file if
+ reporting ready succeeds"""
+ report_file = self.tmp_path('report_marker', self.tmp)
+ m_dhcp.return_value = [{
+ 'interface': 'eth9', 'fixed-address': '192.168.2.9',
+ 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
+ 'unknown-245': '624c3620'}]
+ m_media_switch.return_value = None
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ self.assertFalse(os.path.exists(report_file))
with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
dsa._poll_imds()
- self.assertEqual(report_ready_func.call_count, 0)
+ self.assertEqual(m_report_ready.call_count, 1)
+ self.assertTrue(os.path.exists(report_file))
+ def test_poll_imds_report_ready_failure_raises_exc_and_doesnt_write_marker(
+ self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
+ """poll_imds should write the report_ready marker file if
+ reporting ready succeeds"""
+ report_file = self.tmp_path('report_marker', self.tmp)
+ m_dhcp.return_value = [{
+ 'interface': 'eth9', 'fixed-address': '192.168.2.9',
+ 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
+ 'unknown-245': '624c3620'}]
+ m_media_switch.return_value = None
+ m_report_ready.return_value = False
+ dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ self.assertFalse(os.path.exists(report_file))
+ with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
+ self.assertRaises(
+ InvalidMetaDataException,
+ dsa._poll_imds)
+ self.assertEqual(m_report_ready.call_count, 1)
+ self.assertFalse(os.path.exists(report_file))
-@mock.patch(MOCKPATH + 'subp.subp')
-@mock.patch(MOCKPATH + 'util.write_file')
+
+@mock.patch(MOCKPATH + 'DataSourceAzure._report_ready', mock.MagicMock())
+@mock.patch(MOCKPATH + 'subp.subp', mock.MagicMock())
+@mock.patch(MOCKPATH + 'util.write_file', mock.MagicMock())
@mock.patch(MOCKPATH + 'util.is_FreeBSD')
@mock.patch('cloudinit.sources.helpers.netlink.'
'wait_for_media_disconnect_connect')
-@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
+@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network', autospec=True)
@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
@mock.patch('requests.Session.request')
class TestAzureDataSourcePreprovisioning(CiTestCase):
@@ -2038,24 +2810,24 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
self.paths = helpers.Paths({'cloud_dir': tmp})
dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
- def test_poll_imds_returns_ovf_env(self, fake_resp,
+ def test_poll_imds_returns_ovf_env(self, m_request,
m_dhcp, m_net,
m_media_switch,
- m_is_bsd, write_f, subp):
+ m_is_bsd):
"""The _poll_imds method should return the ovf_env.xml."""
m_is_bsd.return_value = False
m_media_switch.return_value = None
m_dhcp.return_value = [{
'interface': 'eth9', 'fixed-address': '192.168.2.9',
'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0'}]
- url = 'http://{0}/metadata/reprovisiondata?api-version=2017-04-02'
+ url = 'http://{0}/metadata/reprovisiondata?api-version=2019-06-01'
host = "169.254.169.254"
full_url = url.format(host)
- fake_resp.return_value = mock.MagicMock(status_code=200, text="ovf",
+ m_request.return_value = mock.MagicMock(status_code=200, text="ovf",
content="ovf")
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
self.assertTrue(len(dsa._poll_imds()) > 0)
- self.assertEqual(fake_resp.call_args_list,
+ self.assertEqual(m_request.call_args_list,
[mock.call(allow_redirects=True,
headers={'Metadata': 'true',
'User-Agent':
@@ -2070,10 +2842,10 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
static_routes=None)
self.assertEqual(m_net.call_count, 2)
- def test__reprovision_calls__poll_imds(self, fake_resp,
+ def test__reprovision_calls__poll_imds(self, m_request,
m_dhcp, m_net,
m_media_switch,
- m_is_bsd, write_f, subp):
+ m_is_bsd):
"""The _reprovision method should call poll IMDS."""
m_is_bsd.return_value = False
m_media_switch.return_value = None
@@ -2081,27 +2853,31 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
'interface': 'eth9', 'fixed-address': '192.168.2.9',
'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
'unknown-245': '624c3620'}]
- url = 'http://{0}/metadata/reprovisiondata?api-version=2017-04-02'
+ url = 'http://{0}/metadata/reprovisiondata?api-version=2019-06-01'
host = "169.254.169.254"
full_url = url.format(host)
hostname = "myhost"
username = "myuser"
odata = {'HostName': hostname, 'UserName': username}
content = construct_valid_ovf_env(data=odata)
- fake_resp.return_value = mock.MagicMock(status_code=200, text=content,
+ m_request.return_value = mock.MagicMock(status_code=200, text=content,
content=content)
- dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
md, _ud, cfg, _d = dsa._reprovision()
self.assertEqual(md['local-hostname'], hostname)
self.assertEqual(cfg['system_info']['default_user']['name'], username)
- self.assertEqual(fake_resp.call_args_list,
- [mock.call(allow_redirects=True,
- headers={'Metadata': 'true',
- 'User-Agent':
- 'Cloud-Init/%s' % vs()},
- method='GET',
- timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
- url=full_url)])
+ self.assertIn(
+ mock.call(
+ allow_redirects=True,
+ headers={
+ 'Metadata': 'true',
+ 'User-Agent': 'Cloud-Init/%s' % vs()
+ },
+ method='GET',
+ timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
+ url=full_url
+ ),
+ m_request.call_args_list)
self.assertEqual(m_dhcp.call_count, 2)
m_net.assert_any_call(
broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9',
@@ -2163,14 +2939,14 @@ class TestWBIsPlatformViable(CiTestCase):
"""White box tests for _is_platform_viable."""
with_logs = True
- @mock.patch(MOCKPATH + 'util.read_dmi_data')
+ @mock.patch(MOCKPATH + 'dmi.read_dmi_data')
def test_true_on_non_azure_chassis(self, m_read_dmi_data):
"""Return True if DMI chassis-asset-tag is AZURE_CHASSIS_ASSET_TAG."""
m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG
self.assertTrue(dsaz._is_platform_viable('doesnotmatter'))
@mock.patch(MOCKPATH + 'os.path.exists')
- @mock.patch(MOCKPATH + 'util.read_dmi_data')
+ @mock.patch(MOCKPATH + 'dmi.read_dmi_data')
def test_true_on_azure_ovf_env_in_seed_dir(self, m_read_dmi_data, m_exist):
"""Return True if ovf-env.xml exists in known seed dirs."""
# Non-matching Azure chassis-asset-tag
@@ -2191,7 +2967,7 @@ class TestWBIsPlatformViable(CiTestCase):
MOCKPATH,
{'os.path.exists': False,
# Non-matching Azure chassis-asset-tag
- 'util.read_dmi_data': dsaz.AZURE_CHASSIS_ASSET_TAG + 'X',
+ 'dmi.read_dmi_data': dsaz.AZURE_CHASSIS_ASSET_TAG + 'X',
'subp.which': None},
dsaz._is_platform_viable, 'doesnotmatter'))
self.assertIn(
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index 5e6d3d2d..b8899807 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -1,10 +1,12 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import copy
import os
import re
import unittest
from textwrap import dedent
from xml.etree import ElementTree
+from xml.sax.saxutils import escape, unescape
from cloudinit.sources.helpers import azure as azure_helper
from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir
@@ -70,6 +72,15 @@ HEALTH_REPORT_XML_TEMPLATE = '''\
</Health>
'''
+HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = dedent('''\
+ <Details>
+ <SubStatus>{health_substatus}</SubStatus>
+ <Description>{health_description}</Description>
+ </Details>
+ ''')
+
+HEALTH_REPORT_DESCRIPTION_TRIM_LEN = 512
+
class SentinelException(Exception):
pass
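
SentinelException is this file's stand-in for an arbitrary dependency failure: tests install it as a side_effect and assert that it propagates unchanged, proving the code under test has no swallow-and-continue path. The shape of every such test is:

    m_dependency = mock.MagicMock(side_effect=SentinelException)
    # code_under_test is a placeholder for whatever calls the mock.
    with self.assertRaises(SentinelException):
        code_under_test(m_dependency)
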
@@ -281,29 +292,25 @@ class TestAzureEndpointHttpClient(CiTestCase):
super(TestAzureEndpointHttpClient, self).setUp()
patches = ExitStack()
self.addCleanup(patches.close)
-
- self.readurl = patches.enter_context(
- mock.patch.object(azure_helper.url_helper, 'readurl'))
- patches.enter_context(
- mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock()))
+ self.m_http_with_retries = patches.enter_context(
+ mock.patch.object(azure_helper, 'http_with_retries'))
def test_non_secure_get(self):
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
url = 'MyTestUrl'
response = client.get(url, secure=False)
- self.assertEqual(1, self.readurl.call_count)
- self.assertEqual(self.readurl.return_value, response)
+ self.assertEqual(1, self.m_http_with_retries.call_count)
+ self.assertEqual(self.m_http_with_retries.return_value, response)
self.assertEqual(
- mock.call(url, headers=self.regular_headers,
- timeout=5, retries=10, sec_between=5),
- self.readurl.call_args)
+ mock.call(url, headers=self.regular_headers),
+ self.m_http_with_retries.call_args)
def test_non_secure_get_raises_exception(self):
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
- self.readurl.side_effect = SentinelException
url = 'MyTestUrl'
- with self.assertRaises(SentinelException):
- client.get(url, secure=False)
+ self.m_http_with_retries.side_effect = SentinelException
+ self.assertRaises(SentinelException, client.get, url, secure=False)
+ self.assertEqual(1, self.m_http_with_retries.call_count)
def test_secure_get(self):
url = 'MyTestUrl'
@@ -315,39 +322,37 @@ class TestAzureEndpointHttpClient(CiTestCase):
})
client = azure_helper.AzureEndpointHttpClient(m_certificate)
response = client.get(url, secure=True)
- self.assertEqual(1, self.readurl.call_count)
- self.assertEqual(self.readurl.return_value, response)
+ self.assertEqual(1, self.m_http_with_retries.call_count)
+ self.assertEqual(self.m_http_with_retries.return_value, response)
self.assertEqual(
- mock.call(url, headers=expected_headers,
- timeout=5, retries=10, sec_between=5),
- self.readurl.call_args)
+ mock.call(url, headers=expected_headers),
+ self.m_http_with_retries.call_args)
def test_secure_get_raises_exception(self):
url = 'MyTestUrl'
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
- self.readurl.side_effect = SentinelException
- with self.assertRaises(SentinelException):
- client.get(url, secure=True)
+ self.m_http_with_retries.side_effect = SentinelException
+ self.assertRaises(SentinelException, client.get, url, secure=True)
+ self.assertEqual(1, self.m_http_with_retries.call_count)
def test_post(self):
m_data = mock.MagicMock()
url = 'MyTestUrl'
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
response = client.post(url, data=m_data)
- self.assertEqual(1, self.readurl.call_count)
- self.assertEqual(self.readurl.return_value, response)
+ self.assertEqual(1, self.m_http_with_retries.call_count)
+ self.assertEqual(self.m_http_with_retries.return_value, response)
self.assertEqual(
- mock.call(url, data=m_data, headers=self.regular_headers,
- timeout=5, retries=10, sec_between=5),
- self.readurl.call_args)
+ mock.call(url, data=m_data, headers=self.regular_headers),
+ self.m_http_with_retries.call_args)
def test_post_raises_exception(self):
m_data = mock.MagicMock()
url = 'MyTestUrl'
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
- self.readurl.side_effect = SentinelException
- with self.assertRaises(SentinelException):
- client.post(url, data=m_data)
+ self.m_http_with_retries.side_effect = SentinelException
+ self.assertRaises(SentinelException, client.post, url, data=m_data)
+ self.assertEqual(1, self.m_http_with_retries.call_count)
def test_post_with_extra_headers(self):
url = 'MyTestUrl'
@@ -356,21 +361,179 @@ class TestAzureEndpointHttpClient(CiTestCase):
client.post(url, extra_headers=extra_headers)
expected_headers = self.regular_headers.copy()
expected_headers.update(extra_headers)
- self.assertEqual(1, self.readurl.call_count)
+ self.assertEqual(1, self.m_http_with_retries.call_count)
self.assertEqual(
- mock.call(mock.ANY, data=mock.ANY, headers=expected_headers,
- timeout=5, retries=10, sec_between=5),
- self.readurl.call_args)
+ mock.call(url, data=mock.ANY, headers=expected_headers),
+ self.m_http_with_retries.call_args)
def test_post_with_sleep_with_extra_headers_raises_exception(self):
m_data = mock.MagicMock()
url = 'MyTestUrl'
extra_headers = {'test': 'header'}
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
- self.readurl.side_effect = SentinelException
- with self.assertRaises(SentinelException):
- client.post(
- url, data=m_data, extra_headers=extra_headers)
+ self.m_http_with_retries.side_effect = SentinelException
+ self.assertRaises(
+ SentinelException, client.post,
+ url, data=m_data, extra_headers=extra_headers)
+ self.assertEqual(1, self.m_http_with_retries.call_count)
+
+
+class TestAzureHelperHttpWithRetries(CiTestCase):
+
+ with_logs = True
+
+ max_readurl_attempts = 240
+ default_readurl_timeout = 5
+ periodic_logging_attempts = 12
+
+ def setUp(self):
+ super(TestAzureHelperHttpWithRetries, self).setUp()
+ patches = ExitStack()
+ self.addCleanup(patches.close)
+
+ self.m_readurl = patches.enter_context(
+ mock.patch.object(
+ azure_helper.url_helper, 'readurl', mock.MagicMock()))
+ patches.enter_context(
+ mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock()))
+
+ def test_http_with_retries(self):
+ self.m_readurl.return_value = 'TestResp'
+ self.assertEqual(
+ azure_helper.http_with_retries('testurl'),
+ self.m_readurl.return_value)
+ self.assertEqual(self.m_readurl.call_count, 1)
+
+ def test_http_with_retries_propagates_readurl_exc_and_logs_exc(
+ self):
+ self.m_readurl.side_effect = SentinelException
+
+ self.assertRaises(
+ SentinelException, azure_helper.http_with_retries, 'testurl')
+ self.assertEqual(self.m_readurl.call_count, self.max_readurl_attempts)
+
+ self.assertIsNotNone(
+ re.search(
+ r'Failed HTTP request with Azure endpoint \S* during '
+ r'attempt \d+ with exception: \S*',
+ self.logs.getvalue()))
+ self.assertIsNone(
+ re.search(
+ r'Successful HTTP request with Azure endpoint \S* after '
+ r'\d+ attempts',
+ self.logs.getvalue()))
+
+ def test_http_with_retries_delayed_success_due_to_temporary_readurl_exc(
+ self):
+ self.m_readurl.side_effect = \
+ [SentinelException] * self.periodic_logging_attempts + \
+ ['TestResp']
+ self.m_readurl.return_value = 'TestResp'
+
+ response = azure_helper.http_with_retries('testurl')
+ self.assertEqual(
+ response,
+ self.m_readurl.return_value)
+ self.assertEqual(
+ self.m_readurl.call_count,
+ self.periodic_logging_attempts + 1)
+
+ def test_http_with_retries_long_delay_logs_periodic_failure_msg(self):
+ self.m_readurl.side_effect = \
+ [SentinelException] * self.periodic_logging_attempts + \
+ ['TestResp']
+ self.m_readurl.return_value = 'TestResp'
+
+ azure_helper.http_with_retries('testurl')
+
+ self.assertEqual(
+ self.m_readurl.call_count,
+ self.periodic_logging_attempts + 1)
+ self.assertIsNotNone(
+ re.search(
+ r'Failed HTTP request with Azure endpoint \S* during '
+ r'attempt \d+ with exception: \S*',
+ self.logs.getvalue()))
+ self.assertIsNotNone(
+ re.search(
+ r'Successful HTTP request with Azure endpoint \S* after '
+ r'\d+ attempts',
+ self.logs.getvalue()))
+
+ def test_http_with_retries_short_delay_does_not_log_periodic_failure_msg(
+ self):
+ self.m_readurl.side_effect = \
+ [SentinelException] * \
+ (self.periodic_logging_attempts - 1) + \
+ ['TestResp']
+ self.m_readurl.return_value = 'TestResp'
+
+ azure_helper.http_with_retries('testurl')
+ self.assertEqual(
+ self.m_readurl.call_count,
+ self.periodic_logging_attempts)
+
+ self.assertIsNone(
+ re.search(
+ r'Failed HTTP request with Azure endpoint \S* during '
+ r'attempt \d+ with exception: \S*',
+ self.logs.getvalue()))
+ self.assertIsNotNone(
+ re.search(
+ r'Successful HTTP request with Azure endpoint \S* after '
+ r'\d+ attempts',
+ self.logs.getvalue()))
+
+ def test_http_with_retries_calls_url_helper_readurl_with_args_kwargs(self):
+ testurl = mock.MagicMock()
+ kwargs = {
+ 'headers': mock.MagicMock(),
+ 'data': mock.MagicMock(),
+ # timeout kwarg should not be modified or deleted if present
+ 'timeout': mock.MagicMock()
+ }
+ azure_helper.http_with_retries(testurl, **kwargs)
+ self.m_readurl.assert_called_once_with(testurl, **kwargs)
+
+ def test_http_with_retries_adds_timeout_kwarg_if_not_present(self):
+ testurl = mock.MagicMock()
+ kwargs = {
+ 'headers': mock.MagicMock(),
+ 'data': mock.MagicMock()
+ }
+ expected_kwargs = copy.deepcopy(kwargs)
+ expected_kwargs['timeout'] = self.default_readurl_timeout
+
+ azure_helper.http_with_retries(testurl, **kwargs)
+ self.m_readurl.assert_called_once_with(testurl, **expected_kwargs)
+
+ def test_http_with_retries_deletes_retries_kwargs_passed_in(
+ self):
+ """http_with_retries already implements retry logic,
+ so url_helper.readurl should not have retries.
+ http_with_retries should delete kwargs that
+ cause url_helper.readurl to retry.
+ """
+ testurl = mock.MagicMock()
+ kwargs = {
+ 'headers': mock.MagicMock(),
+ 'data': mock.MagicMock(),
+ 'timeout': mock.MagicMock(),
+ 'retries': mock.MagicMock(),
+ 'infinite': mock.MagicMock()
+ }
+ expected_kwargs = copy.deepcopy(kwargs)
+ expected_kwargs.pop('retries', None)
+ expected_kwargs.pop('infinite', None)
+
+ azure_helper.http_with_retries(testurl, **kwargs)
+ self.m_readurl.assert_called_once_with(testurl, **expected_kwargs)
+ self.assertIn(
+ 'retries kwarg passed in for communication with Azure endpoint.',
+ self.logs.getvalue())
+ self.assertIn(
+ 'infinite kwarg passed in for communication with Azure endpoint.',
+ self.logs.getvalue())
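
Taken together, these tests pin down the contract of azure_helper.http_with_retries: retry url_helper.readurl up to a fixed attempt budget, inject a default timeout only when the caller omitted one, drop readurl's own retries/infinite kwargs with a log line, log failures only periodically, and log a success line when a retry eventually succeeds. A condensed sketch consistent with those assertions (the constants and log wording approximate, rather than reproduce, the shipped implementation):

    def http_with_retries(url, **kwargs):
        max_attempts, sleep_secs, log_every = 240, 5, 12
        kwargs.setdefault('timeout', 5)
        for dropped in ('retries', 'infinite'):
            if kwargs.pop(dropped, None) is not None:
                LOG.warning('%s kwarg passed in for communication with '
                            'Azure endpoint.', dropped)
        for attempt in range(1, max_attempts + 1):
            try:
                ret = url_helper.readurl(url, **kwargs)
                if attempt > 1:
                    LOG.info('Successful HTTP request with Azure endpoint '
                             '%s after %d attempts', url, attempt)
                return ret
            except Exception as e:
                if attempt % log_every == 0:
                    LOG.debug('Failed HTTP request with Azure endpoint %s '
                              'during attempt %d with exception: %s',
                              url, attempt, e)
                if attempt == max_attempts:
                    raise
                time.sleep(sleep_secs)
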
class TestOpenSSLManager(CiTestCase):
@@ -461,17 +624,24 @@ class TestOpenSSLManagerActions(CiTestCase):
class TestGoalStateHealthReporter(CiTestCase):
+ maxDiff = None
+
default_parameters = {
'incarnation': 1634,
'container_id': 'MyContainerId',
'instance_id': 'MyInstanceId'
}
- test_endpoint = 'TestEndpoint'
- test_url = 'http://{0}/machine?comp=health'.format(test_endpoint)
+ test_azure_endpoint = 'TestEndpoint'
+ test_health_report_url = 'http://{0}/machine?comp=health'.format(
+ test_azure_endpoint)
test_default_headers = {'Content-Type': 'text/xml; charset=utf-8'}
provisioning_success_status = 'Ready'
+ provisioning_not_ready_status = 'NotReady'
+ provisioning_failure_substatus = 'ProvisioningFailed'
+ provisioning_failure_err_description = (
+ 'Test error message containing provisioning failure details')
def setUp(self):
super(TestGoalStateHealthReporter, self).setUp()
@@ -496,17 +666,40 @@ class TestGoalStateHealthReporter(CiTestCase):
self.GoalState.return_value.incarnation = \
self.default_parameters['incarnation']
+ def _text_from_xpath_in_xroot(self, xroot, xpath):
+ element = xroot.find(xpath)
+ if element is not None:
+ return element.text
+ return None
+
def _get_formatted_health_report_xml_string(self, **kwargs):
return HEALTH_REPORT_XML_TEMPLATE.format(**kwargs)
+ def _get_formatted_health_detail_subsection_xml_string(self, **kwargs):
+ return HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(**kwargs)
+
def _get_report_ready_health_document(self):
return self._get_formatted_health_report_xml_string(
- incarnation=self.default_parameters['incarnation'],
- container_id=self.default_parameters['container_id'],
- instance_id=self.default_parameters['instance_id'],
- health_status=self.provisioning_success_status,
+ incarnation=escape(str(self.default_parameters['incarnation'])),
+ container_id=escape(self.default_parameters['container_id']),
+ instance_id=escape(self.default_parameters['instance_id']),
+ health_status=escape(self.provisioning_success_status),
health_detail_subsection='')
+ def _get_report_failure_health_document(self):
+ health_detail_subsection = \
+ self._get_formatted_health_detail_subsection_xml_string(
+ health_substatus=escape(self.provisioning_failure_substatus),
+ health_description=escape(
+ self.provisioning_failure_err_description))
+
+ return self._get_formatted_health_report_xml_string(
+ incarnation=escape(str(self.default_parameters['incarnation'])),
+ container_id=escape(self.default_parameters['container_id']),
+ instance_id=escape(self.default_parameters['instance_id']),
+ health_status=escape(self.provisioning_not_ready_status),
+ health_detail_subsection=health_detail_subsection)
+
def test_send_ready_signal_sends_post_request(self):
with mock.patch.object(
azure_helper.GoalStateHealthReporter,
@@ -514,55 +707,130 @@ class TestGoalStateHealthReporter(CiTestCase):
client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
- client, self.test_endpoint)
+ client, self.test_azure_endpoint)
reporter.send_ready_signal()
self.assertEqual(1, self.post.call_count)
self.assertEqual(
mock.call(
- self.test_url,
+ self.test_health_report_url,
data=m_build_report.return_value,
extra_headers=self.test_default_headers),
self.post.call_args)
- def test_build_report_for_health_document(self):
+ def test_send_failure_signal_sends_post_request(self):
+ with mock.patch.object(
+ azure_helper.GoalStateHealthReporter,
+ 'build_report') as m_build_report:
+ client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
+ reporter = azure_helper.GoalStateHealthReporter(
+ azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+ client, self.test_azure_endpoint)
+ reporter.send_failure_signal(
+ description=self.provisioning_failure_err_description)
+
+ self.assertEqual(1, self.post.call_count)
+ self.assertEqual(
+ mock.call(
+ self.test_health_report_url,
+ data=m_build_report.return_value,
+ extra_headers=self.test_default_headers),
+ self.post.call_args)
+
+ def test_build_report_for_ready_signal_health_document(self):
health_document = self._get_report_ready_health_document()
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_endpoint)
+ self.test_azure_endpoint)
generated_health_document = reporter.build_report(
incarnation=self.default_parameters['incarnation'],
container_id=self.default_parameters['container_id'],
instance_id=self.default_parameters['instance_id'],
status=self.provisioning_success_status)
+
self.assertEqual(health_document, generated_health_document)
- self.assertIn(
- '<GoalStateIncarnation>{}</GoalStateIncarnation>'.format(
- str(self.default_parameters['incarnation'])),
- generated_health_document)
- self.assertIn(
- ''.join([
- '<ContainerId>',
- self.default_parameters['container_id'],
- '</ContainerId>']),
- generated_health_document)
- self.assertIn(
- ''.join([
- '<InstanceId>',
- self.default_parameters['instance_id'],
- '</InstanceId>']),
- generated_health_document)
- self.assertIn(
- ''.join([
- '<State>',
- self.provisioning_success_status,
- '</State>']),
- generated_health_document
+
+ generated_xroot = ElementTree.fromstring(generated_health_document)
+ self.assertEqual(
+ self._text_from_xpath_in_xroot(
+ generated_xroot, './GoalStateIncarnation'),
+ str(self.default_parameters['incarnation']))
+ self.assertEqual(
+ self._text_from_xpath_in_xroot(
+ generated_xroot, './Container/ContainerId'),
+ str(self.default_parameters['container_id']))
+ self.assertEqual(
+ self._text_from_xpath_in_xroot(
+ generated_xroot,
+ './Container/RoleInstanceList/Role/InstanceId'),
+ str(self.default_parameters['instance_id']))
+ self.assertEqual(
+ self._text_from_xpath_in_xroot(
+ generated_xroot,
+ './Container/RoleInstanceList/Role/Health/State'),
+ escape(self.provisioning_success_status))
+ self.assertIsNone(
+ self._text_from_xpath_in_xroot(
+ generated_xroot,
+ './Container/RoleInstanceList/Role/Health/Details'))
+ self.assertIsNone(
+ self._text_from_xpath_in_xroot(
+ generated_xroot,
+ './Container/RoleInstanceList/Role/Health/Details/SubStatus'))
+ self.assertIsNone(
+ self._text_from_xpath_in_xroot(
+ generated_xroot,
+ './Container/RoleInstanceList/Role/Health/Details/Description')
)
- self.assertNotIn('<Details>', generated_health_document)
- self.assertNotIn('<SubStatus>', generated_health_document)
- self.assertNotIn('<Description>', generated_health_document)
+
+ def test_build_report_for_failure_signal_health_document(self):
+ health_document = self._get_report_failure_health_document()
+ reporter = azure_helper.GoalStateHealthReporter(
+ azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+ azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
+ self.test_azure_endpoint)
+ generated_health_document = reporter.build_report(
+ incarnation=self.default_parameters['incarnation'],
+ container_id=self.default_parameters['container_id'],
+ instance_id=self.default_parameters['instance_id'],
+ status=self.provisioning_not_ready_status,
+ substatus=self.provisioning_failure_substatus,
+ description=self.provisioning_failure_err_description)
+
+ self.assertEqual(health_document, generated_health_document)
+
+ generated_xroot = ElementTree.fromstring(generated_health_document)
+ self.assertEqual(
+ self._text_from_xpath_in_xroot(
+ generated_xroot, './GoalStateIncarnation'),
+ str(self.default_parameters['incarnation']))
+ self.assertEqual(
+ self._text_from_xpath_in_xroot(
+ generated_xroot, './Container/ContainerId'),
+ self.default_parameters['container_id'])
+ self.assertEqual(
+ self._text_from_xpath_in_xroot(
+ generated_xroot,
+ './Container/RoleInstanceList/Role/InstanceId'),
+ self.default_parameters['instance_id'])
+ self.assertEqual(
+ self._text_from_xpath_in_xroot(
+ generated_xroot,
+ './Container/RoleInstanceList/Role/Health/State'),
+ escape(self.provisioning_not_ready_status))
+ self.assertEqual(
+ self._text_from_xpath_in_xroot(
+ generated_xroot,
+ './Container/RoleInstanceList/Role/Health/Details/'
+ 'SubStatus'),
+ escape(self.provisioning_failure_substatus))
+ self.assertEqual(
+ self._text_from_xpath_in_xroot(
+ generated_xroot,
+ './Container/RoleInstanceList/Role/Health/Details/'
+ 'Description'),
+ escape(self.provisioning_failure_err_description))
def test_send_ready_signal_calls_build_report(self):
with mock.patch.object(
@@ -571,7 +839,7 @@ class TestGoalStateHealthReporter(CiTestCase):
reporter = azure_helper.GoalStateHealthReporter(
azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
- self.test_endpoint)
+ self.test_azure_endpoint)
reporter.send_ready_signal()
self.assertEqual(1, m_build_report.call_count)
@@ -583,6 +851,131 @@ class TestGoalStateHealthReporter(CiTestCase):
status=self.provisioning_success_status),
m_build_report.call_args)
+ def test_send_failure_signal_calls_build_report(self):
+ with mock.patch.object(
+ azure_helper.GoalStateHealthReporter, 'build_report'
+ ) as m_build_report:
+ reporter = azure_helper.GoalStateHealthReporter(
+ azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+ azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
+ self.test_azure_endpoint)
+ reporter.send_failure_signal(
+ description=self.provisioning_failure_err_description)
+
+ self.assertEqual(1, m_build_report.call_count)
+ self.assertEqual(
+ mock.call(
+ incarnation=self.default_parameters['incarnation'],
+ container_id=self.default_parameters['container_id'],
+ instance_id=self.default_parameters['instance_id'],
+ status=self.provisioning_not_ready_status,
+ substatus=self.provisioning_failure_substatus,
+ description=self.provisioning_failure_err_description),
+ m_build_report.call_args)
+
+ def test_build_report_escapes_chars(self):
+ incarnation = 'jd8\'9*&^<\'A><A[p&o+\"SD()*&&&LKAJSD23'
+ container_id = '&&<\"><><ds8\'9+7&d9a86!@($09asdl;<>'
+ instance_id = 'Opo>>>jas\'&d;[p&fp\"a<<!!@&&'
+ health_status = '&<897\"6&>&aa\'sd!@&!)((*<&>'
+ health_substatus = '&as\"d<<a&s>d<\'^@!5&6<7'
+ health_description = '&&&>!#$\"&&<as\'1!@$d&>><>&\"sd<67<]>>'
+
+ health_detail_subsection = \
+ self._get_formatted_health_detail_subsection_xml_string(
+ health_substatus=escape(health_substatus),
+ health_description=escape(health_description))
+ health_document = self._get_formatted_health_report_xml_string(
+ incarnation=escape(incarnation),
+ container_id=escape(container_id),
+ instance_id=escape(instance_id),
+ health_status=escape(health_status),
+ health_detail_subsection=health_detail_subsection)
+
+ reporter = azure_helper.GoalStateHealthReporter(
+ azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+ azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
+ self.test_azure_endpoint)
+ generated_health_document = reporter.build_report(
+ incarnation=incarnation,
+ container_id=container_id,
+ instance_id=instance_id,
+ status=health_status,
+ substatus=health_substatus,
+ description=health_description)
+
+ self.assertEqual(health_document, generated_health_document)
+
+ def test_build_report_conforms_to_length_limits(self):
+ reporter = azure_helper.GoalStateHealthReporter(
+ azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+ azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
+ self.test_azure_endpoint)
+ long_err_msg = 'a9&ea8>>>e as1< d\"q2*&(^%\'a=5<' * 100
+ generated_health_document = reporter.build_report(
+ incarnation=self.default_parameters['incarnation'],
+ container_id=self.default_parameters['container_id'],
+ instance_id=self.default_parameters['instance_id'],
+ status=self.provisioning_not_ready_status,
+ substatus=self.provisioning_failure_substatus,
+ description=long_err_msg)
+
+ generated_xroot = ElementTree.fromstring(generated_health_document)
+ generated_health_report_description = self._text_from_xpath_in_xroot(
+ generated_xroot,
+ './Container/RoleInstanceList/Role/Health/Details/Description')
+ self.assertEqual(
+ len(unescape(generated_health_report_description)),
+ HEALTH_REPORT_DESCRIPTION_TRIM_LEN)
+
+ def test_trim_description_then_escape_conforms_to_len_limits_worst_case(
+ self):
+ """When unescaped characters are XML-escaped, the length increases.
+ Char Escape String
+ < &lt;
+ > &gt;
+ " &quot;
+ ' &apos;
+ & &amp;
+
+ We (step 1) trim the health report XML's description field,
+ and then (step 2) XML-escape the health report XML's description field.
+
+ The health report XML's description field limit within cloud-init
+ is HEALTH_REPORT_DESCRIPTION_TRIM_LEN.
+
+ The Azure platform's limit on the health report XML's description field
+ is 4096 chars.
+
+ For the worst-case chars, XML-escaping blows up the length 6x:
+ ' escapes to &apos; and " escapes to &quot;, turning 1 char into 6,
+ so the 512-char trimmed description escapes to at most 3072 chars.
+
+ Ensure that (1) trimming and then (2) XML-escaping does not blow past
+ the Azure platform's limit for health report XML's description field
+ (4096 chars).
+ """
+ reporter = azure_helper.GoalStateHealthReporter(
+ azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+ azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
+ self.test_azure_endpoint)
+ long_err_msg = '\'\"' * 10000
+ generated_health_document = reporter.build_report(
+ incarnation=self.default_parameters['incarnation'],
+ container_id=self.default_parameters['container_id'],
+ instance_id=self.default_parameters['instance_id'],
+ status=self.provisioning_not_ready_status,
+ substatus=self.provisioning_failure_substatus,
+ description=long_err_msg)
+
+ generated_xroot = ElementTree.fromstring(generated_health_document)
+ generated_health_report_description = self._text_from_xpath_in_xroot(
+ generated_xroot,
+ './Container/RoleInstanceList/Role/Health/Details/Description')
+ # The escaped description must not exceed the Azure platform's
+ # 4096-char limit for the health report description field.
+ self.assertLessEqual(len(generated_health_report_description), 4096)
+
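
The 4096 bound asserted above follows directly from the trim-then-escape ordering: 512 worst-case chars each escape to a 6-char entity, which can be checked directly:

    from xml.sax.saxutils import escape

    trimmed = "'" * 512  # worst-case description after trimming
    escaped = escape(trimmed, {"'": '&apos;'})
    assert len(escaped) == 512 * 6  # 3072, safely under the 4096 limit
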
class TestWALinuxAgentShim(CiTestCase):
@@ -598,7 +991,7 @@ class TestWALinuxAgentShim(CiTestCase):
self.GoalState = patches.enter_context(
mock.patch.object(azure_helper, 'GoalState'))
self.OpenSSLManager = patches.enter_context(
- mock.patch.object(azure_helper, 'OpenSSLManager'))
+ mock.patch.object(azure_helper, 'OpenSSLManager', autospec=True))
patches.enter_context(
mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock()))
@@ -609,24 +1002,50 @@ class TestWALinuxAgentShim(CiTestCase):
self.GoalState.return_value.container_id = self.test_container_id
self.GoalState.return_value.instance_id = self.test_instance_id
- def test_azure_endpoint_client_uses_certificate_during_report_ready(self):
+ def test_http_client_does_not_use_certificate_for_report_ready(self):
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
self.assertEqual(
- [mock.call(self.OpenSSLManager.return_value.certificate)],
+ [mock.call(None)],
+ self.AzureEndpointHttpClient.call_args_list)
+
+ def test_http_client_does_not_use_certificate_for_report_failure(self):
+ shim = wa_shim()
+ shim.register_with_azure_and_report_failure(description='TestDesc')
+ self.assertEqual(
+ [mock.call(None)],
self.AzureEndpointHttpClient.call_args_list)
def test_correct_url_used_for_goalstate_during_report_ready(self):
self.find_endpoint.return_value = 'test_endpoint'
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
- get = self.AzureEndpointHttpClient.return_value.get
+ m_get = self.AzureEndpointHttpClient.return_value.get
+ self.assertEqual(
+ [mock.call('http://test_endpoint/machine/?comp=goalstate')],
+ m_get.call_args_list)
+ self.assertEqual(
+ [mock.call(
+ m_get.return_value.contents,
+ self.AzureEndpointHttpClient.return_value,
+ False
+ )],
+ self.GoalState.call_args_list)
+
+ def test_correct_url_used_for_goalstate_during_report_failure(self):
+ self.find_endpoint.return_value = 'test_endpoint'
+ shim = wa_shim()
+ shim.register_with_azure_and_report_failure(description='TestDesc')
+ m_get = self.AzureEndpointHttpClient.return_value.get
self.assertEqual(
[mock.call('http://test_endpoint/machine/?comp=goalstate')],
- get.call_args_list)
+ m_get.call_args_list)
self.assertEqual(
- [mock.call(get.return_value.contents,
- self.AzureEndpointHttpClient.return_value)],
+ [mock.call(
+ m_get.return_value.contents,
+ self.AzureEndpointHttpClient.return_value,
+ False
+ )],
self.GoalState.call_args_list)
def test_certificates_used_to_determine_public_keys(self):
@@ -667,6 +1086,16 @@ class TestWALinuxAgentShim(CiTestCase):
self.AzureEndpointHttpClient.return_value.post
.call_args_list)
+ def test_correct_url_used_for_report_failure(self):
+ self.find_endpoint.return_value = 'test_endpoint'
+ shim = wa_shim()
+ shim.register_with_azure_and_report_failure(description='TestDesc')
+ expected_url = 'http://test_endpoint/machine?comp=health'
+ self.assertEqual(
+ [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)],
+ self.AzureEndpointHttpClient.return_value.post
+ .call_args_list)
+
def test_goal_state_values_used_for_report_ready(self):
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
@@ -678,44 +1107,128 @@ class TestWALinuxAgentShim(CiTestCase):
self.assertIn(self.test_container_id, posted_document)
self.assertIn(self.test_instance_id, posted_document)
- def test_xml_elems_in_report_ready(self):
+ def test_goal_state_values_used_for_report_failure(self):
+ shim = wa_shim()
+ shim.register_with_azure_and_report_failure(description='TestDesc')
+ posted_document = (
+ self.AzureEndpointHttpClient.return_value.post
+ .call_args[1]['data']
+ )
+ self.assertIn(self.test_incarnation, posted_document)
+ self.assertIn(self.test_container_id, posted_document)
+ self.assertIn(self.test_instance_id, posted_document)
+
+ def test_xml_elems_in_report_ready_post(self):
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
health_document = HEALTH_REPORT_XML_TEMPLATE.format(
- incarnation=self.test_incarnation,
- container_id=self.test_container_id,
- instance_id=self.test_instance_id,
- health_status='Ready',
+ incarnation=escape(self.test_incarnation),
+ container_id=escape(self.test_container_id),
+ instance_id=escape(self.test_instance_id),
+ health_status=escape('Ready'),
health_detail_subsection='')
posted_document = (
self.AzureEndpointHttpClient.return_value.post
.call_args[1]['data'])
self.assertEqual(health_document, posted_document)
+ def test_xml_elems_in_report_failure_post(self):
+ shim = wa_shim()
+ shim.register_with_azure_and_report_failure(description='TestDesc')
+ health_document = HEALTH_REPORT_XML_TEMPLATE.format(
+ incarnation=escape(self.test_incarnation),
+ container_id=escape(self.test_container_id),
+ instance_id=escape(self.test_instance_id),
+ health_status=escape('NotReady'),
+ health_detail_subsection=HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE
+ .format(
+ health_substatus=escape('ProvisioningFailed'),
+ health_description=escape('TestDesc')))
+ posted_document = (
+ self.AzureEndpointHttpClient.return_value.post
+ .call_args[1]['data'])
+ self.assertEqual(health_document, posted_document)
+
+ @mock.patch.object(azure_helper, 'GoalStateHealthReporter', autospec=True)
+ def test_register_with_azure_and_fetch_data_calls_send_ready_signal(
+ self, m_goal_state_health_reporter):
+ shim = wa_shim()
+ shim.register_with_azure_and_fetch_data()
+ self.assertEqual(
+ 1,
+ m_goal_state_health_reporter.return_value.send_ready_signal
+ .call_count)
+
+ @mock.patch.object(azure_helper, 'GoalStateHealthReporter', autospec=True)
+ def test_register_with_azure_and_report_failure_calls_send_failure_signal(
+ self, m_goal_state_health_reporter):
+ shim = wa_shim()
+ shim.register_with_azure_and_report_failure(description='TestDesc')
+ m_goal_state_health_reporter.return_value.send_failure_signal \
+ .assert_called_once_with(description='TestDesc')
+
+ def test_register_with_azure_and_report_failure_does_not_need_certificates(
+ self):
+ shim = wa_shim()
+ with mock.patch.object(
+ shim, '_fetch_goal_state_from_azure', autospec=True
+ ) as m_fetch_goal_state_from_azure:
+ shim.register_with_azure_and_report_failure(description='TestDesc')
+ m_fetch_goal_state_from_azure.assert_called_once_with(
+ need_certificate=False)
+
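
The tests above capture why the failure path never touches OpenSSLManager: goal-state certificates are only needed to decrypt provisioning secrets for report-ready, while a failure report just POSTs a NotReady health document. A sketch of the shim method consistent with these assertions (attribute names are illustrative):

    def register_with_azure_and_report_failure(self, description):
        # Sketch: no certificate material is needed to post the NotReady
        # health document, so the goal-state fetch skips decryption.
        goal_state = self._fetch_goal_state_from_azure(need_certificate=False)
        reporter = GoalStateHealthReporter(
            goal_state, self.azure_endpoint_client, self.endpoint)
        reporter.send_failure_signal(description=description)
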
def test_clean_up_can_be_called_at_any_time(self):
shim = wa_shim()
shim.clean_up()
+ def test_openssl_manager_not_instantiated_by_shim_report_status(self):
+ shim = wa_shim()
+ shim.register_with_azure_and_fetch_data()
+ shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.clean_up()
+ self.OpenSSLManager.assert_not_called()
+
def test_clean_up_after_report_ready(self):
shim = wa_shim()
shim.register_with_azure_and_fetch_data()
shim.clean_up()
- self.assertEqual(
- 1, self.OpenSSLManager.return_value.clean_up.call_count)
+ self.OpenSSLManager.return_value.clean_up.assert_not_called()
+
+ def test_clean_up_after_report_failure(self):
+ shim = wa_shim()
+ shim.register_with_azure_and_report_failure(description='TestDesc')
+ shim.clean_up()
+ self.OpenSSLManager.return_value.clean_up.assert_not_called()
def test_fetch_goalstate_during_report_ready_raises_exc_on_get_exc(self):
self.AzureEndpointHttpClient.return_value.get \
- .side_effect = (SentinelException)
+ .side_effect = SentinelException
shim = wa_shim()
self.assertRaises(SentinelException,
shim.register_with_azure_and_fetch_data)
+ def test_fetch_goalstate_during_report_failure_raises_exc_on_get_exc(self):
+ self.AzureEndpointHttpClient.return_value.get \
+ .side_effect = SentinelException
+ shim = wa_shim()
+ self.assertRaises(SentinelException,
+ shim.register_with_azure_and_report_failure,
+ description='TestDesc')
+
def test_fetch_goalstate_during_report_ready_raises_exc_on_parse_exc(self):
self.GoalState.side_effect = SentinelException
shim = wa_shim()
self.assertRaises(SentinelException,
shim.register_with_azure_and_fetch_data)
+ def test_fetch_goalstate_during_report_failure_raises_exc_on_parse_exc(
+ self):
+ self.GoalState.side_effect = SentinelException
+ shim = wa_shim()
+ self.assertRaises(SentinelException,
+ shim.register_with_azure_and_report_failure,
+ description='TestDesc')
+
def test_failure_to_send_report_ready_health_doc_bubbles_up(self):
self.AzureEndpointHttpClient.return_value.post \
.side_effect = SentinelException
@@ -723,55 +1236,132 @@ class TestWALinuxAgentShim(CiTestCase):
self.assertRaises(SentinelException,
shim.register_with_azure_and_fetch_data)
+ def test_failure_to_send_report_failure_health_doc_bubbles_up(self):
+ self.AzureEndpointHttpClient.return_value.post \
+ .side_effect = SentinelException
+ shim = wa_shim()
+ self.assertRaises(SentinelException,
+ shim.register_with_azure_and_report_failure,
+ description='TestDesc')
+
class TestGetMetadataGoalStateXMLAndReportReadyToFabric(CiTestCase):
- @mock.patch.object(azure_helper, 'WALinuxAgentShim')
- def test_data_from_shim_returned(self, shim):
+ def setUp(self):
+ super(TestGetMetadataGoalStateXMLAndReportReadyToFabric, self).setUp()
+ patches = ExitStack()
+ self.addCleanup(patches.close)
+
+ self.m_shim = patches.enter_context(
+ mock.patch.object(azure_helper, 'WALinuxAgentShim'))
+
+ def test_data_from_shim_returned(self):
ret = azure_helper.get_metadata_from_fabric()
self.assertEqual(
- shim.return_value.register_with_azure_and_fetch_data.return_value,
+ self.m_shim.return_value.register_with_azure_and_fetch_data
+ .return_value,
ret)
- @mock.patch.object(azure_helper, 'WALinuxAgentShim')
- def test_success_calls_clean_up(self, shim):
+ def test_success_calls_clean_up(self):
azure_helper.get_metadata_from_fabric()
- self.assertEqual(1, shim.return_value.clean_up.call_count)
+ self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
- @mock.patch.object(azure_helper, 'WALinuxAgentShim')
- def test_failure_in_registration_calls_clean_up(self, shim):
- shim.return_value.register_with_azure_and_fetch_data.side_effect = (
- SentinelException)
+ def test_failure_in_registration_propagates_exc_and_calls_clean_up(
+ self):
+ self.m_shim.return_value.register_with_azure_and_fetch_data \
+ .side_effect = SentinelException
self.assertRaises(SentinelException,
azure_helper.get_metadata_from_fabric)
- self.assertEqual(1, shim.return_value.clean_up.call_count)
+ self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
- @mock.patch.object(azure_helper, 'WALinuxAgentShim')
- def test_calls_shim_register_with_azure_and_fetch_data(self, shim):
+ def test_calls_shim_register_with_azure_and_fetch_data(self):
m_pubkey_info = mock.MagicMock()
azure_helper.get_metadata_from_fabric(pubkey_info=m_pubkey_info)
self.assertEqual(
1,
- shim.return_value
+ self.m_shim.return_value
.register_with_azure_and_fetch_data.call_count)
self.assertEqual(
mock.call(pubkey_info=m_pubkey_info),
- shim.return_value
+ self.m_shim.return_value
.register_with_azure_and_fetch_data.call_args)
- @mock.patch.object(azure_helper, 'WALinuxAgentShim')
- def test_instantiates_shim_with_kwargs(self, shim):
+ def test_instantiates_shim_with_kwargs(self):
m_fallback_lease_file = mock.MagicMock()
m_dhcp_options = mock.MagicMock()
azure_helper.get_metadata_from_fabric(
fallback_lease_file=m_fallback_lease_file,
dhcp_opts=m_dhcp_options)
- self.assertEqual(1, shim.call_count)
+ self.assertEqual(1, self.m_shim.call_count)
self.assertEqual(
mock.call(
fallback_lease_file=m_fallback_lease_file,
dhcp_options=m_dhcp_options),
- shim.call_args)
+ self.m_shim.call_args)
+
+
+class TestGetMetadataGoalStateXMLAndReportFailureToFabric(CiTestCase):
+
+ def setUp(self):
+ super(
+ TestGetMetadataGoalStateXMLAndReportFailureToFabric, self).setUp()
+ patches = ExitStack()
+ self.addCleanup(patches.close)
+
+ self.m_shim = patches.enter_context(
+ mock.patch.object(azure_helper, 'WALinuxAgentShim'))
+
+ def test_success_calls_clean_up(self):
+ azure_helper.report_failure_to_fabric()
+ self.assertEqual(
+ 1,
+ self.m_shim.return_value.clean_up.call_count)
+
+ def test_failure_in_shim_report_failure_propagates_exc_and_calls_clean_up(
+ self):
+ self.m_shim.return_value.register_with_azure_and_report_failure \
+ .side_effect = SentinelException
+ self.assertRaises(SentinelException,
+ azure_helper.report_failure_to_fabric)
+ self.assertEqual(
+ 1,
+ self.m_shim.return_value.clean_up.call_count)
+
+ def test_report_failure_to_fabric_with_desc_calls_shim_report_failure(
+ self):
+ azure_helper.report_failure_to_fabric(description='TestDesc')
+ self.m_shim.return_value.register_with_azure_and_report_failure \
+ .assert_called_once_with(description='TestDesc')
+
+ def test_report_failure_to_fabric_with_no_desc_calls_shim_report_failure(
+ self):
+ azure_helper.report_failure_to_fabric()
+ # default err message description should be shown to the user
+ # if no description is passed in
+ self.m_shim.return_value.register_with_azure_and_report_failure \
+ .assert_called_once_with(
+ description=azure_helper
+ .DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE)
+
+ def test_report_failure_to_fabric_empty_desc_calls_shim_report_failure(
+ self):
+ azure_helper.report_failure_to_fabric(description='')
+ # default err message description should be shown to the user
+ # if an empty description is passed in
+ self.m_shim.return_value.register_with_azure_and_report_failure \
+ .assert_called_once_with(
+ description=azure_helper
+ .DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE)
+
+ def test_instantiates_shim_with_kwargs(self):
+ m_fallback_lease_file = mock.MagicMock()
+ m_dhcp_options = mock.MagicMock()
+ azure_helper.report_failure_to_fabric(
+ fallback_lease_file=m_fallback_lease_file,
+ dhcp_opts=m_dhcp_options)
+ self.m_shim.assert_called_once_with(
+ fallback_lease_file=m_fallback_lease_file,
+ dhcp_options=m_dhcp_options)
class TestExtractIpAddressFromNetworkd(CiTestCase):
diff --git a/tests/unittests/test_datasource/test_hetzner.py b/tests/unittests/test_datasource/test_hetzner.py
index d0879545..eadb92f1 100644
--- a/tests/unittests/test_datasource/test_hetzner.py
+++ b/tests/unittests/test_datasource/test_hetzner.py
@@ -77,10 +77,11 @@ class TestDataSourceHetzner(CiTestCase):
@mock.patch('cloudinit.net.find_fallback_nic')
@mock.patch('cloudinit.sources.helpers.hetzner.read_metadata')
@mock.patch('cloudinit.sources.helpers.hetzner.read_userdata')
- @mock.patch('cloudinit.sources.DataSourceHetzner.on_hetzner')
- def test_read_data(self, m_on_hetzner, m_usermd, m_readmd, m_fallback_nic,
- m_net):
- m_on_hetzner.return_value = True
+ @mock.patch('cloudinit.sources.DataSourceHetzner.get_hcloud_data')
+ def test_read_data(self, m_get_hcloud_data, m_usermd, m_readmd,
+ m_fallback_nic, m_net):
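+ # get_hcloud_data() returns an (on_hetzner, instance_id) tuple.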
+ m_get_hcloud_data.return_value = (True,
+ str(METADATA.get('instance-id')))
m_readmd.return_value = METADATA.copy()
m_usermd.return_value = USERDATA
m_fallback_nic.return_value = 'eth0'
@@ -107,11 +108,12 @@ class TestDataSourceHetzner(CiTestCase):
@mock.patch('cloudinit.sources.helpers.hetzner.read_metadata')
@mock.patch('cloudinit.net.find_fallback_nic')
- @mock.patch('cloudinit.sources.DataSourceHetzner.on_hetzner')
- def test_not_on_hetzner_returns_false(self, m_on_hetzner, m_find_fallback,
- m_read_md):
- """If helper 'on_hetzner' returns False, return False from get_data."""
- m_on_hetzner.return_value = False
+ @mock.patch('cloudinit.sources.DataSourceHetzner.get_hcloud_data')
+ def test_not_on_hetzner_returns_false(self, m_get_hcloud_data,
+ m_find_fallback, m_read_md):
+ """If helper 'get_hcloud_data' returns False,
+ return False from get_data."""
+ m_get_hcloud_data.return_value = (False, None)
ds = self.get_ds()
ret = ds.get_data()
diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py
index 2e6b53ff..02cc9b38 100644
--- a/tests/unittests/test_datasource/test_nocloud.py
+++ b/tests/unittests/test_datasource/test_nocloud.py
@@ -1,5 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
+from cloudinit import dmi
from cloudinit import helpers
from cloudinit.sources.DataSourceNoCloud import (
DataSourceNoCloud as dsNoCloud,
@@ -30,7 +31,7 @@ class TestNoCloudDataSource(CiTestCase):
self.mocks.enter_context(
mock.patch.object(util, 'get_cmdline', return_value=self.cmdline))
self.mocks.enter_context(
- mock.patch.object(util, 'read_dmi_data', return_value=None))
+ mock.patch.object(dmi, 'read_dmi_data', return_value=None))
def _test_fs_config_is_read(self, fs_label, fs_label_to_search):
vfat_device = 'device-1'
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
index 3cfba74d..415755aa 100644
--- a/tests/unittests/test_datasource/test_openstack.py
+++ b/tests/unittests/test_datasource/test_openstack.py
@@ -459,7 +459,7 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
ds.detect_openstack(), 'Expected detect_openstack == True')
@test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env')
- @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
+ @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
def test_not_detect_openstack_intel_x86_ec2(self, m_dmi, m_proc_env,
m_is_x86):
"""Return False on EC2 platforms."""
@@ -479,7 +479,7 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
ds.detect_openstack(), 'Expected detect_openstack == False on EC2')
m_proc_env.assert_called_with(1)
- @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
+ @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
def test_detect_openstack_intel_product_name_compute(self, m_dmi,
m_is_x86):
"""Return True on OpenStack compute and nova instances."""
@@ -491,7 +491,7 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
self.assertTrue(
ds.detect_openstack(), 'Failed to detect_openstack')
- @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
+ @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
def test_detect_openstack_opentelekomcloud_chassis_asset_tag(self, m_dmi,
m_is_x86):
"""Return True on OpenStack reporting OpenTelekomCloud asset-tag."""
@@ -509,7 +509,7 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
ds.detect_openstack(),
'Expected detect_openstack == True on OpenTelekomCloud')
- @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
+ @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
def test_detect_openstack_sapccloud_chassis_asset_tag(self, m_dmi,
m_is_x86):
"""Return True on OpenStack reporting SAP CCloud VM asset-tag."""
@@ -527,7 +527,7 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
ds.detect_openstack(),
'Expected detect_openstack == True on SAP CCloud VM')
- @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
+ @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
def test_detect_openstack_oraclecloud_chassis_asset_tag(self, m_dmi,
m_is_x86):
"""Return True on OpenStack reporting Oracle cloud asset-tag."""
@@ -548,8 +548,38 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
ds.detect_openstack(accept_oracle=False),
'Expected detect_openstack == False.')
+ def _test_detect_openstack_nova_compute_chassis_asset_tag(self, m_dmi,
+ m_is_x86,
+ chassis_tag):
+ """Return True on OpenStack reporting generic asset-tag."""
+ m_is_x86.return_value = True
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == 'system-product-name':
+ return 'Generic OpenStack Platform'
+ if dmi_key == 'chassis-asset-tag':
+ return chassis_tag
+ assert False, 'Unexpected dmi read of %s' % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(),
+ 'Expected detect_openstack == True on Generic OpenStack Platform')
+
+ @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
+ def test_detect_openstack_nova_chassis_asset_tag(self, m_dmi,
+ m_is_x86):
+ self._test_detect_openstack_nova_compute_chassis_asset_tag(
+ m_dmi, m_is_x86, 'OpenStack Nova')
+
+ @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
+ def test_detect_openstack_compute_chassis_asset_tag(self, m_dmi,
+ m_is_x86):
+ self._test_detect_openstack_nova_compute_chassis_asset_tag(
+ m_dmi, m_is_x86, 'OpenStack Compute')
+
@test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env')
- @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
+ @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
def test_detect_openstack_by_proc_1_environ(self, m_dmi, m_proc_env,
m_is_x86):
"""Return True when nova product_name specified in /proc/1/environ."""
diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py
index 1d088577..16773de5 100644
--- a/tests/unittests/test_datasource/test_ovf.py
+++ b/tests/unittests/test_datasource/test_ovf.py
@@ -129,7 +129,7 @@ class TestDatasourceOVF(CiTestCase):
ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
retcode = wrap_and_call(
'cloudinit.sources.DataSourceOVF',
- {'util.read_dmi_data': None,
+ {'dmi.read_dmi_data': None,
'transport_iso9660': NOT_FOUND,
'transport_vmware_guestinfo': NOT_FOUND},
ds.get_data)
@@ -145,7 +145,7 @@ class TestDatasourceOVF(CiTestCase):
paths=paths)
retcode = wrap_and_call(
'cloudinit.sources.DataSourceOVF',
- {'util.read_dmi_data': 'vmware',
+ {'dmi.read_dmi_data': 'vmware',
'transport_iso9660': NOT_FOUND,
'transport_vmware_guestinfo': NOT_FOUND},
ds.get_data)
@@ -174,7 +174,7 @@ class TestDatasourceOVF(CiTestCase):
with self.assertRaises(CustomScriptNotFound) as context:
wrap_and_call(
'cloudinit.sources.DataSourceOVF',
- {'util.read_dmi_data': 'vmware',
+ {'dmi.read_dmi_data': 'vmware',
'util.del_dir': True,
'search_file': self.tdir,
'wait_for_imc_cfg_file': conf_file,
@@ -211,7 +211,7 @@ class TestDatasourceOVF(CiTestCase):
with self.assertRaises(RuntimeError) as context:
wrap_and_call(
'cloudinit.sources.DataSourceOVF',
- {'util.read_dmi_data': 'vmware',
+ {'dmi.read_dmi_data': 'vmware',
'util.del_dir': True,
'search_file': self.tdir,
'wait_for_imc_cfg_file': conf_file,
@@ -246,7 +246,7 @@ class TestDatasourceOVF(CiTestCase):
with self.assertRaises(CustomScriptNotFound) as context:
wrap_and_call(
'cloudinit.sources.DataSourceOVF',
- {'util.read_dmi_data': 'vmware',
+ {'dmi.read_dmi_data': 'vmware',
'util.del_dir': True,
'search_file': self.tdir,
'wait_for_imc_cfg_file': conf_file,
@@ -290,7 +290,7 @@ class TestDatasourceOVF(CiTestCase):
with self.assertRaises(CustomScriptNotFound) as context:
wrap_and_call(
'cloudinit.sources.DataSourceOVF',
- {'util.read_dmi_data': 'vmware',
+ {'dmi.read_dmi_data': 'vmware',
'util.del_dir': True,
'search_file': self.tdir,
'wait_for_imc_cfg_file': conf_file,
@@ -313,7 +313,7 @@ class TestDatasourceOVF(CiTestCase):
self.assertEqual('ovf', ds.cloud_name)
self.assertEqual('ovf', ds.platform_type)
- with mock.patch(MPATH + 'util.read_dmi_data', return_value='!VMware'):
+ with mock.patch(MPATH + 'dmi.read_dmi_data', return_value='!VMware'):
with mock.patch(MPATH + 'transport_vmware_guestinfo') as m_guestd:
with mock.patch(MPATH + 'transport_iso9660') as m_iso9660:
m_iso9660.return_value = NOT_FOUND
@@ -334,7 +334,7 @@ class TestDatasourceOVF(CiTestCase):
self.assertEqual('ovf', ds.cloud_name)
self.assertEqual('ovf', ds.platform_type)
- with mock.patch(MPATH + 'util.read_dmi_data', return_value='VMWare'):
+ with mock.patch(MPATH + 'dmi.read_dmi_data', return_value='VMWare'):
with mock.patch(MPATH + 'transport_vmware_guestinfo') as m_guestd:
with mock.patch(MPATH + 'transport_iso9660') as m_iso9660:
m_iso9660.return_value = NOT_FOUND
diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py
index 9d82bda9..32f3274a 100644
--- a/tests/unittests/test_datasource/test_scaleway.py
+++ b/tests/unittests/test_datasource/test_scaleway.py
@@ -87,7 +87,7 @@ class TestOnScaleway(CiTestCase):
@mock.patch('cloudinit.util.get_cmdline')
@mock.patch('os.path.exists')
- @mock.patch('cloudinit.util.read_dmi_data')
+ @mock.patch('cloudinit.dmi.read_dmi_data')
def test_not_on_scaleway(self, m_read_dmi_data, m_file_exists,
m_get_cmdline):
self.install_mocks(
@@ -105,7 +105,7 @@ class TestOnScaleway(CiTestCase):
@mock.patch('cloudinit.util.get_cmdline')
@mock.patch('os.path.exists')
- @mock.patch('cloudinit.util.read_dmi_data')
+ @mock.patch('cloudinit.dmi.read_dmi_data')
def test_on_scaleway_dmi(self, m_read_dmi_data, m_file_exists,
m_get_cmdline):
"""
@@ -121,7 +121,7 @@ class TestOnScaleway(CiTestCase):
@mock.patch('cloudinit.util.get_cmdline')
@mock.patch('os.path.exists')
- @mock.patch('cloudinit.util.read_dmi_data')
+ @mock.patch('cloudinit.dmi.read_dmi_data')
def test_on_scaleway_var_run_scaleway(self, m_read_dmi_data, m_file_exists,
m_get_cmdline):
"""
@@ -136,7 +136,7 @@ class TestOnScaleway(CiTestCase):
@mock.patch('cloudinit.util.get_cmdline')
@mock.patch('os.path.exists')
- @mock.patch('cloudinit.util.read_dmi_data')
+ @mock.patch('cloudinit.dmi.read_dmi_data')
def test_on_scaleway_cmdline(self, m_read_dmi_data, m_file_exists,
m_get_cmdline):
"""
diff --git a/tests/unittests/test_distros/test_gentoo.py b/tests/unittests/test_distros/test_gentoo.py
new file mode 100644
index 00000000..37a4f51f
--- /dev/null
+++ b/tests/unittests/test_distros/test_gentoo.py
@@ -0,0 +1,26 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import util
+from cloudinit import atomic_helper
+from cloudinit.tests.helpers import CiTestCase
+from . import _get_distro
+
+
+class TestGentoo(CiTestCase):
+
+ def test_write_hostname(self):
+ distro = _get_distro("gentoo")
+ hostname = "myhostname"
+ hostfile = self.tmp_path("hostfile")
+ distro._write_hostname(hostname, hostfile)
+ self.assertEqual('hostname="myhostname"\n', util.load_file(hostfile))
+
+ def test_write_existing_hostname_with_comments(self):
+ distro = _get_distro("gentoo")
+ hostname = "myhostname"
+ contents = '#This is the hostname\nhostname="localhost"'
+ hostfile = self.tmp_path("hostfile")
+ atomic_helper.write_file(hostfile, contents, omode="w")
+ distro._write_hostname(hostname, hostfile)
+ self.assertEqual('#This is the hostname\nhostname="myhostname"\n',
+ util.load_file(hostfile))
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
index 8d7b09c8..a1df066a 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/test_distros/test_netconfig.py
@@ -514,7 +514,9 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
DEVICE=eth0
IPV6ADDR=2607:f0d0:1002:0011::2/64
IPV6INIT=yes
+ IPV6_AUTOCONF=no
IPV6_DEFAULTGW=2607:f0d0:1002:0011::1
+ IPV6_FORCE_ACCEPT_RA=no
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
@@ -539,6 +541,87 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
V1_NET_CFG_IPV6,
expected_cfgs=expected_cfgs.copy())
+ def test_vlan_render_unsupported(self):
+ """Render officially unsupported vlan names."""
+ cfg = {
+ 'version': 2,
+ 'ethernets': {
+ 'eth0': {'addresses': ["192.10.1.2/24"],
+ 'match': {'macaddress': "00:16:3e:60:7c:df"}}},
+ 'vlans': {
+ 'infra0': {'addresses': ["10.0.1.2/16"],
+ 'id': 1001, 'link': 'eth0'}},
+ }
+ expected_cfgs = {
+ self.ifcfg_path('eth0'): dedent("""\
+ BOOTPROTO=none
+ DEVICE=eth0
+ HWADDR=00:16:3e:60:7c:df
+ IPADDR=192.10.1.2
+ NETMASK=255.255.255.0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ self.ifcfg_path('infra0'): dedent("""\
+ BOOTPROTO=none
+ DEVICE=infra0
+ IPADDR=10.0.1.2
+ NETMASK=255.255.0.0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ PHYSDEV=eth0
+ USERCTL=no
+ VLAN=yes
+ """),
+ self.control_path(): dedent("""\
+ NETWORKING=yes
+ """),
+ }
+ self._apply_and_verify(
+ self.distro.apply_network_config, cfg,
+ expected_cfgs=expected_cfgs)
+
+ def test_vlan_render(self):
+ cfg = {
+ 'version': 2,
+ 'ethernets': {
+ 'eth0': {'addresses': ["192.10.1.2/24"]}},
+ 'vlans': {
+ 'eth0.1001': {'addresses': ["10.0.1.2/16"],
+ 'id': 1001, 'link': 'eth0'}},
+ }
+ expected_cfgs = {
+ self.ifcfg_path('eth0'): dedent("""\
+ BOOTPROTO=none
+ DEVICE=eth0
+ IPADDR=192.10.1.2
+ NETMASK=255.255.255.0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ self.ifcfg_path('eth0.1001'): dedent("""\
+ BOOTPROTO=none
+ DEVICE=eth0.1001
+ IPADDR=10.0.1.2
+ NETMASK=255.255.0.0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ PHYSDEV=eth0
+ USERCTL=no
+ VLAN=yes
+ """),
+ self.control_path(): dedent("""\
+ NETWORKING=yes
+ """),
+ }
+ self._apply_and_verify(
+ self.distro.apply_network_config, cfg,
+ expected_cfgs=expected_cfgs)
+
class TestNetCfgDistroOpensuse(TestNetCfgDistroBase):
diff --git a/tests/unittests/test_distros/test_resolv.py b/tests/unittests/test_distros/test_resolv.py
index 68ea0083..7d940750 100644
--- a/tests/unittests/test_distros/test_resolv.py
+++ b/tests/unittests/test_distros/test_resolv.py
@@ -1,12 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit.distros.parsers import resolv_conf
-from cloudinit.distros import rhel_util
from cloudinit.tests.helpers import TestCase
import re
-import tempfile
BASE_RESOLVE = '''
@@ -24,10 +22,6 @@ class TestResolvHelper(TestCase):
rp_r = str(rp).strip()
self.assertEqual(BASE_RESOLVE, rp_r)
- def test_write_works(self):
- with tempfile.NamedTemporaryFile() as fh:
- rhel_util.update_resolve_conf_file(fh.name, [], [])
-
def test_local_domain(self):
rp = resolv_conf.ResolvConf(BASE_RESOLVE)
self.assertIsNone(rp.local_domain)
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 9314b244..1d8aaf18 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -20,6 +20,8 @@ UNAME_MYSYS = ("Linux bart 4.4.0-62-generic #83-Ubuntu "
UNAME_PPC64EL = ("Linux diamond 4.4.0-83-generic #106-Ubuntu SMP "
"Mon Jun 26 17:53:54 UTC 2017 "
"ppc64le ppc64le ppc64le GNU/Linux")
+UNAME_FREEBSD = ("FreeBSD fbsd12-1 12.1-RELEASE-p10 "
+ "FreeBSD 12.1-RELEASE-p10 GENERIC amd64")
BLKID_EFI_ROOT = """
DEVNAME=/dev/sda1
@@ -80,6 +82,7 @@ MOCK_VIRT_IS_VMWARE = {'name': 'detect_virt', 'RET': 'vmware', 'ret': 0}
MOCK_VIRT_IS_VM_OTHER = {'name': 'detect_virt', 'RET': 'vm-other', 'ret': 0}
MOCK_VIRT_IS_XEN = {'name': 'detect_virt', 'RET': 'xen', 'ret': 0}
MOCK_UNAME_IS_PPC64 = {'name': 'uname', 'out': UNAME_PPC64EL, 'ret': 0}
+MOCK_UNAME_IS_FREEBSD = {'name': 'uname', 'out': UNAME_FREEBSD, 'ret': 0}
shell_true = 0
shell_false = 1
@@ -143,6 +146,8 @@ class DsIdentifyBase(CiTestCase):
'out': 'No value found', 'ret': 1},
{'name': 'dmi_decode', 'ret': 1,
'err': 'No dmidecode program. ERROR.'},
+ {'name': 'get_kenv_field', 'ret': 1,
+ 'err': 'No kenv program. ERROR.'},
]
written = [d['name'] for d in mocks]
@@ -257,6 +262,10 @@ class TestDsIdentify(DsIdentifyBase):
"""EC2: bobrightbox.com in product_serial is not brightbox'"""
self._test_ds_not_found('Ec2-brightbox-negative')
+ def test_freebsd_nocloud(self):
+ """NoCloud identified on FreeBSD via label by geom."""
+ self._test_ds_found('NoCloud-fbsd')
+
def test_gce_by_product_name(self):
"""GCE identifies itself with product_name."""
self._test_ds_found('GCE')
@@ -644,14 +653,22 @@ class TestDsIdentify(DsIdentifyBase):
class TestBSDNoSys(DsIdentifyBase):
"""Test *BSD code paths
- FreeBSD doesn't have /sys so we use dmidecode(8) here
- It also doesn't have systemd-detect-virt(8), so we use sysctl(8) to query
+ FreeBSD doesn't have /sys so we use kenv(1) here.
+ Other BSD systems fall back to dmidecode(8).
+ BSDs also don't have systemd-detect-virt(8), so we use sysctl(8) to query
kern.vm_guest, and optionally map it"""
- def test_dmi_decode(self):
+ def test_dmi_kenv(self):
+ """Test that kenv(1) works on systems which don't have /sys
+
+ This will be used on FreeBSD systems.
+ """
+ self._test_ds_found('Hetzner-kenv')
+
+ def test_dmi_dmidecode(self):
"""Test that dmidecode(8) works on systems which don't have /sys
- This will be used on *BSD systems.
+ This will be used on all other BSD systems.
"""
self._test_ds_found('Hetzner-dmidecode')
@@ -725,6 +742,26 @@ def blkid_out(disks=None):
return '\n'.join(lines)
+def geom_out(disks=None):
+ """Convert a list of disk dictionaries into geom content.
+
+ geom, called with -a (provider) and -s (script-friendly), produces
+ the following output:
+
+ gpt/gptboot0 N/A vtbd1p1
+ gpt/swap0 N/A vtbd1p2
+ iso9660/cidata N/A vtbd2
+ """
+ if disks is None:
+ disks = []
+ lines = []
+ for disk in disks:
+ lines.append("%s/%s N/A %s" % (
+ disk["TYPE"], disk["LABEL"], disk["DEVNAME"]))
+ lines.append("")
+ return '\n'.join(lines)
+
+
def _print_run_output(rc, out, err, cfg, files):
"""A helper to print return of TestDsIdentify.
@@ -807,6 +844,19 @@ VALID_CFG = {
'dev/vdb': 'pretend iso content for cidata\n',
}
},
+ 'NoCloud-fbsd': {
+ 'ds': 'NoCloud',
+ 'mocks': [
+ MOCK_VIRT_IS_KVM,
+ MOCK_UNAME_IS_FREEBSD,
+ {'name': 'geom', 'ret': 0,
+ 'out': geom_out(
+ [{'DEVNAME': 'vtbd', 'TYPE': 'iso9660', 'LABEL': 'cidata'}])},
+ ],
+ 'files': {
+ '/dev/vtbd': 'pretend iso content for cidata\n',
+ }
+ },
'NoCloudUpper': {
'ds': 'NoCloud',
'mocks': [
@@ -986,6 +1036,13 @@ VALID_CFG = {
'ds': 'Hetzner',
'files': {P_SYS_VENDOR: 'Hetzner\n'},
},
+ 'Hetzner-kenv': {
+ 'ds': 'Hetzner',
+ 'mocks': [
+ MOCK_UNAME_IS_FREEBSD,
+ {'name': 'get_kenv_field', 'ret': 0, 'RET': 'Hetzner'}
+ ],
+ },
'Hetzner-dmidecode': {
'ds': 'Hetzner',
'mocks': [
diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py
index 21011204..b2181992 100644
--- a/tests/unittests/test_handler/test_handler_lxd.py
+++ b/tests/unittests/test_handler/test_handler_lxd.py
@@ -214,7 +214,7 @@ class TestLxdMaybeCleanupDefault(t_help.CiTestCase):
"""deletion of network should occur if create is True."""
cc_lxd.maybe_cleanup_default(
net_name=self.defnet, did_init=True, create=True, attach=False)
- m_lxc.assert_called_once_with(["network", "delete", self.defnet])
+ m_lxc.assert_called_with(["network", "delete", self.defnet])
@mock.patch("cloudinit.config.cc_lxd._lxc")
def test_device_removed_if_attach_true(self, m_lxc):
diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py
index 93b24fdc..4ac49424 100644
--- a/tests/unittests/test_handler/test_handler_power_state.py
+++ b/tests/unittests/test_handler/test_handler_power_state.py
@@ -4,72 +4,102 @@ import sys
from cloudinit.config import cc_power_state_change as psc
+from cloudinit import distros
+from cloudinit import helpers
+
from cloudinit.tests import helpers as t_help
from cloudinit.tests.helpers import mock
class TestLoadPowerState(t_help.TestCase):
+ def setUp(self):
+ super(TestLoadPowerState, self).setUp()
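+ # load_power_state now takes a Distro object rather than a distro
+ # name string, so build an ubuntu distro for the common tests.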
+ cls = distros.fetch('ubuntu')
+ paths = helpers.Paths({})
+ self.dist = cls('ubuntu', {}, paths)
+
def test_no_config(self):
# completely empty config should mean do nothing
- (cmd, _timeout, _condition) = psc.load_power_state({}, 'ubuntu')
+ (cmd, _timeout, _condition) = psc.load_power_state({}, self.dist)
self.assertIsNone(cmd)
def test_irrelevant_config(self):
# no power_state field in config should return None for cmd
(cmd, _timeout, _condition) = psc.load_power_state({'foo': 'bar'},
- 'ubuntu')
+ self.dist)
self.assertIsNone(cmd)
def test_invalid_mode(self):
cfg = {'power_state': {'mode': 'gibberish'}}
- self.assertRaises(TypeError, psc.load_power_state, cfg, 'ubuntu')
+ self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
cfg = {'power_state': {'mode': ''}}
- self.assertRaises(TypeError, psc.load_power_state, cfg, 'ubuntu')
+ self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
def test_empty_mode(self):
cfg = {'power_state': {'message': 'goodbye'}}
- self.assertRaises(TypeError, psc.load_power_state, cfg, 'ubuntu')
+ self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
def test_valid_modes(self):
cfg = {'power_state': {}}
for mode in ('halt', 'poweroff', 'reboot'):
cfg['power_state']['mode'] = mode
- check_lps_ret(psc.load_power_state(cfg, 'ubuntu'), mode=mode)
+ check_lps_ret(psc.load_power_state(cfg, self.dist), mode=mode)
def test_invalid_delay(self):
cfg = {'power_state': {'mode': 'poweroff', 'delay': 'goodbye'}}
- self.assertRaises(TypeError, psc.load_power_state, cfg, 'ubuntu')
+ self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
def test_valid_delay(self):
cfg = {'power_state': {'mode': 'poweroff', 'delay': ''}}
for delay in ("now", "+1", "+30"):
cfg['power_state']['delay'] = delay
- check_lps_ret(psc.load_power_state(cfg, 'ubuntu'))
+ check_lps_ret(psc.load_power_state(cfg, self.dist))
def test_message_present(self):
cfg = {'power_state': {'mode': 'poweroff', 'message': 'GOODBYE'}}
- ret = psc.load_power_state(cfg, 'ubuntu')
- check_lps_ret(psc.load_power_state(cfg, 'ubuntu'))
+ ret = psc.load_power_state(cfg, self.dist)
+ check_lps_ret(psc.load_power_state(cfg, self.dist))
self.assertIn(cfg['power_state']['message'], ret[0])
def test_no_message(self):
# if message is not present, then no argument should be passed for it
cfg = {'power_state': {'mode': 'poweroff'}}
- (cmd, _timeout, _condition) = psc.load_power_state(cfg, 'ubuntu')
+ (cmd, _timeout, _condition) = psc.load_power_state(cfg, self.dist)
self.assertNotIn("", cmd)
- check_lps_ret(psc.load_power_state(cfg, 'ubuntu'))
+ check_lps_ret(psc.load_power_state(cfg, self.dist))
self.assertTrue(len(cmd) == 3)
def test_condition_null_raises(self):
cfg = {'power_state': {'mode': 'poweroff', 'condition': None}}
- self.assertRaises(TypeError, psc.load_power_state, cfg, 'ubuntu')
+ self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
def test_condition_default_is_true(self):
cfg = {'power_state': {'mode': 'poweroff'}}
- _cmd, _timeout, cond = psc.load_power_state(cfg, 'ubuntu')
+ _cmd, _timeout, cond = psc.load_power_state(cfg, self.dist)
self.assertEqual(cond, True)
+ def test_freebsd_poweroff_uses_lowercase_p(self):
+ cls = distros.fetch('freebsd')
+ paths = helpers.Paths({})
+ freebsd = cls('freebsd', {}, paths)
+ cfg = {'power_state': {'mode': 'poweroff'}}
+ ret = psc.load_power_state(cfg, freebsd)
+ self.assertIn('-p', ret[0])
+
+ def test_alpine_delay(self):
+ # alpine takes delay in seconds.
+ cls = distros.fetch('alpine')
+ paths = helpers.Paths({})
+ alpine = cls('alpine', {}, paths)
+ cfg = {'power_state': {'mode': 'poweroff', 'delay': ''}}
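+ # '+N' delays are minutes; Alpine's poweroff wants seconds, so
+ # '+1' becomes 60 and '+30' becomes 1800.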
+ for delay, value in (('now', 0), ("+1", 60), ("+30", 1800)):
+ cfg['power_state']['delay'] = delay
+ ret = psc.load_power_state(cfg, alpine)
+ self.assertEqual('-d', ret[0][1])
+ self.assertEqual(str(value), ret[0][2])
+
class TestCheckCondition(t_help.TestCase):
def cmd_with_exit(self, rc):
diff --git a/tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py b/tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py
new file mode 100644
index 00000000..e13b7793
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py
@@ -0,0 +1,109 @@
+from cloudinit.config import cc_refresh_rmc_and_interface as ccrmci
+
+from cloudinit import util
+
+from cloudinit.tests import helpers as t_help
+from cloudinit.tests.helpers import mock
+
+from textwrap import dedent
+import logging
+
+LOG = logging.getLogger(__name__)
+MPATH = "cloudinit.config.cc_refresh_rmc_and_interface"
+NET_INFO = {
+ 'lo': {'ipv4': [{'ip': '127.0.0.1',
+ 'bcast': '', 'mask': '255.0.0.0',
+ 'scope': 'host'}],
+ 'ipv6': [{'ip': '::1/128',
+ 'scope6': 'host'}], 'hwaddr': '',
+ 'up': 'True'},
+ 'env2': {'ipv4': [{'ip': '8.0.0.19',
+ 'bcast': '8.0.0.255', 'mask': '255.255.255.0',
+ 'scope': 'global'}],
+ 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8220/64',
+ 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:20',
+ 'up': 'True'},
+ 'env3': {'ipv4': [{'ip': '90.0.0.14',
+ 'bcast': '90.0.0.255', 'mask': '255.255.255.0',
+ 'scope': 'global'}],
+ 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8221/64',
+ 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:21',
+ 'up': 'True'},
+ 'env4': {'ipv4': [{'ip': '9.114.23.7',
+ 'bcast': '9.114.23.255', 'mask': '255.255.255.0',
+ 'scope': 'global'}],
+ 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8222/64',
+ 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:22',
+ 'up': 'True'},
+ 'env5': {'ipv4': [],
+ 'ipv6': [{'ip': 'fe80::9c26:c3ff:fea4:62c8/64',
+ 'scope6': 'link'}], 'hwaddr': '42:20:86:df:fa:4c',
+ 'up': 'True'}}
+
+
+class TestRsctNodeFile(t_help.CiTestCase):
+ def test_disable_ipv6_interface(self):
+ """test parsing of iface files."""
+ fname = self.tmp_path("iface-eth5")
+ util.write_file(fname, dedent("""\
+ BOOTPROTO=static
+ DEVICE=eth5
+ HWADDR=42:20:86:df:fa:4c
+ IPV6INIT=yes
+ IPADDR6=fe80::9c26:c3ff:fea4:62c8/64
+ IPV6ADDR=fe80::9c26:c3ff:fea4:62c8/64
+ NM_CONTROLLED=yes
+ ONBOOT=yes
+ STARTMODE=auto
+ TYPE=Ethernet
+ USERCTL=no
+ """))
+
+ ccrmci.disable_ipv6(fname)
+ self.assertEqual(dedent("""\
+ BOOTPROTO=static
+ DEVICE=eth5
+ HWADDR=42:20:86:df:fa:4c
+ ONBOOT=yes
+ STARTMODE=auto
+ TYPE=Ethernet
+ USERCTL=no
+ NM_CONTROLLED=no
+ """), util.load_file(fname))
+
+ @mock.patch(MPATH + '.refresh_rmc')
+ @mock.patch(MPATH + '.restart_network_manager')
+ @mock.patch(MPATH + '.disable_ipv6')
+ @mock.patch(MPATH + '.refresh_ipv6')
+ @mock.patch(MPATH + '.netinfo.netdev_info')
+ @mock.patch(MPATH + '.subp.which')
+ def test_handle(self, m_which, m_netdev_info, m_refresh_ipv6,
+ m_disable_ipv6, m_restart_nm, m_refresh_rmc):
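+ # mock.patch decorators apply bottom-up, so the first mock
+ # parameter corresponds to the bottom decorator (subp.which).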
+ """Basic test of handle."""
+ m_netdev_info.return_value = NET_INFO
+ m_which.return_value = '/opt/rsct/bin/rmcctrl'
+ ccrmci.handle(
+ "refresh_rmc_and_interface", None, None, None, None)
+ self.assertEqual(1, m_netdev_info.call_count)
+ m_refresh_ipv6.assert_called_with('env5')
+ m_disable_ipv6.assert_called_with(
+ '/etc/sysconfig/network-scripts/ifcfg-env5')
+ self.assertEqual(1, m_restart_nm.call_count)
+ self.assertEqual(1, m_refresh_rmc.call_count)
+
+ @mock.patch(MPATH + '.netinfo.netdev_info')
+ def test_find_ipv6(self, m_netdev_info):
+ """find_ipv6_ifaces parses netdev_info returning those with ipv6"""
+ m_netdev_info.return_value = NET_INFO
+ found = ccrmci.find_ipv6_ifaces()
+ self.assertEqual(['env5'], found)
+
+ @mock.patch(MPATH + '.subp.subp')
+ def test_refresh_ipv6(self, m_subp):
+ """refresh_ipv6 should ip down and up the interface."""
+ iface = "myeth0"
+ ccrmci.refresh_ipv6(iface)
+ m_subp.assert_has_calls([
+ mock.call(['ip', 'link', 'set', iface, 'down']),
+ mock.call(['ip', 'link', 'set', iface, 'up'])])
diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/test_handler/test_handler_resizefs.py
index db9a0414..28d55072 100644
--- a/tests/unittests/test_handler/test_handler_resizefs.py
+++ b/tests/unittests/test_handler/test_handler_resizefs.py
@@ -6,8 +6,8 @@ from cloudinit.config.cc_resizefs import (
from collections import namedtuple
import logging
-import textwrap
+from cloudinit.subp import ProcessExecutionError
from cloudinit.tests.helpers import (
CiTestCase, mock, skipUnlessJsonSchema, util, wrap_and_call)
@@ -22,44 +22,41 @@ class TestResizefs(CiTestCase):
super(TestResizefs, self).setUp()
self.name = "resizefs"
- @mock.patch('cloudinit.config.cc_resizefs._get_dumpfs_output')
- @mock.patch('cloudinit.config.cc_resizefs._get_gpart_output')
- def test_skip_ufs_resize(self, gpart_out, dumpfs_out):
+ @mock.patch('cloudinit.subp.subp')
+ def test_skip_ufs_resize(self, m_subp):
fs_type = "ufs"
resize_what = "/"
devpth = "/dev/da0p2"
- dumpfs_out.return_value = (
- "# newfs command for / (/dev/label/rootfs)\n"
- "newfs -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 "
- "-f 4096 -g 16384 -h 64 -i 8192 -j -k 6408 -m 8 "
- "-o time -s 58719232 /dev/label/rootfs\n")
- gpart_out.return_value = textwrap.dedent("""\
- => 40 62914480 da0 GPT (30G)
- 40 1024 1 freebsd-boot (512K)
- 1064 58719232 2 freebsd-ufs (28G)
- 58720296 3145728 3 freebsd-swap (1.5G)
- 61866024 1048496 - free - (512M)
- """)
+ err = ("growfs: requested size 2.0GB is not larger than the "
+ "current filesystem size 2.0GB\n")
+ exception = ProcessExecutionError(stderr=err, exit_code=1)
+ m_subp.side_effect = exception
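+ # growfs failing with "not larger than current size" means there
+ # is nothing to resize, so the resize can be skipped.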
res = can_skip_resize(fs_type, resize_what, devpth)
self.assertTrue(res)
- @mock.patch('cloudinit.config.cc_resizefs._get_dumpfs_output')
- @mock.patch('cloudinit.config.cc_resizefs._get_gpart_output')
- def test_skip_ufs_resize_roundup(self, gpart_out, dumpfs_out):
+ @mock.patch('cloudinit.subp.subp')
+ def test_cannot_skip_ufs_resize(self, m_subp):
fs_type = "ufs"
resize_what = "/"
devpth = "/dev/da0p2"
- dumpfs_out.return_value = (
- "# newfs command for / (/dev/label/rootfs)\n"
- "newfs -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 "
- "-f 4096 -g 16384 -h 64 -i 8192 -j -k 368 -m 8 "
- "-o time -s 297080 /dev/label/rootfs\n")
- gpart_out.return_value = textwrap.dedent("""\
- => 34 297086 da0 GPT (145M)
- 34 297086 1 freebsd-ufs (145M)
- """)
+ m_subp.return_value = (
+ ("stdout: super-block backups (for fsck_ffs -b #) at:\n\n"),
+ ("growfs: no room to allocate last cylinder group; "
+ "leaving 364KB unused\n")
+ )
res = can_skip_resize(fs_type, resize_what, devpth)
- self.assertTrue(res)
+ self.assertFalse(res)
+
+ @mock.patch('cloudinit.subp.subp')
+ def test_cannot_skip_ufs_growfs_exception(self, m_subp):
+ fs_type = "ufs"
+ resize_what = "/"
+ devpth = "/dev/da0p2"
+ err = "growfs: /dev/da0p2 is not clean - run fsck.\n"
+ exception = ProcessExecutionError(stderr=err, exit_code=1)
+ m_subp.side_effect = exception
+ with self.assertRaises(ProcessExecutionError):
+ can_skip_resize(fs_type, resize_what, devpth)
def test_can_skip_resize_ext(self):
self.assertFalse(can_skip_resize('ext', '/', '/dev/sda1'))
diff --git a/tests/unittests/test_handler/test_handler_resizefs_vyos.py b/tests/unittests/test_handler/test_handler_resizefs_vyos.py
index d1fafff7..c18ab1ea 100644
--- a/tests/unittests/test_handler/test_handler_resizefs_vyos.py
+++ b/tests/unittests/test_handler/test_handler_resizefs_vyos.py
@@ -6,8 +6,8 @@ from cloudinit.config.cc_resizefs_vyos import (
from collections import namedtuple
import logging
-import textwrap
+from cloudinit.subp import ProcessExecutionError
from cloudinit.tests.helpers import (
CiTestCase, mock, skipUnlessJsonSchema, util, wrap_and_call)
@@ -22,44 +22,41 @@ class TestResizefs(CiTestCase):
super(TestResizefs, self).setUp()
self.name = "resizefs"
- @mock.patch('cloudinit.config.cc_resizefs_vyos._get_dumpfs_output')
- @mock.patch('cloudinit.config.cc_resizefs_vyos._get_gpart_output')
- def test_skip_ufs_resize(self, gpart_out, dumpfs_out):
+ @mock.patch('cloudinit.subp.subp')
+ def test_skip_ufs_resize(self, m_subp):
fs_type = "ufs"
resize_what = "/"
devpth = "/dev/da0p2"
- dumpfs_out.return_value = (
- "# newfs command for / (/dev/label/rootfs)\n"
- "newfs -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 "
- "-f 4096 -g 16384 -h 64 -i 8192 -j -k 6408 -m 8 "
- "-o time -s 58719232 /dev/label/rootfs\n")
- gpart_out.return_value = textwrap.dedent("""\
- => 40 62914480 da0 GPT (30G)
- 40 1024 1 freebsd-boot (512K)
- 1064 58719232 2 freebsd-ufs (28G)
- 58720296 3145728 3 freebsd-swap (1.5G)
- 61866024 1048496 - free - (512M)
- """)
+ err = ("growfs: requested size 2.0GB is not larger than the "
+ "current filesystem size 2.0GB\n")
+ exception = ProcessExecutionError(stderr=err, exit_code=1)
+ m_subp.side_effect = exception
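+ # growfs failing with "not larger than current size" means there
+ # is nothing to resize, so the resize can be skipped.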
res = can_skip_resize(fs_type, resize_what, devpth)
self.assertTrue(res)
- @mock.patch('cloudinit.config.cc_resizefs_vyos._get_dumpfs_output')
- @mock.patch('cloudinit.config.cc_resizefs_vyos._get_gpart_output')
- def test_skip_ufs_resize_roundup(self, gpart_out, dumpfs_out):
+ @mock.patch('cloudinit.subp.subp')
+ def test_cannot_skip_ufs_resize(self, m_subp):
fs_type = "ufs"
resize_what = "/"
devpth = "/dev/da0p2"
- dumpfs_out.return_value = (
- "# newfs command for / (/dev/label/rootfs)\n"
- "newfs -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 "
- "-f 4096 -g 16384 -h 64 -i 8192 -j -k 368 -m 8 "
- "-o time -s 297080 /dev/label/rootfs\n")
- gpart_out.return_value = textwrap.dedent("""\
- => 34 297086 da0 GPT (145M)
- 34 297086 1 freebsd-ufs (145M)
- """)
+ m_subp.return_value = (
+ ("stdout: super-block backups (for fsck_ffs -b #) at:\n\n"),
+ ("growfs: no room to allocate last cylinder group; "
+ "leaving 364KB unused\n")
+ )
res = can_skip_resize(fs_type, resize_what, devpth)
- self.assertTrue(res)
+ self.assertFalse(res)
+
+ @mock.patch('cloudinit.subp.subp')
+ def test_cannot_skip_ufs_growfs_exception(self, m_subp):
+ fs_type = "ufs"
+ resize_what = "/"
+ devpth = "/dev/da0p2"
+ err = "growfs: /dev/da0p2 is not clean - run fsck.\n"
+ exception = ProcessExecutionError(stderr=err, exit_code=1)
+ m_subp.side_effect = exception
+ with self.assertRaises(ProcessExecutionError):
+ can_skip_resize(fs_type, resize_what, devpth)
def test_can_skip_resize_ext(self):
self.assertFalse(can_skip_resize('ext', '/', '/dev/sda1'))
diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py
index 44292571..15aa77bb 100644
--- a/tests/unittests/test_handler/test_schema.py
+++ b/tests/unittests/test_handler/test_schema.py
@@ -9,9 +9,9 @@ from cloudinit.util import write_file
from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema
from copy import copy
+import itertools
import os
import pytest
-from io import StringIO
from pathlib import Path
from textwrap import dedent
from yaml import safe_load
@@ -400,50 +400,97 @@ class AnnotatedCloudconfigFileTest(CiTestCase):
annotated_cloudconfig_file(parsed_config, content, schema_errors))
-class MainTest(CiTestCase):
+class TestMain:
- def test_main_missing_args(self):
+ exclusive_combinations = itertools.combinations(
+ ["--system", "--docs all", "--config-file something"], 2
+ )
+
+ @pytest.mark.parametrize("params", exclusive_combinations)
+ def test_main_exclusive_args(self, params, capsys):
+ """Main exits non-zero and error on required exclusive args."""
+ params = list(itertools.chain(*[a.split() for a in params]))
+ with mock.patch('sys.argv', ['mycmd'] + params):
+ with pytest.raises(SystemExit) as context_manager:
+ main()
+ assert 1 == context_manager.value.code
+
+ _out, err = capsys.readouterr()
+ expected = (
+ 'Expected one of --config-file, --system or --docs arguments\n'
+ )
+ assert expected == err
+
+ def test_main_missing_args(self, capsys):
"""Main exits non-zero and reports an error on missing parameters."""
with mock.patch('sys.argv', ['mycmd']):
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
- with self.assertRaises(SystemExit) as context_manager:
- main()
- self.assertEqual(1, context_manager.exception.code)
- self.assertEqual(
- 'Expected either --config-file argument or --docs\n',
- m_stderr.getvalue())
+ with pytest.raises(SystemExit) as context_manager:
+ main()
+ assert 1 == context_manager.value.code
+
+ _out, err = capsys.readouterr()
+ expected = (
+ 'Expected one of --config-file, --system or --docs arguments\n'
+ )
+ assert expected == err
- def test_main_absent_config_file(self):
+ def test_main_absent_config_file(self, capsys):
"""Main exits non-zero when config file is absent."""
myargs = ['mycmd', '--annotate', '--config-file', 'NOT_A_FILE']
with mock.patch('sys.argv', myargs):
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
- with self.assertRaises(SystemExit) as context_manager:
- main()
- self.assertEqual(1, context_manager.exception.code)
- self.assertEqual(
- 'Configfile NOT_A_FILE does not exist\n',
- m_stderr.getvalue())
+ with pytest.raises(SystemExit) as context_manager:
+ main()
+ assert 1 == context_manager.value.code
+ _out, err = capsys.readouterr()
+ assert 'Configfile NOT_A_FILE does not exist\n' == err
- def test_main_prints_docs(self):
+ def test_main_prints_docs(self, capsys):
"""When --docs parameter is provided, main generates documentation."""
myargs = ['mycmd', '--docs', 'all']
with mock.patch('sys.argv', myargs):
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- self.assertEqual(0, main(), 'Expected 0 exit code')
- self.assertIn('\nNTP\n---\n', m_stdout.getvalue())
- self.assertIn('\nRuncmd\n------\n', m_stdout.getvalue())
+ assert 0 == main(), 'Expected 0 exit code'
+ out, _err = capsys.readouterr()
+ assert '\nNTP\n---\n' in out
+ assert '\nRuncmd\n------\n' in out
- def test_main_validates_config_file(self):
+ def test_main_validates_config_file(self, tmpdir, capsys):
"""When --config-file parameter is provided, main validates schema."""
- myyaml = self.tmp_path('my.yaml')
- myargs = ['mycmd', '--config-file', myyaml]
- write_file(myyaml, b'#cloud-config\nntp:') # shortest ntp schema
+ myyaml = tmpdir.join('my.yaml')
+ myargs = ['mycmd', '--config-file', myyaml.strpath]
+ myyaml.write(b'#cloud-config\nntp:') # shortest ntp schema
with mock.patch('sys.argv', myargs):
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- self.assertEqual(0, main(), 'Expected 0 exit code')
- self.assertIn(
- 'Valid cloud-config file {0}'.format(myyaml), m_stdout.getvalue())
+ assert 0 == main(), 'Expected 0 exit code'
+ out, _err = capsys.readouterr()
+ assert 'Valid cloud-config: {0}\n'.format(myyaml) == out
+
+ @mock.patch('cloudinit.config.schema.read_cfg_paths')
+ @mock.patch('cloudinit.config.schema.os.getuid', return_value=0)
+ def test_main_validates_system_userdata(
+ self, m_getuid, m_read_cfg_paths, capsys, paths
+ ):
+ """When --system is provided, main validates system userdata."""
+ m_read_cfg_paths.return_value = paths
+ ud_file = paths.get_ipath_cur("userdata_raw")
+ write_file(ud_file, b'#cloud-config\nntp:')
+ myargs = ['mycmd', '--system']
+ with mock.patch('sys.argv', myargs):
+ assert 0 == main(), 'Expected 0 exit code'
+ out, _err = capsys.readouterr()
+ assert 'Valid cloud-config: system userdata\n' == out
+
+ @mock.patch('cloudinit.config.schema.os.getuid', return_value=1000)
+ def test_main_system_userdata_requires_root(self, m_getuid, capsys, paths):
+ """Non-root user can't use --system param"""
+ myargs = ['mycmd', '--system']
+ with mock.patch('sys.argv', myargs):
+ with pytest.raises(SystemExit) as context_manager:
+ main()
+ assert 1 == context_manager.value.code
+ _out, err = capsys.readouterr()
+ expected = (
+ 'Unable to read system userdata as non-root user. Try using sudo\n'
+ )
+ assert expected == err
class CloudTestsIntegrationTest(CiTestCase):
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index 54cc8469..70453683 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -752,7 +752,9 @@ IPADDR=172.19.1.34
IPV6ADDR=2001:DB8::10/64
IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64"
IPV6INIT=yes
+IPV6_AUTOCONF=no
IPV6_DEFAULTGW=2001:DB8::1
+IPV6_FORCE_ACCEPT_RA=no
NETMASK=255.255.252.0
NM_CONTROLLED=no
ONBOOT=yes
@@ -910,7 +912,7 @@ NETWORK_CONFIGS = {
# Physical interfaces.
- type: physical
name: eth99
- mac_address: "c0:d6:9f:2c:e8:80"
+ mac_address: c0:d6:9f:2c:e8:80
subnets:
- type: dhcp4
- type: static
@@ -926,7 +928,7 @@ NETWORK_CONFIGS = {
metric: 10000
- type: physical
name: eth1
- mac_address: "cf:d6:af:48:e8:80"
+ mac_address: cf:d6:af:48:e8:80
- type: nameserver
address:
- 1.2.3.4
@@ -1027,6 +1029,8 @@ NETWORK_CONFIGS = {
IPADDR=192.168.14.2
IPV6ADDR=2001:1::1/64
IPV6INIT=yes
+ IPV6_AUTOCONF=no
+ IPV6_FORCE_ACCEPT_RA=no
NETMASK=255.255.255.0
NM_CONTROLLED=no
ONBOOT=yes
@@ -1253,6 +1257,33 @@ NETWORK_CONFIGS = {
"""),
},
},
+ 'static6': {
+ 'yaml': textwrap.dedent("""\
+ version: 1
+ config:
+ - type: 'physical'
+ name: 'iface0'
+ accept-ra: 'no'
+ subnets:
+ - type: 'static6'
+ address: 2001:1::1/64
+ """).rstrip(' '),
+ 'expected_sysconfig_rhel': {
+ 'ifcfg-iface0': textwrap.dedent("""\
+ BOOTPROTO=none
+ DEVICE=iface0
+ IPV6ADDR=2001:1::1/64
+ IPV6INIT=yes
+ IPV6_AUTOCONF=no
+ IPV6_FORCE_ACCEPT_RA=no
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ },
+ },
'dhcpv6_stateless': {
'expected_eni': textwrap.dedent("""\
auto lo
@@ -1347,6 +1378,89 @@ NETWORK_CONFIGS = {
"""),
},
},
+ 'wakeonlan_disabled': {
+ 'expected_eni': textwrap.dedent("""\
+ auto lo
+ iface lo inet loopback
+
+ auto iface0
+ iface iface0 inet dhcp
+ """).rstrip(' '),
+ 'expected_netplan': textwrap.dedent("""
+ network:
+ ethernets:
+ iface0:
+ dhcp4: true
+ wakeonlan: false
+ version: 2
+ """),
+ 'expected_sysconfig_opensuse': {
+ 'ifcfg-iface0': textwrap.dedent("""\
+ BOOTPROTO=dhcp4
+ STARTMODE=auto
+ """),
+ },
+ 'expected_sysconfig_rhel': {
+ 'ifcfg-iface0': textwrap.dedent("""\
+ BOOTPROTO=dhcp
+ DEVICE=iface0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ },
+ 'yaml_v2': textwrap.dedent("""\
+ version: 2
+ ethernets:
+ iface0:
+ dhcp4: true
+ wakeonlan: false
+ """).rstrip(' '),
+ },
+ 'wakeonlan_enabled': {
+ 'expected_eni': textwrap.dedent("""\
+ auto lo
+ iface lo inet loopback
+
+ auto iface0
+ iface iface0 inet dhcp
+ ethernet-wol g
+ """).rstrip(' '),
+ 'expected_netplan': textwrap.dedent("""
+ network:
+ ethernets:
+ iface0:
+ dhcp4: true
+ wakeonlan: true
+ version: 2
+ """),
+ 'expected_sysconfig_opensuse': {
+ 'ifcfg-iface0': textwrap.dedent("""\
+ BOOTPROTO=dhcp4
+ ETHTOOL_OPTS="wol g"
+ STARTMODE=auto
+ """),
+ },
+ 'expected_sysconfig_rhel': {
+ 'ifcfg-iface0': textwrap.dedent("""\
+ BOOTPROTO=dhcp
+ DEVICE=iface0
+ ETHTOOL_OPTS="wol g"
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ },
+ 'yaml_v2': textwrap.dedent("""\
+ version: 2
+ ethernets:
+ iface0:
+ dhcp4: true
+ wakeonlan: true
+ """).rstrip(' '),
+ },
'all': {
'expected_eni': ("""\
auto lo
@@ -1633,7 +1747,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
NM_CONTROLLED=no
ONBOOT=yes
PHYSDEV=bond0
- TYPE=Ethernet
USERCTL=no
VLAN=yes"""),
'ifcfg-br0': textwrap.dedent("""\
@@ -1644,6 +1757,8 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
IPADDR=192.168.14.2
IPV6ADDR=2001:1::1/64
IPV6INIT=yes
+ IPV6_AUTOCONF=no
+ IPV6_FORCE_ACCEPT_RA=no
IPV6_DEFAULTGW=2001:4800:78ff:1b::1
MACADDR=bb:bb:bb:bb:bb:aa
NETMASK=255.255.255.0
@@ -1677,7 +1792,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
NM_CONTROLLED=no
ONBOOT=yes
PHYSDEV=eth0
- TYPE=Ethernet
USERCTL=no
VLAN=yes"""),
'ifcfg-eth1': textwrap.dedent("""\
@@ -1745,26 +1859,26 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
# Physical interfaces.
- type: physical
name: eth0
- mac_address: "c0:d6:9f:2c:e8:80"
+ mac_address: c0:d6:9f:2c:e8:80
- type: physical
name: eth1
- mac_address: "aa:d6:9f:2c:e8:80"
+ mac_address: aa:d6:9f:2c:e8:80
- type: physical
name: eth2
- mac_address: "c0:bb:9f:2c:e8:80"
+ mac_address: c0:bb:9f:2c:e8:80
- type: physical
name: eth3
- mac_address: "66:bb:9f:2c:e8:80"
+ mac_address: 66:bb:9f:2c:e8:80
- type: physical
name: eth4
- mac_address: "98:bb:9f:2c:e8:80"
+ mac_address: 98:bb:9f:2c:e8:80
# specify how ifupdown should treat iface
# control is one of ['auto', 'hotplug', 'manual']
# with manual meaning ifup/ifdown should not affect the iface
# useful for things like iscsi root + dhcp
- type: physical
name: eth5
- mac_address: "98:bb:9f:2c:e8:8a"
+ mac_address: 98:bb:9f:2c:e8:8a
subnets:
- type: dhcp
control: manual
@@ -1795,7 +1909,7 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
name: bond0
# if 'mac_address' is omitted, the MAC is taken from
# the first slave.
- mac_address: "aa:bb:cc:dd:ee:ff"
+ mac_address: aa:bb:cc:dd:ee:ff
bond_interfaces:
- eth1
- eth2
@@ -1890,13 +2004,13 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
config:
- type: physical
name: bond0s0
- mac_address: "aa:bb:cc:dd:e8:00"
+ mac_address: aa:bb:cc:dd:e8:00
- type: physical
name: bond0s1
- mac_address: "aa:bb:cc:dd:e8:01"
+ mac_address: aa:bb:cc:dd:e8:01
- type: bond
name: bond0
- mac_address: "aa:bb:cc:dd:e8:ff"
+ mac_address: aa:bb:cc:dd:e8:ff
mtu: 9000
bond_interfaces:
- bond0s0
@@ -2044,12 +2158,12 @@ iface bond0 inet6 static
eth0:
match:
driver: "virtio_net"
- macaddress: "aa:bb:cc:dd:e8:00"
+ macaddress: aa:bb:cc:dd:e8:00
vf0:
set-name: vf0
match:
driver: "e1000"
- macaddress: "aa:bb:cc:dd:e8:01"
+ macaddress: aa:bb:cc:dd:e8:01
bonds:
bond0:
addresses:
@@ -2174,6 +2288,8 @@ iface bond0 inet6 static
IPADDR1=192.168.1.2
IPV6ADDR=2001:1::1/92
IPV6INIT=yes
+ IPV6_AUTOCONF=no
+ IPV6_FORCE_ACCEPT_RA=no
MTU=9000
NETMASK=255.255.255.0
NETMASK1=255.255.255.0
@@ -2223,7 +2339,7 @@ iface bond0 inet6 static
config:
- type: physical
name: en0
- mac_address: "aa:bb:cc:dd:e8:00"
+ mac_address: aa:bb:cc:dd:e8:00
- type: vlan
mtu: 2222
name: en0.99
@@ -2279,6 +2395,8 @@ iface bond0 inet6 static
IPADDR1=192.168.1.2
IPV6ADDR=2001:1::bbbb/96
IPV6INIT=yes
+ IPV6_AUTOCONF=no
+ IPV6_FORCE_ACCEPT_RA=no
IPV6_DEFAULTGW=2001:1::1
MTU=2222
NETMASK=255.255.255.0
@@ -2286,7 +2404,6 @@ iface bond0 inet6 static
NM_CONTROLLED=no
ONBOOT=yes
PHYSDEV=en0
- TYPE=Ethernet
USERCTL=no
VLAN=yes"""),
},
@@ -2297,13 +2414,13 @@ iface bond0 inet6 static
config:
- type: physical
name: eth0
- mac_address: "52:54:00:12:34:00"
+ mac_address: '52:54:00:12:34:00'
subnets:
- type: static
address: 2001:1::100/96
- type: physical
name: eth1
- mac_address: "52:54:00:12:34:01"
+ mac_address: '52:54:00:12:34:01'
subnets:
- type: static
address: 2001:1::101/96
@@ -2363,6 +2480,8 @@ iface bond0 inet6 static
HWADDR=52:54:00:12:34:00
IPV6ADDR=2001:1::100/96
IPV6INIT=yes
+ IPV6_AUTOCONF=no
+ IPV6_FORCE_ACCEPT_RA=no
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
@@ -2375,6 +2494,8 @@ iface bond0 inet6 static
HWADDR=52:54:00:12:34:01
IPV6ADDR=2001:1::101/96
IPV6INIT=yes
+ IPV6_AUTOCONF=no
+ IPV6_FORCE_ACCEPT_RA=no
NM_CONTROLLED=no
ONBOOT=yes
TYPE=Ethernet
@@ -2388,7 +2509,7 @@ iface bond0 inet6 static
config:
- type: physical
name: eth0
- mac_address: "52:54:00:12:34:00"
+ mac_address: '52:54:00:12:34:00'
subnets:
- type: static
address: 192.168.1.2/24
@@ -2396,12 +2517,12 @@ iface bond0 inet6 static
- type: physical
name: eth1
mtu: 1480
- mac_address: "52:54:00:12:34:aa"
+ mac_address: 52:54:00:12:34:aa
subnets:
- type: manual
- type: physical
name: eth2
- mac_address: "52:54:00:12:34:ff"
+ mac_address: 52:54:00:12:34:ff
subnets:
- type: manual
control: manual
@@ -3181,6 +3302,61 @@ USERCTL=no
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
+ def test_static6_from_json(self):
+ net_json = {
+ "services": [{"type": "dns", "address": "172.19.0.12"}],
+ "networks": [{
+ "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
+ "type": "ipv4", "netmask": "255.255.252.0",
+ "link": "tap1a81968a-79",
+ "routes": [{
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": "172.19.3.254",
+ }, {
+ "netmask": "0.0.0.0", # A second default gateway
+ "network": "0.0.0.0",
+ "gateway": "172.20.3.254",
+ }],
+ "ip_address": "172.19.1.34", "id": "network0"
+ }, {
+ "network_id": "mgmt",
+ "netmask": "ffff:ffff:ffff:ffff::",
+ "link": "interface1",
+ "mode": "link-local",
+ "routes": [],
+ "ip_address": "fe80::c096:67ff:fe5c:6e84",
+ "type": "static6",
+ "id": "network1",
+ "services": [],
+ "accept-ra": "false"
+ }],
+ "links": [
+ {
+ "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+ "mtu": None, "type": "bridge", "id":
+ "tap1a81968a-79",
+ "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+ },
+ ],
+ }
+ macs = {'fa:16:3e:ed:9a:59': 'eth0'}
+ render_dir = self.tmp_dir()
+ network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
+ ns = network_state.parse_net_config_data(network_cfg,
+ skip_broken=False)
+ renderer = self._get_renderer()
+ with self.assertRaises(ValueError):
+ renderer.render_network_state(ns, target=render_dir)
+ self.assertEqual([], os.listdir(render_dir))
+
+ def test_static6_from_yaml(self):
+ entry = NETWORK_CONFIGS['static6']
+ found = self._render_and_read(network_config=yaml.load(
+ entry['yaml']))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+ self._assert_headers(found)
+
def test_dhcpv6_reject_ra_config_v2(self):
entry = NETWORK_CONFIGS['dhcpv6_reject_ra']
found = self._render_and_read(network_config=yaml.load(
@@ -3200,6 +3376,20 @@ USERCTL=no
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
+ def test_wakeonlan_disabled_config_v2(self):
+ entry = NETWORK_CONFIGS['wakeonlan_disabled']
+ found = self._render_and_read(network_config=yaml.load(
+ entry['yaml_v2']))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+ self._assert_headers(found)
+
+ def test_wakeonlan_enabled_config_v2(self):
+ entry = NETWORK_CONFIGS['wakeonlan_enabled']
+ found = self._render_and_read(network_config=yaml.load(
+ entry['yaml_v2']))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+ self._assert_headers(found)
+
def test_check_ifcfg_rh(self):
"""ifcfg-rh plugin is added NetworkManager.conf if conf present."""
render_dir = self.tmp_dir()
@@ -3298,6 +3488,8 @@ USERCTL=no
IPADDR=192.168.42.100
IPV6ADDR=2001:db8::100/32
IPV6INIT=yes
+ IPV6_AUTOCONF=no
+ IPV6_FORCE_ACCEPT_RA=no
IPV6_DEFAULTGW=2001:db8::1
NETMASK=255.255.255.0
NM_CONTROLLED=no
@@ -3339,7 +3531,6 @@ USERCTL=no
NM_CONTROLLED=no
ONBOOT=yes
PHYSDEV=eno1
- TYPE=Ethernet
USERCTL=no
VLAN=yes
""")
@@ -3735,6 +3926,20 @@ STARTMODE=auto
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
+ def test_wakeonlan_disabled_config_v2(self):
+ entry = NETWORK_CONFIGS['wakeonlan_disabled']
+ found = self._render_and_read(network_config=yaml.load(
+ entry['yaml_v2']))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+ self._assert_headers(found)
+
+ def test_wakeonlan_enabled_config_v2(self):
+ entry = NETWORK_CONFIGS['wakeonlan_enabled']
+ found = self._render_and_read(network_config=yaml.load(
+ entry['yaml_v2']))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+ self._assert_headers(found)
+
def test_render_v4_and_v6(self):
entry = NETWORK_CONFIGS['v4_and_v6']
found = self._render_and_read(network_config=yaml.load(entry['yaml']))
@@ -4384,6 +4589,22 @@ class TestNetplanRoundTrip(CiTestCase):
entry['expected_netplan'].splitlines(),
files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+ def testsimple_wakeonlan_disabled_config_v2(self):
+ entry = NETWORK_CONFIGS['wakeonlan_disabled']
+ files = self._render_and_read(network_config=yaml.load(
+ entry['yaml_v2']))
+ self.assertEqual(
+ entry['expected_netplan'].splitlines(),
+ files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+
+ def testsimple_wakeonlan_enabled_config_v2(self):
+ entry = NETWORK_CONFIGS['wakeonlan_enabled']
+ files = self._render_and_read(network_config=yaml.load(
+ entry['yaml_v2']))
+ self.assertEqual(
+ entry['expected_netplan'].splitlines(),
+ files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+
def testsimple_render_all(self):
entry = NETWORK_CONFIGS['all']
files = self._render_and_read(network_config=yaml.load(entry['yaml']))
@@ -4551,6 +4772,22 @@ class TestEniRoundTrip(CiTestCase):
entry['expected_eni'].splitlines(),
files['/etc/network/interfaces'].splitlines())
+ def testsimple_wakeonlan_disabled_config_v2(self):
+ entry = NETWORK_CONFIGS['wakeonlan_disabled']
+ files = self._render_and_read(network_config=yaml.load(
+ entry['yaml_v2']))
+ self.assertEqual(
+ entry['expected_eni'].splitlines(),
+ files['/etc/network/interfaces'].splitlines())
+
+ def testsimple_wakeonlan_enabled_config_v2(self):
+ entry = NETWORK_CONFIGS['wakeonlan_enabled']
+ files = self._render_and_read(network_config=yaml.load(
+ entry['yaml_v2']))
+ self.assertEqual(
+ entry['expected_eni'].splitlines(),
+ files['/etc/network/interfaces'].splitlines())
+
def testsimple_render_manual(self):
"""Test rendering of 'manual' for 'type' and 'control'.
diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py
index 47ede670..9324b78d 100644
--- a/tests/unittests/test_reporting_hyperv.py
+++ b/tests/unittests/test_reporting_hyperv.py
@@ -188,18 +188,34 @@ class TextKvpReporter(CiTestCase):
if not re.search("variant=" + pattern, evt_msg):
raise AssertionError("missing distro variant string")
- def test_report_diagnostic_event(self):
+ def test_report_diagnostic_event_without_logger_func(self):
reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
+ diagnostic_msg = "test_diagnostic"
+ reporter.publish_event(
+ azure.report_diagnostic_event(diagnostic_msg))
+ reporter.q.join()
+ kvps = list(reporter._iterate_kvps(0))
+ self.assertEqual(1, len(kvps))
+ evt_msg = kvps[0]['value']
+
+ if diagnostic_msg not in evt_msg:
+ raise AssertionError("missing expected diagnostic message")
+ def test_report_diagnostic_event_with_logger_func(self):
+ reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
+ logger_func = mock.MagicMock()
+ diagnostic_msg = "test_diagnostic"
reporter.publish_event(
- azure.report_diagnostic_event("test_diagnostic"))
+ azure.report_diagnostic_event(diagnostic_msg,
+ logger_func=logger_func))
reporter.q.join()
kvps = list(reporter._iterate_kvps(0))
self.assertEqual(1, len(kvps))
evt_msg = kvps[0]['value']
- if "test_diagnostic" not in evt_msg:
+ if diagnostic_msg not in evt_msg:
raise AssertionError("missing expected diagnostic message")
+ logger_func.assert_called_once_with(diagnostic_msg)
def test_report_compressed_event(self):
reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
@@ -214,14 +230,39 @@ class TextKvpReporter(CiTestCase):
instantiated_handler_registry.unregister_item("telemetry",
force=False)
+ @mock.patch('cloudinit.sources.helpers.azure.report_compressed_event')
+ @mock.patch('cloudinit.sources.helpers.azure.report_diagnostic_event')
+ @mock.patch('cloudinit.subp.subp')
+ def test_push_log_to_kvp_exception_handling(self, m_subp, m_diag, m_com):
+ reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
+ try:
+ instantiated_handler_registry.register_item("telemetry", reporter)
+ log_file = self.tmp_path("cloud-init.log")
+ azure.MAX_LOG_TO_KVP_LENGTH = 100
+ azure.LOG_PUSHED_TO_KVP_INDEX_FILE = self.tmp_path(
+ 'log_pushed_to_kvp')
+ with open(log_file, "w") as f:
+ log_content = "A" * 50 + "B" * 100
+ f.write(log_content)
+
+ m_com.side_effect = Exception("Mock Exception")
+ azure.push_log_to_kvp(log_file)
+
+ # exceptions will trigger diagnostic reporting calls
+ self.assertEqual(m_diag.call_count, 3)
+ finally:
+ instantiated_handler_registry.unregister_item("telemetry",
+ force=False)
+
+ @mock.patch('cloudinit.subp.subp')
@mock.patch.object(LogHandler, 'publish_event')
- def test_push_log_to_kvp(self, publish_event):
+ def test_push_log_to_kvp(self, publish_event, m_subp):
reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path)
try:
instantiated_handler_registry.register_item("telemetry", reporter)
log_file = self.tmp_path("cloud-init.log")
azure.MAX_LOG_TO_KVP_LENGTH = 100
- azure.LOG_PUSHED_TO_KVP_MARKER_FILE = self.tmp_path(
+ azure.LOG_PUSHED_TO_KVP_INDEX_FILE = self.tmp_path(
'log_pushed_to_kvp')
with open(log_file, "w") as f:
log_content = "A" * 50 + "B" * 100
@@ -233,13 +274,18 @@ class TextKvpReporter(CiTestCase):
f.write(extra_content)
            azure.push_log_to_kvp(log_file)

+            # make sure dmesg is called every time
+ m_subp.assert_called_with(
+ ['dmesg'], capture=True, decode=False)
+
for call_arg in publish_event.call_args_list:
event = call_arg[0][0]
self.assertNotEqual(
event.event_type, azure.COMPRESSED_EVENT_TYPE)
self.validate_compressed_kvps(
- reporter, 1,
- [log_content[-azure.MAX_LOG_TO_KVP_LENGTH:].encode()])
+ reporter, 2,
+ [log_content[-azure.MAX_LOG_TO_KVP_LENGTH:].encode(),
+ extra_content.encode()])
finally:
instantiated_handler_registry.unregister_item("telemetry",
force=False)
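# Two behavioural changes are visible in this file: push_log_to_kvp now
# runs ['dmesg'] via subp on every call, and the marker file became an
# *index* file, with validate_compressed_kvps expecting two payloads (the
# old log tail plus only the newly appended content). That suggests a
# byte offset is persisted between runs; a rough sketch of such
# bookkeeping (file names and helper are hypothetical, not the real code):
import os

def unseen_log_tail(log_file, index_file):
    """Return log bytes appended since the offset saved in index_file."""
    offset = 0
    if os.path.exists(index_file):
        with open(index_file) as f:
            offset = int(f.read().strip() or 0)
    with open(log_file, "rb") as f:
        f.seek(offset)
        tail = f.read()
    with open(index_file, "w") as f:
        f.write(str(offset + len(tail)))  # remember how far we shipped
    return tail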
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
index fd1d1bac..88a111e3 100644
--- a/tests/unittests/test_sshutil.py
+++ b/tests/unittests/test_sshutil.py
@@ -593,7 +593,7 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
fpw.pw_name, sshd_config)
        content = ssh_util.update_authorized_keys(auth_key_entries, [])

-        self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn)
+ self.assertEqual(authorized_keys, auth_key_fn)
self.assertTrue(VALID_CONTENT['rsa'] in content)
self.assertTrue(VALID_CONTENT['dsa'] in content)
@@ -610,7 +610,7 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
sshd_config = self.tmp_path('sshd_config')
util.write_file(
sshd_config,
- "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys)
+ "AuthorizedKeysFile %s %s" % (user_keys, authorized_keys)
)
(auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
@@ -618,7 +618,7 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
)
        content = ssh_util.update_authorized_keys(auth_key_entries, [])

-        self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn)
+ self.assertEqual(user_keys, auth_key_fn)
self.assertTrue(VALID_CONTENT['rsa'] in content)
self.assertTrue(VALID_CONTENT['dsa'] in content)
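# The assertion changes above show extract_authorized_keys now honouring
# the order of paths in sshd_config's AuthorizedKeysFile option: the file
# written to is the first listed path, not a hard-coded
# ~/.ssh/authorized_keys. A toy illustration of that selection rule (not
# the real parser, which also expands tokens such as %h and %u):
def first_authorized_keys_file(sshd_config_line, home):
    """Return the first AuthorizedKeysFile path, resolved against home."""
    _option, *paths = sshd_config_line.split()
    first = paths[0]
    if not first.startswith("/"):
        first = home + "/" + first  # relative paths live under $HOME
    return first

line = "AuthorizedKeysFile .ssh/keys2 .ssh/authorized_keys"
assert first_authorized_keys_file(line, "/home/bobby") == \
    "/home/bobby/.ssh/keys2"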
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index fc557469..857629f1 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -492,129 +492,6 @@ class TestIsX86(helpers.CiTestCase):
        self.assertTrue(util.is_x86())


-class TestReadDMIData(helpers.FilesystemMockingTestCase):
-
- def setUp(self):
- super(TestReadDMIData, self).setUp()
- self.new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, self.new_root)
- self.patchOS(self.new_root)
- self.patchUtils(self.new_root)
- p = mock.patch("cloudinit.util.is_container", return_value=False)
- self.addCleanup(p.stop)
- self._m_is_container = p.start()
-
- def _create_sysfs_parent_directory(self):
- util.ensure_dir(os.path.join('sys', 'class', 'dmi', 'id'))
-
- def _create_sysfs_file(self, key, content):
- """Mocks the sys path found on Linux systems."""
- self._create_sysfs_parent_directory()
- dmi_key = "/sys/class/dmi/id/{0}".format(key)
- util.write_file(dmi_key, content)
-
- def _configure_dmidecode_return(self, key, content, error=None):
- """
- In order to test a missing sys path and call outs to dmidecode, this
- function fakes the results of dmidecode to test the results.
- """
- def _dmidecode_subp(cmd):
- if cmd[-1] != key:
- raise subp.ProcessExecutionError()
- return (content, error)
-
- self.patched_funcs.enter_context(
- mock.patch("cloudinit.subp.which", side_effect=lambda _: True))
- self.patched_funcs.enter_context(
- mock.patch("cloudinit.subp.subp", side_effect=_dmidecode_subp))
-
- def patch_mapping(self, new_mapping):
- self.patched_funcs.enter_context(
- mock.patch('cloudinit.util.DMIDECODE_TO_DMI_SYS_MAPPING',
- new_mapping))
-
- def test_sysfs_used_with_key_in_mapping_and_file_on_disk(self):
- self.patch_mapping({'mapped-key': 'mapped-value'})
- expected_dmi_value = 'sys-used-correctly'
- self._create_sysfs_file('mapped-value', expected_dmi_value)
- self._configure_dmidecode_return('mapped-key', 'wrong-wrong-wrong')
- self.assertEqual(expected_dmi_value, util.read_dmi_data('mapped-key'))
-
- def test_dmidecode_used_if_no_sysfs_file_on_disk(self):
- self.patch_mapping({})
- self._create_sysfs_parent_directory()
- expected_dmi_value = 'dmidecode-used'
- self._configure_dmidecode_return('use-dmidecode', expected_dmi_value)
- with mock.patch("cloudinit.util.os.uname") as m_uname:
- m_uname.return_value = ('x-sysname', 'x-nodename',
- 'x-release', 'x-version', 'x86_64')
- self.assertEqual(expected_dmi_value,
- util.read_dmi_data('use-dmidecode'))
-
- def test_dmidecode_not_used_on_arm(self):
- self.patch_mapping({})
- print("current =%s", subp)
- self._create_sysfs_parent_directory()
- dmi_val = 'from-dmidecode'
- dmi_name = 'use-dmidecode'
- self._configure_dmidecode_return(dmi_name, dmi_val)
- print("now =%s", subp)
-
- expected = {'armel': None, 'aarch64': dmi_val, 'x86_64': dmi_val}
- found = {}
- # we do not run the 'dmi-decode' binary on some arches
- # verify that anything requested that is not in the sysfs dir
- # will return None on those arches.
- with mock.patch("cloudinit.util.os.uname") as m_uname:
- for arch in expected:
- m_uname.return_value = ('x-sysname', 'x-nodename',
- 'x-release', 'x-version', arch)
- print("now2 =%s", subp)
- found[arch] = util.read_dmi_data(dmi_name)
- self.assertEqual(expected, found)
-
- def test_none_returned_if_neither_source_has_data(self):
- self.patch_mapping({})
- self._configure_dmidecode_return('key', 'value')
- self.assertIsNone(util.read_dmi_data('expect-fail'))
-
- def test_none_returned_if_dmidecode_not_in_path(self):
- self.patched_funcs.enter_context(
- mock.patch.object(subp, 'which', lambda _: False))
- self.patch_mapping({})
- self.assertIsNone(util.read_dmi_data('expect-fail'))
-
- def test_dots_returned_instead_of_foxfox(self):
- # uninitialized dmi values show as \xff, return those as .
- my_len = 32
- dmi_value = b'\xff' * my_len + b'\n'
- expected = ""
- dmi_key = 'system-product-name'
- sysfs_key = 'product_name'
- self._create_sysfs_file(sysfs_key, dmi_value)
- self.assertEqual(expected, util.read_dmi_data(dmi_key))
-
- def test_container_returns_none(self):
- """In a container read_dmi_data should always return None."""
-
- # first verify we get the value if not in container
- self._m_is_container.return_value = False
- key, val = ("system-product-name", "my_product")
- self._create_sysfs_file('product_name', val)
- self.assertEqual(val, util.read_dmi_data(key))
-
- # then verify in container returns None
- self._m_is_container.return_value = True
- self.assertIsNone(util.read_dmi_data(key))
-
- def test_container_returns_none_on_unknown(self):
- """In a container even bogus keys return None."""
- self._m_is_container.return_value = True
- self._create_sysfs_file('product_name', "should-be-ignored")
- self.assertIsNone(util.read_dmi_data("bogus"))
- self.assertIsNone(util.read_dmi_data("system-product-name"))
-
-
class TestGetConfigLogfiles(helpers.CiTestCase):
def test_empty_cfg_returns_empty_list(self):
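# TestReadDMIData is deleted wholesale above, presumably because the DMI
# helpers moved out of cloudinit.util (their new home is not shown in this
# diff). For reference, the lookup precedence the removed tests documented
# was: (1) inside a container, always return None; (2) read
# /sys/class/dmi/id/<mapped-key> when that file exists; (3) fall back to
# calling dmidecode, except on arches such as armel where the binary is
# never run. A condensed sketch of that precedence (names hypothetical):
import os

def read_dmi_data_sketch(sysfs_path, in_container, arch, run_dmidecode):
    """Resolve a DMI value using the order the removed tests covered."""
    if in_container:
        return None
    if os.path.exists(sysfs_path):
        with open(sysfs_path) as f:
            return f.read().strip()
    if arch in ('armel',):  # no dmidecode call-out on these arches
        return None
    return run_dmidecode()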
@@ -735,13 +612,35 @@ class TestReadSeeded(helpers.TestCase):
def test_unicode_not_messed_up(self):
ud = b"userdatablob"
+ vd = b"vendordatablob"
+ helpers.populate_dir(
+ self.tmp, {'meta-data': "key1: val1", 'user-data': ud,
+ 'vendor-data': vd})
+ sdir = self.tmp + os.path.sep
+ (found_md, found_ud, found_vd) = util.read_seeded(sdir)
+
+ self.assertEqual(found_md, {'key1': 'val1'})
+ self.assertEqual(found_ud, ud)
+ self.assertEqual(found_vd, vd)
+
+
+class TestReadSeededWithoutVendorData(helpers.TestCase):
+ def setUp(self):
+ super(TestReadSeededWithoutVendorData, self).setUp()
+ self.tmp = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tmp)
+
+ def test_unicode_not_messed_up(self):
+ ud = b"userdatablob"
+ vd = None
helpers.populate_dir(
self.tmp, {'meta-data': "key1: val1", 'user-data': ud})
sdir = self.tmp + os.path.sep
- (found_md, found_ud) = util.read_seeded(sdir)
+ (found_md, found_ud, found_vd) = util.read_seeded(sdir)
self.assertEqual(found_md, {'key1': 'val1'})
self.assertEqual(found_ud, ud)
+        self.assertEqual(found_vd, vd)


class TestEncode(helpers.TestCase):
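# util.read_seeded now returns a (metadata, user-data, vendor-data)
# triple, with vendor-data None when the seed directory lacks a
# 'vendor-data' file. A self-contained sketch of the seed layout the
# tests above build (paths are temporary, contents illustrative):
import os
import tempfile

seed = tempfile.mkdtemp()
for name, body in [('meta-data', 'key1: val1'),
                   ('user-data', 'userdatablob'),
                   ('vendor-data', 'vendordatablob')]:
    with open(os.path.join(seed, name), 'w') as f:
        f.write(body)
# callers then unpack all three values, e.g.:
#   found_md, found_ud, found_vd = util.read_seeded(seed + os.path.sep)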