From 089a307db1fc572461eea1589f1876132c058311 Mon Sep 17 00:00:00 2001
From: Paride Legovini
Date: Tue, 21 Sep 2021 22:28:30 +0200
Subject: tox: bump the pinned flake8 and pylint version (#1029)

* pylint: fix W1406 (redundant-u-string-prefix)
  The u prefix for strings is no longer necessary in Python >=3.0.

* pylint: disable W1514 (unspecified-encoding)
  The new warning stems from https://www.python.org/dev/peps/pep-0597/
  (Python 3.10), which says:

    Developers using macOS or Linux may forget that the default encoding
    is not always UTF-8. [...] Even Python experts may assume that the
    default encoding is UTF-8. This creates bugs that only happen on
    Windows.

  The warning could be fixed by always specifying encoding='utf-8',
  however we should be careful not to break environments which are not
  utf-8 (or explicitly state that only utf-8 is supported). Let's
  silence the warning for now.

* _quick_read_instance_id: cover the case where load_yaml() returns None
  Spotted by pylint:
  - E1135 (unsupported-membership-test)
  - E1136 (unsubscriptable-object)

LP: #1944414
---
 cloudinit/tests/test_subp.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'cloudinit/tests/test_subp.py')

diff --git a/cloudinit/tests/test_subp.py b/cloudinit/tests/test_subp.py
index 911c1f3d..515d5d64 100644
--- a/cloudinit/tests/test_subp.py
+++ b/cloudinit/tests/test_subp.py
@@ -91,8 +91,8 @@ class TestSubp(CiTestCase):
         tmp_file = self.tmp_path('test.out')
         cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file)
         (out, _err) = subp.subp(cmd.encode('utf-8'), shell=True)
-        self.assertEqual(u'', out)
-        self.assertEqual(u'', _err)
+        self.assertEqual('', out)
+        self.assertEqual('', _err)
         self.assertEqual('HI MOM\n', util.load_file(tmp_file))

     def test_subp_handles_strings(self):
@@ -100,8 +100,8 @@
         tmp_file = self.tmp_path('test.out')
         cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file)
         (out, _err) = subp.subp(cmd, shell=True)
-        self.assertEqual(u'', out)
-        self.assertEqual(u'', _err)
+        self.assertEqual('', out)
+        self.assertEqual('', _err)
         self.assertEqual('HI MOM\n', util.load_file(tmp_file))

     def test_subp_handles_utf8(self):
-- cgit v1.2.3
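As a minimal illustration of the W1514 trade-off described in the patch above (this snippet is illustrative, not from the cloud-init tree), the warning fires on any open() call that omits an encoding, and can be quieted either per call site or, as the patch does, globally in the pylint configuration:

    import os
    import tempfile

    fd, path = tempfile.mkstemp()
    os.close(fd)

    # W1514: the default encoding is platform-dependent (e.g. cp1252 on
    # Windows), so an open() without an explicit encoding is flagged.
    with open(path, 'w') as f:  # pylint: disable=unspecified-encoding
        f.write('data')

    # The warning-free form pins the encoding explicitly, but hard-coding
    # utf-8 could break environments whose locale encoding is not utf-8:
    with open(path, 'w', encoding='utf-8') as f:
        f.write('data')

    os.remove(path)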
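Similarly, a hedged sketch of the None handling that the last bullet describes; yaml.safe_load() stands in for cloudinit's util.load_yaml(), and the function body is illustrative rather than the actual implementation:

    import yaml

    def _quick_read_instance_id(blob):
        # safe_load('') returns None, and "'instance-id' in None" raises
        # TypeError, which pylint reports as E1135/E1136. Guard the result
        # before membership tests and subscription.
        data = yaml.safe_load(blob)
        if data and 'instance-id' in data:
            return data['instance-id']
        return None

    assert _quick_read_instance_id('') is None
    assert _quick_read_instance_id('instance-id: iid-local01') == 'iid-local01'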
From 039c40f9b3d88ee8158604bb18ca4bf2fb5d5e51 Mon Sep 17 00:00:00 2001
From: Brett Holman
Date: Fri, 3 Dec 2021 13:11:46 -0700
Subject: Reorganize unit test locations under tests/unittests (#1126)

This attempts to standardize unit test file location under
tests/unittests/ such that any source file located at
cloudinit/path/to/file.py may have a corresponding unit test file at
tests/unittests/path/to/test_file.py.

Noteworthy Comments:
====================

Four different duplicate test files existed:
test_{gpg,util,cc_mounts,cc_resolv_conf}.py
Each of these duplicate file pairs has been merged together. This is a
break in git history for these files.

The test suite appears to have a dependency on test order: changing the
test order causes some tests to fail. This should be rectified, but for
now some tests have been modified in
tests/unittests/config/test_set_passwords.py.

A helper class name starting with "Test" causes pytest to try collecting
it as a test case, which then throws warnings "due to Class having
__init__()". Silenced by renaming the class (see the sketch below).

# helpers.py is imported in many test files, import paths change
cloudinit/tests/helpers.py -> tests/unittests/helpers.py

# Move directories:
cloudinit/distros/tests -> tests/unittests/distros
cloudinit/cmd/devel/tests -> tests/unittests/cmd/devel
cloudinit/cmd/tests -> tests/unittests/cmd/
cloudinit/sources/helpers/tests -> tests/unittests/sources/helpers
cloudinit/sources/tests -> tests/unittests/sources
cloudinit/net/tests -> tests/unittests/net
cloudinit/config/tests -> tests/unittests/config
cloudinit/analyze/tests/ -> tests/unittests/analyze/

# Standardize tests already in tests/unittests/
test_datasource -> sources
test_distros -> distros
test_vmware -> sources/vmware
test_handler -> config  # this contains cloudconfig module tests
test_runs -> runs
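Two of the mechanical changes above, sketched as a hypothetical before and after (the helper class and its new name are illustrative, not taken from the diff):

    # Shared test helpers move out of the package under test, so imports
    # change accordingly:
    #   before: from cloudinit.tests.helpers import CiTestCase
    from tests.unittests.helpers import CiTestCase  # noqa: F401

    # pytest collects any class whose name starts with "Test"; a non-test
    # helper class defining __init__() then triggers a collection warning.
    # Renaming the helper keeps it out of collection:
    class FakeHelper:  # previously named along the lines of "TestHelper"
        def __init__(self, name):
            self.name = name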
---
 cloudinit/analyze/tests/test_boot.py | 161 -
 cloudinit/analyze/tests/test_dump.py | 208 --
 cloudinit/cmd/devel/tests/__init__.py | 0
 cloudinit/cmd/devel/tests/test_logs.py | 167 -
 cloudinit/cmd/devel/tests/test_render.py | 144 -
 cloudinit/cmd/tests/__init__.py | 0
 cloudinit/cmd/tests/test_clean.py | 178 -
 cloudinit/cmd/tests/test_cloud_id.py | 127 -
 cloudinit/cmd/tests/test_main.py | 188 --
 cloudinit/cmd/tests/test_query.py | 392 ---
 cloudinit/cmd/tests/test_status.py | 391 ---
 cloudinit/config/tests/test_apt_pipelining.py | 28 -
 .../config/tests/test_disable_ec2_metadata.py | 48 -
 cloudinit/config/tests/test_final_message.py | 46 -
 cloudinit/config/tests/test_grub_dpkg.py | 176 -
 cloudinit/config/tests/test_keys_to_console.py | 34 -
 cloudinit/config/tests/test_mounts.py | 61 -
 cloudinit/config/tests/test_resolv_conf.py | 92 -
 cloudinit/config/tests/test_set_passwords.py | 168 -
 cloudinit/config/tests/test_snap.py | 564 ----
 cloudinit/config/tests/test_ssh.py | 405 ---
 cloudinit/config/tests/test_ubuntu_advantage.py | 333 --
 cloudinit/config/tests/test_ubuntu_drivers.py | 244 --
 cloudinit/config/tests/test_users_groups.py | 172 -
 cloudinit/distros/tests/__init__.py | 0
 cloudinit/distros/tests/test_init.py | 161 -
 cloudinit/distros/tests/test_networking.py | 223 --
 cloudinit/net/tests/__init__.py | 0
 cloudinit/net/tests/test_dhcp.py | 647 ----
 cloudinit/net/tests/test_init.py | 1402 --------
 cloudinit/net/tests/test_network_state.py | 164 -
 cloudinit/net/tests/test_networkd.py | 64 -
 cloudinit/sources/helpers/tests/test_netlink.py | 480 ---
 cloudinit/sources/helpers/tests/test_openstack.py | 49 -
 cloudinit/sources/tests/__init__.py | 0
 cloudinit/sources/tests/test_init.py | 771 -----
 cloudinit/sources/tests/test_lxd.py | 376 ---
 cloudinit/sources/tests/test_oracle.py | 797 -----
 cloudinit/tests/__init__.py | 0
 cloudinit/tests/helpers.py | 507 ---
 cloudinit/tests/test_conftest.py | 65 -
 cloudinit/tests/test_dhclient_hook.py | 105 -
 cloudinit/tests/test_dmi.py | 154 -
 cloudinit/tests/test_event.py | 26 -
 cloudinit/tests/test_features.py | 60 -
 cloudinit/tests/test_gpg.py | 55 -
 cloudinit/tests/test_netinfo.py | 181 --
 cloudinit/tests/test_persistence.py | 127 -
 cloudinit/tests/test_simpletable.py | 106 -
 cloudinit/tests/test_stages.py | 478 ---
 cloudinit/tests/test_subp.py | 286 --
 cloudinit/tests/test_temp_utils.py | 117 -
 cloudinit/tests/test_upgrade.py | 52 -
 cloudinit/tests/test_url_helper.py | 178 -
 cloudinit/tests/test_util.py | 1187 -------
 cloudinit/tests/test_version.py | 31 -
 doc/rtd/topics/testing.rst | 13 +-
 setup.py | 2 +-
 tests/unittests/analyze/test_boot.py | 161 +
 tests/unittests/analyze/test_dump.py | 208 ++
 tests/unittests/cloudinit/__init__py | 0
 tests/unittests/cmd/__init__.py | 0
 tests/unittests/cmd/devel/__init__.py | 0
 tests/unittests/cmd/devel/test_logs.py | 167 +
 tests/unittests/cmd/devel/test_render.py | 144 +
 tests/unittests/cmd/test_clean.py | 178 +
 tests/unittests/cmd/test_cloud_id.py | 127 +
 tests/unittests/cmd/test_main.py | 188 ++
 tests/unittests/cmd/test_query.py | 392 +++
 tests/unittests/cmd/test_status.py | 391 +++
 tests/unittests/config/__init__.py | 0
 tests/unittests/config/test_apt_conf_v1.py | 129 +
 .../config/test_apt_configure_sources_list_v1.py | 181 ++
 .../config/test_apt_configure_sources_list_v3.py | 226 ++
 tests/unittests/config/test_apt_key.py | 137 +
 tests/unittests/config/test_apt_source_v1.py | 651 ++++
 tests/unittests/config/test_apt_source_v3.py | 1170 +++++++
 tests/unittests/config/test_cc_apk_configure.py | 299 ++
 tests/unittests/config/test_cc_apt_pipelining.py | 28 +
 tests/unittests/config/test_cc_bootcmd.py | 152 +
 tests/unittests/config/test_cc_ca_certs.py | 361 +++
 tests/unittests/config/test_cc_chef.py | 271 ++
 tests/unittests/config/test_cc_debug.py | 59 +
 .../config/test_cc_disable_ec2_metadata.py | 48 +
 tests/unittests/config/test_cc_disk_setup.py | 243 ++
 tests/unittests/config/test_cc_final_message.py | 46 +
 tests/unittests/config/test_cc_growpart.py | 309 ++
 tests/unittests/config/test_cc_grub_dpkg.py | 176 +
 tests/unittests/config/test_cc_install_hotplug.py | 113 +
 tests/unittests/config/test_cc_keys_to_console.py | 34 +
 tests/unittests/config/test_cc_landscape.py | 126 +
 tests/unittests/config/test_cc_locale.py | 116 +
 tests/unittests/config/test_cc_lxd.py | 222 ++
 tests/unittests/config/test_cc_mcollective.py | 146 +
 tests/unittests/config/test_cc_mounts.py | 461 +++
 tests/unittests/config/test_cc_ntp.py | 765 +++++
 .../unittests/config/test_cc_power_state_change.py | 159 +
 tests/unittests/config/test_cc_puppet.py | 380 +++
 .../config/test_cc_refresh_rmc_and_interface.py | 109 +
 tests/unittests/config/test_cc_resizefs.py | 398 +++
 tests/unittests/config/test_cc_resolv_conf.py | 193 ++
 tests/unittests/config/test_cc_rh_subscription.py | 234 ++
 tests/unittests/config/test_cc_rsyslog.py | 178 +
 tests/unittests/config/test_cc_runcmd.py | 129 +
 tests/unittests/config/test_cc_seed_random.py | 205 ++
 tests/unittests/config/test_cc_set_hostname.py | 207 ++
 tests/unittests/config/test_cc_set_passwords.py | 162 +
 tests/unittests/config/test_cc_snap.py | 564 ++++
 tests/unittests/config/test_cc_spacewalk.py | 42 +
 tests/unittests/config/test_cc_ssh.py | 405 +++
 tests/unittests/config/test_cc_timezone.py | 54 +
 tests/unittests/config/test_cc_ubuntu_advantage.py | 333 ++
 tests/unittests/config/test_cc_ubuntu_drivers.py | 244 ++
 tests/unittests/config/test_cc_update_etc_hosts.py | 70 +
 tests/unittests/config/test_cc_users_groups.py | 172 +
 tests/unittests/config/test_cc_write_files.py | 246 ++
 .../config/test_cc_write_files_deferred.py | 77 +
 tests/unittests/config/test_cc_yum_add_repo.py | 111 +
 tests/unittests/config/test_cc_zypper_add_repo.py | 231 ++
 tests/unittests/config/test_schema.py | 515 +++
 tests/unittests/distros/__init__.py | 21 +
 tests/unittests/distros/test_arch.py | 45 +
 tests/unittests/distros/test_bsd_utils.py | 67 +
 tests/unittests/distros/test_create_users.py | 236 ++
 tests/unittests/distros/test_debian.py | 174 +
 tests/unittests/distros/test_dragonflybsd.py | 25 +
 tests/unittests/distros/test_freebsd.py | 45 +
 tests/unittests/distros/test_generic.py | 315 ++
 tests/unittests/distros/test_gentoo.py | 26 +
 tests/unittests/distros/test_hostname.py | 42 +
 tests/unittests/distros/test_hosts.py | 45 +
 tests/unittests/distros/test_init.py | 161 +
tests/unittests/distros/test_manage_service.py | 38 + tests/unittests/distros/test_netbsd.py | 17 + tests/unittests/distros/test_netconfig.py | 916 ++++++ tests/unittests/distros/test_networking.py | 223 ++ tests/unittests/distros/test_opensuse.py | 12 + tests/unittests/distros/test_photon.py | 68 + tests/unittests/distros/test_resolv.py | 65 + tests/unittests/distros/test_sles.py | 12 + tests/unittests/distros/test_sysconfig.py | 86 + .../unittests/distros/test_user_data_normalize.py | 372 +++ tests/unittests/filters/__init__.py | 0 tests/unittests/filters/test_launch_index.py | 135 + tests/unittests/helpers.py | 507 +++ tests/unittests/net/__init__.py | 0 tests/unittests/net/test_dhcp.py | 647 ++++ tests/unittests/net/test_init.py | 1402 ++++++++ tests/unittests/net/test_network_state.py | 164 + tests/unittests/net/test_networkd.py | 64 + tests/unittests/runs/__init__.py | 0 tests/unittests/runs/test_merge_run.py | 60 + tests/unittests/runs/test_simple_run.py | 182 ++ tests/unittests/sources/__init__.py | 0 tests/unittests/sources/helpers/test_netlink.py | 480 +++ tests/unittests/sources/helpers/test_openstack.py | 49 + tests/unittests/sources/test_aliyun.py | 248 ++ tests/unittests/sources/test_altcloud.py | 450 +++ tests/unittests/sources/test_azure.py | 3394 ++++++++++++++++++++ tests/unittests/sources/test_azure_helper.py | 1441 +++++++++ tests/unittests/sources/test_cloudsigma.py | 137 + tests/unittests/sources/test_cloudstack.py | 186 ++ tests/unittests/sources/test_common.py | 121 + tests/unittests/sources/test_configdrive.py | 844 +++++ tests/unittests/sources/test_digitalocean.py | 372 +++ tests/unittests/sources/test_ec2.py | 978 ++++++ tests/unittests/sources/test_exoscale.py | 211 ++ tests/unittests/sources/test_gce.py | 388 +++ tests/unittests/sources/test_hetzner.py | 142 + tests/unittests/sources/test_ibmcloud.py | 343 ++ tests/unittests/sources/test_init.py | 771 +++++ tests/unittests/sources/test_lxd.py | 376 +++ tests/unittests/sources/test_maas.py | 200 ++ tests/unittests/sources/test_nocloud.py | 393 +++ tests/unittests/sources/test_opennebula.py | 977 ++++++ tests/unittests/sources/test_openstack.py | 724 +++++ tests/unittests/sources/test_oracle.py | 797 +++++ tests/unittests/sources/test_ovf.py | 1046 ++++++ tests/unittests/sources/test_rbx.py | 238 ++ tests/unittests/sources/test_scaleway.py | 473 +++ tests/unittests/sources/test_smartos.py | 1163 +++++++ tests/unittests/sources/test_upcloud.py | 314 ++ tests/unittests/sources/test_vmware.py | 391 +++ tests/unittests/sources/test_vultr.py | 337 ++ tests/unittests/sources/vmware/__init__.py | 0 .../unittests/sources/vmware/test_custom_script.py | 109 + .../sources/vmware/test_guestcust_util.py | 98 + .../sources/vmware/test_vmware_config_file.py | 545 ++++ tests/unittests/test__init__.py | 2 +- tests/unittests/test_atomic_helper.py | 2 +- tests/unittests/test_builtin_handlers.py | 2 +- tests/unittests/test_cli.py | 2 +- tests/unittests/test_conftest.py | 65 + tests/unittests/test_cs_util.py | 2 +- tests/unittests/test_data.py | 2 +- tests/unittests/test_datasource/__init__.py | 0 tests/unittests/test_datasource/test_aliyun.py | 248 -- tests/unittests/test_datasource/test_altcloud.py | 450 --- tests/unittests/test_datasource/test_azure.py | 3394 -------------------- .../unittests/test_datasource/test_azure_helper.py | 1441 --------- tests/unittests/test_datasource/test_cloudsigma.py | 137 - tests/unittests/test_datasource/test_cloudstack.py | 186 -- tests/unittests/test_datasource/test_common.py | 121 - 
.../unittests/test_datasource/test_configdrive.py | 844 ----- .../unittests/test_datasource/test_digitalocean.py | 372 --- tests/unittests/test_datasource/test_ec2.py | 978 ------ tests/unittests/test_datasource/test_exoscale.py | 211 -- tests/unittests/test_datasource/test_gce.py | 388 --- tests/unittests/test_datasource/test_hetzner.py | 142 - tests/unittests/test_datasource/test_ibmcloud.py | 343 -- tests/unittests/test_datasource/test_maas.py | 200 -- tests/unittests/test_datasource/test_nocloud.py | 393 --- tests/unittests/test_datasource/test_opennebula.py | 977 ------ tests/unittests/test_datasource/test_openstack.py | 724 ----- tests/unittests/test_datasource/test_ovf.py | 1046 ------ tests/unittests/test_datasource/test_rbx.py | 238 -- tests/unittests/test_datasource/test_scaleway.py | 473 --- tests/unittests/test_datasource/test_smartos.py | 1163 ------- tests/unittests/test_datasource/test_upcloud.py | 314 -- tests/unittests/test_datasource/test_vmware.py | 391 --- tests/unittests/test_datasource/test_vultr.py | 337 -- tests/unittests/test_dhclient_hook.py | 105 + tests/unittests/test_distros/__init__.py | 21 - tests/unittests/test_distros/test_arch.py | 45 - tests/unittests/test_distros/test_bsd_utils.py | 67 - tests/unittests/test_distros/test_create_users.py | 236 -- tests/unittests/test_distros/test_debian.py | 174 - tests/unittests/test_distros/test_dragonflybsd.py | 25 - tests/unittests/test_distros/test_freebsd.py | 45 - tests/unittests/test_distros/test_generic.py | 315 -- tests/unittests/test_distros/test_gentoo.py | 26 - tests/unittests/test_distros/test_hostname.py | 42 - tests/unittests/test_distros/test_hosts.py | 45 - .../unittests/test_distros/test_manage_service.py | 38 - tests/unittests/test_distros/test_netbsd.py | 17 - tests/unittests/test_distros/test_netconfig.py | 916 ------ tests/unittests/test_distros/test_opensuse.py | 12 - tests/unittests/test_distros/test_photon.py | 68 - tests/unittests/test_distros/test_resolv.py | 65 - tests/unittests/test_distros/test_sles.py | 12 - tests/unittests/test_distros/test_sysconfig.py | 86 - .../test_distros/test_user_data_normalize.py | 372 --- tests/unittests/test_dmi.py | 154 + tests/unittests/test_ds_identify.py | 2 +- tests/unittests/test_ec2_util.py | 2 +- tests/unittests/test_event.py | 26 + tests/unittests/test_features.py | 60 + tests/unittests/test_filters/__init__.py | 0 tests/unittests/test_filters/test_launch_index.py | 135 - tests/unittests/test_gpg.py | 49 + tests/unittests/test_handler/__init__.py | 0 .../test_handler/test_handler_apk_configure.py | 299 -- .../test_handler/test_handler_apt_conf_v1.py | 129 - .../test_handler_apt_configure_sources_list_v1.py | 181 -- .../test_handler_apt_configure_sources_list_v3.py | 226 -- .../unittests/test_handler/test_handler_apt_key.py | 137 - .../test_handler/test_handler_apt_source_v1.py | 651 ---- .../test_handler/test_handler_apt_source_v3.py | 1170 ------- .../unittests/test_handler/test_handler_bootcmd.py | 152 - .../test_handler/test_handler_ca_certs.py | 361 --- tests/unittests/test_handler/test_handler_chef.py | 271 -- tests/unittests/test_handler/test_handler_debug.py | 59 - .../test_handler/test_handler_disk_setup.py | 243 -- .../test_handler/test_handler_etc_hosts.py | 70 - .../test_handler/test_handler_growpart.py | 309 -- .../test_handler/test_handler_install_hotplug.py | 113 - .../test_handler/test_handler_landscape.py | 126 - .../unittests/test_handler/test_handler_locale.py | 116 - tests/unittests/test_handler/test_handler_lxd.py | 222 -- 
.../test_handler/test_handler_mcollective.py | 146 - .../unittests/test_handler/test_handler_mounts.py | 406 --- tests/unittests/test_handler/test_handler_ntp.py | 765 ----- .../test_handler/test_handler_power_state.py | 159 - .../unittests/test_handler/test_handler_puppet.py | 380 --- .../test_handler_refresh_rmc_and_interface.py | 109 - .../test_handler/test_handler_resizefs.py | 398 --- .../test_handler/test_handler_resolv_conf.py | 105 - .../unittests/test_handler/test_handler_rsyslog.py | 178 - .../unittests/test_handler/test_handler_runcmd.py | 129 - .../test_handler/test_handler_seed_random.py | 205 -- .../test_handler/test_handler_set_hostname.py | 207 -- .../test_handler/test_handler_spacewalk.py | 42 - .../test_handler/test_handler_timezone.py | 54 - .../test_handler/test_handler_write_files.py | 246 -- .../test_handler_write_files_deferred.py | 77 - .../test_handler/test_handler_yum_add_repo.py | 111 - .../test_handler/test_handler_zypper_add_repo.py | 231 -- tests/unittests/test_handler/test_schema.py | 515 --- tests/unittests/test_helpers.py | 2 +- tests/unittests/test_log.py | 2 +- tests/unittests/test_merging.py | 2 +- tests/unittests/test_net.py | 2 +- tests/unittests/test_net_freebsd.py | 2 +- tests/unittests/test_netinfo.py | 181 ++ tests/unittests/test_pathprefix2dict.py | 2 +- tests/unittests/test_persistence.py | 127 + tests/unittests/test_registry.py | 2 +- tests/unittests/test_reporting.py | 2 +- tests/unittests/test_reporting_hyperv.py | 2 +- tests/unittests/test_rh_subscription.py | 234 -- tests/unittests/test_runs/__init__.py | 0 tests/unittests/test_runs/test_merge_run.py | 60 - tests/unittests/test_runs/test_simple_run.py | 182 -- tests/unittests/test_simpletable.py | 106 + tests/unittests/test_sshutil.py | 2 +- tests/unittests/test_stages.py | 478 +++ tests/unittests/test_subp.py | 286 ++ tests/unittests/test_temp_utils.py | 117 + tests/unittests/test_templating.py | 2 +- tests/unittests/test_upgrade.py | 52 + tests/unittests/test_url_helper.py | 178 + tests/unittests/test_util.py | 1660 +++++++++- tests/unittests/test_version.py | 31 + tests/unittests/test_vmware/__init__.py | 0 tests/unittests/test_vmware/test_custom_script.py | 109 - tests/unittests/test_vmware/test_guestcust_util.py | 98 - tests/unittests/test_vmware_config_file.py | 545 ---- tests/unittests/util.py | 8 +- tox.ini | 10 +- 319 files changed, 42328 insertions(+), 42144 deletions(-) delete mode 100644 cloudinit/analyze/tests/test_boot.py delete mode 100644 cloudinit/analyze/tests/test_dump.py delete mode 100644 cloudinit/cmd/devel/tests/__init__.py delete mode 100644 cloudinit/cmd/devel/tests/test_logs.py delete mode 100644 cloudinit/cmd/devel/tests/test_render.py delete mode 100644 cloudinit/cmd/tests/__init__.py delete mode 100644 cloudinit/cmd/tests/test_clean.py delete mode 100644 cloudinit/cmd/tests/test_cloud_id.py delete mode 100644 cloudinit/cmd/tests/test_main.py delete mode 100644 cloudinit/cmd/tests/test_query.py delete mode 100644 cloudinit/cmd/tests/test_status.py delete mode 100644 cloudinit/config/tests/test_apt_pipelining.py delete mode 100644 cloudinit/config/tests/test_disable_ec2_metadata.py delete mode 100644 cloudinit/config/tests/test_final_message.py delete mode 100644 cloudinit/config/tests/test_grub_dpkg.py delete mode 100644 cloudinit/config/tests/test_keys_to_console.py delete mode 100644 cloudinit/config/tests/test_mounts.py delete mode 100644 cloudinit/config/tests/test_resolv_conf.py delete mode 100644 cloudinit/config/tests/test_set_passwords.py delete mode 
100644 cloudinit/config/tests/test_snap.py delete mode 100644 cloudinit/config/tests/test_ssh.py delete mode 100644 cloudinit/config/tests/test_ubuntu_advantage.py delete mode 100644 cloudinit/config/tests/test_ubuntu_drivers.py delete mode 100644 cloudinit/config/tests/test_users_groups.py delete mode 100644 cloudinit/distros/tests/__init__.py delete mode 100644 cloudinit/distros/tests/test_init.py delete mode 100644 cloudinit/distros/tests/test_networking.py delete mode 100644 cloudinit/net/tests/__init__.py delete mode 100644 cloudinit/net/tests/test_dhcp.py delete mode 100644 cloudinit/net/tests/test_init.py delete mode 100644 cloudinit/net/tests/test_network_state.py delete mode 100644 cloudinit/net/tests/test_networkd.py delete mode 100644 cloudinit/sources/helpers/tests/test_netlink.py delete mode 100644 cloudinit/sources/helpers/tests/test_openstack.py delete mode 100644 cloudinit/sources/tests/__init__.py delete mode 100644 cloudinit/sources/tests/test_init.py delete mode 100644 cloudinit/sources/tests/test_lxd.py delete mode 100644 cloudinit/sources/tests/test_oracle.py delete mode 100644 cloudinit/tests/__init__.py delete mode 100644 cloudinit/tests/helpers.py delete mode 100644 cloudinit/tests/test_conftest.py delete mode 100644 cloudinit/tests/test_dhclient_hook.py delete mode 100644 cloudinit/tests/test_dmi.py delete mode 100644 cloudinit/tests/test_event.py delete mode 100644 cloudinit/tests/test_features.py delete mode 100644 cloudinit/tests/test_gpg.py delete mode 100644 cloudinit/tests/test_netinfo.py delete mode 100644 cloudinit/tests/test_persistence.py delete mode 100644 cloudinit/tests/test_simpletable.py delete mode 100644 cloudinit/tests/test_stages.py delete mode 100644 cloudinit/tests/test_subp.py delete mode 100644 cloudinit/tests/test_temp_utils.py delete mode 100644 cloudinit/tests/test_upgrade.py delete mode 100644 cloudinit/tests/test_url_helper.py delete mode 100644 cloudinit/tests/test_util.py delete mode 100644 cloudinit/tests/test_version.py create mode 100644 tests/unittests/analyze/test_boot.py create mode 100644 tests/unittests/analyze/test_dump.py create mode 100644 tests/unittests/cloudinit/__init__py create mode 100644 tests/unittests/cmd/__init__.py create mode 100644 tests/unittests/cmd/devel/__init__.py create mode 100644 tests/unittests/cmd/devel/test_logs.py create mode 100644 tests/unittests/cmd/devel/test_render.py create mode 100644 tests/unittests/cmd/test_clean.py create mode 100644 tests/unittests/cmd/test_cloud_id.py create mode 100644 tests/unittests/cmd/test_main.py create mode 100644 tests/unittests/cmd/test_query.py create mode 100644 tests/unittests/cmd/test_status.py create mode 100644 tests/unittests/config/__init__.py create mode 100644 tests/unittests/config/test_apt_conf_v1.py create mode 100644 tests/unittests/config/test_apt_configure_sources_list_v1.py create mode 100644 tests/unittests/config/test_apt_configure_sources_list_v3.py create mode 100644 tests/unittests/config/test_apt_key.py create mode 100644 tests/unittests/config/test_apt_source_v1.py create mode 100644 tests/unittests/config/test_apt_source_v3.py create mode 100644 tests/unittests/config/test_cc_apk_configure.py create mode 100644 tests/unittests/config/test_cc_apt_pipelining.py create mode 100644 tests/unittests/config/test_cc_bootcmd.py create mode 100644 tests/unittests/config/test_cc_ca_certs.py create mode 100644 tests/unittests/config/test_cc_chef.py create mode 100644 tests/unittests/config/test_cc_debug.py create mode 100644 
tests/unittests/config/test_cc_disable_ec2_metadata.py create mode 100644 tests/unittests/config/test_cc_disk_setup.py create mode 100644 tests/unittests/config/test_cc_final_message.py create mode 100644 tests/unittests/config/test_cc_growpart.py create mode 100644 tests/unittests/config/test_cc_grub_dpkg.py create mode 100644 tests/unittests/config/test_cc_install_hotplug.py create mode 100644 tests/unittests/config/test_cc_keys_to_console.py create mode 100644 tests/unittests/config/test_cc_landscape.py create mode 100644 tests/unittests/config/test_cc_locale.py create mode 100644 tests/unittests/config/test_cc_lxd.py create mode 100644 tests/unittests/config/test_cc_mcollective.py create mode 100644 tests/unittests/config/test_cc_mounts.py create mode 100644 tests/unittests/config/test_cc_ntp.py create mode 100644 tests/unittests/config/test_cc_power_state_change.py create mode 100644 tests/unittests/config/test_cc_puppet.py create mode 100644 tests/unittests/config/test_cc_refresh_rmc_and_interface.py create mode 100644 tests/unittests/config/test_cc_resizefs.py create mode 100644 tests/unittests/config/test_cc_resolv_conf.py create mode 100644 tests/unittests/config/test_cc_rh_subscription.py create mode 100644 tests/unittests/config/test_cc_rsyslog.py create mode 100644 tests/unittests/config/test_cc_runcmd.py create mode 100644 tests/unittests/config/test_cc_seed_random.py create mode 100644 tests/unittests/config/test_cc_set_hostname.py create mode 100644 tests/unittests/config/test_cc_set_passwords.py create mode 100644 tests/unittests/config/test_cc_snap.py create mode 100644 tests/unittests/config/test_cc_spacewalk.py create mode 100644 tests/unittests/config/test_cc_ssh.py create mode 100644 tests/unittests/config/test_cc_timezone.py create mode 100644 tests/unittests/config/test_cc_ubuntu_advantage.py create mode 100644 tests/unittests/config/test_cc_ubuntu_drivers.py create mode 100644 tests/unittests/config/test_cc_update_etc_hosts.py create mode 100644 tests/unittests/config/test_cc_users_groups.py create mode 100644 tests/unittests/config/test_cc_write_files.py create mode 100644 tests/unittests/config/test_cc_write_files_deferred.py create mode 100644 tests/unittests/config/test_cc_yum_add_repo.py create mode 100644 tests/unittests/config/test_cc_zypper_add_repo.py create mode 100644 tests/unittests/config/test_schema.py create mode 100644 tests/unittests/distros/__init__.py create mode 100644 tests/unittests/distros/test_arch.py create mode 100644 tests/unittests/distros/test_bsd_utils.py create mode 100644 tests/unittests/distros/test_create_users.py create mode 100644 tests/unittests/distros/test_debian.py create mode 100644 tests/unittests/distros/test_dragonflybsd.py create mode 100644 tests/unittests/distros/test_freebsd.py create mode 100644 tests/unittests/distros/test_generic.py create mode 100644 tests/unittests/distros/test_gentoo.py create mode 100644 tests/unittests/distros/test_hostname.py create mode 100644 tests/unittests/distros/test_hosts.py create mode 100644 tests/unittests/distros/test_init.py create mode 100644 tests/unittests/distros/test_manage_service.py create mode 100644 tests/unittests/distros/test_netbsd.py create mode 100644 tests/unittests/distros/test_netconfig.py create mode 100644 tests/unittests/distros/test_networking.py create mode 100644 tests/unittests/distros/test_opensuse.py create mode 100644 tests/unittests/distros/test_photon.py create mode 100644 tests/unittests/distros/test_resolv.py create mode 100644 
tests/unittests/distros/test_sles.py create mode 100644 tests/unittests/distros/test_sysconfig.py create mode 100644 tests/unittests/distros/test_user_data_normalize.py create mode 100644 tests/unittests/filters/__init__.py create mode 100644 tests/unittests/filters/test_launch_index.py create mode 100644 tests/unittests/helpers.py create mode 100644 tests/unittests/net/__init__.py create mode 100644 tests/unittests/net/test_dhcp.py create mode 100644 tests/unittests/net/test_init.py create mode 100644 tests/unittests/net/test_network_state.py create mode 100644 tests/unittests/net/test_networkd.py create mode 100644 tests/unittests/runs/__init__.py create mode 100644 tests/unittests/runs/test_merge_run.py create mode 100644 tests/unittests/runs/test_simple_run.py create mode 100644 tests/unittests/sources/__init__.py create mode 100644 tests/unittests/sources/helpers/test_netlink.py create mode 100644 tests/unittests/sources/helpers/test_openstack.py create mode 100644 tests/unittests/sources/test_aliyun.py create mode 100644 tests/unittests/sources/test_altcloud.py create mode 100644 tests/unittests/sources/test_azure.py create mode 100644 tests/unittests/sources/test_azure_helper.py create mode 100644 tests/unittests/sources/test_cloudsigma.py create mode 100644 tests/unittests/sources/test_cloudstack.py create mode 100644 tests/unittests/sources/test_common.py create mode 100644 tests/unittests/sources/test_configdrive.py create mode 100644 tests/unittests/sources/test_digitalocean.py create mode 100644 tests/unittests/sources/test_ec2.py create mode 100644 tests/unittests/sources/test_exoscale.py create mode 100644 tests/unittests/sources/test_gce.py create mode 100644 tests/unittests/sources/test_hetzner.py create mode 100644 tests/unittests/sources/test_ibmcloud.py create mode 100644 tests/unittests/sources/test_init.py create mode 100644 tests/unittests/sources/test_lxd.py create mode 100644 tests/unittests/sources/test_maas.py create mode 100644 tests/unittests/sources/test_nocloud.py create mode 100644 tests/unittests/sources/test_opennebula.py create mode 100644 tests/unittests/sources/test_openstack.py create mode 100644 tests/unittests/sources/test_oracle.py create mode 100644 tests/unittests/sources/test_ovf.py create mode 100644 tests/unittests/sources/test_rbx.py create mode 100644 tests/unittests/sources/test_scaleway.py create mode 100644 tests/unittests/sources/test_smartos.py create mode 100644 tests/unittests/sources/test_upcloud.py create mode 100644 tests/unittests/sources/test_vmware.py create mode 100644 tests/unittests/sources/test_vultr.py create mode 100644 tests/unittests/sources/vmware/__init__.py create mode 100644 tests/unittests/sources/vmware/test_custom_script.py create mode 100644 tests/unittests/sources/vmware/test_guestcust_util.py create mode 100644 tests/unittests/sources/vmware/test_vmware_config_file.py create mode 100644 tests/unittests/test_conftest.py delete mode 100644 tests/unittests/test_datasource/__init__.py delete mode 100644 tests/unittests/test_datasource/test_aliyun.py delete mode 100644 tests/unittests/test_datasource/test_altcloud.py delete mode 100644 tests/unittests/test_datasource/test_azure.py delete mode 100644 tests/unittests/test_datasource/test_azure_helper.py delete mode 100644 tests/unittests/test_datasource/test_cloudsigma.py delete mode 100644 tests/unittests/test_datasource/test_cloudstack.py delete mode 100644 tests/unittests/test_datasource/test_common.py delete mode 100644 
tests/unittests/test_datasource/test_configdrive.py delete mode 100644 tests/unittests/test_datasource/test_digitalocean.py delete mode 100644 tests/unittests/test_datasource/test_ec2.py delete mode 100644 tests/unittests/test_datasource/test_exoscale.py delete mode 100644 tests/unittests/test_datasource/test_gce.py delete mode 100644 tests/unittests/test_datasource/test_hetzner.py delete mode 100644 tests/unittests/test_datasource/test_ibmcloud.py delete mode 100644 tests/unittests/test_datasource/test_maas.py delete mode 100644 tests/unittests/test_datasource/test_nocloud.py delete mode 100644 tests/unittests/test_datasource/test_opennebula.py delete mode 100644 tests/unittests/test_datasource/test_openstack.py delete mode 100644 tests/unittests/test_datasource/test_ovf.py delete mode 100644 tests/unittests/test_datasource/test_rbx.py delete mode 100644 tests/unittests/test_datasource/test_scaleway.py delete mode 100644 tests/unittests/test_datasource/test_smartos.py delete mode 100644 tests/unittests/test_datasource/test_upcloud.py delete mode 100644 tests/unittests/test_datasource/test_vmware.py delete mode 100644 tests/unittests/test_datasource/test_vultr.py create mode 100644 tests/unittests/test_dhclient_hook.py delete mode 100644 tests/unittests/test_distros/__init__.py delete mode 100644 tests/unittests/test_distros/test_arch.py delete mode 100644 tests/unittests/test_distros/test_bsd_utils.py delete mode 100644 tests/unittests/test_distros/test_create_users.py delete mode 100644 tests/unittests/test_distros/test_debian.py delete mode 100644 tests/unittests/test_distros/test_dragonflybsd.py delete mode 100644 tests/unittests/test_distros/test_freebsd.py delete mode 100644 tests/unittests/test_distros/test_generic.py delete mode 100644 tests/unittests/test_distros/test_gentoo.py delete mode 100644 tests/unittests/test_distros/test_hostname.py delete mode 100644 tests/unittests/test_distros/test_hosts.py delete mode 100644 tests/unittests/test_distros/test_manage_service.py delete mode 100644 tests/unittests/test_distros/test_netbsd.py delete mode 100644 tests/unittests/test_distros/test_netconfig.py delete mode 100644 tests/unittests/test_distros/test_opensuse.py delete mode 100644 tests/unittests/test_distros/test_photon.py delete mode 100644 tests/unittests/test_distros/test_resolv.py delete mode 100644 tests/unittests/test_distros/test_sles.py delete mode 100644 tests/unittests/test_distros/test_sysconfig.py delete mode 100644 tests/unittests/test_distros/test_user_data_normalize.py create mode 100644 tests/unittests/test_dmi.py create mode 100644 tests/unittests/test_event.py create mode 100644 tests/unittests/test_features.py delete mode 100644 tests/unittests/test_filters/__init__.py delete mode 100644 tests/unittests/test_filters/test_launch_index.py delete mode 100644 tests/unittests/test_handler/__init__.py delete mode 100644 tests/unittests/test_handler/test_handler_apk_configure.py delete mode 100644 tests/unittests/test_handler/test_handler_apt_conf_v1.py delete mode 100644 tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py delete mode 100644 tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py delete mode 100644 tests/unittests/test_handler/test_handler_apt_key.py delete mode 100644 tests/unittests/test_handler/test_handler_apt_source_v1.py delete mode 100644 tests/unittests/test_handler/test_handler_apt_source_v3.py delete mode 100644 tests/unittests/test_handler/test_handler_bootcmd.py delete mode 100644 
tests/unittests/test_handler/test_handler_ca_certs.py delete mode 100644 tests/unittests/test_handler/test_handler_chef.py delete mode 100644 tests/unittests/test_handler/test_handler_debug.py delete mode 100644 tests/unittests/test_handler/test_handler_disk_setup.py delete mode 100644 tests/unittests/test_handler/test_handler_etc_hosts.py delete mode 100644 tests/unittests/test_handler/test_handler_growpart.py delete mode 100644 tests/unittests/test_handler/test_handler_install_hotplug.py delete mode 100644 tests/unittests/test_handler/test_handler_landscape.py delete mode 100644 tests/unittests/test_handler/test_handler_locale.py delete mode 100644 tests/unittests/test_handler/test_handler_lxd.py delete mode 100644 tests/unittests/test_handler/test_handler_mcollective.py delete mode 100644 tests/unittests/test_handler/test_handler_mounts.py delete mode 100644 tests/unittests/test_handler/test_handler_ntp.py delete mode 100644 tests/unittests/test_handler/test_handler_power_state.py delete mode 100644 tests/unittests/test_handler/test_handler_puppet.py delete mode 100644 tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py delete mode 100644 tests/unittests/test_handler/test_handler_resizefs.py delete mode 100644 tests/unittests/test_handler/test_handler_resolv_conf.py delete mode 100644 tests/unittests/test_handler/test_handler_rsyslog.py delete mode 100644 tests/unittests/test_handler/test_handler_runcmd.py delete mode 100644 tests/unittests/test_handler/test_handler_seed_random.py delete mode 100644 tests/unittests/test_handler/test_handler_set_hostname.py delete mode 100644 tests/unittests/test_handler/test_handler_spacewalk.py delete mode 100644 tests/unittests/test_handler/test_handler_timezone.py delete mode 100644 tests/unittests/test_handler/test_handler_write_files.py delete mode 100644 tests/unittests/test_handler/test_handler_write_files_deferred.py delete mode 100644 tests/unittests/test_handler/test_handler_yum_add_repo.py delete mode 100644 tests/unittests/test_handler/test_handler_zypper_add_repo.py delete mode 100644 tests/unittests/test_handler/test_schema.py create mode 100644 tests/unittests/test_netinfo.py create mode 100644 tests/unittests/test_persistence.py delete mode 100644 tests/unittests/test_rh_subscription.py delete mode 100644 tests/unittests/test_runs/__init__.py delete mode 100644 tests/unittests/test_runs/test_merge_run.py delete mode 100644 tests/unittests/test_runs/test_simple_run.py create mode 100644 tests/unittests/test_simpletable.py create mode 100644 tests/unittests/test_stages.py create mode 100644 tests/unittests/test_subp.py create mode 100644 tests/unittests/test_temp_utils.py create mode 100644 tests/unittests/test_upgrade.py create mode 100644 tests/unittests/test_url_helper.py create mode 100644 tests/unittests/test_version.py delete mode 100644 tests/unittests/test_vmware/__init__.py delete mode 100644 tests/unittests/test_vmware/test_custom_script.py delete mode 100644 tests/unittests/test_vmware/test_guestcust_util.py delete mode 100644 tests/unittests/test_vmware_config_file.py (limited to 'cloudinit/tests/test_subp.py') diff --git a/cloudinit/analyze/tests/test_boot.py b/cloudinit/analyze/tests/test_boot.py deleted file mode 100644 index 6b3afb5e..00000000 --- a/cloudinit/analyze/tests/test_boot.py +++ /dev/null @@ -1,161 +0,0 @@ -import os -from cloudinit.analyze.__main__ import (analyze_boot, get_parser) -from cloudinit.tests.helpers import CiTestCase, mock -from cloudinit.analyze.show import dist_check_timestamp, 
SystemctlReader, \ - FAIL_CODE, CONTAINER_CODE - -err_code = (FAIL_CODE, -1, -1, -1) - - -class TestDistroChecker(CiTestCase): - - def test_blank_distro(self): - self.assertEqual(err_code, dist_check_timestamp()) - - @mock.patch('cloudinit.util.is_FreeBSD', return_value=True) - def test_freebsd_gentoo_cant_find(self, m_is_FreeBSD): - self.assertEqual(err_code, dist_check_timestamp()) - - @mock.patch('cloudinit.subp.subp', return_value=(0, 1)) - def test_subp_fails(self, m_subp): - self.assertEqual(err_code, dist_check_timestamp()) - - -class TestSystemCtlReader(CiTestCase): - - def test_systemctl_invalid_property(self): - reader = SystemctlReader('dummyProperty') - with self.assertRaises(RuntimeError): - reader.parse_epoch_as_float() - - def test_systemctl_invalid_parameter(self): - reader = SystemctlReader('dummyProperty', 'dummyParameter') - with self.assertRaises(RuntimeError): - reader.parse_epoch_as_float() - - @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None)) - def test_systemctl_works_correctly_threshold(self, m_subp): - reader = SystemctlReader('dummyProperty', 'dummyParameter') - self.assertEqual(1.0, reader.parse_epoch_as_float()) - thresh = 1.0 - reader.parse_epoch_as_float() - self.assertTrue(thresh < 1e-6) - self.assertTrue(thresh > (-1 * 1e-6)) - - @mock.patch('cloudinit.subp.subp', return_value=('U=0', None)) - def test_systemctl_succeed_zero(self, m_subp): - reader = SystemctlReader('dummyProperty', 'dummyParameter') - self.assertEqual(0.0, reader.parse_epoch_as_float()) - - @mock.patch('cloudinit.subp.subp', return_value=('U=1', None)) - def test_systemctl_succeed_distinct(self, m_subp): - reader = SystemctlReader('dummyProperty', 'dummyParameter') - val1 = reader.parse_epoch_as_float() - m_subp.return_value = ('U=2', None) - reader2 = SystemctlReader('dummyProperty', 'dummyParameter') - val2 = reader2.parse_epoch_as_float() - self.assertNotEqual(val1, val2) - - @mock.patch('cloudinit.subp.subp', return_value=('100', None)) - def test_systemctl_epoch_not_splittable(self, m_subp): - reader = SystemctlReader('dummyProperty', 'dummyParameter') - with self.assertRaises(IndexError): - reader.parse_epoch_as_float() - - @mock.patch('cloudinit.subp.subp', return_value=('U=foobar', None)) - def test_systemctl_cannot_convert_epoch_to_float(self, m_subp): - reader = SystemctlReader('dummyProperty', 'dummyParameter') - with self.assertRaises(ValueError): - reader.parse_epoch_as_float() - - -class TestAnalyzeBoot(CiTestCase): - - def set_up_dummy_file_ci(self, path, log_path): - infh = open(path, 'w+') - infh.write('2019-07-08 17:40:49,601 - util.py[DEBUG]: Cloud-init v. ' - '19.1-1-gbaa47854-0ubuntu1~18.04.1 running \'init-local\' ' - 'at Mon, 08 Jul 2019 17:40:49 +0000. 
Up 18.84 seconds.') - infh.close() - outfh = open(log_path, 'w+') - outfh.close() - - def set_up_dummy_file(self, path, log_path): - infh = open(path, 'w+') - infh.write('dummy data') - infh.close() - outfh = open(log_path, 'w+') - outfh.close() - - def remove_dummy_file(self, path, log_path): - if os.path.isfile(path): - os.remove(path) - if os.path.isfile(log_path): - os.remove(log_path) - - @mock.patch('cloudinit.analyze.show.dist_check_timestamp', - return_value=err_code) - def test_boot_invalid_distro(self, m_dist_check_timestamp): - - path = os.path.dirname(os.path.abspath(__file__)) - log_path = path + '/boot-test.log' - path += '/dummy.log' - self.set_up_dummy_file(path, log_path) - - parser = get_parser() - args = parser.parse_args(args=['boot', '-i', path, '-o', - log_path]) - name_default = '' - analyze_boot(name_default, args) - # now args have been tested, go into outfile and make sure error - # message is in the outfile - outfh = open(args.outfile, 'r') - data = outfh.read() - err_string = 'Your Linux distro or container does not support this ' \ - 'functionality.\nYou must be running a Kernel ' \ - 'Telemetry supported distro.\nPlease check ' \ - 'https://cloudinit.readthedocs.io/en/latest/topics' \ - '/analyze.html for more information on supported ' \ - 'distros.\n' - - self.remove_dummy_file(path, log_path) - self.assertEqual(err_string, data) - - @mock.patch("cloudinit.util.is_container", return_value=True) - @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None)) - def test_container_no_ci_log_line(self, m_is_container, m_subp): - path = os.path.dirname(os.path.abspath(__file__)) - log_path = path + '/boot-test.log' - path += '/dummy.log' - self.set_up_dummy_file(path, log_path) - - parser = get_parser() - args = parser.parse_args(args=['boot', '-i', path, '-o', - log_path]) - name_default = '' - - finish_code = analyze_boot(name_default, args) - - self.remove_dummy_file(path, log_path) - self.assertEqual(FAIL_CODE, finish_code) - - @mock.patch("cloudinit.util.is_container", return_value=True) - @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None)) - @mock.patch('cloudinit.analyze.__main__._get_events', return_value=[{ - 'name': 'init-local', 'description': 'starting search', 'timestamp': - 100000}]) - @mock.patch('cloudinit.analyze.show.dist_check_timestamp', - return_value=(CONTAINER_CODE, 1, 1, 1)) - def test_container_ci_log_line(self, m_is_container, m_subp, m_get, m_g): - path = os.path.dirname(os.path.abspath(__file__)) - log_path = path + '/boot-test.log' - path += '/dummy.log' - self.set_up_dummy_file_ci(path, log_path) - - parser = get_parser() - args = parser.parse_args(args=['boot', '-i', path, '-o', - log_path]) - name_default = '' - finish_code = analyze_boot(name_default, args) - - self.remove_dummy_file(path, log_path) - self.assertEqual(CONTAINER_CODE, finish_code) diff --git a/cloudinit/analyze/tests/test_dump.py b/cloudinit/analyze/tests/test_dump.py deleted file mode 100644 index dac1efb6..00000000 --- a/cloudinit/analyze/tests/test_dump.py +++ /dev/null @@ -1,208 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
- -from datetime import datetime -from textwrap import dedent - -from cloudinit.analyze.dump import ( - dump_events, parse_ci_logline, parse_timestamp) -from cloudinit.util import write_file -from cloudinit.subp import which -from cloudinit.tests.helpers import CiTestCase, mock, skipIf - - -class TestParseTimestamp(CiTestCase): - - def test_parse_timestamp_handles_cloud_init_default_format(self): - """Logs with cloud-init detailed formats will be properly parsed.""" - trusty_fmt = '%Y-%m-%d %H:%M:%S,%f' - trusty_stamp = '2016-09-12 14:39:20,839' - dt = datetime.strptime(trusty_stamp, trusty_fmt) - self.assertEqual( - float(dt.strftime('%s.%f')), parse_timestamp(trusty_stamp)) - - def test_parse_timestamp_handles_syslog_adding_year(self): - """Syslog timestamps lack a year. Add year and properly parse.""" - syslog_fmt = '%b %d %H:%M:%S %Y' - syslog_stamp = 'Aug 08 15:12:51' - - # convert stamp ourselves by adding the missing year value - year = datetime.now().year - dt = datetime.strptime(syslog_stamp + " " + str(year), syslog_fmt) - self.assertEqual( - float(dt.strftime('%s.%f')), - parse_timestamp(syslog_stamp)) - - def test_parse_timestamp_handles_journalctl_format_adding_year(self): - """Journalctl precise timestamps lack a year. Add year and parse.""" - journal_fmt = '%b %d %H:%M:%S.%f %Y' - journal_stamp = 'Aug 08 17:15:50.606811' - - # convert stamp ourselves by adding the missing year value - year = datetime.now().year - dt = datetime.strptime(journal_stamp + " " + str(year), journal_fmt) - self.assertEqual( - float(dt.strftime('%s.%f')), parse_timestamp(journal_stamp)) - - @skipIf(not which("date"), "'date' command not available.") - def test_parse_unexpected_timestamp_format_with_date_command(self): - """Dump sends unexpected timestamp formats to date for processing.""" - new_fmt = '%H:%M %m/%d %Y' - new_stamp = '17:15 08/08' - # convert stamp ourselves by adding the missing year value - year = datetime.now().year - dt = datetime.strptime(new_stamp + " " + str(year), new_fmt) - - # use date(1) - with self.allow_subp(["date"]): - self.assertEqual( - float(dt.strftime('%s.%f')), parse_timestamp(new_stamp)) - - -class TestParseCILogLine(CiTestCase): - - def test_parse_logline_returns_none_without_separators(self): - """When no separators are found, parse_ci_logline returns None.""" - expected_parse_ignores = [ - '', '-', 'adsf-asdf', '2017-05-22 18:02:01,088', 'CLOUDINIT'] - for parse_ignores in expected_parse_ignores: - self.assertIsNone(parse_ci_logline(parse_ignores)) - - def test_parse_logline_returns_event_for_cloud_init_logs(self): - """parse_ci_logline returns an event parse from cloud-init format.""" - line = ( - "2017-08-08 20:05:07,147 - util.py[DEBUG]: Cloud-init v. 0.7.9" - " running 'init-local' at Tue, 08 Aug 2017 20:05:07 +0000. Up" - " 6.26 seconds.") - dt = datetime.strptime( - '2017-08-08 20:05:07,147', '%Y-%m-%d %H:%M:%S,%f') - timestamp = float(dt.strftime('%s.%f')) - expected = { - 'description': 'starting search for local datasources', - 'event_type': 'start', - 'name': 'init-local', - 'origin': 'cloudinit', - 'timestamp': timestamp} - self.assertEqual(expected, parse_ci_logline(line)) - - def test_parse_logline_returns_event_for_journalctl_logs(self): - """parse_ci_logline returns an event parse from journalctl format.""" - line = ("Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT]" - " util.py[DEBUG]: Cloud-init v. 0.7.8 running 'init-local' at" - " Thu, 03 Nov 2016 06:51:06 +0000. 
Up 1.0 seconds.") - year = datetime.now().year - dt = datetime.strptime( - 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y') - timestamp = float(dt.strftime('%s.%f')) - expected = { - 'description': 'starting search for local datasources', - 'event_type': 'start', - 'name': 'init-local', - 'origin': 'cloudinit', - 'timestamp': timestamp} - self.assertEqual(expected, parse_ci_logline(line)) - - @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date") - def test_parse_logline_returns_event_for_finish_events(self, - m_parse_from_date): - """parse_ci_logline returns a finish event for a parsed log line.""" - line = ('2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT]' - ' handlers.py[DEBUG]: finish: modules-final: SUCCESS: running' - ' modules for final') - expected = { - 'description': 'running modules for final', - 'event_type': 'finish', - 'name': 'modules-final', - 'origin': 'cloudinit', - 'result': 'SUCCESS', - 'timestamp': 1472594005.972} - m_parse_from_date.return_value = "1472594005.972" - self.assertEqual(expected, parse_ci_logline(line)) - m_parse_from_date.assert_has_calls( - [mock.call("2016-08-30 21:53:25.972325+00:00")]) - - def test_parse_logline_returns_event_for_amazon_linux_2_line(self): - line = ( - "Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start:" - " init-local/check-cache: attempting to read from cache [check]") - # Generate the expected value using `datetime`, so that TZ - # determination is consistent with the code under test. - timestamp_dt = datetime.strptime( - "Apr 30 19:39:11", "%b %d %H:%M:%S" - ).replace(year=datetime.now().year) - expected = { - 'description': 'attempting to read from cache [check]', - 'event_type': 'start', - 'name': 'init-local/check-cache', - 'origin': 'cloudinit', - 'timestamp': timestamp_dt.timestamp()} - self.assertEqual(expected, parse_ci_logline(line)) - - -SAMPLE_LOGS = dedent("""\ -Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]:\ - Cloud-init v. 0.7.8 running 'init-local' at Thu, 03 Nov 2016\ - 06:51:06 +0000. Up 1.0 seconds. 
-2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT] handlers.py[DEBUG]: finish:\ - modules-final: SUCCESS: running modules for final -""") - - -class TestDumpEvents(CiTestCase): - maxDiff = None - - @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date") - def test_dump_events_with_rawdata(self, m_parse_from_date): - """Rawdata is split and parsed into a tuple of events and data""" - m_parse_from_date.return_value = "1472594005.972" - events, data = dump_events(rawdata=SAMPLE_LOGS) - expected_data = SAMPLE_LOGS.splitlines() - self.assertEqual( - [mock.call("2016-08-30 21:53:25.972325+00:00")], - m_parse_from_date.call_args_list) - self.assertEqual(expected_data, data) - year = datetime.now().year - dt1 = datetime.strptime( - 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y') - timestamp1 = float(dt1.strftime('%s.%f')) - expected_events = [{ - 'description': 'starting search for local datasources', - 'event_type': 'start', - 'name': 'init-local', - 'origin': 'cloudinit', - 'timestamp': timestamp1}, { - 'description': 'running modules for final', - 'event_type': 'finish', - 'name': 'modules-final', - 'origin': 'cloudinit', - 'result': 'SUCCESS', - 'timestamp': 1472594005.972}] - self.assertEqual(expected_events, events) - - @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date") - def test_dump_events_with_cisource(self, m_parse_from_date): - """Cisource file is read and parsed into a tuple of events and data.""" - tmpfile = self.tmp_path('logfile') - write_file(tmpfile, SAMPLE_LOGS) - m_parse_from_date.return_value = 1472594005.972 - - events, data = dump_events(cisource=open(tmpfile)) - year = datetime.now().year - dt1 = datetime.strptime( - 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y') - timestamp1 = float(dt1.strftime('%s.%f')) - expected_events = [{ - 'description': 'starting search for local datasources', - 'event_type': 'start', - 'name': 'init-local', - 'origin': 'cloudinit', - 'timestamp': timestamp1}, { - 'description': 'running modules for final', - 'event_type': 'finish', - 'name': 'modules-final', - 'origin': 'cloudinit', - 'result': 'SUCCESS', - 'timestamp': 1472594005.972}] - self.assertEqual(expected_events, events) - self.assertEqual(SAMPLE_LOGS.splitlines(), [d.strip() for d in data]) - m_parse_from_date.assert_has_calls( - [mock.call("2016-08-30 21:53:25.972325+00:00")]) diff --git a/cloudinit/cmd/devel/tests/__init__.py b/cloudinit/cmd/devel/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py deleted file mode 100644 index ddfd58e1..00000000 --- a/cloudinit/cmd/devel/tests/test_logs.py +++ /dev/null @@ -1,167 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
- -from datetime import datetime -import os -from io import StringIO - -from cloudinit.cmd.devel import logs -from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE -from cloudinit.tests.helpers import ( - FilesystemMockingTestCase, mock, wrap_and_call) -from cloudinit.subp import subp -from cloudinit.util import ensure_dir, load_file, write_file - - -@mock.patch('cloudinit.cmd.devel.logs.os.getuid') -class TestCollectLogs(FilesystemMockingTestCase): - - def setUp(self): - super(TestCollectLogs, self).setUp() - self.new_root = self.tmp_dir() - self.run_dir = self.tmp_path('run', self.new_root) - - def test_collect_logs_with_userdata_requires_root_user(self, m_getuid): - """collect-logs errors when non-root user collects userdata .""" - m_getuid.return_value = 100 # non-root - output_tarfile = self.tmp_path('logs.tgz') - with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: - self.assertEqual( - 1, logs.collect_logs(output_tarfile, include_userdata=True)) - self.assertEqual( - 'To include userdata, root user is required.' - ' Try sudo cloud-init collect-logs\n', - m_stderr.getvalue()) - - def test_collect_logs_creates_tarfile(self, m_getuid): - """collect-logs creates a tarfile with all related cloud-init info.""" - m_getuid.return_value = 100 - log1 = self.tmp_path('cloud-init.log', self.new_root) - write_file(log1, 'cloud-init-log') - log2 = self.tmp_path('cloud-init-output.log', self.new_root) - write_file(log2, 'cloud-init-output-log') - ensure_dir(self.run_dir) - write_file(self.tmp_path('results.json', self.run_dir), 'results') - write_file(self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir), - 'sensitive') - output_tarfile = self.tmp_path('logs.tgz') - - date = datetime.utcnow().date().strftime('%Y-%m-%d') - date_logdir = 'cloud-init-logs-{0}'.format(date) - - version_out = '/usr/bin/cloud-init 18.2fake\n' - expected_subp = { - ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'): - '0.7fake\n', - ('cloud-init', '--version'): version_out, - ('dmesg',): 'dmesg-out\n', - ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n', - ('tar', 'czvf', output_tarfile, date_logdir): '' - } - - def fake_subp(cmd): - cmd_tuple = tuple(cmd) - if cmd_tuple not in expected_subp: - raise AssertionError( - 'Unexpected command provided to subp: {0}'.format(cmd)) - if cmd == ['tar', 'czvf', output_tarfile, date_logdir]: - subp(cmd) # Pass through tar cmd so we can check output - return expected_subp[cmd_tuple], '' - - fake_stderr = mock.MagicMock() - - wrap_and_call( - 'cloudinit.cmd.devel.logs', - {'subp': {'side_effect': fake_subp}, - 'sys.stderr': {'new': fake_stderr}, - 'CLOUDINIT_LOGS': {'new': [log1, log2]}, - 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}}, - logs.collect_logs, output_tarfile, include_userdata=False) - # unpack the tarfile and check file contents - subp(['tar', 'zxvf', output_tarfile, '-C', self.new_root]) - out_logdir = self.tmp_path(date_logdir, self.new_root) - self.assertFalse( - os.path.exists( - os.path.join(out_logdir, 'run', 'cloud-init', - INSTANCE_JSON_SENSITIVE_FILE)), - 'Unexpected file found: %s' % INSTANCE_JSON_SENSITIVE_FILE) - self.assertEqual( - '0.7fake\n', - load_file(os.path.join(out_logdir, 'dpkg-version'))) - self.assertEqual(version_out, - load_file(os.path.join(out_logdir, 'version'))) - self.assertEqual( - 'cloud-init-log', - load_file(os.path.join(out_logdir, 'cloud-init.log'))) - self.assertEqual( - 'cloud-init-output-log', - load_file(os.path.join(out_logdir, 'cloud-init-output.log'))) - self.assertEqual( - 
'dmesg-out\n', - load_file(os.path.join(out_logdir, 'dmesg.txt'))) - self.assertEqual( - 'journal-out\n', - load_file(os.path.join(out_logdir, 'journal.txt'))) - self.assertEqual( - 'results', - load_file( - os.path.join(out_logdir, 'run', 'cloud-init', 'results.json'))) - fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile) - - def test_collect_logs_includes_optional_userdata(self, m_getuid): - """collect-logs include userdata when --include-userdata is set.""" - m_getuid.return_value = 0 - log1 = self.tmp_path('cloud-init.log', self.new_root) - write_file(log1, 'cloud-init-log') - log2 = self.tmp_path('cloud-init-output.log', self.new_root) - write_file(log2, 'cloud-init-output-log') - userdata = self.tmp_path('user-data.txt', self.new_root) - write_file(userdata, 'user-data') - ensure_dir(self.run_dir) - write_file(self.tmp_path('results.json', self.run_dir), 'results') - write_file(self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir), - 'sensitive') - output_tarfile = self.tmp_path('logs.tgz') - - date = datetime.utcnow().date().strftime('%Y-%m-%d') - date_logdir = 'cloud-init-logs-{0}'.format(date) - - version_out = '/usr/bin/cloud-init 18.2fake\n' - expected_subp = { - ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'): - '0.7fake', - ('cloud-init', '--version'): version_out, - ('dmesg',): 'dmesg-out\n', - ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n', - ('tar', 'czvf', output_tarfile, date_logdir): '' - } - - def fake_subp(cmd): - cmd_tuple = tuple(cmd) - if cmd_tuple not in expected_subp: - raise AssertionError( - 'Unexpected command provided to subp: {0}'.format(cmd)) - if cmd == ['tar', 'czvf', output_tarfile, date_logdir]: - subp(cmd) # Pass through tar cmd so we can check output - return expected_subp[cmd_tuple], '' - - fake_stderr = mock.MagicMock() - - wrap_and_call( - 'cloudinit.cmd.devel.logs', - {'subp': {'side_effect': fake_subp}, - 'sys.stderr': {'new': fake_stderr}, - 'CLOUDINIT_LOGS': {'new': [log1, log2]}, - 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}, - 'USER_DATA_FILE': {'new': userdata}}, - logs.collect_logs, output_tarfile, include_userdata=True) - # unpack the tarfile and check file contents - subp(['tar', 'zxvf', output_tarfile, '-C', self.new_root]) - out_logdir = self.tmp_path(date_logdir, self.new_root) - self.assertEqual( - 'user-data', - load_file(os.path.join(out_logdir, 'user-data.txt'))) - self.assertEqual( - 'sensitive', - load_file(os.path.join(out_logdir, 'run', 'cloud-init', - INSTANCE_JSON_SENSITIVE_FILE))) - fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile) diff --git a/cloudinit/cmd/devel/tests/test_render.py b/cloudinit/cmd/devel/tests/test_render.py deleted file mode 100644 index a7fcf2ce..00000000 --- a/cloudinit/cmd/devel/tests/test_render.py +++ /dev/null @@ -1,144 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
-
-import os
-from io import StringIO
-
-from collections import namedtuple
-from cloudinit.cmd.devel import render
-from cloudinit.helpers import Paths
-from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE
-from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJinja
-from cloudinit.util import ensure_dir, write_file
-
-
-class TestRender(CiTestCase):
-
-    with_logs = True
-
-    args = namedtuple('renderargs', 'user_data instance_data debug')
-
-    def setUp(self):
-        super(TestRender, self).setUp()
-        self.tmp = self.tmp_dir()
-
-    def test_handle_args_error_on_missing_user_data(self):
-        """When user_data file path does not exist, log an error."""
-        absent_file = self.tmp_path('user-data', dir=self.tmp)
-        instance_data = self.tmp_path('instance-data', dir=self.tmp)
-        write_file(instance_data, '{}')
-        args = self.args(
-            user_data=absent_file, instance_data=instance_data, debug=False)
-        with mock.patch('sys.stderr', new_callable=StringIO):
-            self.assertEqual(1, render.handle_args('anyname', args))
-        self.assertIn(
-            'Missing user-data file: %s' % absent_file,
-            self.logs.getvalue())
-
-    def test_handle_args_error_on_missing_instance_data(self):
-        """When instance_data file path does not exist, log an error."""
-        user_data = self.tmp_path('user-data', dir=self.tmp)
-        absent_file = self.tmp_path('instance-data', dir=self.tmp)
-        args = self.args(
-            user_data=user_data, instance_data=absent_file, debug=False)
-        with mock.patch('sys.stderr', new_callable=StringIO):
-            self.assertEqual(1, render.handle_args('anyname', args))
-        self.assertIn(
-            'Missing instance-data.json file: %s' % absent_file,
-            self.logs.getvalue())
-
-    def test_handle_args_defaults_instance_data(self):
-        """When no instance_data argument, default to configured run_dir."""
-        user_data = self.tmp_path('user-data', dir=self.tmp)
-        run_dir = self.tmp_path('run_dir', dir=self.tmp)
-        ensure_dir(run_dir)
-        paths = Paths({'run_dir': run_dir})
-        self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths')
-        self.m_paths.return_value = paths
-        args = self.args(
-            user_data=user_data, instance_data=None, debug=False)
-        with mock.patch('sys.stderr', new_callable=StringIO):
-            self.assertEqual(1, render.handle_args('anyname', args))
-        json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
-        self.assertIn(
-            'Missing instance-data.json file: %s' % json_file,
-            self.logs.getvalue())
-
-    def test_handle_args_root_fallback_from_sensitive_instance_data(self):
-        """When root and sensitive.json is missing, fall back to redacted."""
-        user_data = self.tmp_path('user-data', dir=self.tmp)
-        run_dir = self.tmp_path('run_dir', dir=self.tmp)
-        ensure_dir(run_dir)
-        paths = Paths({'run_dir': run_dir})
-        self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths')
-        self.m_paths.return_value = paths
-        args = self.args(
-            user_data=user_data, instance_data=None, debug=False)
-        with mock.patch('sys.stderr', new_callable=StringIO):
-            with mock.patch('os.getuid') as m_getuid:
-                m_getuid.return_value = 0
-                self.assertEqual(1, render.handle_args('anyname', args))
-        json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
-        json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
-        self.assertIn(
-            'WARNING: Missing root-readable %s. 
Using redacted %s' % ( - json_sensitive, json_file), self.logs.getvalue()) - self.assertIn( - 'ERROR: Missing instance-data.json file: %s' % json_file, - self.logs.getvalue()) - - def test_handle_args_root_uses_sensitive_instance_data(self): - """When root user, and no instance-data arg, use sensitive.json.""" - user_data = self.tmp_path('user-data', dir=self.tmp) - write_file(user_data, '##template: jinja\nrendering: {{ my_var }}') - run_dir = self.tmp_path('run_dir', dir=self.tmp) - ensure_dir(run_dir) - json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE) - write_file(json_sensitive, '{"my-var": "jinja worked"}') - paths = Paths({'run_dir': run_dir}) - self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths') - self.m_paths.return_value = paths - args = self.args( - user_data=user_data, instance_data=None, debug=False) - with mock.patch('sys.stderr', new_callable=StringIO): - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - with mock.patch('os.getuid') as m_getuid: - m_getuid.return_value = 0 - self.assertEqual(0, render.handle_args('anyname', args)) - self.assertIn('rendering: jinja worked', m_stdout.getvalue()) - - @skipUnlessJinja() - def test_handle_args_renders_instance_data_vars_in_template(self): - """If user_data file is a jinja template render instance-data vars.""" - user_data = self.tmp_path('user-data', dir=self.tmp) - write_file(user_data, '##template: jinja\nrendering: {{ my_var }}') - instance_data = self.tmp_path('instance-data', dir=self.tmp) - write_file(instance_data, '{"my-var": "jinja worked"}') - args = self.args( - user_data=user_data, instance_data=instance_data, debug=True) - with mock.patch('sys.stderr', new_callable=StringIO) as m_console_err: - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - self.assertEqual(0, render.handle_args('anyname', args)) - self.assertIn( - 'DEBUG: Converted jinja variables\n{', self.logs.getvalue()) - self.assertIn( - 'DEBUG: Converted jinja variables\n{', m_console_err.getvalue()) - self.assertEqual('rendering: jinja worked', m_stdout.getvalue()) - - @skipUnlessJinja() - def test_handle_args_warns_and_gives_up_on_invalid_jinja_operation(self): - """If user_data file has invalid jinja operations log warnings.""" - user_data = self.tmp_path('user-data', dir=self.tmp) - write_file(user_data, '##template: jinja\nrendering: {{ my-var }}') - instance_data = self.tmp_path('instance-data', dir=self.tmp) - write_file(instance_data, '{"my-var": "jinja worked"}') - args = self.args( - user_data=user_data, instance_data=instance_data, debug=True) - with mock.patch('sys.stderr', new_callable=StringIO): - self.assertEqual(1, render.handle_args('anyname', args)) - self.assertIn( - 'WARNING: Ignoring jinja template for %s: Undefined jinja' - ' variable: "my-var". Jinja tried subtraction. Perhaps you meant' - ' "my_var"?' % user_data, - self.logs.getvalue()) - -# vi: ts=4 expandtab diff --git a/cloudinit/cmd/tests/__init__.py b/cloudinit/cmd/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py deleted file mode 100644 index a848a810..00000000 --- a/cloudinit/cmd/tests/test_clean.py +++ /dev/null @@ -1,178 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
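# The render tests above rely on cloud-init exposing underscore aliases for
# hyphenated instance-data keys, which is why '{"my-var": "jinja worked"}'
# satisfies a '{{ my_var }}' template while a literal '{{ my-var }}' parses
# as jinja subtraction and fails. A standalone illustration of that aliasing
# (hypothetical helper name, plain jinja2 instead of cloud-init's renderer):
#
#     import jinja2
#
#     def render_with_aliases(template, data):
#         aliased = dict(data)
#         aliased.update({k.replace('-', '_'): v for k, v in data.items()})
#         return jinja2.Template(template).render(aliased)
#
#     render_with_aliases('rendering: {{ my_var }}',
#                         {'my-var': 'jinja worked'})
#     # -> 'rendering: jinja worked'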
- -from cloudinit.cmd import clean -from cloudinit.util import ensure_dir, sym_link, write_file -from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock -from collections import namedtuple -import os -from io import StringIO - -mypaths = namedtuple('MyPaths', 'cloud_dir') - - -class TestClean(CiTestCase): - - def setUp(self): - super(TestClean, self).setUp() - self.new_root = self.tmp_dir() - self.artifact_dir = self.tmp_path('artifacts', self.new_root) - self.log1 = self.tmp_path('cloud-init.log', self.new_root) - self.log2 = self.tmp_path('cloud-init-output.log', self.new_root) - - class FakeInit(object): - cfg = {'def_log_file': self.log1, - 'output': {'all': '|tee -a {0}'.format(self.log2)}} - # Ensure cloud_dir has a trailing slash, to match real behaviour - paths = mypaths(cloud_dir='{}/'.format(self.artifact_dir)) - - def __init__(self, ds_deps): - pass - - def read_cfg(self): - pass - - self.init_class = FakeInit - - def test_remove_artifacts_removes_logs(self): - """remove_artifacts removes logs when remove_logs is True.""" - write_file(self.log1, 'cloud-init-log') - write_file(self.log2, 'cloud-init-output-log') - - self.assertFalse( - os.path.exists(self.artifact_dir), 'Unexpected artifacts dir') - retcode = wrap_and_call( - 'cloudinit.cmd.clean', - {'Init': {'side_effect': self.init_class}}, - clean.remove_artifacts, remove_logs=True) - self.assertFalse(os.path.exists(self.log1), 'Unexpected file') - self.assertFalse(os.path.exists(self.log2), 'Unexpected file') - self.assertEqual(0, retcode) - - def test_remove_artifacts_preserves_logs(self): - """remove_artifacts leaves logs when remove_logs is False.""" - write_file(self.log1, 'cloud-init-log') - write_file(self.log2, 'cloud-init-output-log') - - retcode = wrap_and_call( - 'cloudinit.cmd.clean', - {'Init': {'side_effect': self.init_class}}, - clean.remove_artifacts, remove_logs=False) - self.assertTrue(os.path.exists(self.log1), 'Missing expected file') - self.assertTrue(os.path.exists(self.log2), 'Missing expected file') - self.assertEqual(0, retcode) - - def test_remove_artifacts_removes_unlinks_symlinks(self): - """remove_artifacts cleans artifacts dir unlinking any symlinks.""" - dir1 = os.path.join(self.artifact_dir, 'dir1') - ensure_dir(dir1) - symlink = os.path.join(self.artifact_dir, 'mylink') - sym_link(dir1, symlink) - - retcode = wrap_and_call( - 'cloudinit.cmd.clean', - {'Init': {'side_effect': self.init_class}}, - clean.remove_artifacts, remove_logs=False) - self.assertEqual(0, retcode) - for path in (dir1, symlink): - self.assertFalse( - os.path.exists(path), - 'Unexpected {0} dir'.format(path)) - - def test_remove_artifacts_removes_artifacts_skipping_seed(self): - """remove_artifacts cleans artifacts dir with exception of seed dir.""" - dirs = [ - self.artifact_dir, - os.path.join(self.artifact_dir, 'seed'), - os.path.join(self.artifact_dir, 'dir1'), - os.path.join(self.artifact_dir, 'dir2')] - for _dir in dirs: - ensure_dir(_dir) - - retcode = wrap_and_call( - 'cloudinit.cmd.clean', - {'Init': {'side_effect': self.init_class}}, - clean.remove_artifacts, remove_logs=False) - self.assertEqual(0, retcode) - for expected_dir in dirs[:2]: - self.assertTrue( - os.path.exists(expected_dir), - 'Missing {0} dir'.format(expected_dir)) - for deleted_dir in dirs[2:]: - self.assertFalse( - os.path.exists(deleted_dir), - 'Unexpected {0} dir'.format(deleted_dir)) - - def test_remove_artifacts_removes_artifacts_removes_seed(self): - """remove_artifacts removes seed dir when remove_seed is True.""" - dirs = [ - 
self.artifact_dir,
-            os.path.join(self.artifact_dir, 'seed'),
-            os.path.join(self.artifact_dir, 'dir1'),
-            os.path.join(self.artifact_dir, 'dir2')]
-        for _dir in dirs:
-            ensure_dir(_dir)
-
-        retcode = wrap_and_call(
-            'cloudinit.cmd.clean',
-            {'Init': {'side_effect': self.init_class}},
-            clean.remove_artifacts, remove_logs=False, remove_seed=True)
-        self.assertEqual(0, retcode)
-        self.assertTrue(
-            os.path.exists(self.artifact_dir), 'Missing artifact dir')
-        for deleted_dir in dirs[1:]:
-            self.assertFalse(
-                os.path.exists(deleted_dir),
-                'Unexpected {0} dir'.format(deleted_dir))
-
-    def test_remove_artifacts_returns_one_on_errors(self):
-        """remove_artifacts returns non-zero on failure and prints an error."""
-        ensure_dir(self.artifact_dir)
-        ensure_dir(os.path.join(self.artifact_dir, 'dir1'))
-
-        with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
-            retcode = wrap_and_call(
-                'cloudinit.cmd.clean',
-                {'del_dir': {'side_effect': OSError('oops')},
-                 'Init': {'side_effect': self.init_class}},
-                clean.remove_artifacts, remove_logs=False)
-        self.assertEqual(1, retcode)
-        self.assertEqual(
-            'ERROR: Could not remove %s/dir1: oops\n' % self.artifact_dir,
-            m_stderr.getvalue())
-
-    def test_handle_clean_args_reboots(self):
-        """handle_clean_args reboots when the reboot arg is provided."""
-
-        called_cmds = []
-
-        def fake_subp(cmd, capture):
-            called_cmds.append((cmd, capture))
-            return '', ''
-
-        myargs = namedtuple('MyArgs', 'remove_logs remove_seed reboot')
-        cmdargs = myargs(remove_logs=False, remove_seed=False, reboot=True)
-        retcode = wrap_and_call(
-            'cloudinit.cmd.clean',
-            {'subp': {'side_effect': fake_subp},
-             'Init': {'side_effect': self.init_class}},
-            clean.handle_clean_args, name='does not matter', args=cmdargs)
-        self.assertEqual(0, retcode)
-        self.assertEqual(
-            [(['shutdown', '-r', 'now'], False)], called_cmds)
-
-    def test_status_main(self):
-        '''clean.main can be run as a standalone script.'''
-        write_file(self.log1, 'cloud-init-log')
-        with self.assertRaises(SystemExit) as context_manager:
-            wrap_and_call(
-                'cloudinit.cmd.clean',
-                {'Init': {'side_effect': self.init_class},
-                 'sys.argv': {'new': ['clean', '--logs']}},
-                clean.main)
-
-        self.assertEqual(0, context_manager.exception.code)
-        self.assertFalse(
-            os.path.exists(self.log1), 'Unexpected log {0}'.format(self.log1))
-
-
-# vi: ts=4 expandtab syntax=python
diff --git a/cloudinit/cmd/tests/test_cloud_id.py b/cloudinit/cmd/tests/test_cloud_id.py
deleted file mode 100644
index 3f3727fd..00000000
--- a/cloudinit/cmd/tests/test_cloud_id.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
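# The region-lookup tests below pin the canonical cloud-id rules: for known
# clouds, a region prefix refines the reported id (aws + cn-north-1 becomes
# aws-china), and an 'unknown' cloud_name falls back to the platform. A
# simplified, illustrative lookup (the prefix table here is a hypothetical
# subset, not the real mapping):
#
#     REGION_PREFIXES = {'cn-': 'aws-china', 'us-gov-': 'aws-gov'}
#
#     def canonical_cloud_id(cloud_name, region, platform):
#         if cloud_name == 'aws':
#             for prefix, refined in REGION_PREFIXES.items():
#                 if region.startswith(prefix):
#                     return refined
#         return platform if cloud_name == 'unknown' else cloud_name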
- -"""Tests for cloud-id command line utility.""" - -from cloudinit import util -from collections import namedtuple -from io import StringIO - -from cloudinit.cmd import cloud_id - -from cloudinit.tests.helpers import CiTestCase, mock - - -class TestCloudId(CiTestCase): - - args = namedtuple('cloudidargs', ('instance_data json long')) - - def setUp(self): - super(TestCloudId, self).setUp() - self.tmp = self.tmp_dir() - self.instance_data = self.tmp_path('instance-data.json', dir=self.tmp) - - def test_cloud_id_arg_parser_defaults(self): - """Validate the argument defaults when not provided by the end-user.""" - cmd = ['cloud-id'] - with mock.patch('sys.argv', cmd): - args = cloud_id.get_parser().parse_args() - self.assertEqual( - '/run/cloud-init/instance-data.json', - args.instance_data) - self.assertEqual(False, args.long) - self.assertEqual(False, args.json) - - def test_cloud_id_arg_parse_overrides(self): - """Override argument defaults by specifying values for each param.""" - util.write_file(self.instance_data, '{}') - cmd = ['cloud-id', '--instance-data', self.instance_data, '--long', - '--json'] - with mock.patch('sys.argv', cmd): - args = cloud_id.get_parser().parse_args() - self.assertEqual(self.instance_data, args.instance_data) - self.assertEqual(True, args.long) - self.assertEqual(True, args.json) - - def test_cloud_id_missing_instance_data_json(self): - """Exit error when the provided instance-data.json does not exist.""" - cmd = ['cloud-id', '--instance-data', self.instance_data] - with mock.patch('sys.argv', cmd): - with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: - with self.assertRaises(SystemExit) as context_manager: - cloud_id.main() - self.assertEqual(1, context_manager.exception.code) - self.assertIn( - "ERROR: File not found '%s'" % self.instance_data, - m_stderr.getvalue()) - - def test_cloud_id_non_json_instance_data(self): - """Exit error when the provided instance-data.json is not json.""" - cmd = ['cloud-id', '--instance-data', self.instance_data] - util.write_file(self.instance_data, '{') - with mock.patch('sys.argv', cmd): - with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: - with self.assertRaises(SystemExit) as context_manager: - cloud_id.main() - self.assertEqual(1, context_manager.exception.code) - self.assertIn( - "ERROR: File '%s' is not valid json." 
% self.instance_data, - m_stderr.getvalue()) - - def test_cloud_id_from_cloud_name_in_instance_data(self): - """Report canonical cloud-id from cloud_name in instance-data.""" - util.write_file( - self.instance_data, - '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}') - cmd = ['cloud-id', '--instance-data', self.instance_data] - with mock.patch('sys.argv', cmd): - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - with self.assertRaises(SystemExit) as context_manager: - cloud_id.main() - self.assertEqual(0, context_manager.exception.code) - self.assertEqual("mycloud\n", m_stdout.getvalue()) - - def test_cloud_id_long_name_from_instance_data(self): - """Report long cloud-id format from cloud_name and region.""" - util.write_file( - self.instance_data, - '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}') - cmd = ['cloud-id', '--instance-data', self.instance_data, '--long'] - with mock.patch('sys.argv', cmd): - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - with self.assertRaises(SystemExit) as context_manager: - cloud_id.main() - self.assertEqual(0, context_manager.exception.code) - self.assertEqual("mycloud\tsomereg\n", m_stdout.getvalue()) - - def test_cloud_id_lookup_from_instance_data_region(self): - """Report discovered canonical cloud_id when region lookup matches.""" - util.write_file( - self.instance_data, - '{"v1": {"cloud_name": "aws", "region": "cn-north-1",' - ' "platform": "ec2"}}') - cmd = ['cloud-id', '--instance-data', self.instance_data, '--long'] - with mock.patch('sys.argv', cmd): - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - with self.assertRaises(SystemExit) as context_manager: - cloud_id.main() - self.assertEqual(0, context_manager.exception.code) - self.assertEqual("aws-china\tcn-north-1\n", m_stdout.getvalue()) - - def test_cloud_id_lookup_json_instance_data_adds_cloud_id_to_json(self): - """Report v1 instance-data content with cloud_id when --json set.""" - util.write_file( - self.instance_data, - '{"v1": {"cloud_name": "unknown", "region": "dfw",' - ' "platform": "openstack", "public_ssh_keys": []}}') - expected = util.json_dumps({ - 'cloud_id': 'openstack', 'cloud_name': 'unknown', - 'platform': 'openstack', 'public_ssh_keys': [], 'region': 'dfw'}) - cmd = ['cloud-id', '--instance-data', self.instance_data, '--json'] - with mock.patch('sys.argv', cmd): - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - with self.assertRaises(SystemExit) as context_manager: - cloud_id.main() - self.assertEqual(0, context_manager.exception.code) - self.assertEqual(expected + '\n', m_stdout.getvalue()) - -# vi: ts=4 expandtab diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py deleted file mode 100644 index 2e380848..00000000 --- a/cloudinit/cmd/tests/test_main.py +++ /dev/null @@ -1,188 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
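# The TestShouldBringUpInterfaces table at the end of this file pins the
# expected decision logic for _should_bring_up_interfaces: interfaces come up
# only when the config does not disable network activation and the run is not
# local. An equivalent one-line sketch of that truth table (not the real
# implementation):
#
#     def should_bring_up_interfaces(cfg_disable, args_local):
#         return not (cfg_disable or args_local)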
- -from collections import namedtuple -import copy -import os -from io import StringIO -from unittest import mock - -import pytest - -from cloudinit.cmd import main -from cloudinit import safeyaml -from cloudinit.util import ( - ensure_dir, load_file, write_file) -from cloudinit.tests.helpers import ( - FilesystemMockingTestCase, wrap_and_call) - -mypaths = namedtuple('MyPaths', 'run_dir') -myargs = namedtuple('MyArgs', 'debug files force local reporter subcommand') - - -class TestMain(FilesystemMockingTestCase): - with_logs = True - allowed_subp = False - - def setUp(self): - super(TestMain, self).setUp() - self.new_root = self.tmp_dir() - self.cloud_dir = self.tmp_path('var/lib/cloud/', dir=self.new_root) - os.makedirs(self.cloud_dir) - self.replicateTestRoot('simple_ubuntu', self.new_root) - self.cfg = { - 'datasource_list': ['None'], - 'runcmd': ['ls /etc'], # test ALL_DISTROS - 'system_info': {'paths': {'cloud_dir': self.cloud_dir, - 'run_dir': self.new_root}}, - 'write_files': [ - { - 'path': '/etc/blah.ini', - 'content': 'blah', - 'permissions': 0o755, - }, - ], - 'cloud_init_modules': ['write-files', 'runcmd'], - } - cloud_cfg = safeyaml.dumps(self.cfg) - ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) - self.cloud_cfg_file = os.path.join( - self.new_root, 'etc', 'cloud', 'cloud.cfg') - write_file(self.cloud_cfg_file, cloud_cfg) - self.patchOS(self.new_root) - self.patchUtils(self.new_root) - self.stderr = StringIO() - self.patchStdoutAndStderr(stderr=self.stderr) - - def test_main_init_run_net_stops_on_file_no_net(self): - """When no-net file is present, main_init does not process modules.""" - stop_file = os.path.join(self.cloud_dir, 'data', 'no-net') # stop file - write_file(stop_file, '') - cmdargs = myargs( - debug=False, files=None, force=False, local=False, reporter=None, - subcommand='init') - (_item1, item2) = wrap_and_call( - 'cloudinit.cmd.main', - {'util.close_stdin': True, - 'netinfo.debug_info': 'my net debug info', - 'util.fixup_output': ('outfmt', 'errfmt')}, - main.main_init, 'init', cmdargs) - # We should not run write_files module - self.assertFalse( - os.path.exists(os.path.join(self.new_root, 'etc/blah.ini')), - 'Unexpected run of write_files module produced blah.ini') - self.assertEqual([], item2) - # Instancify is called - instance_id_path = 'var/lib/cloud/data/instance-id' - self.assertFalse( - os.path.exists(os.path.join(self.new_root, instance_id_path)), - 'Unexpected call to datasource.instancify produced instance-id') - expected_logs = [ - "Exiting. 
stop file ['{stop_file}'] existed\n".format( - stop_file=stop_file), - 'my net debug info' # netinfo.debug_info - ] - for log in expected_logs: - self.assertIn(log, self.stderr.getvalue()) - - def test_main_init_run_net_runs_modules(self): - """Modules like write_files are run in 'net' mode.""" - cmdargs = myargs( - debug=False, files=None, force=False, local=False, reporter=None, - subcommand='init') - (_item1, item2) = wrap_and_call( - 'cloudinit.cmd.main', - {'util.close_stdin': True, - 'netinfo.debug_info': 'my net debug info', - 'util.fixup_output': ('outfmt', 'errfmt')}, - main.main_init, 'init', cmdargs) - self.assertEqual([], item2) - # Instancify is called - instance_id_path = 'var/lib/cloud/data/instance-id' - self.assertEqual( - 'iid-datasource-none\n', - os.path.join(load_file( - os.path.join(self.new_root, instance_id_path)))) - # modules are run (including write_files) - self.assertEqual( - 'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini'))) - expected_logs = [ - 'network config is disabled by fallback', # apply_network_config - 'my net debug info', # netinfo.debug_info - 'no previous run detected' - ] - for log in expected_logs: - self.assertIn(log, self.stderr.getvalue()) - - def test_main_init_run_net_calls_set_hostname_when_metadata_present(self): - """When local-hostname metadata is present, call cc_set_hostname.""" - self.cfg['datasource'] = { - 'None': {'metadata': {'local-hostname': 'md-hostname'}}} - cloud_cfg = safeyaml.dumps(self.cfg) - write_file(self.cloud_cfg_file, cloud_cfg) - cmdargs = myargs( - debug=False, files=None, force=False, local=False, reporter=None, - subcommand='init') - - def set_hostname(name, cfg, cloud, log, args): - self.assertEqual('set-hostname', name) - updated_cfg = copy.deepcopy(self.cfg) - updated_cfg.update( - {'def_log_file': '/var/log/cloud-init.log', - 'log_cfgs': [], - 'syslog_fix_perms': [ - 'syslog:adm', 'root:adm', 'root:wheel', 'root:root' - ], - 'vendor_data': {'enabled': True, 'prefix': []}, - 'vendor_data2': {'enabled': True, 'prefix': []}}) - updated_cfg.pop('system_info') - - self.assertEqual(updated_cfg, cfg) - self.assertEqual(main.LOG, log) - self.assertIsNone(args) - - (_item1, item2) = wrap_and_call( - 'cloudinit.cmd.main', - {'util.close_stdin': True, - 'netinfo.debug_info': 'my net debug info', - 'cc_set_hostname.handle': {'side_effect': set_hostname}, - 'util.fixup_output': ('outfmt', 'errfmt')}, - main.main_init, 'init', cmdargs) - self.assertEqual([], item2) - # Instancify is called - instance_id_path = 'var/lib/cloud/data/instance-id' - self.assertEqual( - 'iid-datasource-none\n', - os.path.join(load_file( - os.path.join(self.new_root, instance_id_path)))) - # modules are run (including write_files) - self.assertEqual( - 'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini'))) - expected_logs = [ - 'network config is disabled by fallback', # apply_network_config - 'my net debug info', # netinfo.debug_info - 'no previous run detected' - ] - for log in expected_logs: - self.assertIn(log, self.stderr.getvalue()) - - -class TestShouldBringUpInterfaces: - @pytest.mark.parametrize('cfg_disable,args_local,expected', [ - (True, True, False), - (True, False, False), - (False, True, False), - (False, False, True), - ]) - def test_should_bring_up_interfaces( - self, cfg_disable, args_local, expected - ): - init = mock.Mock() - init.cfg = {'disable_network_activation': cfg_disable} - - args = mock.Mock() - args.local = args_local - - result = main._should_bring_up_interfaces(init, args) - assert result == 
expected - -# vi: ts=4 expandtab diff --git a/cloudinit/cmd/tests/test_query.py b/cloudinit/cmd/tests/test_query.py deleted file mode 100644 index d96c3945..00000000 --- a/cloudinit/cmd/tests/test_query.py +++ /dev/null @@ -1,392 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import errno -import gzip -from io import BytesIO -import json -from textwrap import dedent - -import pytest - -from collections import namedtuple -from cloudinit.cmd import query -from cloudinit.helpers import Paths -from cloudinit.sources import ( - REDACT_SENSITIVE_VALUE, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE) -from cloudinit.tests.helpers import mock - -from cloudinit.util import b64e, write_file - - -def _gzip_data(data): - with BytesIO() as iobuf: - with gzip.GzipFile(mode="wb", fileobj=iobuf) as gzfp: - gzfp.write(data) - return iobuf.getvalue() - - -@mock.patch("cloudinit.cmd.query.addLogHandlerCLI", lambda *args: "") -class TestQuery: - - args = namedtuple( - 'queryargs', - ('debug dump_all format instance_data list_keys user_data vendor_data' - ' varname')) - - def _setup_paths(self, tmpdir, ud_val=None, vd_val=None): - """Write userdata and vendordata into a tmpdir. - - Return: - 4-tuple : (paths, run_dir_path, userdata_path, vendordata_path) - """ - if ud_val: - user_data = tmpdir.join('user-data') - write_file(user_data.strpath, ud_val) - else: - user_data = None - if vd_val: - vendor_data = tmpdir.join('vendor-data') - write_file(vendor_data.strpath, vd_val) - else: - vendor_data = None - run_dir = tmpdir.join('run_dir') - run_dir.ensure_dir() - return ( - Paths({'run_dir': run_dir.strpath}), - run_dir, - user_data, - vendor_data - ) - - def test_handle_args_error_on_missing_param(self, caplog, capsys): - """Error when missing required parameters and print usage.""" - args = self.args( - debug=False, dump_all=False, format=None, instance_data=None, - list_keys=False, user_data=None, vendor_data=None, varname=None) - with mock.patch( - "cloudinit.cmd.query.addLogHandlerCLI", return_value="" - ) as m_cli_log: - assert 1 == query.handle_args('anyname', args) - expected_error = ( - 'Expected one of the options: --all, --format, --list-keys' - ' or varname\n') - assert expected_error in caplog.text - out, _err = capsys.readouterr() - assert 'usage: query' in out - assert 1 == m_cli_log.call_count - - @pytest.mark.parametrize( - "inst_data,varname,expected_error", ( - ( - '{"v1": {"key-2": "value-2"}}', - 'v1.absent_leaf', - "instance-data 'v1' has no 'absent_leaf'\n" - ), - ( - '{"v1": {"key-2": "value-2"}}', - 'absent_key', - "Undefined instance-data key 'absent_key'\n" - ), - ) - ) - def test_handle_args_error_on_invalid_vaname_paths( - self, inst_data, varname, expected_error, caplog, tmpdir - ): - """Error when varname is not a valid instance-data variable path.""" - instance_data = tmpdir.join('instance-data') - instance_data.write(inst_data) - args = self.args( - debug=False, dump_all=False, format=None, - instance_data=instance_data.strpath, - list_keys=False, user_data=None, vendor_data=None, varname=varname - ) - paths, _, _, _ = self._setup_paths(tmpdir) - with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths: - m_paths.return_value = paths - with mock.patch( - "cloudinit.cmd.query.addLogHandlerCLI", return_value="" - ): - assert 1 == query.handle_args('anyname', args) - assert expected_error in caplog.text - - def test_handle_args_error_on_missing_instance_data(self, caplog, tmpdir): - """When instance_data file path does not exist, 
log an error.""" - absent_fn = tmpdir.join('absent') - args = self.args( - debug=False, dump_all=True, format=None, - instance_data=absent_fn.strpath, - list_keys=False, user_data='ud', vendor_data='vd', varname=None) - assert 1 == query.handle_args('anyname', args) - - msg = 'Missing instance-data file: %s' % absent_fn - assert msg in caplog.text - - def test_handle_args_error_when_no_read_permission_instance_data( - self, caplog, tmpdir - ): - """When instance_data file is unreadable, log an error.""" - noread_fn = tmpdir.join('unreadable') - noread_fn.write('thou shall not pass') - args = self.args( - debug=False, dump_all=True, format=None, - instance_data=noread_fn.strpath, - list_keys=False, user_data='ud', vendor_data='vd', varname=None) - with mock.patch('cloudinit.cmd.query.util.load_file') as m_load: - m_load.side_effect = OSError(errno.EACCES, 'Not allowed') - assert 1 == query.handle_args('anyname', args) - msg = "No read permission on '%s'. Try sudo" % noread_fn - assert msg in caplog.text - - def test_handle_args_defaults_instance_data(self, caplog, tmpdir): - """When no instance_data argument, default to configured run_dir.""" - args = self.args( - debug=False, dump_all=True, format=None, instance_data=None, - list_keys=False, user_data=None, vendor_data=None, varname=None) - paths, run_dir, _, _ = self._setup_paths(tmpdir) - with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths: - m_paths.return_value = paths - assert 1 == query.handle_args('anyname', args) - json_file = run_dir.join(INSTANCE_JSON_FILE) - msg = 'Missing instance-data file: %s' % json_file.strpath - assert msg in caplog.text - - def test_handle_args_root_fallsback_to_instance_data(self, caplog, tmpdir): - """When no instance_data argument, root falls back to redacted json.""" - args = self.args( - debug=False, dump_all=True, format=None, instance_data=None, - list_keys=False, user_data=None, vendor_data=None, varname=None) - paths, run_dir, _, _ = self._setup_paths(tmpdir) - with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths: - m_paths.return_value = paths - with mock.patch('os.getuid') as m_getuid: - m_getuid.return_value = 0 - assert 1 == query.handle_args('anyname', args) - json_file = run_dir.join(INSTANCE_JSON_FILE) - sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE) - msg = ( - 'Missing root-readable %s. Using redacted %s instead.' 
% - ( - sensitive_file.strpath, json_file.strpath - ) - ) - assert msg in caplog.text - - @pytest.mark.parametrize( - 'ud_src,ud_expected,vd_src,vd_expected', - ( - ('hi mom', 'hi mom', 'hi pops', 'hi pops'), - ('ud'.encode('utf-8'), 'ud', 'vd'.encode('utf-8'), 'vd'), - (_gzip_data(b'ud'), 'ud', _gzip_data(b'vd'), 'vd'), - (_gzip_data('ud'.encode('utf-8')), 'ud', _gzip_data(b'vd'), 'vd'), - ) - ) - def test_handle_args_root_processes_user_data( - self, ud_src, ud_expected, vd_src, vd_expected, capsys, tmpdir - ): - """Support reading multiple user-data file content types""" - paths, run_dir, user_data, vendor_data = self._setup_paths( - tmpdir, ud_val=ud_src, vd_val=vd_src - ) - sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE) - sensitive_file.write('{"my-var": "it worked"}') - args = self.args( - debug=False, dump_all=True, format=None, instance_data=None, - list_keys=False, user_data=user_data.strpath, - vendor_data=vendor_data.strpath, varname=None) - with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths: - m_paths.return_value = paths - with mock.patch('os.getuid') as m_getuid: - m_getuid.return_value = 0 - assert 0 == query.handle_args('anyname', args) - out, _err = capsys.readouterr() - cmd_output = json.loads(out) - assert "it worked" == cmd_output['my-var'] - if ud_expected == "ci-b64:": - ud_expected = "ci-b64:{}".format(b64e(ud_src)) - if vd_expected == "ci-b64:": - vd_expected = "ci-b64:{}".format(b64e(vd_src)) - assert ud_expected == cmd_output['userdata'] - assert vd_expected == cmd_output['vendordata'] - - def test_handle_args_root_uses_instance_sensitive_data( - self, capsys, tmpdir - ): - """When no instance_data argument, root uses sensitive json.""" - paths, run_dir, user_data, vendor_data = self._setup_paths( - tmpdir, ud_val='ud', vd_val='vd' - ) - sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE) - sensitive_file.write('{"my-var": "it worked"}') - args = self.args( - debug=False, dump_all=True, format=None, instance_data=None, - list_keys=False, user_data=user_data.strpath, - vendor_data=vendor_data.strpath, varname=None) - with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths: - m_paths.return_value = paths - with mock.patch('os.getuid') as m_getuid: - m_getuid.return_value = 0 - assert 0 == query.handle_args('anyname', args) - expected = ( - '{\n "my-var": "it worked",\n ' - '"userdata": "ud",\n "vendordata": "vd"\n}\n' - ) - out, _err = capsys.readouterr() - assert expected == out - - def test_handle_args_dumps_all_instance_data(self, capsys, tmpdir): - """When --all is specified query will dump all instance data vars.""" - instance_data = tmpdir.join('instance-data') - instance_data.write('{"my-var": "it worked"}') - args = self.args( - debug=False, dump_all=True, format=None, - instance_data=instance_data.strpath, list_keys=False, - user_data='ud', vendor_data='vd', varname=None) - with mock.patch('os.getuid') as m_getuid: - m_getuid.return_value = 100 - assert 0 == query.handle_args('anyname', args) - expected = ( - '{\n "my-var": "it worked",\n "userdata": "<%s> file:ud",\n' - ' "vendordata": "<%s> file:vd"\n}\n' % ( - REDACT_SENSITIVE_VALUE, REDACT_SENSITIVE_VALUE - ) - ) - out, _err = capsys.readouterr() - assert expected == out - - def test_handle_args_returns_top_level_varname(self, capsys, tmpdir): - """When the argument varname is passed, report its value.""" - instance_data = tmpdir.join('instance-data') - instance_data.write('{"my-var": "it worked"}') - args = self.args( - debug=False, dump_all=True, 
format=None,
-            instance_data=instance_data.strpath, list_keys=False,
-            user_data='ud', vendor_data='vd', varname='my_var')
-        with mock.patch('os.getuid') as m_getuid:
-            m_getuid.return_value = 100
-            assert 0 == query.handle_args('anyname', args)
-        out, _err = capsys.readouterr()
-        assert 'it worked\n' == out
-
-    @pytest.mark.parametrize(
-        'inst_data,varname,expected',
-        (
-            (
-                '{"v1": {"key-2": "value-2"}, "my-var": "it worked"}',
-                'v1.key_2',
-                'value-2\n'
-            ),
-            # Assert no jinja underscore-delimited aliases are reported on CLI
-            (
-                '{"v1": {"something-hyphenated": {"no.underscores":"x",'
-                ' "no-alias": "y"}}, "my-var": "it worked"}',
-                'v1.something_hyphenated',
-                '{\n "no-alias": "y",\n "no.underscores": "x"\n}\n'
-            ),
-        )
-    )
-    def test_handle_args_returns_nested_varname(
-        self, inst_data, varname, expected, capsys, tmpdir
-    ):
-        """Dot-delimited varname paths report the matching nested value."""
-        instance_data = tmpdir.join('instance-data')
-        instance_data.write(inst_data)
-        args = self.args(
-            debug=False, dump_all=False, format=None,
-            instance_data=instance_data.strpath, user_data='ud',
-            vendor_data='vd', list_keys=False, varname=varname)
-        with mock.patch('os.getuid') as m_getuid:
-            m_getuid.return_value = 100
-            assert 0 == query.handle_args('anyname', args)
-        out, _err = capsys.readouterr()
-        assert expected == out
-
-    def test_handle_args_returns_standardized_vars_to_top_level_aliases(
-        self, capsys, tmpdir
-    ):
-        """Any standardized vars under v# are promoted as top-level aliases."""
-        instance_data = tmpdir.join('instance-data')
-        instance_data.write(
-            '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
-            ' "top": "gun"}')
-        expected = dedent("""\
-            {
-             "top": "gun",
-             "userdata": "<redacted for non-root user> file:ud",
-             "v1": {
-              "v1_1": "val1.1"
-             },
-             "v1_1": "val1.1",
-             "v2": {
-              "v2_2": "val2.2"
-             },
-             "v2_2": "val2.2",
-             "vendordata": "<redacted for non-root user> file:vd"
-            }
-        """)
-        args = self.args(
-            debug=False, dump_all=True, format=None,
-            instance_data=instance_data.strpath, user_data='ud',
-            vendor_data='vd', list_keys=False, varname=None)
-        with mock.patch('os.getuid') as m_getuid:
-            m_getuid.return_value = 100
-            assert 0 == query.handle_args('anyname', args)
-        out, _err = capsys.readouterr()
-        assert expected == out
-
-    def test_handle_args_list_keys_sorts_top_level_keys_when_no_varname(
-        self, capsys, tmpdir
-    ):
-        """Sort all top-level keys when only --list-keys provided."""
-        instance_data = tmpdir.join('instance-data')
-        instance_data.write(
-            '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
-            ' "top": "gun"}')
-        expected = 'top\nuserdata\nv1\nv1_1\nv2\nv2_2\nvendordata\n'
-        args = self.args(
-            debug=False, dump_all=False, format=None,
-            instance_data=instance_data.strpath, list_keys=True,
-            user_data='ud', vendor_data='vd', varname=None)
-        with mock.patch('os.getuid') as m_getuid:
-            m_getuid.return_value = 100
-            assert 0 == query.handle_args('anyname', args)
-        out, _err = capsys.readouterr()
-        assert expected == out
-
-    def test_handle_args_list_keys_sorts_nested_keys_when_varname(
-        self, capsys, tmpdir
-    ):
-        """Sort all nested keys of varname object when --list-keys provided."""
-        instance_data = tmpdir.join('instance-data')
-        instance_data.write(
-            '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2":' +
-            ' {"v2_2": "val2.2"}, "top": "gun"}')
-        expected = 'v1_1\nv1_2\n'
-        args = self.args(
-            debug=False, dump_all=False, format=None,
-            instance_data=instance_data.strpath, list_keys=True,
-            user_data='ud', vendor_data='vd', varname='v1')
-        with mock.patch('os.getuid') as m_getuid:
-
m_getuid.return_value = 100 - assert 0 == query.handle_args('anyname', args) - out, _err = capsys.readouterr() - assert expected == out - - def test_handle_args_list_keys_errors_when_varname_is_not_a_dict( - self, caplog, tmpdir - ): - """Raise an error when --list-keys and varname specify a non-list.""" - instance_data = tmpdir.join('instance-data') - instance_data.write( - '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2": ' + - '{"v2_2": "val2.2"}, "top": "gun"}') - expected_error = "--list-keys provided but 'top' is not a dict" - args = self.args( - debug=False, dump_all=False, format=None, - instance_data=instance_data.strpath, list_keys=True, - user_data='ud', vendor_data='vd', varname='top') - with mock.patch('os.getuid') as m_getuid: - m_getuid.return_value = 100 - assert 1 == query.handle_args('anyname', args) - assert expected_error in caplog.text - -# vi: ts=4 expandtab diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py deleted file mode 100644 index 1c9eec37..00000000 --- a/cloudinit/cmd/tests/test_status.py +++ /dev/null @@ -1,391 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from collections import namedtuple -import os -from io import StringIO -from textwrap import dedent - -from cloudinit.atomic_helper import write_json -from cloudinit.cmd import status -from cloudinit.util import ensure_file -from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock - -mypaths = namedtuple('MyPaths', 'run_dir') -myargs = namedtuple('MyArgs', 'long wait') - - -class TestStatus(CiTestCase): - - def setUp(self): - super(TestStatus, self).setUp() - self.new_root = self.tmp_dir() - self.status_file = self.tmp_path('status.json', self.new_root) - self.disable_file = self.tmp_path('cloudinit-disable', self.new_root) - self.paths = mypaths(run_dir=self.new_root) - - class FakeInit(object): - paths = self.paths - - def __init__(self, ds_deps): - pass - - def read_cfg(self): - pass - - self.init_class = FakeInit - - def test__is_cloudinit_disabled_false_on_sysvinit(self): - '''When not in an environment using systemd, return False.''' - ensure_file(self.disable_file) # Create the ignored disable file - (is_disabled, reason) = wrap_and_call( - 'cloudinit.cmd.status', - {'uses_systemd': False, - 'get_cmdline': "root=/dev/my-root not-important"}, - status._is_cloudinit_disabled, self.disable_file, self.paths) - self.assertFalse( - is_disabled, 'expected enabled cloud-init on sysvinit') - self.assertEqual('Cloud-init enabled on sysvinit', reason) - - def test__is_cloudinit_disabled_true_on_disable_file(self): - '''When using systemd and disable_file is present return disabled.''' - ensure_file(self.disable_file) # Create observed disable file - (is_disabled, reason) = wrap_and_call( - 'cloudinit.cmd.status', - {'uses_systemd': True, - 'get_cmdline': "root=/dev/my-root not-important"}, - status._is_cloudinit_disabled, self.disable_file, self.paths) - self.assertTrue(is_disabled, 'expected disabled cloud-init') - self.assertEqual( - 'Cloud-init disabled by {0}'.format(self.disable_file), reason) - - def test__is_cloudinit_disabled_false_on_kernel_cmdline_enable(self): - '''Not disabled when using systemd and enabled via commandline.''' - ensure_file(self.disable_file) # Create ignored disable file - (is_disabled, reason) = wrap_and_call( - 'cloudinit.cmd.status', - {'uses_systemd': True, - 'get_cmdline': 'something cloud-init=enabled else'}, - status._is_cloudinit_disabled, self.disable_file, self.paths) - 
self.assertFalse(is_disabled, 'expected enabled cloud-init')
-        self.assertEqual(
-            'Cloud-init enabled by kernel command line cloud-init=enabled',
-            reason)
-
-    def test__is_cloudinit_disabled_true_on_kernel_cmdline(self):
-        '''Return disabled when kernel command line has cloud-init=disabled.'''
-        (is_disabled, reason) = wrap_and_call(
-            'cloudinit.cmd.status',
-            {'uses_systemd': True,
-             'get_cmdline': 'something cloud-init=disabled else'},
-            status._is_cloudinit_disabled, self.disable_file, self.paths)
-        self.assertTrue(is_disabled, 'expected disabled cloud-init')
-        self.assertEqual(
-            'Cloud-init disabled by kernel parameter cloud-init=disabled',
-            reason)
-
-    def test__is_cloudinit_disabled_true_when_generator_disables(self):
-        '''When cloud-init-generator doesn't write enabled file return True.'''
-        enabled_file = os.path.join(self.paths.run_dir, 'enabled')
-        self.assertFalse(os.path.exists(enabled_file))
-        (is_disabled, reason) = wrap_and_call(
-            'cloudinit.cmd.status',
-            {'uses_systemd': True,
-             'get_cmdline': 'something'},
-            status._is_cloudinit_disabled, self.disable_file, self.paths)
-        self.assertTrue(is_disabled, 'expected disabled cloud-init')
-        self.assertEqual('Cloud-init disabled by cloud-init-generator', reason)
-
-    def test__is_cloudinit_disabled_false_when_enabled_in_systemd(self):
-        '''Report enabled when systemd generator creates the enabled file.'''
-        enabled_file = os.path.join(self.paths.run_dir, 'enabled')
-        ensure_file(enabled_file)
-        (is_disabled, reason) = wrap_and_call(
-            'cloudinit.cmd.status',
-            {'uses_systemd': True,
-             'get_cmdline': 'something ignored'},
-            status._is_cloudinit_disabled, self.disable_file, self.paths)
-        self.assertFalse(is_disabled, 'expected enabled cloud-init')
-        self.assertEqual(
-            'Cloud-init enabled by systemd cloud-init-generator', reason)
-
-    def test_status_returns_not_run(self):
-        '''When status.json does not exist yet, return 'not run'.'''
-        self.assertFalse(
-            os.path.exists(self.status_file), 'Unexpected status.json found')
-        cmdargs = myargs(long=False, wait=False)
-        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
-            retcode = wrap_and_call(
-                'cloudinit.cmd.status',
-                {'_is_cloudinit_disabled': (False, ''),
-                 'Init': {'side_effect': self.init_class}},
-                status.handle_status_args, 'ignored', cmdargs)
-        self.assertEqual(0, retcode)
-        self.assertEqual('status: not run\n', m_stdout.getvalue())
-
-    def test_status_returns_disabled_long_on_presence_of_disable_file(self):
-        '''When cloudinit is disabled, return disabled reason.'''
-
-        checked_files = []
-
-        def fakeexists(filepath):
-            checked_files.append(filepath)
-            status_file = os.path.join(self.paths.run_dir, 'status.json')
-            return bool(not filepath == status_file)
-
-        cmdargs = myargs(long=True, wait=False)
-        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
-            retcode = wrap_and_call(
-                'cloudinit.cmd.status',
-                {'os.path.exists': {'side_effect': fakeexists},
-                 '_is_cloudinit_disabled': (True, 'disabled for some reason'),
-                 'Init': {'side_effect': self.init_class}},
-                status.handle_status_args, 'ignored', cmdargs)
-        self.assertEqual(0, retcode)
-        self.assertEqual(
-            [os.path.join(self.paths.run_dir, 'status.json')],
-            checked_files)
-        expected = dedent('''\
-            status: disabled
-            detail:
-            disabled for some reason
-            ''')
-        self.assertEqual(expected, m_stdout.getvalue())
-
-    def test_status_returns_running_on_no_results_json(self):
-        '''Report running when status.json exists but result.json does not.'''
-        result_file = self.tmp_path('result.json', 
self.new_root)
-        write_json(self.status_file, {})
-        self.assertFalse(
-            os.path.exists(result_file), 'Unexpected result.json found')
-        cmdargs = myargs(long=False, wait=False)
-        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
-            retcode = wrap_and_call(
-                'cloudinit.cmd.status',
-                {'_is_cloudinit_disabled': (False, ''),
-                 'Init': {'side_effect': self.init_class}},
-                status.handle_status_args, 'ignored', cmdargs)
-        self.assertEqual(0, retcode)
-        self.assertEqual('status: running\n', m_stdout.getvalue())
-
-    def test_status_returns_running(self):
-        '''Report running when status exists with an unfinished stage.'''
-        ensure_file(self.tmp_path('result.json', self.new_root))
-        write_json(self.status_file,
-                   {'v1': {'init': {'start': 1, 'finished': None}}})
-        cmdargs = myargs(long=False, wait=False)
-        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
-            retcode = wrap_and_call(
-                'cloudinit.cmd.status',
-                {'_is_cloudinit_disabled': (False, ''),
-                 'Init': {'side_effect': self.init_class}},
-                status.handle_status_args, 'ignored', cmdargs)
-        self.assertEqual(0, retcode)
-        self.assertEqual('status: running\n', m_stdout.getvalue())
-
-    def test_status_returns_done(self):
-        '''Report done when result.json exists and no stages are unfinished.'''
-        ensure_file(self.tmp_path('result.json', self.new_root))
-        write_json(
-            self.status_file,
-            {'v1': {'stage': None,  # No current stage running
-                    'datasource': (
-                        'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]'
-                        '[dsmode=net]'),
-                    'blah': {'finished': 123.456},
-                    'init': {'errors': [], 'start': 124.567,
-                             'finished': 125.678},
-                    'init-local': {'start': 123.45, 'finished': 123.46}}})
-        cmdargs = myargs(long=False, wait=False)
-        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
-            retcode = wrap_and_call(
-                'cloudinit.cmd.status',
-                {'_is_cloudinit_disabled': (False, ''),
-                 'Init': {'side_effect': self.init_class}},
-                status.handle_status_args, 'ignored', cmdargs)
-        self.assertEqual(0, retcode)
-        self.assertEqual('status: done\n', m_stdout.getvalue())
-
-    def test_status_returns_done_long(self):
-        '''Long format of done status includes datasource info.'''
-        ensure_file(self.tmp_path('result.json', self.new_root))
-        write_json(
-            self.status_file,
-            {'v1': {'stage': None,
-                    'datasource': (
-                        'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]'
-                        '[dsmode=net]'),
-                    'init': {'start': 124.567, 'finished': 125.678},
-                    'init-local': {'start': 123.45, 'finished': 123.46}}})
-        cmdargs = myargs(long=True, wait=False)
-        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
-            retcode = wrap_and_call(
-                'cloudinit.cmd.status',
-                {'_is_cloudinit_disabled': (False, ''),
-                 'Init': {'side_effect': self.init_class}},
-                status.handle_status_args, 'ignored', cmdargs)
-        self.assertEqual(0, retcode)
-        expected = dedent('''\
-            status: done
-            time: Thu, 01 Jan 1970 00:02:05 +0000
-            detail:
-            DataSourceNoCloud [seed=/var/.../seed/nocloud-net][dsmode=net]
-            ''')
-        self.assertEqual(expected, m_stdout.getvalue())
-
-    def test_status_on_errors(self):
-        '''Reports error when any stage has errors.'''
-        write_json(
-            self.status_file,
-            {'v1': {'stage': None,
-                    'blah': {'errors': [], 'finished': 123.456},
-                    'init': {'errors': ['error1'], 'start': 124.567,
-                             'finished': 125.678},
-                    'init-local': {'start': 123.45, 'finished': 123.46}}})
-        cmdargs = myargs(long=False, wait=False)
-        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
-            retcode = wrap_and_call(
-                'cloudinit.cmd.status',
-                {'_is_cloudinit_disabled': (False, ''),
- 'Init': {'side_effect': self.init_class}}, - status.handle_status_args, 'ignored', cmdargs) - self.assertEqual(1, retcode) - self.assertEqual('status: error\n', m_stdout.getvalue()) - - def test_status_on_errors_long(self): - '''Long format of error status includes all error messages.''' - write_json( - self.status_file, - {'v1': {'stage': None, - 'datasource': ( - 'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]' - '[dsmode=net]'), - 'init': {'errors': ['error1'], 'start': 124.567, - 'finished': 125.678}, - 'init-local': {'errors': ['error2', 'error3'], - 'start': 123.45, 'finished': 123.46}}}) - cmdargs = myargs(long=True, wait=False) - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - retcode = wrap_and_call( - 'cloudinit.cmd.status', - {'_is_cloudinit_disabled': (False, ''), - 'Init': {'side_effect': self.init_class}}, - status.handle_status_args, 'ignored', cmdargs) - self.assertEqual(1, retcode) - expected = dedent('''\ - status: error - time: Thu, 01 Jan 1970 00:02:05 +0000 - detail: - error1 - error2 - error3 - ''') - self.assertEqual(expected, m_stdout.getvalue()) - - def test_status_returns_running_long_format(self): - '''Long format reports the stage in which we are running.''' - write_json( - self.status_file, - {'v1': {'stage': 'init', - 'init': {'start': 124.456, 'finished': None}, - 'init-local': {'start': 123.45, 'finished': 123.46}}}) - cmdargs = myargs(long=True, wait=False) - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - retcode = wrap_and_call( - 'cloudinit.cmd.status', - {'_is_cloudinit_disabled': (False, ''), - 'Init': {'side_effect': self.init_class}}, - status.handle_status_args, 'ignored', cmdargs) - self.assertEqual(0, retcode) - expected = dedent('''\ - status: running - time: Thu, 01 Jan 1970 00:02:04 +0000 - detail: - Running in stage: init - ''') - self.assertEqual(expected, m_stdout.getvalue()) - - def test_status_wait_blocks_until_done(self): - '''Specifying wait will poll every 1/4 second until done state.''' - running_json = { - 'v1': {'stage': 'init', - 'init': {'start': 124.456, 'finished': None}, - 'init-local': {'start': 123.45, 'finished': 123.46}}} - done_json = { - 'v1': {'stage': None, - 'init': {'start': 124.456, 'finished': 125.678}, - 'init-local': {'start': 123.45, 'finished': 123.46}}} - - self.sleep_calls = 0 - - def fake_sleep(interval): - self.assertEqual(0.25, interval) - self.sleep_calls += 1 - if self.sleep_calls == 2: - write_json(self.status_file, running_json) - elif self.sleep_calls == 3: - write_json(self.status_file, done_json) - result_file = self.tmp_path('result.json', self.new_root) - ensure_file(result_file) - - cmdargs = myargs(long=False, wait=True) - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - retcode = wrap_and_call( - 'cloudinit.cmd.status', - {'sleep': {'side_effect': fake_sleep}, - '_is_cloudinit_disabled': (False, ''), - 'Init': {'side_effect': self.init_class}}, - status.handle_status_args, 'ignored', cmdargs) - self.assertEqual(0, retcode) - self.assertEqual(4, self.sleep_calls) - self.assertEqual('....\nstatus: done\n', m_stdout.getvalue()) - - def test_status_wait_blocks_until_error(self): - '''Specifying wait will poll every 1/4 second until error state.''' - running_json = { - 'v1': {'stage': 'init', - 'init': {'start': 124.456, 'finished': None}, - 'init-local': {'start': 123.45, 'finished': 123.46}}} - error_json = { - 'v1': {'stage': None, - 'init': {'errors': ['error1'], 'start': 124.456, - 'finished': 125.678}, - 'init-local': {'start': 
123.45, 'finished': 123.46}}} - - self.sleep_calls = 0 - - def fake_sleep(interval): - self.assertEqual(0.25, interval) - self.sleep_calls += 1 - if self.sleep_calls == 2: - write_json(self.status_file, running_json) - elif self.sleep_calls == 3: - write_json(self.status_file, error_json) - - cmdargs = myargs(long=False, wait=True) - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - retcode = wrap_and_call( - 'cloudinit.cmd.status', - {'sleep': {'side_effect': fake_sleep}, - '_is_cloudinit_disabled': (False, ''), - 'Init': {'side_effect': self.init_class}}, - status.handle_status_args, 'ignored', cmdargs) - self.assertEqual(1, retcode) - self.assertEqual(4, self.sleep_calls) - self.assertEqual('....\nstatus: error\n', m_stdout.getvalue()) - - def test_status_main(self): - '''status.main can be run as a standalone script.''' - write_json(self.status_file, - {'v1': {'init': {'start': 1, 'finished': None}}}) - with self.assertRaises(SystemExit) as context_manager: - with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - wrap_and_call( - 'cloudinit.cmd.status', - {'sys.argv': {'new': ['status']}, - '_is_cloudinit_disabled': (False, ''), - 'Init': {'side_effect': self.init_class}}, - status.main) - self.assertEqual(0, context_manager.exception.code) - self.assertEqual('status: running\n', m_stdout.getvalue()) - -# vi: ts=4 expandtab syntax=python diff --git a/cloudinit/config/tests/test_apt_pipelining.py b/cloudinit/config/tests/test_apt_pipelining.py deleted file mode 100644 index 2a6bb10b..00000000 --- a/cloudinit/config/tests/test_apt_pipelining.py +++ /dev/null @@ -1,28 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""Tests cc_apt_pipelining handler""" - -import cloudinit.config.cc_apt_pipelining as cc_apt_pipelining - -from cloudinit.tests.helpers import CiTestCase, mock - - -class TestAptPipelining(CiTestCase): - - @mock.patch('cloudinit.config.cc_apt_pipelining.util.write_file') - def test_not_disabled_by_default(self, m_write_file): - """ensure that default behaviour is to not disable pipelining""" - cc_apt_pipelining.handle('foo', {}, None, mock.MagicMock(), None) - self.assertEqual(0, m_write_file.call_count) - - @mock.patch('cloudinit.config.cc_apt_pipelining.util.write_file') - def test_false_disables_pipelining(self, m_write_file): - """ensure that pipelining can be disabled with correct config""" - cc_apt_pipelining.handle( - 'foo', {'apt_pipelining': 'false'}, None, mock.MagicMock(), None) - self.assertEqual(1, m_write_file.call_count) - args, _ = m_write_file.call_args - self.assertEqual(cc_apt_pipelining.DEFAULT_FILE, args[0]) - self.assertIn('Pipeline-Depth "0"', args[1]) - -# vi: ts=4 expandtab diff --git a/cloudinit/config/tests/test_disable_ec2_metadata.py b/cloudinit/config/tests/test_disable_ec2_metadata.py deleted file mode 100644 index b00f2083..00000000 --- a/cloudinit/config/tests/test_disable_ec2_metadata.py +++ /dev/null @@ -1,48 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
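# The tests below fix the handler's tool-selection order: probe for `ip`
# first, fall back to `ifconfig`, and log an error when neither is present.
# A sketch of that probe order (hypothetical helper name, not the handler's
# actual structure):
#
#     from cloudinit import subp
#
#     def pick_route_tool():
#         for tool in ('ip', 'ifconfig'):
#             if subp.which(tool):
#                 return tool
#         return None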
- -"""Tests cc_disable_ec2_metadata handler""" - -import cloudinit.config.cc_disable_ec2_metadata as ec2_meta - -from cloudinit.tests.helpers import CiTestCase, mock - -import logging - -LOG = logging.getLogger(__name__) - -DISABLE_CFG = {'disable_ec2_metadata': 'true'} - - -class TestEC2MetadataRoute(CiTestCase): - - @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which') - @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp') - def test_disable_ifconfig(self, m_subp, m_which): - """Set the route if ifconfig command is available""" - m_which.side_effect = lambda x: x if x == 'ifconfig' else None - ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None) - m_subp.assert_called_with( - ['route', 'add', '-host', '169.254.169.254', 'reject'], - capture=False) - - @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which') - @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp') - def test_disable_ip(self, m_subp, m_which): - """Set the route if ip command is available""" - m_which.side_effect = lambda x: x if x == 'ip' else None - ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None) - m_subp.assert_called_with( - ['ip', 'route', 'add', 'prohibit', '169.254.169.254'], - capture=False) - - @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which') - @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp') - def test_disable_no_tool(self, m_subp, m_which): - """Log error when neither route nor ip commands are available""" - m_which.return_value = None # Find neither ifconfig nor ip - ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None) - self.assertEqual( - [mock.call('ip'), mock.call('ifconfig')], m_which.call_args_list) - m_subp.assert_not_called() - -# vi: ts=4 expandtab diff --git a/cloudinit/config/tests/test_final_message.py b/cloudinit/config/tests/test_final_message.py deleted file mode 100644 index 46ba99b2..00000000 --- a/cloudinit/config/tests/test_final_message.py +++ /dev/null @@ -1,46 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. -import logging -from unittest import mock - -import pytest - -from cloudinit.config.cc_final_message import handle - - -class TestHandle: - # TODO: Expand these tests to cover full functionality; currently they only - # cover the logic around how the boot-finished file is written (and not its - # contents). - - @pytest.mark.parametrize( - "instance_dir_exists,file_is_written,expected_log_substring", - [ - (True, True, None), - (False, False, "Failed to write boot finished file "), - ], - ) - def test_boot_finished_written( - self, - instance_dir_exists, - file_is_written, - expected_log_substring, - caplog, - tmpdir, - ): - instance_dir = tmpdir.join("var/lib/cloud/instance") - if instance_dir_exists: - instance_dir.ensure_dir() - boot_finished = instance_dir.join("boot-finished") - - m_cloud = mock.Mock( - paths=mock.Mock(boot_finished=boot_finished.strpath) - ) - - handle(None, {}, m_cloud, logging.getLogger(), []) - - # We should not change the status of the instance directory - assert instance_dir_exists == instance_dir.exists() - assert file_is_written == boot_finished.exists() - - if expected_log_substring: - assert expected_log_substring in caplog.text diff --git a/cloudinit/config/tests/test_grub_dpkg.py b/cloudinit/config/tests/test_grub_dpkg.py deleted file mode 100644 index 99c05bb5..00000000 --- a/cloudinit/config/tests/test_grub_dpkg.py +++ /dev/null @@ -1,176 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
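# The parametrized cases below imply fetch_idevs' selection strategy: start
# from the device grub-probe reports, then prefer a stable /dev/disk/by-id
# symlink from `udevadm info` when one exists (the NVMe and SCSI cases),
# falling back to the raw device node (the KVM and Xen cases). An
# illustrative filter only, not the real implementation:
#
#     def prefer_by_id(grub_dev, udevadm_links):
#         by_id = [d for d in udevadm_links
#                  if d.startswith('/dev/disk/by-id/')]
#         return by_id[0] if by_id else grub_dev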
diff --git a/cloudinit/config/tests/test_grub_dpkg.py b/cloudinit/config/tests/test_grub_dpkg.py
deleted file mode 100644
index 99c05bb5..00000000
--- a/cloudinit/config/tests/test_grub_dpkg.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import pytest
-
-from unittest import mock
-from logging import Logger
-from cloudinit.subp import ProcessExecutionError
-from cloudinit.config.cc_grub_dpkg import fetch_idevs, handle
-
-
-class TestFetchIdevs:
-    """Tests cc_grub_dpkg.fetch_idevs()"""
-
-    # Note: udevadm info returns devices in a large single line string
-    @pytest.mark.parametrize(
-        "grub_output,path_exists,expected_log_call,udevadm_output"
-        ",expected_idevs",
-        [
-            # Inside a container, grub not installed
-            (
-                ProcessExecutionError(reason=FileNotFoundError()),
-                False,
-                mock.call("'grub-probe' not found in $PATH"),
-                '',
-                '',
-            ),
-            # Inside a container, grub installed
-            (
-                ProcessExecutionError(stderr="failed to get canonical path"),
-                False,
-                mock.call("grub-probe 'failed to get canonical path'"),
-                '',
-                '',
-            ),
-            # KVM Instance
-            (
-                ['/dev/vda'],
-                True,
-                None,
-                (
-                    '/dev/disk/by-path/pci-0000:00:00.0 ',
-                    '/dev/disk/by-path/virtio-pci-0000:00:00.0 '
-                ),
-                '/dev/vda',
-            ),
-            # Xen Instance
-            (
-                ['/dev/xvda'],
-                True,
-                None,
-                '',
-                '/dev/xvda',
-            ),
-            # NVMe Hardware Instance
-            (
-                ['/dev/nvme1n1'],
-                True,
-                None,
-                (
-                    '/dev/disk/by-id/nvme-Company_hash000 ',
-                    '/dev/disk/by-id/nvme-nvme.000-000-000-000-000 ',
-                    '/dev/disk/by-path/pci-0000:00:00.0-nvme-0 '
-                ),
-                '/dev/disk/by-id/nvme-Company_hash000',
-            ),
-            # SCSI Hardware Instance
-            (
-                ['/dev/sda'],
-                True,
-                None,
-                (
-                    '/dev/disk/by-id/company-user-1 ',
-                    '/dev/disk/by-id/scsi-0Company_user-1 ',
-                    '/dev/disk/by-path/pci-0000:00:00.0-scsi-0:0:0:0 '
-                ),
-                '/dev/disk/by-id/company-user-1',
-            ),
-        ],
-    )
-    @mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc")
-    @mock.patch("cloudinit.config.cc_grub_dpkg.os.path.exists")
-    @mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp")
-    def test_fetch_idevs(self, m_subp, m_exists, m_logexc, grub_output,
-                         path_exists, expected_log_call, udevadm_output,
-                         expected_idevs):
-        """Tests outputs from grub-probe and udevadm info against grub-dpkg"""
-        m_subp.side_effect = [
-            grub_output,
-            ["".join(udevadm_output)]
-        ]
-        m_exists.return_value = path_exists
-        log = mock.Mock(spec=Logger)
-        idevs = fetch_idevs(log)
-        assert expected_idevs == idevs
-        if expected_log_call is not None:
-            assert expected_log_call in log.debug.call_args_list
-
-
-class TestHandle:
-    """Tests cc_grub_dpkg.handle()"""
-
-    @pytest.mark.parametrize(
-        "cfg_idevs,cfg_idevs_empty,fetch_idevs_output,expected_log_output",
-        [
-            (
-                # No configuration
-                None,
-                None,
-                '/dev/disk/by-id/nvme-Company_hash000',
-                (
-                    "Setting grub debconf-set-selections with ",
-                    "'/dev/disk/by-id/nvme-Company_hash000','false'"
-                ),
-            ),
-            (
-                # idevs set, idevs_empty unset
-                '/dev/sda',
-                None,
-                '/dev/sda',
-                (
-                    "Setting grub debconf-set-selections with ",
-                    "'/dev/sda','false'"
-                ),
-            ),
-            (
-                # idevs unset, idevs_empty set
-                None,
-                'true',
-                '/dev/xvda',
-                (
-                    "Setting grub debconf-set-selections with ",
-                    "'/dev/xvda','true'"
-                ),
-            ),
-            (
-                # idevs set, idevs_empty set
-                '/dev/vda',
-                'false',
-                '/dev/disk/by-id/company-user-1',
-                (
-                    "Setting grub debconf-set-selections with ",
-                    "'/dev/vda','false'"
-                ),
-            ),
-            (
-                # idevs set, idevs_empty set
-                # Respect what the user defines, even if it's logically wrong
-                '/dev/nvme0n1',
-                'true',
-                '',
-                (
-                    "Setting grub debconf-set-selections with ",
-                    "'/dev/nvme0n1','true'"
-                ),
-            )
        ],
-    )
-    @mock.patch("cloudinit.config.cc_grub_dpkg.fetch_idevs")
-    @mock.patch("cloudinit.config.cc_grub_dpkg.util.get_cfg_option_str")
-    @mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc")
-    @mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp")
-    def test_handle(self, m_subp, m_logexc, m_get_cfg_str, m_fetch_idevs,
-                    cfg_idevs, cfg_idevs_empty, fetch_idevs_output,
-                    expected_log_output):
-        """Test setting of correct debconf database entries"""
-        m_get_cfg_str.side_effect = [
-            cfg_idevs,
-            cfg_idevs_empty
-        ]
-        m_fetch_idevs.return_value = fetch_idevs_output
-        log = mock.Mock(spec=Logger)
-        handle(mock.Mock(), mock.Mock(), mock.Mock(), log, mock.Mock())
-        log.debug.assert_called_with("".join(expected_log_output))
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_keys_to_console.py b/cloudinit/config/tests/test_keys_to_console.py
deleted file mode 100644
index 4083fc54..00000000
--- a/cloudinit/config/tests/test_keys_to_console.py
+++ /dev/null
@@ -1,34 +0,0 @@
-"""Tests for cc_keys_to_console."""
-from unittest import mock
-
-import pytest
-
-from cloudinit.config import cc_keys_to_console
-
-
-class TestHandle:
-    """Tests for cloudinit.config.cc_keys_to_console.handle.
-
-    TODO: These tests only cover the emit_keys_to_console config option, they
-    should be expanded to cover the full functionality.
-    """
-
-    @mock.patch("cloudinit.config.cc_keys_to_console.util.multi_log")
-    @mock.patch("cloudinit.config.cc_keys_to_console.os.path.exists")
-    @mock.patch("cloudinit.config.cc_keys_to_console.subp.subp")
-    @pytest.mark.parametrize("cfg,subp_called", [
-        ({}, True),  # Default to emitting keys
-        ({"ssh": {}}, True),  # Default even if we have the parent key
-        ({"ssh": {"emit_keys_to_console": True}}, True),  # Explicitly enabled
-        ({"ssh": {"emit_keys_to_console": False}}, False),  # Disabled
-    ])
-    def test_emit_keys_to_console_config(
-        self, m_subp, m_path_exists, _m_multi_log, cfg, subp_called
-    ):
-        # Ensure we always find the helper
-        m_path_exists.return_value = True
-        m_subp.return_value = ("", "")
-
-        cc_keys_to_console.handle("name", cfg, mock.Mock(), mock.Mock(), ())
-
-        assert subp_called == (m_subp.call_count == 1)
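The fetch_idevs tests above lean on a mock detail worth calling out: a side_effect list may mix return values and exception instances, so a single mock can model "grub-probe fails, then udevadm succeeds". A minimal sketch (values mirror the parametrized cases above; the function under test is omitted):

    from unittest import mock
    from cloudinit.subp import ProcessExecutionError

    m_subp = mock.Mock()
    m_subp.side_effect = [
        ProcessExecutionError(reason=FileNotFoundError()),  # raised on call 1
        ['/dev/disk/by-path/pci-0000:00:00.0 '],            # returned on call 2
    ]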
diff --git a/cloudinit/config/tests/test_mounts.py b/cloudinit/config/tests/test_mounts.py
deleted file mode 100644
index 56510fd6..00000000
--- a/cloudinit/config/tests/test_mounts.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-from unittest import mock
-
-import pytest
-
-from cloudinit.config.cc_mounts import create_swapfile
-from cloudinit.subp import ProcessExecutionError
-
-
-M_PATH = 'cloudinit.config.cc_mounts.'
-
-
-class TestCreateSwapfile:
-
-    @pytest.mark.parametrize('fstype', ('xfs', 'btrfs', 'ext4', 'other'))
-    @mock.patch(M_PATH + 'util.get_mount_info')
-    @mock.patch(M_PATH + 'subp.subp')
-    def test_happy_path(self, m_subp, m_get_mount_info, fstype, tmpdir):
-        swap_file = tmpdir.join("swap-file")
-        fname = str(swap_file)
-
-        # Some of the calls to subp.subp should create the swap file; this
-        # roughly approximates that
-        m_subp.side_effect = lambda *args, **kwargs: swap_file.write('')
-
-        m_get_mount_info.return_value = (mock.ANY, fstype)
-
-        create_swapfile(fname, '')
-        assert mock.call(['mkswap', fname]) in m_subp.call_args_list
-
-    @mock.patch(M_PATH + "util.get_mount_info")
-    @mock.patch(M_PATH + "subp.subp")
-    def test_fallback_from_fallocate_to_dd(
-        self, m_subp, m_get_mount_info, caplog, tmpdir
-    ):
-        swap_file = tmpdir.join("swap-file")
-        fname = str(swap_file)
-
-        def subp_side_effect(cmd, *args, **kwargs):
-            # Mock fallocate failing, to initiate fallback
-            if cmd[0] == "fallocate":
-                raise ProcessExecutionError()
-
-        m_subp.side_effect = subp_side_effect
-        # Use ext4 so both fallocate and dd are valid swap creation methods
-        m_get_mount_info.return_value = (mock.ANY, "ext4")
-
-        create_swapfile(fname, "")
-
-        cmds = [args[0][0] for args, _kwargs in m_subp.call_args_list]
-        assert "fallocate" in cmds, "fallocate was not called"
-        assert "dd" in cmds, "fallocate failure did not fallback to dd"
-
-        assert cmds.index("dd") > cmds.index(
-            "fallocate"
-        ), "dd ran before fallocate"
-
-        assert mock.call(["mkswap", fname]) in m_subp.call_args_list
-
-        msg = "fallocate swap creation failed, will attempt with dd"
-        assert msg in caplog.text
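The fallback test above demonstrates the other side_effect form: a function that inspects the command and raises only for selected invocations, which lets the test force the fallocate path to fail while dd succeeds. The ordering check then reads the first element of each recorded positional-args tuple. Reduced to its core (m_subp here is a free-standing mock, not the patched one):

    from unittest import mock
    from cloudinit.subp import ProcessExecutionError

    def subp_side_effect(cmd, *args, **kwargs):
        # Fail only the fallocate invocation; everything else "succeeds"
        if cmd[0] == "fallocate":
            raise ProcessExecutionError()

    m_subp = mock.Mock(side_effect=subp_side_effect)
    # ... exercise code under test, then recover the command ordering:
    cmds = [args[0][0] for args, _kwargs in m_subp.call_args_list]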
diff --git a/cloudinit/config/tests/test_resolv_conf.py b/cloudinit/config/tests/test_resolv_conf.py
deleted file mode 100644
index aff110e5..00000000
--- a/cloudinit/config/tests/test_resolv_conf.py
+++ /dev/null
@@ -1,92 +0,0 @@
-import pytest
-
-from unittest import mock
-from cloudinit.config.cc_resolv_conf import generate_resolv_conf
-from tests.unittests.util import TestingDistro
-
-EXPECTED_HEADER = """\
-# Your system has been configured with 'manage-resolv-conf' set to true.
-# As a result, cloud-init has written this file with configuration data
-# that it has been provided. Cloud-init, by default, will write this file
-# a single time (PER_ONCE).
-#\n\n"""
-
-
-class TestGenerateResolvConf:
-
-    dist = TestingDistro()
-    tmpl_fn = "templates/resolv.conf.tmpl"
-
-    @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
-    def test_dist_resolv_conf_fn(self, m_render_to_file):
-        self.dist.resolve_conf_fn = "/tmp/resolv-test.conf"
-        generate_resolv_conf(self.tmpl_fn,
-                             mock.MagicMock(),
-                             self.dist.resolve_conf_fn)
-
-        assert [
-            mock.call(mock.ANY, self.dist.resolve_conf_fn, mock.ANY)
-        ] == m_render_to_file.call_args_list
-
-    @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
-    def test_target_fname_is_used_if_passed(self, m_render_to_file):
-        path = "/use/this/path"
-        generate_resolv_conf(self.tmpl_fn, mock.MagicMock(), path)
-
-        assert [
-            mock.call(mock.ANY, path, mock.ANY)
-        ] == m_render_to_file.call_args_list
-
-    # Patch in templater so we can assert on the actual generated content
-    @mock.patch("cloudinit.templater.util.write_file")
-    # Parameterise with the value to be passed to generate_resolv_conf as the
-    # params parameter, and the expected line after the header as
-    # expected_extra_line.
-    @pytest.mark.parametrize(
-        "params,expected_extra_line",
-        [
-            # No options
-            ({}, None),
-            # Just a true flag
-            ({"options": {"foo": True}}, "options foo"),
-            # Just a false flag
-            ({"options": {"foo": False}}, None),
-            # Just an option
-            ({"options": {"foo": "some_value"}}, "options foo:some_value"),
-            # A true flag and an option
-            (
-                {"options": {"foo": "some_value", "bar": True}},
-                "options bar foo:some_value",
-            ),
-            # Two options
-            (
-                {"options": {"foo": "some_value", "bar": "other_value"}},
-                "options bar:other_value foo:some_value",
-            ),
-            # Everything
-            (
-                {
-                    "options": {
-                        "foo": "some_value",
-                        "bar": "other_value",
-                        "baz": False,
-                        "spam": True,
-                    }
-                },
-                "options spam bar:other_value foo:some_value",
-            ),
-        ],
-    )
-    def test_flags_and_options(
-        self, m_write_file, params, expected_extra_line
-    ):
-        target_fn = "/etc/resolv.conf"
-        generate_resolv_conf(self.tmpl_fn, params, target_fn)
-
-        expected_content = EXPECTED_HEADER
-        if expected_extra_line is not None:
-            # If we have any extra lines, expect a trailing newline
-            expected_content += "\n".join([expected_extra_line, ""])
-        assert [
-            mock.call(mock.ANY, expected_content, mode=mock.ANY)
-        ] == m_write_file.call_args_list
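The parametrized expectations above imply a deterministic rendering rule for the resolv.conf options line: boolean-True entries render as bare flags, False entries are dropped, other values render as name:value, and flags sort ahead of the name:value pairs. A sketch reproducing just that ordering rule (inferred from the table above, not taken from the actual template):

    def expected_options_line(options):
        # Flags first (sorted), then name:value pairs (sorted); False dropped.
        flags = sorted(k for k, v in options.items() if v is True)
        pairs = sorted('%s:%s' % (k, v) for k, v in options.items()
                       if not isinstance(v, bool))
        parts = flags + pairs
        return 'options ' + ' '.join(parts) if parts else None

    assert expected_options_line(
        {'foo': 'some_value', 'bar': True}) == 'options bar foo:some_value'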
diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py
deleted file mode 100644
index 2a27f72f..00000000
--- a/cloudinit/config/tests/test_set_passwords.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from unittest import mock
-
-from cloudinit.config import cc_set_passwords as setpass
-from cloudinit.tests.helpers import CiTestCase
-from cloudinit import util
-
-MODPATH = "cloudinit.config.cc_set_passwords."
-
-
-class TestHandleSshPwauth(CiTestCase):
-    """Test cc_set_passwords handling of ssh_pwauth in handle_ssh_pwauth."""
-
-    with_logs = True
-
-    @mock.patch("cloudinit.distros.subp.subp")
-    def test_unknown_value_logs_warning(self, m_subp):
-        cloud = self.tmp_cloud(distro='ubuntu')
-        setpass.handle_ssh_pwauth("floo", cloud.distro)
-        self.assertIn("Unrecognized value: ssh_pwauth=floo",
-                      self.logs.getvalue())
-        m_subp.assert_not_called()
-
-    @mock.patch(MODPATH + "update_ssh_config", return_value=True)
-    @mock.patch("cloudinit.distros.subp.subp")
-    def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config):
-        """If systemctl in service cmd: systemctl restart name."""
-        cloud = self.tmp_cloud(distro='ubuntu')
-        cloud.distro.init_cmd = ['systemctl']
-        setpass.handle_ssh_pwauth(True, cloud.distro)
-        m_subp.assert_called_with(
-            ["systemctl", "restart", "ssh"], capture=True)
-
-    @mock.patch(MODPATH + "update_ssh_config", return_value=False)
-    @mock.patch("cloudinit.distros.subp.subp")
-    def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config):
-        """If config is not updated, then no system restart should be done."""
-        cloud = self.tmp_cloud(distro='ubuntu')
-        setpass.handle_ssh_pwauth(True, cloud.distro)
-        m_subp.assert_not_called()
-        self.assertIn("No need to restart SSH", self.logs.getvalue())
-
-    @mock.patch(MODPATH + "update_ssh_config", return_value=True)
-    @mock.patch("cloudinit.distros.subp.subp")
-    def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config):
-        """If 'unchanged', then no updates to config and no restart."""
-        cloud = self.tmp_cloud(distro='ubuntu')
-        setpass.handle_ssh_pwauth("unchanged", cloud.distro)
-        m_update_ssh_config.assert_not_called()
-        m_subp.assert_not_called()
-
-    @mock.patch("cloudinit.distros.subp.subp")
-    def test_valid_change_values(self, m_subp):
-        """If value is a valid change value, then update should be called."""
-        cloud = self.tmp_cloud(distro='ubuntu')
-        upname = MODPATH + "update_ssh_config"
-        optname = "PasswordAuthentication"
-        for value in util.FALSE_STRINGS + util.TRUE_STRINGS:
-            optval = "yes" if value in util.TRUE_STRINGS else "no"
-            with mock.patch(upname, return_value=False) as m_update:
-                setpass.handle_ssh_pwauth(value, cloud.distro)
-                m_update.assert_called_with({optname: optval})
-        m_subp.assert_not_called()
-
-
-class TestSetPasswordsHandle(CiTestCase):
-    """Test cc_set_passwords.handle"""
-
-    with_logs = True
-
-    def test_handle_on_empty_config(self, *args):
-        """handle logs that no password has changed when config is empty."""
-        cloud = self.tmp_cloud(distro='ubuntu')
-        setpass.handle(
-            'IGNORED', cfg={}, cloud=cloud, log=self.logger, args=[])
-        self.assertEqual(
-            "DEBUG: Leaving SSH config 'PasswordAuthentication' unchanged. "
-            'ssh_pwauth=None\n',
-            self.logs.getvalue())
-
-    @mock.patch(MODPATH + "subp.subp")
-    def test_handle_on_chpasswd_list_parses_common_hashes(self, m_subp):
-        """handle parses command password hashes."""
-        cloud = self.tmp_cloud(distro='ubuntu')
-        valid_hashed_pwds = [
-            'root:$2y$10$8BQjxjVByHA/Ee.O1bCXtO8S7Y5WojbXWqnqYpUW.BrPx/'
-            'Dlew1Va',
-            'ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoakMMC7dR52q'
-            'SDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx3oo1']
-        cfg = {'chpasswd': {'list': valid_hashed_pwds}}
-        with mock.patch(MODPATH + 'subp.subp') as m_subp:
-            setpass.handle(
-                'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
-        self.assertIn(
-            'DEBUG: Handling input for chpasswd as list.',
-            self.logs.getvalue())
-        self.assertIn(
-            "DEBUG: Setting hashed password for ['root', 'ubuntu']",
-            self.logs.getvalue())
-        self.assertEqual(
-            [mock.call(['chpasswd', '-e'],
-                       '\n'.join(valid_hashed_pwds) + '\n')],
-            m_subp.call_args_list)
-
-    @mock.patch(MODPATH + "util.is_BSD")
-    @mock.patch(MODPATH + "subp.subp")
-    def test_bsd_calls_custom_pw_cmds_to_set_and_expire_passwords(
-            self, m_subp, m_is_bsd):
-        """BSD doesn't use chpasswd"""
-        m_is_bsd.return_value = True
-        cloud = self.tmp_cloud(distro='freebsd')
-        valid_pwds = ['ubuntu:passw0rd']
-        cfg = {'chpasswd': {'list': valid_pwds}}
-        setpass.handle(
-            'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
-        self.assertEqual([
-            mock.call(['pw', 'usermod', 'ubuntu', '-h', '0'], data='passw0rd',
-                      logstring="chpasswd for ubuntu"),
-            mock.call(['pw', 'usermod', 'ubuntu', '-p', '01-Jan-1970'])],
-            m_subp.call_args_list)
-
-    @mock.patch(MODPATH + "util.multi_log")
-    @mock.patch(MODPATH + "subp.subp")
-    def test_handle_on_chpasswd_list_creates_random_passwords(
-        self, m_subp, m_multi_log
-    ):
-        """handle parses command set random passwords."""
-        cloud = self.tmp_cloud(distro='ubuntu')
-        valid_random_pwds = [
-            'root:R',
-            'ubuntu:RANDOM']
-        cfg = {'chpasswd': {'expire': 'false', 'list': valid_random_pwds}}
-        with mock.patch(MODPATH + 'subp.subp') as m_subp:
-            setpass.handle(
-                'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
-        self.assertIn(
-            'DEBUG: Handling input for chpasswd as list.',
-            self.logs.getvalue())
-
-        self.assertEqual(1, m_subp.call_count)
-        args, _kwargs = m_subp.call_args
-        self.assertEqual(["chpasswd"], args[0])
-
-        stdin = args[1]
-        user_pass = {
-            user: password
-            for user, password
-            in (line.split(":") for line in stdin.splitlines())
-        }
-
-        self.assertEqual(1, m_multi_log.call_count)
-        self.assertEqual(
-            mock.call(mock.ANY, stderr=False, fallback_to_stdout=False),
-            m_multi_log.call_args
-        )
-
-        self.assertEqual(set(["root", "ubuntu"]), set(user_pass.keys()))
-        written_lines = m_multi_log.call_args[0][0].splitlines()
-        for password in user_pass.values():
-            for line in written_lines:
-                if password in line:
-                    break
-            else:
-                self.fail("Password not emitted to console")
-
-
-# vi: ts=4 expandtab
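The random-password test above recovers the generated passwords by parsing the stdin handed to chpasswd, which is just colon-separated user:password lines. The parsing idiom in isolation (payload values are made up):

    stdin = "root:3s4RWzi7\nubuntu:BjgTHM2N\n"  # example chpasswd payload
    user_pass = dict(line.split(":", 1) for line in stdin.splitlines())
    assert set(user_pass) == {"root", "ubuntu"}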
diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py
deleted file mode 100644
index 6d4c014a..00000000
--- a/cloudinit/config/tests/test_snap.py
+++ /dev/null
@@ -1,564 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import re
-from io import StringIO
-
-from cloudinit.config.cc_snap import (
-    ASSERTIONS_FILE, add_assertions, handle, maybe_install_squashfuse,
-    run_commands, schema)
-from cloudinit.config.schema import validate_cloudconfig_schema
-from cloudinit import util
-from cloudinit.tests.helpers import (
-    CiTestCase, SchemaTestCaseMixin, mock, wrap_and_call,
-    skipUnlessJsonSchema)
-
-
-SYSTEM_USER_ASSERTION = """\
-type: system-user
-authority-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp
-brand-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp
-email: foo@bar.com
-password: $6$E5YiAuMIPAwX58jG$miomhVNui/vf7f/3ctB/f0RWSKFxG0YXzrJ9rtJ1ikvzt
-series:
-- 16
-since: 2016-09-10T16:34:00+03:00
-until: 2017-11-10T16:34:00+03:00
-username: baz
-sign-key-sha3-384: RuVvnp4n52GilycjfbbTCI3_L8Y6QlIE75wxMc0KzGV3AUQqVd9GuXoj
-
-AcLBXAQAAQoABgUCV/UU1wAKCRBKnlMoJQLkZVeLD/9/+hIeVywtzsDA3oxl+P+u9D13y9s6svP
-Jd6Wnf4FTw6sq1GjBE4ZA7lrwSaRCUJ9Vcsvf2q9OGPY7mOb2TBxaDe0PbUMjrSrqllSSQwhpNI
-zG+NxkkKuxsUmLzFa+k9m6cyojNbw5LFhQZBQCGlr3JYqC0tIREq/UsZxj+90TUC87lDJwkU8GF
-s4CR+rejZj4itIcDcVxCSnJH6hv6j2JrJskJmvObqTnoOlcab+JXdamXqbldSP3UIhWoyVjqzkj
-+to7mXgx+cCUA9+ngNCcfUG+1huGGTWXPCYkZ78HvErcRlIdeo4d3xwtz1cl/w3vYnq9og1XwsP
-Yfetr3boig2qs1Y+j/LpsfYBYncgWjeDfAB9ZZaqQz/oc8n87tIPZDJHrusTlBfop8CqcM4xsKS
-d+wnEY8e/F24mdSOYmS1vQCIDiRU3MKb6x138Ud6oHXFlRBbBJqMMctPqWDunWzb5QJ7YR0I39q
-BrnEqv5NE0G7w6HOJ1LSPG5Hae3P4T2ea+ATgkb03RPr3KnXnzXg4TtBbW1nytdlgoNc/BafE1H
-f3NThcq9gwX4xWZ2PAWnqVPYdDMyCtzW3Ck+o6sIzx+dh4gDLPHIi/6TPe/pUuMop9CBpWwez7V
-v1z+1+URx6Xlq3Jq18y5pZ6fY3IDJ6km2nQPMzcm4Q=="""
-
-ACCOUNT_ASSERTION = """\
-type: account-key
-authority-id: canonical
-revision: 2
-public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0
-account-id: canonical
-name: store
-since: 2016-04-01T00:00:00.0Z
-body-length: 717
-sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswH
-
-AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9j
-qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482
-vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJ
-UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuK
-Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQG
-o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl
-VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9
-2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7an
-Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIc
-vUvV7RjVzv17ut0AEQEAAQ==
-
-AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsM
-WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/b
-nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiL
-3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kL
-eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrY
-inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ1
-rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+
-rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWE
-aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQ
-6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nO
-haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpF
-yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O9
-HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi
-skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PK
-CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjde
-ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OF
-qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqR
-IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3t
-oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k"""
-
-
-class FakeCloud(object):
-    def __init__(self, distro):
-        self.distro = distro
-
-
-class TestAddAssertions(CiTestCase):
-
-    with_logs = True
-
-    def setUp(self):
-        super(TestAddAssertions, self).setUp()
-        self.tmp = self.tmp_dir()
-
-    @mock.patch('cloudinit.config.cc_snap.subp.subp')
-    def test_add_assertions_on_empty_list(self, m_subp):
-        """When provided with an empty list, add_assertions does nothing."""
-        add_assertions([])
-        self.assertEqual('', self.logs.getvalue())
-        m_subp.assert_not_called()
-
-    def test_add_assertions_on_non_list_or_dict(self):
-        """When provided an invalid type, add_assertions raises an error."""
-        with self.assertRaises(TypeError) as context_manager:
-            add_assertions(assertions="I'm Not Valid")
-        self.assertEqual(
-            "assertion parameter was not a list or dict: I'm Not Valid",
-            str(context_manager.exception))
-
-    @mock.patch('cloudinit.config.cc_snap.subp.subp')
-    def test_add_assertions_adds_assertions_as_list(self, m_subp):
-        """When provided with a list, add_assertions adds all assertions."""
-        self.assertEqual(
-            ASSERTIONS_FILE, '/var/lib/cloud/instance/snapd.assertions')
-        assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
-        assertions = [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION]
-        wrap_and_call(
-            'cloudinit.config.cc_snap',
-            {'ASSERTIONS_FILE': {'new': assert_file}},
-            add_assertions, assertions)
-        self.assertIn(
-            'Importing user-provided snap assertions', self.logs.getvalue())
-        self.assertIn(
-            'sertions', self.logs.getvalue())
-        self.assertEqual(
-            [mock.call(['snap', 'ack', assert_file], capture=True)],
-            m_subp.call_args_list)
-        compare_file = self.tmp_path('comparison', dir=self.tmp)
-        util.write_file(compare_file, '\n'.join(assertions).encode('utf-8'))
-        self.assertEqual(
-            util.load_file(compare_file), util.load_file(assert_file))
-
-    @mock.patch('cloudinit.config.cc_snap.subp.subp')
-    def test_add_assertions_adds_assertions_as_dict(self, m_subp):
-        """When provided with a dict, add_assertions adds all assertions."""
-        self.assertEqual(
-            ASSERTIONS_FILE, '/var/lib/cloud/instance/snapd.assertions')
-        assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
-        assertions = {'00': SYSTEM_USER_ASSERTION, '01': ACCOUNT_ASSERTION}
-        wrap_and_call(
-            'cloudinit.config.cc_snap',
-            {'ASSERTIONS_FILE': {'new': assert_file}},
-            add_assertions, assertions)
-        self.assertIn(
-            'Importing user-provided snap assertions',
-            self.logs.getvalue())
-        self.assertIn(
-            "DEBUG: Snap acking: ['type: system-user', 'authority-id: Lqv",
-            self.logs.getvalue())
-        self.assertIn(
-            "DEBUG: Snap acking: ['type: account-key', 'authority-id: canonic",
-            self.logs.getvalue())
-        self.assertEqual(
-            [mock.call(['snap', 'ack', assert_file], capture=True)],
-            m_subp.call_args_list)
-        compare_file = self.tmp_path('comparison', dir=self.tmp)
-        combined = '\n'.join(assertions.values())
-        util.write_file(compare_file, combined.encode('utf-8'))
-        self.assertEqual(
-            util.load_file(compare_file), util.load_file(assert_file))
-
-
-class TestRunCommands(CiTestCase):
-
-    with_logs = True
-    allowed_subp = [CiTestCase.SUBP_SHELL_TRUE]
-
-    def setUp(self):
-        super(TestRunCommands, self).setUp()
-        self.tmp = self.tmp_dir()
-
-    @mock.patch('cloudinit.config.cc_snap.subp.subp')
-    def test_run_commands_on_empty_list(self, m_subp):
-        """When provided with an empty list, run_commands does nothing."""
-        run_commands([])
-        self.assertEqual('', self.logs.getvalue())
-        m_subp.assert_not_called()
-
-    def test_run_commands_on_non_list_or_dict(self):
-        """When provided an invalid type, run_commands raises an error."""
-        with self.assertRaises(TypeError) as context_manager:
-            run_commands(commands="I'm Not Valid")
-        self.assertEqual(
-            "commands parameter was not a list or dict: I'm Not Valid",
-            str(context_manager.exception))
-
-    def test_run_command_logs_commands_and_exit_codes_to_stderr(self):
-        """All exit codes are logged to stderr."""
-        outfile = self.tmp_path('output.log', dir=self.tmp)
-
-        cmd1 = 'echo "HI" >> %s' % outfile
-        cmd2 = 'bogus command'
-        cmd3 = 'echo "MOM" >> %s' % outfile
-        commands = [cmd1, cmd2, cmd3]
-
-        mock_path = 'cloudinit.config.cc_snap.sys.stderr'
-        with mock.patch(mock_path, new_callable=StringIO) as m_stderr:
-            with self.assertRaises(RuntimeError) as context_manager:
-                run_commands(commands=commands)
-
-        self.assertIsNotNone(
-            re.search(r'bogus: (command )?not found',
-                      str(context_manager.exception)),
-            msg='Expected bogus command not found')
-        expected_stderr_log = '\n'.join([
-            'Begin run command: {cmd}'.format(cmd=cmd1),
-            'End run command: exit(0)',
-            'Begin run command: {cmd}'.format(cmd=cmd2),
-            'ERROR: End run command: exit(127)',
-            'Begin run command: {cmd}'.format(cmd=cmd3),
-            'End run command: exit(0)\n'])
-        self.assertEqual(expected_stderr_log, m_stderr.getvalue())
-
-    def test_run_command_as_lists(self):
-        """When commands are specified as a list, run them in order."""
-        outfile = self.tmp_path('output.log', dir=self.tmp)
-
-        cmd1 = 'echo "HI" >> %s' % outfile
-        cmd2 = 'echo "MOM" >> %s' % outfile
-        commands = [cmd1, cmd2]
-        mock_path = 'cloudinit.config.cc_snap.sys.stderr'
-        with mock.patch(mock_path, new_callable=StringIO):
-            run_commands(commands=commands)
-
-        self.assertIn(
-            'DEBUG: Running user-provided snap commands',
-            self.logs.getvalue())
-        self.assertEqual('HI\nMOM\n', util.load_file(outfile))
-        self.assertIn(
-            'WARNING: Non-snap commands in snap config:',
-            self.logs.getvalue())
-
-    def test_run_command_dict_sorted_as_command_script(self):
-        """When commands are a dict, sort them and run."""
-        outfile = self.tmp_path('output.log', dir=self.tmp)
-        cmd1 = 'echo "HI" >> %s' % outfile
-        cmd2 = 'echo "MOM" >> %s' % outfile
-        commands = {'02': cmd1, '01': cmd2}
-        mock_path = 'cloudinit.config.cc_snap.sys.stderr'
-        with mock.patch(mock_path, new_callable=StringIO):
-            run_commands(commands=commands)
-
-        expected_messages = [
-            'DEBUG: Running user-provided snap commands']
-        for message in expected_messages:
-            self.assertIn(message, self.logs.getvalue())
-        self.assertEqual('MOM\nHI\n', util.load_file(outfile))
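TestRunCommands above captures the command progress log by swapping sys.stderr for an in-memory buffer; the patch targets 'cloudinit.config.cc_snap.sys.stderr' so that writes made by cc_snap land in the buffer. The stdlib-only shape of the trick:

    import sys
    from io import StringIO
    from unittest import mock

    with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
        print('Begin run command: echo hi', file=sys.stderr)
    assert 'Begin run command' in m_stderr.getvalue()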
-
-
-@skipUnlessJsonSchema()
-class TestSchema(CiTestCase, SchemaTestCaseMixin):
-
-    with_logs = True
-    schema = schema
-
-    def test_schema_warns_on_snap_not_as_dict(self):
-        """If the snap configuration is not a dict, emit a warning."""
-        validate_cloudconfig_schema({'snap': 'wrong type'}, schema)
-        self.assertEqual(
-            "WARNING: Invalid config:\nsnap: 'wrong type' is not of type"
-            " 'object'\n",
-            self.logs.getvalue())
-
-    @mock.patch('cloudinit.config.cc_snap.run_commands')
-    def test_schema_disallows_unknown_keys(self, _):
-        """Unknown keys in the snap configuration emit warnings."""
-        validate_cloudconfig_schema(
-            {'snap': {'commands': ['ls'], 'invalid-key': ''}}, schema)
-        self.assertIn(
-            'WARNING: Invalid config:\nsnap: Additional properties are not'
-            " allowed ('invalid-key' was unexpected)",
-            self.logs.getvalue())
-
-    def test_warn_schema_requires_either_commands_or_assertions(self):
-        """Warn when snap configuration lacks both commands and assertions."""
-        validate_cloudconfig_schema(
-            {'snap': {}}, schema)
-        self.assertIn(
-            'WARNING: Invalid config:\nsnap: {} does not have enough'
-            ' properties',
-            self.logs.getvalue())
-
-    @mock.patch('cloudinit.config.cc_snap.run_commands')
-    def test_warn_schema_commands_is_not_list_or_dict(self, _):
-        """Warn when snap:commands config is not a list or dict."""
-        validate_cloudconfig_schema(
-            {'snap': {'commands': 'broken'}}, schema)
-        self.assertEqual(
-            "WARNING: Invalid config:\nsnap.commands: 'broken' is not of type"
-            " 'object', 'array'\n",
-            self.logs.getvalue())
-
-    @mock.patch('cloudinit.config.cc_snap.run_commands')
-    def test_warn_schema_when_commands_is_empty(self, _):
-        """Emit warnings when snap:commands is an empty list or dict."""
-        validate_cloudconfig_schema(
-            {'snap': {'commands': []}}, schema)
-        validate_cloudconfig_schema(
-            {'snap': {'commands': {}}}, schema)
-        self.assertEqual(
-            "WARNING: Invalid config:\nsnap.commands: [] is too short\n"
-            "WARNING: Invalid config:\nsnap.commands: {} does not have enough"
-            " properties\n",
-            self.logs.getvalue())
-
-    @mock.patch('cloudinit.config.cc_snap.run_commands')
-    def test_schema_when_commands_are_list_or_dict(self, _):
-        """No warnings when snap:commands are either a list or dict."""
-        validate_cloudconfig_schema(
-            {'snap': {'commands': ['valid']}}, schema)
-        validate_cloudconfig_schema(
-            {'snap': {'commands': {'01': 'also valid'}}}, schema)
-        self.assertEqual('', self.logs.getvalue())
-
-    @mock.patch('cloudinit.config.cc_snap.run_commands')
-    def test_schema_when_commands_values_are_invalid_type(self, _):
-        """Warnings when snap:commands values are invalid type (e.g. int)"""
-        validate_cloudconfig_schema(
-            {'snap': {'commands': [123]}}, schema)
-        validate_cloudconfig_schema(
-            {'snap': {'commands': {'01': 123}}}, schema)
-        self.assertEqual(
-            "WARNING: Invalid config:\n"
-            "snap.commands.0: 123 is not valid under any of the given"
-            " schemas\n"
-            "WARNING: Invalid config:\n"
-            "snap.commands.01: 123 is not valid under any of the given"
-            " schemas\n",
-            self.logs.getvalue())
-
-    @mock.patch('cloudinit.config.cc_snap.run_commands')
-    def test_schema_when_commands_list_values_are_invalid_type(self, _):
-        """Warnings when snap:commands list values are wrong type (e.g. int)"""
-        validate_cloudconfig_schema(
-            {'snap': {'commands': [["snap", "install", 123]]}}, schema)
-        validate_cloudconfig_schema(
-            {'snap': {'commands': {'01': ["snap", "install", 123]}}}, schema)
-        self.assertEqual(
-            "WARNING: Invalid config:\n"
-            "snap.commands.0: ['snap', 'install', 123] is not valid under any"
-            " of the given schemas\n"
-            "WARNING: Invalid config:\n"
-            "snap.commands.01: ['snap', 'install', 123] is not valid under"
-            " any of the given schemas\n",
-            self.logs.getvalue())
-
-    @mock.patch('cloudinit.config.cc_snap.run_commands')
-    def test_schema_when_assertions_values_are_invalid_type(self, _):
-        """Warnings when snap:assertions values are invalid type (e.g. int)"""
-        validate_cloudconfig_schema(
-            {'snap': {'assertions': [123]}}, schema)
-        validate_cloudconfig_schema(
-            {'snap': {'assertions': {'01': 123}}}, schema)
-        self.assertEqual(
-            "WARNING: Invalid config:\n"
-            "snap.assertions.0: 123 is not of type 'string'\n"
-            "WARNING: Invalid config:\n"
-            "snap.assertions.01: 123 is not of type 'string'\n",
-            self.logs.getvalue())
-
-    @mock.patch('cloudinit.config.cc_snap.add_assertions')
-    def test_warn_schema_assertions_is_not_list_or_dict(self, _):
-        """Warn when snap:assertions config is not a list or dict."""
-        validate_cloudconfig_schema(
-            {'snap': {'assertions': 'broken'}}, schema)
-        self.assertEqual(
-            "WARNING: Invalid config:\nsnap.assertions: 'broken' is not of"
-            " type 'object', 'array'\n",
-            self.logs.getvalue())
-
-    @mock.patch('cloudinit.config.cc_snap.add_assertions')
-    def test_warn_schema_when_assertions_is_empty(self, _):
-        """Emit warnings when snap:assertions is an empty list or dict."""
-        validate_cloudconfig_schema(
-            {'snap': {'assertions': []}}, schema)
-        validate_cloudconfig_schema(
-            {'snap': {'assertions': {}}}, schema)
-        self.assertEqual(
-            "WARNING: Invalid config:\nsnap.assertions: [] is too short\n"
-            "WARNING: Invalid config:\nsnap.assertions: {} does not have"
-            " enough properties\n",
-            self.logs.getvalue())
-
-    @mock.patch('cloudinit.config.cc_snap.add_assertions')
-    def test_schema_when_assertions_are_list_or_dict(self, _):
-        """No warnings when snap:assertions are a list or dict."""
-        validate_cloudconfig_schema(
-            {'snap': {'assertions': ['valid']}}, schema)
-        validate_cloudconfig_schema(
-            {'snap': {'assertions': {'01': 'also valid'}}}, schema)
-        self.assertEqual('', self.logs.getvalue())
-
-    def test_duplicates_are_fine_array_array(self):
-        """Duplicated commands array/array entries are allowed."""
-        self.assertSchemaValid(
-            {'commands': [["echo", "bye"], ["echo", "bye"]]},
-            "command entries can be duplicate.")
-
-    def test_duplicates_are_fine_array_string(self):
-        """Duplicated commands array/string entries are allowed."""
-        self.assertSchemaValid(
-            {'commands': ["echo bye", "echo bye"]},
-            "command entries can be duplicate.")
-
-    def test_duplicates_are_fine_dict_array(self):
-        """Duplicated commands dict/array entries are allowed."""
-        self.assertSchemaValid(
-            {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}},
-            "command entries can be duplicate.")
-
-    def test_duplicates_are_fine_dict_string(self):
-        """Duplicated commands dict/string entries are allowed."""
-        self.assertSchemaValid(
-            {'commands': {'00': "echo bye", '01': "echo bye"}},
-            "command entries can be duplicate.")
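The warning texts asserted throughout TestSchema ('[] is too short', 'does not have enough properties', and so on) are jsonschema's own validation messages, surfaced by validate_cloudconfig_schema as logged warnings. A direct jsonschema sketch producing one of them (toy schema, not the cc_snap schema; requires the jsonschema package):

    import jsonschema

    toy_schema = {
        'type': 'object',
        'properties': {'commands': {'type': ['object', 'array'],
                                    'minItems': 1}},
        'additionalProperties': False,
    }
    try:
        jsonschema.validate({'commands': []}, toy_schema)
    except jsonschema.ValidationError as e:
        print(e.message)  # -> [] is too short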
-
-
-class TestHandle(CiTestCase):
-
-    with_logs = True
-
-    def setUp(self):
-        super(TestHandle, self).setUp()
-        self.tmp = self.tmp_dir()
-
-    @mock.patch('cloudinit.config.cc_snap.run_commands')
-    @mock.patch('cloudinit.config.cc_snap.add_assertions')
-    @mock.patch('cloudinit.config.cc_snap.validate_cloudconfig_schema')
-    def test_handle_no_config(self, m_schema, m_add, m_run):
-        """When no snap-related configuration is provided, nothing happens."""
-        cfg = {}
-        handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None)
-        self.assertIn(
-            "DEBUG: Skipping module named snap, no 'snap' key in config",
-            self.logs.getvalue())
-        m_schema.assert_not_called()
-        m_add.assert_not_called()
-        m_run.assert_not_called()
-
-    @mock.patch('cloudinit.config.cc_snap.run_commands')
-    @mock.patch('cloudinit.config.cc_snap.add_assertions')
-    @mock.patch('cloudinit.config.cc_snap.maybe_install_squashfuse')
-    def test_handle_skips_squashfuse_when_unconfigured(self, m_squash, m_add,
-                                                       m_run):
-        """When squashfuse_in_container is unset, don't attempt to install."""
-        handle(
-            'snap', cfg={'snap': {}}, cloud=None, log=self.logger, args=None)
-        handle(
-            'snap', cfg={'snap': {'squashfuse_in_container': None}},
-            cloud=None, log=self.logger, args=None)
-        handle(
-            'snap', cfg={'snap': {'squashfuse_in_container': False}},
-            cloud=None, log=self.logger, args=None)
-        self.assertEqual([], m_squash.call_args_list)  # No calls
-        # Snap configuration missing assertions and commands will default
-        # to []
-        self.assertIn(mock.call([]), m_add.call_args_list)
-        self.assertIn(mock.call([]), m_run.call_args_list)
-
-    @mock.patch('cloudinit.config.cc_snap.maybe_install_squashfuse')
-    def test_handle_tries_to_install_squashfuse(self, m_squash):
-        """If squashfuse_in_container is True, try installing squashfuse."""
-        cfg = {'snap': {'squashfuse_in_container': True}}
-        mycloud = FakeCloud(None)
-        handle('snap', cfg=cfg, cloud=mycloud, log=self.logger, args=None)
-        self.assertEqual(
-            [mock.call(mycloud)], m_squash.call_args_list)
-
-    def test_handle_runs_commands_provided(self):
-        """If commands are specified as a list, run them."""
-        outfile = self.tmp_path('output.log', dir=self.tmp)
-
-        cfg = {
-            'snap': {'commands': ['echo "HI" >> %s' % outfile,
-                                  'echo "MOM" >> %s' % outfile]}}
-        mock_path = 'cloudinit.config.cc_snap.sys.stderr'
-        with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]):
-            with mock.patch(mock_path, new_callable=StringIO):
-                handle('snap', cfg=cfg, cloud=None, log=self.logger,
-                       args=None)
-
-        self.assertEqual('HI\nMOM\n', util.load_file(outfile))
-
-    @mock.patch('cloudinit.config.cc_snap.subp.subp')
-    def test_handle_adds_assertions(self, m_subp):
-        """Any configured snap assertions are provided to add_assertions."""
-        assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
-        compare_file = self.tmp_path('comparison', dir=self.tmp)
-        cfg = {
-            'snap': {'assertions': [SYSTEM_USER_ASSERTION,
-                                    ACCOUNT_ASSERTION]}}
-        wrap_and_call(
-            'cloudinit.config.cc_snap',
-            {'ASSERTIONS_FILE': {'new': assert_file}},
-            handle, 'snap', cfg=cfg, cloud=None, log=self.logger, args=None)
-        content = '\n'.join(cfg['snap']['assertions'])
-        util.write_file(compare_file, content.encode('utf-8'))
-        self.assertEqual(
-            util.load_file(compare_file), util.load_file(assert_file))
-
-    @mock.patch('cloudinit.config.cc_snap.subp.subp')
-    @skipUnlessJsonSchema()
-    def test_handle_validates_schema(self, m_subp):
-        """Any provided configuration is run through
-        validate_cloudconfig_schema."""
-        assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
-        cfg = {'snap': {'invalid': ''}}  # Generates schema warning
-        wrap_and_call(
-            'cloudinit.config.cc_snap',
-            {'ASSERTIONS_FILE': {'new': assert_file}},
-            handle, 'snap', cfg=cfg, cloud=None,
-            log=self.logger, args=None)
-        self.assertEqual(
-            "WARNING: Invalid config:\nsnap: Additional properties are not"
-            " allowed ('invalid' was unexpected)\n",
-            self.logs.getvalue())
-
-
-class TestMaybeInstallSquashFuse(CiTestCase):
-
-    with_logs = True
-
-    def setUp(self):
-        super(TestMaybeInstallSquashFuse, self).setUp()
-        self.tmp = self.tmp_dir()
-
-    @mock.patch('cloudinit.config.cc_snap.util.is_container')
-    def test_maybe_install_squashfuse_skips_non_containers(self, m_container):
-        """maybe_install_squashfuse does nothing when not on a container."""
-        m_container.return_value = False
-        maybe_install_squashfuse(cloud=FakeCloud(None))
-        self.assertEqual([mock.call()], m_container.call_args_list)
-        self.assertEqual('', self.logs.getvalue())
-
-    @mock.patch('cloudinit.config.cc_snap.util.is_container')
-    def test_maybe_install_squashfuse_raises_install_errors(self,
-                                                            m_container):
-        """maybe_install_squashfuse logs and raises package install errors."""
-        m_container.return_value = True
-        distro = mock.MagicMock()
-        distro.install_packages.side_effect = RuntimeError(
-            'Some apt error')
-        with self.assertRaises(RuntimeError) as context_manager:
-            maybe_install_squashfuse(cloud=FakeCloud(distro))
-        self.assertEqual('Some apt error', str(context_manager.exception))
-        self.assertIn('Failed to install squashfuse\nTraceback',
-                      self.logs.getvalue())
-
-    @mock.patch('cloudinit.config.cc_snap.util.is_container')
-    def test_maybe_install_squashfuse_raises_update_errors(self, m_container):
-        """maybe_install_squashfuse logs and raises package update errors."""
-        m_container.return_value = True
-        distro = mock.MagicMock()
-        distro.update_package_sources.side_effect = RuntimeError(
-            'Some apt error')
-        with self.assertRaises(RuntimeError) as context_manager:
-            maybe_install_squashfuse(cloud=FakeCloud(distro))
-        self.assertEqual('Some apt error', str(context_manager.exception))
-        self.assertIn('Package update failed\nTraceback',
-                      self.logs.getvalue())
-
-    @mock.patch('cloudinit.config.cc_snap.util.is_container')
-    def test_maybe_install_squashfuse_happy_path(self, m_container):
-        """maybe_install_squashfuse installs squashfuse in a container."""
-        m_container.return_value = True
-        distro = mock.MagicMock()  # No errors raised
-        maybe_install_squashfuse(cloud=FakeCloud(distro))
-        self.assertEqual(
-            [mock.call()], distro.update_package_sources.call_args_list)
-        self.assertEqual(
-            [mock.call(['squashfuse'])],
-            distro.install_packages.call_args_list)
-
-# vi: ts=4 expandtab
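wrap_and_call, used heavily above, patches named attributes in a module just for the duration of a single call; that is how these tests point the module constant ASSERTIONS_FILE at a temporary path. The stdlib equivalent, shown on a stand-in namespace object rather than the real module:

    import types
    from unittest import mock

    mod = types.SimpleNamespace(
        ASSERTIONS_FILE='/var/lib/cloud/instance/snapd.assertions')

    with mock.patch.object(mod, 'ASSERTIONS_FILE',
                           new='/tmp/test.assertions'):
        assert mod.ASSERTIONS_FILE == '/tmp/test.assertions'
    # The original value is restored once the context exits
    assert mod.ASSERTIONS_FILE.endswith('snapd.assertions')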
diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py
deleted file mode 100644
index 87ccdb60..00000000
--- a/cloudinit/config/tests/test_ssh.py
+++ /dev/null
@@ -1,405 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import os.path
-
-from cloudinit.config import cc_ssh
-from cloudinit import ssh_util
-from cloudinit.tests.helpers import CiTestCase, mock
-import logging
-
-LOG = logging.getLogger(__name__)
-
-MODPATH = "cloudinit.config.cc_ssh."
-KEY_NAMES_NO_DSA = [name for name in cc_ssh.GENERATE_KEY_NAMES
-                    if name not in 'dsa']
-
-
-@mock.patch(MODPATH + "ssh_util.setup_user_keys")
-class TestHandleSsh(CiTestCase):
-    """Test cc_ssh handling of ssh config."""
-
-    def _publish_hostkey_test_setup(self):
-        self.test_hostkeys = {
-            'dsa': ('ssh-dss', 'AAAAB3NzaC1kc3MAAACB'),
-            'ecdsa': ('ecdsa-sha2-nistp256', 'AAAAE2VjZ'),
-            'ed25519': ('ssh-ed25519', 'AAAAC3NzaC1lZDI'),
-            'rsa': ('ssh-rsa', 'AAAAB3NzaC1yc2EAAA'),
-        }
-        self.test_hostkey_files = []
-        hostkey_tmpdir = self.tmp_dir()
-        for key_type in cc_ssh.GENERATE_KEY_NAMES:
-            key_data = self.test_hostkeys[key_type]
-            filename = 'ssh_host_%s_key.pub' % key_type
-            filepath = os.path.join(hostkey_tmpdir, filename)
-            self.test_hostkey_files.append(filepath)
-            with open(filepath, 'w') as f:
-                f.write(' '.join(key_data))
-
-        cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, 'ssh_host_%s_key')
-
-    def test_apply_credentials_with_user(self, m_setup_keys):
-        """Apply keys for the given user and root."""
-        keys = ["key1"]
-        user = "clouduser"
-        cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS)
-        self.assertEqual([mock.call(set(keys), user),
-                          mock.call(set(keys), "root", options="")],
-                         m_setup_keys.call_args_list)
-
-    def test_apply_credentials_with_no_user(self, m_setup_keys):
-        """Apply keys for root only."""
-        keys = ["key1"]
-        user = None
-        cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS)
-        self.assertEqual([mock.call(set(keys), "root", options="")],
-                         m_setup_keys.call_args_list)
-
-    def test_apply_credentials_with_user_disable_root(self, m_setup_keys):
-        """Apply keys for the given user and disable root ssh."""
-        keys = ["key1"]
-        user = "clouduser"
-        options = ssh_util.DISABLE_USER_OPTS
-        cc_ssh.apply_credentials(keys, user, True, options)
-        options = options.replace("$USER", user)
-        options = options.replace("$DISABLE_USER", "root")
-        self.assertEqual([mock.call(set(keys), user),
-                          mock.call(set(keys), "root", options=options)],
-                         m_setup_keys.call_args_list)
-
-    def test_apply_credentials_with_no_user_disable_root(self, m_setup_keys):
-        """Apply keys with no user and disable root ssh."""
-        keys = ["key1"]
-        user = None
-        options = ssh_util.DISABLE_USER_OPTS
-        cc_ssh.apply_credentials(keys, user, True, options)
-        options = options.replace("$USER", "NONE")
-        options = options.replace("$DISABLE_USER", "root")
-        self.assertEqual([mock.call(set(keys), "root", options=options)],
-                         m_setup_keys.call_args_list)
-
-    @mock.patch(MODPATH + "glob.glob")
-    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
-    @mock.patch(MODPATH + "os.path.exists")
-    def test_handle_no_cfg(self, m_path_exists, m_nug,
-                           m_glob, m_setup_keys):
-        """Test handle with no config ignores generating existing keyfiles."""
-        cfg = {}
-        keys = ["key1"]
-        m_glob.return_value = []  # Return no matching keys to prevent removal
-        # Mock os.path.exists to True to short-circuit the key writing logic
-        m_path_exists.return_value = True
-        m_nug.return_value = ([], {})
-        cc_ssh.PUBLISH_HOST_KEYS = False
-        cloud = self.tmp_cloud(
-            distro='ubuntu', metadata={'public-keys': keys})
-        cc_ssh.handle("name", cfg, cloud, LOG, None)
-        options = ssh_util.DISABLE_USER_OPTS.replace("$USER", "NONE")
-        options = options.replace("$DISABLE_USER", "root")
-        m_glob.assert_called_once_with('/etc/ssh/ssh_host_*key*')
-        self.assertIn(
-            [mock.call('/etc/ssh/ssh_host_rsa_key'),
-             mock.call('/etc/ssh/ssh_host_dsa_key'),
-             mock.call('/etc/ssh/ssh_host_ecdsa_key'),
-             mock.call('/etc/ssh/ssh_host_ed25519_key')],
-            m_path_exists.call_args_list)
-        self.assertEqual([mock.call(set(keys), "root", options=options)],
-                         m_setup_keys.call_args_list)
-
-    @mock.patch(MODPATH + "glob.glob")
-    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
-    @mock.patch(MODPATH + "os.path.exists")
-    def test_dont_allow_public_ssh_keys(self, m_path_exists, m_nug,
-                                        m_glob, m_setup_keys):
-        """Test allow_public_ssh_keys=False ignores ssh public keys from
-        platform.
-        """
-        cfg = {"allow_public_ssh_keys": False}
-        keys = ["key1"]
-        user = "clouduser"
-        m_glob.return_value = []  # Return no matching keys to prevent removal
-        # Mock os.path.exists to True to short-circuit the key writing logic
-        m_path_exists.return_value = True
-        m_nug.return_value = ({user: {"default": user}}, {})
-        cloud = self.tmp_cloud(
-            distro='ubuntu', metadata={'public-keys': keys})
-        cc_ssh.handle("name", cfg, cloud, LOG, None)
-
-        options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
-        options = options.replace("$DISABLE_USER", "root")
-        self.assertEqual([mock.call(set(), user),
-                          mock.call(set(), "root", options=options)],
-                         m_setup_keys.call_args_list)
-
-    @mock.patch(MODPATH + "glob.glob")
-    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
-    @mock.patch(MODPATH + "os.path.exists")
-    def test_handle_no_cfg_and_default_root(self, m_path_exists, m_nug,
-                                            m_glob, m_setup_keys):
-        """Test handle with no config and a default distro user."""
-        cfg = {}
-        keys = ["key1"]
-        user = "clouduser"
-        m_glob.return_value = []  # Return no matching keys to prevent removal
-        # Mock os.path.exists to True to short-circuit the key writing logic
-        m_path_exists.return_value = True
-        m_nug.return_value = ({user: {"default": user}}, {})
-        cloud = self.tmp_cloud(
-            distro='ubuntu', metadata={'public-keys': keys})
-        cc_ssh.handle("name", cfg, cloud, LOG, None)
-
-        options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
-        options = options.replace("$DISABLE_USER", "root")
-        self.assertEqual([mock.call(set(keys), user),
-                          mock.call(set(keys), "root", options=options)],
-                         m_setup_keys.call_args_list)
-
-    @mock.patch(MODPATH + "glob.glob")
-    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
-    @mock.patch(MODPATH + "os.path.exists")
-    def test_handle_cfg_with_explicit_disable_root(self, m_path_exists,
-                                                   m_nug, m_glob,
-                                                   m_setup_keys):
-        """Test handle with explicit disable_root and a default distro
-        user."""
-        # This test is identical to test_handle_no_cfg_and_default_root,
-        # except this uses an explicit cfg value
-        cfg = {"disable_root": True}
-        keys = ["key1"]
-        user = "clouduser"
-        m_glob.return_value = []  # Return no matching keys to prevent removal
-        # Mock os.path.exists to True to short-circuit the key writing logic
-        m_path_exists.return_value = True
-        m_nug.return_value = ({user: {"default": user}}, {})
-        cloud = self.tmp_cloud(
-            distro='ubuntu', metadata={'public-keys': keys})
-        cc_ssh.handle("name", cfg, cloud, LOG, None)
-
-        options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
-        options = options.replace("$DISABLE_USER", "root")
-        self.assertEqual([mock.call(set(keys), user),
-                          mock.call(set(keys), "root", options=options)],
-                         m_setup_keys.call_args_list)
-
-    @mock.patch(MODPATH + "glob.glob")
-    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
-    @mock.patch(MODPATH + "os.path.exists")
-    def test_handle_cfg_without_disable_root(self, m_path_exists, m_nug,
-                                             m_glob, m_setup_keys):
-        """Test handle with disable_root == False."""
-        # When disable_root == False, the ssh redirect for root is skipped
-        cfg = {"disable_root": False}
-        keys = ["key1"]
-        user = "clouduser"
-        m_glob.return_value = []  # Return no matching keys to prevent removal
-        # Mock os.path.exists to True to short-circuit the key writing logic
-        m_path_exists.return_value = True
-        m_nug.return_value = ({user: {"default": user}}, {})
-        cloud = self.tmp_cloud(
-            distro='ubuntu', metadata={'public-keys': keys})
-        cloud.get_public_ssh_keys = mock.Mock(return_value=keys)
-        cc_ssh.handle("name", cfg, cloud, LOG, None)
-
-        self.assertEqual([mock.call(set(keys), user),
-                          mock.call(set(keys), "root", options="")],
-                         m_setup_keys.call_args_list)
-
-    @mock.patch(MODPATH + "glob.glob")
-    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
-    @mock.patch(MODPATH + "os.path.exists")
-    def test_handle_publish_hostkeys_default(
-            self, m_path_exists, m_nug, m_glob, m_setup_keys):
-        """Test handle with various configs for ssh_publish_hostkeys."""
-        self._publish_hostkey_test_setup()
-        cc_ssh.PUBLISH_HOST_KEYS = True
-        keys = ["key1"]
-        user = "clouduser"
-        # Return no matching keys for first glob, test keys for second.
-        m_glob.side_effect = iter([
-            [],
-            self.test_hostkey_files,
-        ])
-        # Mock os.path.exists to True to short-circuit the key writing logic
-        m_path_exists.return_value = True
-        m_nug.return_value = ({user: {"default": user}}, {})
-        cloud = self.tmp_cloud(
-            distro='ubuntu', metadata={'public-keys': keys})
-        cloud.datasource.publish_host_keys = mock.Mock()
-
-        cfg = {}
-        expected_call = [self.test_hostkeys[key_type] for key_type
-                         in KEY_NAMES_NO_DSA]
-        cc_ssh.handle("name", cfg, cloud, LOG, None)
-        self.assertEqual([mock.call(expected_call)],
-                         cloud.datasource.publish_host_keys.call_args_list)
-
-    @mock.patch(MODPATH + "glob.glob")
-    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
-    @mock.patch(MODPATH + "os.path.exists")
-    def test_handle_publish_hostkeys_config_enable(
-            self, m_path_exists, m_nug, m_glob, m_setup_keys):
-        """Test handle with various configs for ssh_publish_hostkeys."""
-        self._publish_hostkey_test_setup()
-        cc_ssh.PUBLISH_HOST_KEYS = False
-        keys = ["key1"]
-        user = "clouduser"
-        # Return no matching keys for first glob, test keys for second.
-        m_glob.side_effect = iter([
-            [],
-            self.test_hostkey_files,
-        ])
-        # Mock os.path.exists to True to short-circuit the key writing logic
-        m_path_exists.return_value = True
-        m_nug.return_value = ({user: {"default": user}}, {})
-        cloud = self.tmp_cloud(
-            distro='ubuntu', metadata={'public-keys': keys})
-        cloud.datasource.publish_host_keys = mock.Mock()
-
-        cfg = {'ssh_publish_hostkeys': {'enabled': True}}
-        expected_call = [self.test_hostkeys[key_type] for key_type
-                         in KEY_NAMES_NO_DSA]
-        cc_ssh.handle("name", cfg, cloud, LOG, None)
-        self.assertEqual([mock.call(expected_call)],
-                         cloud.datasource.publish_host_keys.call_args_list)
-
-    @mock.patch(MODPATH + "glob.glob")
-    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
-    @mock.patch(MODPATH + "os.path.exists")
-    def test_handle_publish_hostkeys_config_disable(
-            self, m_path_exists, m_nug, m_glob, m_setup_keys):
-        """Test handle with various configs for ssh_publish_hostkeys."""
-        self._publish_hostkey_test_setup()
-        cc_ssh.PUBLISH_HOST_KEYS = True
-        keys = ["key1"]
-        user = "clouduser"
-        # Return no matching keys for first glob, test keys for second.
-        m_glob.side_effect = iter([
-            [],
-            self.test_hostkey_files,
-        ])
-        # Mock os.path.exists to True to short-circuit the key writing logic
-        m_path_exists.return_value = True
-        m_nug.return_value = ({user: {"default": user}}, {})
-        cloud = self.tmp_cloud(
-            distro='ubuntu', metadata={'public-keys': keys})
-        cloud.datasource.publish_host_keys = mock.Mock()
-
-        cfg = {'ssh_publish_hostkeys': {'enabled': False}}
-        cc_ssh.handle("name", cfg, cloud, LOG, None)
-        self.assertFalse(cloud.datasource.publish_host_keys.call_args_list)
-        cloud.datasource.publish_host_keys.assert_not_called()
-
-    @mock.patch(MODPATH + "glob.glob")
-    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
-    @mock.patch(MODPATH + "os.path.exists")
-    def test_handle_publish_hostkeys_config_blacklist(
-            self, m_path_exists, m_nug, m_glob, m_setup_keys):
-        """Test handle with various configs for ssh_publish_hostkeys."""
-        self._publish_hostkey_test_setup()
-        cc_ssh.PUBLISH_HOST_KEYS = True
-        keys = ["key1"]
-        user = "clouduser"
-        # Return no matching keys for first glob, test keys for second.
-        m_glob.side_effect = iter([
-            [],
-            self.test_hostkey_files,
-        ])
-        # Mock os.path.exists to True to short-circuit the key writing logic
-        m_path_exists.return_value = True
-        m_nug.return_value = ({user: {"default": user}}, {})
-        cloud = self.tmp_cloud(
-            distro='ubuntu', metadata={'public-keys': keys})
-        cloud.datasource.publish_host_keys = mock.Mock()
-
-        cfg = {'ssh_publish_hostkeys': {'enabled': True,
-                                        'blacklist': ['dsa', 'rsa']}}
-        expected_call = [self.test_hostkeys[key_type] for key_type
-                         in ['ecdsa', 'ed25519']]
-        cc_ssh.handle("name", cfg, cloud, LOG, None)
-        self.assertEqual([mock.call(expected_call)],
-                         cloud.datasource.publish_host_keys.call_args_list)
-
-    @mock.patch(MODPATH + "glob.glob")
-    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
-    @mock.patch(MODPATH + "os.path.exists")
-    def test_handle_publish_hostkeys_empty_blacklist(
-            self, m_path_exists, m_nug, m_glob, m_setup_keys):
-        """Test handle with various configs for ssh_publish_hostkeys."""
-        self._publish_hostkey_test_setup()
-        cc_ssh.PUBLISH_HOST_KEYS = True
-        keys = ["key1"]
-        user = "clouduser"
-        # Return no matching keys for first glob, test keys for second.
-        m_glob.side_effect = iter([
-            [],
-            self.test_hostkey_files,
-        ])
-        # Mock os.path.exists to True to short-circuit the key writing logic
-        m_path_exists.return_value = True
-        m_nug.return_value = ({user: {"default": user}}, {})
-        cloud = self.tmp_cloud(
-            distro='ubuntu', metadata={'public-keys': keys})
-        cloud.datasource.publish_host_keys = mock.Mock()
-
-        cfg = {'ssh_publish_hostkeys': {'enabled': True,
-                                        'blacklist': []}}
-        expected_call = [self.test_hostkeys[key_type] for key_type
-                         in cc_ssh.GENERATE_KEY_NAMES]
-        cc_ssh.handle("name", cfg, cloud, LOG, None)
-        self.assertEqual([mock.call(expected_call)],
-                         cloud.datasource.publish_host_keys.call_args_list)
-
-    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
-    @mock.patch(MODPATH + "util.write_file")
-    def test_handle_ssh_keys_in_cfg(self, m_write_file, m_nug, m_setup_keys):
-        """Test handle with ssh keys and certificate."""
-        # Populate a config dictionary to pass to handle() as well
-        # as the expected file-writing calls.
-        cfg = {"ssh_keys": {}}
-
-        expected_calls = []
-        for key_type in cc_ssh.GENERATE_KEY_NAMES:
-            private_name = "{}_private".format(key_type)
-            public_name = "{}_public".format(key_type)
-            cert_name = "{}_certificate".format(key_type)
-
-            # Actual key contents don't have to be realistic
-            private_value = "{}_PRIVATE_KEY".format(key_type)
-            public_value = "{}_PUBLIC_KEY".format(key_type)
-            cert_value = "{}_CERT_KEY".format(key_type)
-
-            cfg["ssh_keys"][private_name] = private_value
-            cfg["ssh_keys"][public_name] = public_value
-            cfg["ssh_keys"][cert_name] = cert_value
-
-            expected_calls.extend([
-                mock.call(
-                    '/etc/ssh/ssh_host_{}_key'.format(key_type),
-                    private_value,
-                    384
-                ),
-                mock.call(
-                    '/etc/ssh/ssh_host_{}_key.pub'.format(key_type),
-                    public_value,
-                    384
-                ),
-                mock.call(
-                    '/etc/ssh/ssh_host_{}_key-cert.pub'.format(key_type),
-                    cert_value,
-                    384
-                ),
-                mock.call(
-                    '/etc/ssh/sshd_config',
-                    ('HostCertificate /etc/ssh/ssh_host_{}_key-cert.pub'
-                     '\n'.format(key_type)),
-                    preserve_mode=True
-                )
-            ])
-
-        # Run the handler.
-        m_nug.return_value = ([], {})
-        with mock.patch(MODPATH + 'ssh_util.parse_ssh_config',
-                        return_value=[]):
-            cc_ssh.handle("name", cfg, self.tmp_cloud(distro='ubuntu'),
-                          LOG, None)
-
-        # Check that all expected output has been done.
-        for call_ in expected_calls:
-            self.assertIn(call_, m_write_file.call_args_list)
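The ssh_keys test above encodes the on-disk naming scheme for configured host keys: one private/public/certificate triple per key type, each written with mode 384 (decimal for 0o600), plus a HostCertificate line appended to sshd_config. The path scheme in isolation (the key-type list mirrors cc_ssh.GENERATE_KEY_NAMES):

    for key_type in ('rsa', 'dsa', 'ecdsa', 'ed25519'):
        private = '/etc/ssh/ssh_host_%s_key' % key_type
        public = private + '.pub'
        cert = private + '-cert.pub'
        # Keys are written owner-read/write only:
        assert oct(384) == '0o600'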
ua attach SomeToken\n', - self.logs.getvalue()) - - @mock.patch('%s.subp.subp' % MPATH) - def test_configure_ua_attach_on_service_error(self, m_subp): - """all services should be enabled and then any failures raised""" - - def fake_subp(cmd, capture=None): - fail_cmds = [['ua', 'enable', svc] for svc in ['esm', 'cc']] - if cmd in fail_cmds and capture: - svc = cmd[-1] - raise subp.ProcessExecutionError( - 'Invalid {} credentials'.format(svc.upper())) - - m_subp.side_effect = fake_subp - - with self.assertRaises(RuntimeError) as context_manager: - configure_ua(token='SomeToken', enable=['esm', 'cc', 'fips']) - self.assertEqual( - m_subp.call_args_list, - [mock.call(['ua', 'attach', 'SomeToken']), - mock.call(['ua', 'enable', 'esm'], capture=True), - mock.call(['ua', 'enable', 'cc'], capture=True), - mock.call(['ua', 'enable', 'fips'], capture=True)]) - self.assertIn( - 'WARNING: Failure enabling "esm":\nUnexpected error' - ' while running command.\nCommand: -\nExit code: -\nReason: -\n' - 'Stdout: Invalid ESM credentials\nStderr: -\n', - self.logs.getvalue()) - self.assertIn( - 'WARNING: Failure enabling "cc":\nUnexpected error' - ' while running command.\nCommand: -\nExit code: -\nReason: -\n' - 'Stdout: Invalid CC credentials\nStderr: -\n', - self.logs.getvalue()) - self.assertEqual( - 'Failure enabling Ubuntu Advantage service(s): "esm", "cc"', - str(context_manager.exception)) - - @mock.patch('%s.subp.subp' % MPATH) - def test_configure_ua_attach_with_empty_services(self, m_subp): - """When services is an empty list, do not auto-enable attach.""" - configure_ua(token='SomeToken', enable=[]) - m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken']) - self.assertEqual( - 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', - self.logs.getvalue()) - - @mock.patch('%s.subp.subp' % MPATH) - def test_configure_ua_attach_with_specific_services(self, m_subp): - """When services a list, only enable specific services.""" - configure_ua(token='SomeToken', enable=['fips']) - self.assertEqual( - m_subp.call_args_list, - [mock.call(['ua', 'attach', 'SomeToken']), - mock.call(['ua', 'enable', 'fips'], capture=True)]) - self.assertEqual( - 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', - self.logs.getvalue()) - - @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) - @mock.patch('%s.subp.subp' % MPATH) - def test_configure_ua_attach_with_string_services(self, m_subp): - """When services a string, treat as singleton list and warn""" - configure_ua(token='SomeToken', enable='fips') - self.assertEqual( - m_subp.call_args_list, - [mock.call(['ua', 'attach', 'SomeToken']), - mock.call(['ua', 'enable', 'fips'], capture=True)]) - self.assertEqual( - 'WARNING: ubuntu_advantage: enable should be a list, not a' - ' string; treating as a single enable\n' - 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', - self.logs.getvalue()) - - @mock.patch('%s.subp.subp' % MPATH) - def test_configure_ua_attach_with_weird_services(self, m_subp): - """When services not string or list, warn but still attach""" - configure_ua(token='SomeToken', enable={'deffo': 'wont work'}) - self.assertEqual( - m_subp.call_args_list, - [mock.call(['ua', 'attach', 'SomeToken'])]) - self.assertEqual( - 'WARNING: ubuntu_advantage: enable should be a list, not a' - ' dict; skipping enabling services\n' - 'DEBUG: Attaching to Ubuntu Advantage. 
ua attach SomeToken\n', - self.logs.getvalue()) - - -@skipUnlessJsonSchema() -class TestSchema(CiTestCase, SchemaTestCaseMixin): - - with_logs = True - schema = schema - - @mock.patch('%s.maybe_install_ua_tools' % MPATH) - @mock.patch('%s.configure_ua' % MPATH) - def test_schema_warns_on_ubuntu_advantage_not_dict(self, _cfg, _): - """If ubuntu_advantage configuration is not a dict, emit a warning.""" - validate_cloudconfig_schema({'ubuntu_advantage': 'wrong type'}, schema) - self.assertEqual( - "WARNING: Invalid config:\nubuntu_advantage: 'wrong type' is not" - " of type 'object'\n", - self.logs.getvalue()) - - @mock.patch('%s.maybe_install_ua_tools' % MPATH) - @mock.patch('%s.configure_ua' % MPATH) - def test_schema_disallows_unknown_keys(self, _cfg, _): - """Unknown keys in ubuntu_advantage configuration emit warnings.""" - validate_cloudconfig_schema( - {'ubuntu_advantage': {'token': 'winner', 'invalid-key': ''}}, - schema) - self.assertIn( - 'WARNING: Invalid config:\nubuntu_advantage: Additional properties' - " are not allowed ('invalid-key' was unexpected)", - self.logs.getvalue()) - - @mock.patch('%s.maybe_install_ua_tools' % MPATH) - @mock.patch('%s.configure_ua' % MPATH) - def test_warn_schema_requires_token(self, _cfg, _): - """Warn if ubuntu_advantage configuration lacks token.""" - validate_cloudconfig_schema( - {'ubuntu_advantage': {'enable': ['esm']}}, schema) - self.assertEqual( - "WARNING: Invalid config:\nubuntu_advantage:" - " 'token' is a required property\n", self.logs.getvalue()) - - @mock.patch('%s.maybe_install_ua_tools' % MPATH) - @mock.patch('%s.configure_ua' % MPATH) - def test_warn_schema_services_is_not_list_or_dict(self, _cfg, _): - """Warn when ubuntu_advantage:enable config is not a list.""" - validate_cloudconfig_schema( - {'ubuntu_advantage': {'enable': 'needslist'}}, schema) - self.assertEqual( - "WARNING: Invalid config:\nubuntu_advantage: 'token' is a" - " required property\nubuntu_advantage.enable: 'needslist'" - " is not of type 'array'\n", - self.logs.getvalue()) - - -class TestHandle(CiTestCase): - - with_logs = True - - def setUp(self): - super(TestHandle, self).setUp() - self.tmp = self.tmp_dir() - - @mock.patch('%s.validate_cloudconfig_schema' % MPATH) - def test_handle_no_config(self, m_schema): - """When no ua-related configuration is provided, nothing happens.""" - cfg = {} - handle('ua-test', cfg=cfg, cloud=None, log=self.logger, args=None) - self.assertIn( - "DEBUG: Skipping module named ua-test, no 'ubuntu_advantage'" - ' configuration found', - self.logs.getvalue()) - m_schema.assert_not_called() - - @mock.patch('%s.configure_ua' % MPATH) - @mock.patch('%s.maybe_install_ua_tools' % MPATH) - def test_handle_tries_to_install_ubuntu_advantage_tools( - self, m_install, m_cfg): - """If ubuntu_advantage is provided, try installing ua-tools package.""" - cfg = {'ubuntu_advantage': {'token': 'valid'}} - mycloud = FakeCloud(None) - handle('nomatter', cfg=cfg, cloud=mycloud, log=self.logger, args=None) - m_install.assert_called_once_with(mycloud) - - @mock.patch('%s.configure_ua' % MPATH) - @mock.patch('%s.maybe_install_ua_tools' % MPATH) - def test_handle_passes_credentials_and_services_to_configure_ua( - self, m_install, m_configure_ua): - """All ubuntu_advantage config keys are passed to configure_ua.""" - cfg = {'ubuntu_advantage': {'token': 'token', 'enable': ['esm']}} - handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) - m_configure_ua.assert_called_once_with( - token='token', enable=['esm']) - - 
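The handle() tests above all reduce to one mapping: a well-formed
'ubuntu_advantage' config dict becomes a single 'ua attach' call followed by
one 'ua enable' call per requested service. A minimal sketch of that expected
call sequence, reusing the illustrative token and service names from these
tests (this sketch is not part of the original test file):

# Sketch only: the subp calls the assertions above expect configure_ua()
# to issue for a parsed 'ubuntu_advantage' config dict.
cfg = {'ubuntu_advantage': {'token': 'SomeToken', 'enable': ['esm', 'fips']}}
ua_cfg = cfg['ubuntu_advantage']
expected_subp_calls = [['ua', 'attach', ua_cfg['token']]] + [
    ['ua', 'enable', svc] for svc in ua_cfg.get('enable', [])
]
# The tests assert these in order, with capture=True on each 'ua enable':
# [['ua', 'attach', 'SomeToken'], ['ua', 'enable', 'esm'],
#  ['ua', 'enable', 'fips']]
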
@mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) - @mock.patch('%s.configure_ua' % MPATH) - def test_handle_warns_on_deprecated_ubuntu_advantage_key_w_config( - self, m_configure_ua): - """Warning when ubuntu-advantage key is present with new config""" - cfg = {'ubuntu-advantage': {'token': 'token', 'enable': ['esm']}} - handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) - self.assertEqual( - 'WARNING: Deprecated configuration key "ubuntu-advantage"' - ' provided. Expected underscore delimited "ubuntu_advantage";' - ' will attempt to continue.', - self.logs.getvalue().splitlines()[0]) - m_configure_ua.assert_called_once_with( - token='token', enable=['esm']) - - def test_handle_error_on_deprecated_commands_key_dashed(self): - """Error when commands is present in ubuntu-advantage key.""" - cfg = {'ubuntu-advantage': {'commands': 'nogo'}} - with self.assertRaises(RuntimeError) as context_manager: - handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) - self.assertEqual( - 'Deprecated configuration "ubuntu-advantage: commands" provided.' - ' Expected "token"', - str(context_manager.exception)) - - def test_handle_error_on_deprecated_commands_key_underscored(self): - """Error when commands is present in ubuntu_advantage key.""" - cfg = {'ubuntu_advantage': {'commands': 'nogo'}} - with self.assertRaises(RuntimeError) as context_manager: - handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) - self.assertEqual( - 'Deprecated configuration "ubuntu-advantage: commands" provided.' - ' Expected "token"', - str(context_manager.exception)) - - @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) - @mock.patch('%s.configure_ua' % MPATH) - def test_handle_prefers_new_style_config( - self, m_configure_ua): - """ubuntu_advantage should be preferred over ubuntu-advantage""" - cfg = { - 'ubuntu-advantage': {'token': 'nope', 'enable': ['wrong']}, - 'ubuntu_advantage': {'token': 'token', 'enable': ['esm']}, - } - handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) - self.assertEqual( - 'WARNING: Deprecated configuration key "ubuntu-advantage"' - ' provided. 
Expected underscore delimited "ubuntu_advantage";' - ' will attempt to continue.', - self.logs.getvalue().splitlines()[0]) - m_configure_ua.assert_called_once_with( - token='token', enable=['esm']) - - -class TestMaybeInstallUATools(CiTestCase): - - with_logs = True - - def setUp(self): - super(TestMaybeInstallUATools, self).setUp() - self.tmp = self.tmp_dir() - - @mock.patch('%s.subp.which' % MPATH) - def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which): - """Do nothing if ubuntu-advantage-tools already exists.""" - m_which.return_value = '/usr/bin/ua' # already installed - distro = mock.MagicMock() - distro.update_package_sources.side_effect = RuntimeError( - 'Some apt error') - maybe_install_ua_tools(cloud=FakeCloud(distro)) # No RuntimeError - - @mock.patch('%s.subp.which' % MPATH) - def test_maybe_install_ua_tools_raises_update_errors(self, m_which): - """maybe_install_ua_tools logs and raises apt update errors.""" - m_which.return_value = None - distro = mock.MagicMock() - distro.update_package_sources.side_effect = RuntimeError( - 'Some apt error') - with self.assertRaises(RuntimeError) as context_manager: - maybe_install_ua_tools(cloud=FakeCloud(distro)) - self.assertEqual('Some apt error', str(context_manager.exception)) - self.assertIn('Package update failed\nTraceback', self.logs.getvalue()) - - @mock.patch('%s.subp.which' % MPATH) - def test_maybe_install_ua_raises_install_errors(self, m_which): - """maybe_install_ua_tools logs and raises package install errors.""" - m_which.return_value = None - distro = mock.MagicMock() - distro.update_package_sources.return_value = None - distro.install_packages.side_effect = RuntimeError( - 'Some install error') - with self.assertRaises(RuntimeError) as context_manager: - maybe_install_ua_tools(cloud=FakeCloud(distro)) - self.assertEqual('Some install error', str(context_manager.exception)) - self.assertIn( - 'Failed to install ubuntu-advantage-tools\n', self.logs.getvalue()) - - @mock.patch('%s.subp.which' % MPATH) - def test_maybe_install_ua_tools_happy_path(self, m_which): - """maybe_install_ua_tools installs ubuntu-advantage-tools.""" - m_which.return_value = None - distro = mock.MagicMock() # No errors raised - maybe_install_ua_tools(cloud=FakeCloud(distro)) - distro.update_package_sources.assert_called_once_with() - distro.install_packages.assert_called_once_with( - ['ubuntu-advantage-tools']) - -# vi: ts=4 expandtab diff --git a/cloudinit/config/tests/test_ubuntu_drivers.py b/cloudinit/config/tests/test_ubuntu_drivers.py deleted file mode 100644 index 504ba356..00000000 --- a/cloudinit/config/tests/test_ubuntu_drivers.py +++ /dev/null @@ -1,244 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import copy -import os - -from cloudinit.tests.helpers import CiTestCase, skipUnlessJsonSchema, mock -from cloudinit.config.schema import ( - SchemaValidationError, validate_cloudconfig_schema) -from cloudinit.config import cc_ubuntu_drivers as drivers -from cloudinit.subp import ProcessExecutionError - -MPATH = "cloudinit.config.cc_ubuntu_drivers." -M_TMP_PATH = MPATH + "temp_utils.mkdtemp" -OLD_UBUNTU_DRIVERS_ERROR_STDERR = ( - "ubuntu-drivers: error: argument : invalid choice: 'install' " - "(choose from 'list', 'autoinstall', 'devices', 'debug')\n") - - -# The tests in this module call helper methods which are decorated with -# mock.patch. 
pylint doesn't understand that mock.patch passes parameters to -# the decorated function, so it incorrectly reports that we aren't passing -# values for all parameters. Instead of annotating every single call, we -# disable it for the entire module: -# pylint: disable=no-value-for-parameter - -class AnyTempScriptAndDebconfFile(object): - - def __init__(self, tmp_dir, debconf_file): - self.tmp_dir = tmp_dir - self.debconf_file = debconf_file - - def __eq__(self, cmd): - if not len(cmd) == 2: - return False - script, debconf_file = cmd - if bool(script.startswith(self.tmp_dir) and script.endswith('.sh')): - return debconf_file == self.debconf_file - return False - - -class TestUbuntuDrivers(CiTestCase): - cfg_accepted = {'drivers': {'nvidia': {'license-accepted': True}}} - install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia'] - - with_logs = True - - @skipUnlessJsonSchema() - def test_schema_requires_boolean_for_license_accepted(self): - with self.assertRaisesRegex( - SchemaValidationError, ".*license-accepted.*TRUE.*boolean"): - validate_cloudconfig_schema( - {'drivers': {'nvidia': {'license-accepted': "TRUE"}}}, - schema=drivers.schema, strict=True) - - @mock.patch(M_TMP_PATH) - @mock.patch(MPATH + "subp.subp", return_value=('', '')) - @mock.patch(MPATH + "subp.which", return_value=False) - def _assert_happy_path_taken( - self, config, m_which, m_subp, m_tmp): - """Positive path test through handle. Package should be installed.""" - tdir = self.tmp_dir() - debconf_file = os.path.join(tdir, 'nvidia.template') - m_tmp.return_value = tdir - myCloud = mock.MagicMock() - drivers.handle('ubuntu_drivers', config, myCloud, None, None) - self.assertEqual([mock.call(['ubuntu-drivers-common'])], - myCloud.distro.install_packages.call_args_list) - self.assertEqual( - [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), - mock.call(self.install_gpgpu)], - m_subp.call_args_list) - - def test_handle_does_package_install(self): - self._assert_happy_path_taken(self.cfg_accepted) - - def test_trueish_strings_are_considered_approval(self): - for true_value in ['yes', 'true', 'on', '1']: - new_config = copy.deepcopy(self.cfg_accepted) - new_config['drivers']['nvidia']['license-accepted'] = true_value - self._assert_happy_path_taken(new_config) - - @mock.patch(M_TMP_PATH) - @mock.patch(MPATH + "subp.subp") - @mock.patch(MPATH + "subp.which", return_value=False) - def test_handle_raises_error_if_no_drivers_found( - self, m_which, m_subp, m_tmp): - """If ubuntu-drivers doesn't install any drivers, raise an error.""" - tdir = self.tmp_dir() - debconf_file = os.path.join(tdir, 'nvidia.template') - m_tmp.return_value = tdir - myCloud = mock.MagicMock() - - def fake_subp(cmd): - if cmd[0].startswith(tdir): - return - raise ProcessExecutionError( - stdout='No drivers found for installation.\n', exit_code=1) - m_subp.side_effect = fake_subp - - with self.assertRaises(Exception): - drivers.handle( - 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None) - self.assertEqual([mock.call(['ubuntu-drivers-common'])], - myCloud.distro.install_packages.call_args_list) - self.assertEqual( - [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), - mock.call(self.install_gpgpu)], - m_subp.call_args_list) - self.assertIn('ubuntu-drivers found no drivers for installation', - self.logs.getvalue()) - - @mock.patch(MPATH + "subp.subp", return_value=('', '')) - @mock.patch(MPATH + "subp.which", return_value=False) - def _assert_inert_with_config(self, config, m_which, m_subp): - """Helper to reduce 
repetition when testing negative cases""" - myCloud = mock.MagicMock() - drivers.handle('ubuntu_drivers', config, myCloud, None, None) - self.assertEqual(0, myCloud.distro.install_packages.call_count) - self.assertEqual(0, m_subp.call_count) - - def test_handle_inert_if_license_not_accepted(self): - """Ensure we don't do anything if the license is rejected.""" - self._assert_inert_with_config( - {'drivers': {'nvidia': {'license-accepted': False}}}) - - def test_handle_inert_if_garbage_in_license_field(self): - """Ensure we don't do anything if unknown text is in license field.""" - self._assert_inert_with_config( - {'drivers': {'nvidia': {'license-accepted': 'garbage'}}}) - - def test_handle_inert_if_no_license_key(self): - """Ensure we don't do anything if no license key.""" - self._assert_inert_with_config({'drivers': {'nvidia': {}}}) - - def test_handle_inert_if_no_nvidia_key(self): - """Ensure we don't do anything if other license accepted.""" - self._assert_inert_with_config( - {'drivers': {'acme': {'license-accepted': True}}}) - - def test_handle_inert_if_string_given(self): - """Ensure we don't do anything if string refusal given.""" - for false_value in ['no', 'false', 'off', '0']: - self._assert_inert_with_config( - {'drivers': {'nvidia': {'license-accepted': false_value}}}) - - @mock.patch(MPATH + "install_drivers") - def test_handle_no_drivers_does_nothing(self, m_install_drivers): - """If no 'drivers' key in the config, nothing should be done.""" - myCloud = mock.MagicMock() - myLog = mock.MagicMock() - drivers.handle('ubuntu_drivers', {'foo': 'bzr'}, myCloud, myLog, None) - self.assertIn('Skipping module named', - myLog.debug.call_args_list[0][0][0]) - self.assertEqual(0, m_install_drivers.call_count) - - @mock.patch(M_TMP_PATH) - @mock.patch(MPATH + "subp.subp", return_value=('', '')) - @mock.patch(MPATH + "subp.which", return_value=True) - def test_install_drivers_no_install_if_present( - self, m_which, m_subp, m_tmp): - """If 'ubuntu-drivers' is present, no package install should occur.""" - tdir = self.tmp_dir() - debconf_file = os.path.join(tdir, 'nvidia.template') - m_tmp.return_value = tdir - pkg_install = mock.MagicMock() - drivers.install_drivers(self.cfg_accepted['drivers'], - pkg_install_func=pkg_install) - self.assertEqual(0, pkg_install.call_count) - self.assertEqual([mock.call('ubuntu-drivers')], - m_which.call_args_list) - self.assertEqual( - [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), - mock.call(self.install_gpgpu)], - m_subp.call_args_list) - - def test_install_drivers_rejects_invalid_config(self): - """install_drivers should raise TypeError if not given a config dict""" - pkg_install = mock.MagicMock() - with self.assertRaisesRegex(TypeError, ".*expected dict.*"): - drivers.install_drivers("mystring", pkg_install_func=pkg_install) - self.assertEqual(0, pkg_install.call_count) - - @mock.patch(M_TMP_PATH) - @mock.patch(MPATH + "subp.subp") - @mock.patch(MPATH + "subp.which", return_value=False) - def test_install_drivers_handles_old_ubuntu_drivers_gracefully( - self, m_which, m_subp, m_tmp): - """Older ubuntu-drivers versions should emit message and raise error""" - tdir = self.tmp_dir() - debconf_file = os.path.join(tdir, 'nvidia.template') - m_tmp.return_value = tdir - myCloud = mock.MagicMock() - - def fake_subp(cmd): - if cmd[0].startswith(tdir): - return - raise ProcessExecutionError( - stderr=OLD_UBUNTU_DRIVERS_ERROR_STDERR, exit_code=2) - m_subp.side_effect = fake_subp - - with self.assertRaises(Exception): - drivers.handle( - 
'ubuntu_drivers', self.cfg_accepted, myCloud, None, None) - self.assertEqual([mock.call(['ubuntu-drivers-common'])], - myCloud.distro.install_packages.call_args_list) - self.assertEqual( - [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), - mock.call(self.install_gpgpu)], - m_subp.call_args_list) - self.assertIn('WARNING: the available version of ubuntu-drivers is' - ' too old to perform requested driver installation', - self.logs.getvalue()) - - -# Sub-class TestUbuntuDrivers to run the same test cases, but with a version -class TestUbuntuDriversWithVersion(TestUbuntuDrivers): - cfg_accepted = { - 'drivers': {'nvidia': {'license-accepted': True, 'version': '123'}}} - install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:123'] - - @mock.patch(M_TMP_PATH) - @mock.patch(MPATH + "subp.subp", return_value=('', '')) - @mock.patch(MPATH + "subp.which", return_value=False) - def test_version_none_uses_latest(self, m_which, m_subp, m_tmp): - tdir = self.tmp_dir() - debconf_file = os.path.join(tdir, 'nvidia.template') - m_tmp.return_value = tdir - myCloud = mock.MagicMock() - version_none_cfg = { - 'drivers': {'nvidia': {'license-accepted': True, 'version': None}}} - drivers.handle( - 'ubuntu_drivers', version_none_cfg, myCloud, None, None) - self.assertEqual( - [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), - mock.call(['ubuntu-drivers', 'install', '--gpgpu', 'nvidia'])], - m_subp.call_args_list) - - def test_specifying_a_version_doesnt_override_license_acceptance(self): - self._assert_inert_with_config({ - 'drivers': {'nvidia': {'license-accepted': False, - 'version': '123'}} - }) - -# vi: ts=4 expandtab diff --git a/cloudinit/config/tests/test_users_groups.py b/cloudinit/config/tests/test_users_groups.py deleted file mode 100644 index df89ddb3..00000000 --- a/cloudinit/config/tests/test_users_groups.py +++ /dev/null @@ -1,172 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - - -from cloudinit.config import cc_users_groups -from cloudinit.tests.helpers import CiTestCase, mock - -MODPATH = "cloudinit.config.cc_users_groups" - - -@mock.patch('cloudinit.distros.ubuntu.Distro.create_group') -@mock.patch('cloudinit.distros.ubuntu.Distro.create_user') -class TestHandleUsersGroups(CiTestCase): - """Test cc_users_groups handling of config.""" - - with_logs = True - - def test_handle_no_cfg_creates_no_users_or_groups(self, m_user, m_group): - """Test handle with no config will not create users or groups.""" - cfg = {} # merged cloud-config - # System config defines a default user for the distro. - sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, - 'groups': ['lxd', 'sudo'], - 'shell': '/bin/bash'}} - metadata = {} - cloud = self.tmp_cloud( - distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) - cc_users_groups.handle('modulename', cfg, cloud, None, None) - m_user.assert_not_called() - m_group.assert_not_called() - - def test_handle_users_in_cfg_calls_create_users(self, m_user, m_group): - """When users in config, create users with distro.create_user.""" - cfg = {'users': ['default', {'name': 'me2'}]} # merged cloud-config - # System config defines a default user for the distro. 
- sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, - 'groups': ['lxd', 'sudo'], - 'shell': '/bin/bash'}} - metadata = {} - cloud = self.tmp_cloud( - distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) - cc_users_groups.handle('modulename', cfg, cloud, None, None) - self.assertCountEqual( - m_user.call_args_list, - [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, - shell='/bin/bash'), - mock.call('me2', default=False)]) - m_group.assert_not_called() - - @mock.patch('cloudinit.distros.freebsd.Distro.create_group') - @mock.patch('cloudinit.distros.freebsd.Distro.create_user') - def test_handle_users_in_cfg_calls_create_users_on_bsd( - self, - m_fbsd_user, - m_fbsd_group, - m_linux_user, - m_linux_group, - ): - """When users in config, create users with freebsd.create_user.""" - cfg = {'users': ['default', {'name': 'me2'}]} # merged cloud-config - # System config defines a default user for the distro. - sys_cfg = {'default_user': {'name': 'freebsd', 'lock_passwd': True, - 'groups': ['wheel'], - 'shell': '/bin/tcsh'}} - metadata = {} - cloud = self.tmp_cloud( - distro='freebsd', sys_cfg=sys_cfg, metadata=metadata) - cc_users_groups.handle('modulename', cfg, cloud, None, None) - self.assertCountEqual( - m_fbsd_user.call_args_list, - [mock.call('freebsd', groups='wheel', lock_passwd=True, - shell='/bin/tcsh'), - mock.call('me2', default=False)]) - m_fbsd_group.assert_not_called() - m_linux_group.assert_not_called() - m_linux_user.assert_not_called() - - def test_users_with_ssh_redirect_user_passes_keys(self, m_user, m_group): - """When ssh_redirect_user is True pass default user and cloud keys.""" - cfg = { - 'users': ['default', {'name': 'me2', 'ssh_redirect_user': True}]} - # System config defines a default user for the distro. - sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, - 'groups': ['lxd', 'sudo'], - 'shell': '/bin/bash'}} - metadata = {'public-keys': ['key1']} - cloud = self.tmp_cloud( - distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) - cc_users_groups.handle('modulename', cfg, cloud, None, None) - self.assertCountEqual( - m_user.call_args_list, - [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, - shell='/bin/bash'), - mock.call('me2', cloud_public_ssh_keys=['key1'], default=False, - ssh_redirect_user='ubuntu')]) - m_group.assert_not_called() - - def test_users_with_ssh_redirect_user_default_str(self, m_user, m_group): - """When ssh_redirect_user is 'default' pass default username.""" - cfg = { - 'users': ['default', {'name': 'me2', - 'ssh_redirect_user': 'default'}]} - # System config defines a default user for the distro. - sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, - 'groups': ['lxd', 'sudo'], - 'shell': '/bin/bash'}} - metadata = {'public-keys': ['key1']} - cloud = self.tmp_cloud( - distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) - cc_users_groups.handle('modulename', cfg, cloud, None, None) - self.assertCountEqual( - m_user.call_args_list, - [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, - shell='/bin/bash'), - mock.call('me2', cloud_public_ssh_keys=['key1'], default=False, - ssh_redirect_user='ubuntu')]) - m_group.assert_not_called() - - def test_users_with_ssh_redirect_user_non_default(self, m_user, m_group): - """Warn when ssh_redirect_user is not 'default'.""" - cfg = { - 'users': ['default', {'name': 'me2', - 'ssh_redirect_user': 'snowflake'}]} - # System config defines a default user for the distro. 
- sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, - 'groups': ['lxd', 'sudo'], - 'shell': '/bin/bash'}} - metadata = {'public-keys': ['key1']} - cloud = self.tmp_cloud( - distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) - with self.assertRaises(ValueError) as context_manager: - cc_users_groups.handle('modulename', cfg, cloud, None, None) - m_group.assert_not_called() - self.assertEqual( - 'Not creating user me2. Invalid value of ssh_redirect_user:' - ' snowflake. Expected values: true, default or false.', - str(context_manager.exception)) - - def test_users_with_ssh_redirect_user_default_false(self, m_user, m_group): - """When unspecified ssh_redirect_user is false and not set up.""" - cfg = {'users': ['default', {'name': 'me2'}]} - # System config defines a default user for the distro. - sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, - 'groups': ['lxd', 'sudo'], - 'shell': '/bin/bash'}} - metadata = {'public-keys': ['key1']} - cloud = self.tmp_cloud( - distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) - cc_users_groups.handle('modulename', cfg, cloud, None, None) - self.assertCountEqual( - m_user.call_args_list, - [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, - shell='/bin/bash'), - mock.call('me2', default=False)]) - m_group.assert_not_called() - - def test_users_ssh_redirect_user_and_no_default(self, m_user, m_group): - """Warn when ssh_redirect_user is True and no default user present.""" - cfg = { - 'users': ['default', {'name': 'me2', 'ssh_redirect_user': True}]} - # System config defines *no* default user for the distro. - sys_cfg = {} - metadata = {} # no public-keys defined - cloud = self.tmp_cloud( - distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) - cc_users_groups.handle('modulename', cfg, cloud, None, None) - m_user.assert_called_once_with('me2', default=False) - m_group.assert_not_called() - self.assertEqual( - 'WARNING: Ignoring ssh_redirect_user: True for me2. No' - ' default_user defined. Perhaps missing' - ' cloud configuration users: [default, ..].\n', - self.logs.getvalue()) diff --git a/cloudinit/distros/tests/__init__.py b/cloudinit/distros/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/cloudinit/distros/tests/test_init.py b/cloudinit/distros/tests/test_init.py deleted file mode 100644 index fd64a322..00000000 --- a/cloudinit/distros/tests/test_init.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright (C) 2020 Canonical Ltd. -# -# Author: Daniel Watkins -# -# This file is part of cloud-init. See LICENSE file for license information. -"""Tests for cloudinit/distros/__init__.py""" - -from unittest import mock - -import pytest - -from cloudinit.distros import _get_package_mirror_info, LDH_ASCII_CHARS - -# In newer versions of Python, these characters will be omitted instead -# of substituted because of security concerns. -# See https://bugs.python.org/issue43882 -SECURITY_URL_CHARS = '\n\r\t' - -# Define a set of characters we would expect to be replaced -INVALID_URL_CHARS = [ - chr(x) for x in range(127) - if chr(x) not in LDH_ASCII_CHARS + SECURITY_URL_CHARS -] -for separator in [":", ".", "/", "#", "?", "@", "[", "]"]: - # Remove from the set characters that either separate hostname parts (":", - # "."), terminate hostnames ("/", "#", "?", "@"), or cause Python to be - # unable to parse URLs ("[", "]"). - INVALID_URL_CHARS.remove(separator) - - -class TestGetPackageMirrorInfo: - """ - Tests for cloudinit.distros._get_package_mirror_info. 
- - These supplement the tests in tests/unittests/test_distros/test_generic.py - which are more focused on testing a single production-like configuration. - These tests are more focused on specific aspects of the unit under test. - """ - - @pytest.mark.parametrize('mirror_info,expected', [ - # Empty info gives empty return - ({}, {}), - # failsafe values used if present - ({'failsafe': {'primary': 'http://value', 'security': 'http://other'}}, - {'primary': 'http://value', 'security': 'http://other'}), - # search values used if present - ({'search': {'primary': ['http://value'], - 'security': ['http://other']}}, - {'primary': ['http://value'], 'security': ['http://other']}), - # failsafe values used if search value not present - ({'search': {'primary': ['http://value']}, - 'failsafe': {'security': 'http://other'}}, - {'primary': ['http://value'], 'security': 'http://other'}) - ]) - def test_get_package_mirror_info_failsafe(self, mirror_info, expected): - """ - Test the interaction between search and failsafe inputs - - (This doesn't test the case where the mirror_filter removes all search - options; test_failsafe_used_if_all_search_results_filtered_out covers - that.) - """ - assert expected == _get_package_mirror_info(mirror_info, - mirror_filter=lambda x: x) - - def test_failsafe_used_if_all_search_results_filtered_out(self): - """Test the failsafe option used if all search options eliminated.""" - mirror_info = { - 'search': {'primary': ['http://value']}, - 'failsafe': {'primary': 'http://other'} - } - assert {'primary': 'http://other'} == _get_package_mirror_info( - mirror_info, mirror_filter=lambda x: False) - - @pytest.mark.parametrize('allow_ec2_mirror, platform_type', [ - (True, 'ec2') - ]) - @pytest.mark.parametrize('availability_zone,region,patterns,expected', ( - # Test ec2_region alone - ('fk-fake-1f', None, ['http://EC2-%(ec2_region)s/ubuntu'], - ['http://ec2-fk-fake-1/ubuntu']), - # Test availability_zone alone - ('fk-fake-1f', None, ['http://AZ-%(availability_zone)s/ubuntu'], - ['http://az-fk-fake-1f/ubuntu']), - # Test region alone - (None, 'fk-fake-1', ['http://RG-%(region)s/ubuntu'], - ['http://rg-fk-fake-1/ubuntu']), - # Test that ec2_region is not available for non-matching AZs - ('fake-fake-1f', None, - ['http://EC2-%(ec2_region)s/ubuntu', - 'http://AZ-%(availability_zone)s/ubuntu'], - ['http://az-fake-fake-1f/ubuntu']), - # Test that template order maintained - (None, 'fake-region', - ['http://RG-%(region)s-2/ubuntu', 'http://RG-%(region)s-1/ubuntu'], - ['http://rg-fake-region-2/ubuntu', 'http://rg-fake-region-1/ubuntu']), - # Test that non-ASCII hostnames are IDNA encoded; - # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q" - (None, 'ТεЅТ̣', ['http://www.IDNA-%(region)s.com/ubuntu'], - ['http://www.xn--idna--4kd53hh6aba3q.com/ubuntu']), - # Test that non-ASCII hostnames with a port are IDNA encoded; - # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q" - (None, 'ТεЅТ̣', ['http://www.IDNA-%(region)s.com:8080/ubuntu'], - ['http://www.xn--idna--4kd53hh6aba3q.com:8080/ubuntu']), - # Test that non-ASCII non-hostname parts of URLs are unchanged - (None, 'ТεЅТ̣', ['http://www.example.com/%(region)s/ubuntu'], - ['http://www.example.com/ТεЅТ̣/ubuntu']), - # Test that IPv4 addresses are unchanged - (None, 'fk-fake-1', ['http://192.168.1.1:8080/%(region)s/ubuntu'], - ['http://192.168.1.1:8080/fk-fake-1/ubuntu']), - # Test that IPv6 addresses are unchanged - (None, 'fk-fake-1', - ['http://[2001:67c:1360:8001::23]/%(region)s/ubuntu'], - 
['http://[2001:67c:1360:8001::23]/fk-fake-1/ubuntu']), - # Test that unparseable URLs are filtered out of the mirror list - (None, 'inv[lid', - ['http://%(region)s.in.hostname/should/be/filtered', - 'http://but.not.in.the.path/%(region)s'], - ['http://but.not.in.the.path/inv[lid']), - (None, '-some-region-', - ['http://-lead-ing.%(region)s.trail-ing-.example.com/ubuntu'], - ['http://lead-ing.some-region.trail-ing.example.com/ubuntu']), - ) + tuple( - # Dynamically generate a test case for each non-LDH - # (Letters/Digits/Hyphen) ASCII character, testing that it is - # substituted with a hyphen - (None, 'fk{0}fake{0}1'.format(invalid_char), - ['http://%(region)s/ubuntu'], ['http://fk-fake-1/ubuntu']) - for invalid_char in INVALID_URL_CHARS - )) - def test_valid_substitution(self, - allow_ec2_mirror, - platform_type, - availability_zone, - region, - patterns, - expected): - """Test substitution works as expected.""" - flag_path = "cloudinit.distros." \ - "ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES" - - m_data_source = mock.Mock( - availability_zone=availability_zone, - region=region, - platform_type=platform_type - ) - mirror_info = {'search': {'primary': patterns}} - - with mock.patch(flag_path, allow_ec2_mirror): - ret = _get_package_mirror_info( - mirror_info, - data_source=m_data_source, - mirror_filter=lambda x: x - ) - print(allow_ec2_mirror) - print(platform_type) - print(availability_zone) - print(region) - print(patterns) - print(expected) - assert {'primary': expected} == ret diff --git a/cloudinit/distros/tests/test_networking.py b/cloudinit/distros/tests/test_networking.py deleted file mode 100644 index ec508f4d..00000000 --- a/cloudinit/distros/tests/test_networking.py +++ /dev/null @@ -1,223 +0,0 @@ -from unittest import mock - -import pytest - -from cloudinit import net -from cloudinit.distros.networking import ( - BSDNetworking, - LinuxNetworking, - Networking, -) - -# See https://docs.pytest.org/en/stable/example -# /parametrize.html#parametrizing-conditional-raising -from contextlib import ExitStack as does_not_raise - - -@pytest.yield_fixture -def generic_networking_cls(): - """Returns a direct Networking subclass which errors on /sys usage. - - This enables the direct testing of functionality only present on the - ``Networking`` super-class, and provides a check on accidentally using /sys - in that context. 
- """ - - class TestNetworking(Networking): - def is_physical(self, *args, **kwargs): - raise NotImplementedError - - def settle(self, *args, **kwargs): - raise NotImplementedError - - def try_set_link_up(self, *args, **kwargs): - raise NotImplementedError - - error = AssertionError("Unexpectedly used /sys in generic networking code") - with mock.patch( - "cloudinit.net.get_sys_class_path", side_effect=error, - ): - yield TestNetworking - - -@pytest.yield_fixture -def sys_class_net(tmpdir): - sys_class_net_path = tmpdir.join("sys/class/net") - sys_class_net_path.ensure_dir() - with mock.patch( - "cloudinit.net.get_sys_class_path", - return_value=sys_class_net_path.strpath + "/", - ): - yield sys_class_net_path - - -class TestBSDNetworkingIsPhysical: - def test_raises_notimplementederror(self): - with pytest.raises(NotImplementedError): - BSDNetworking().is_physical("eth0") - - -class TestLinuxNetworkingIsPhysical: - def test_returns_false_by_default(self, sys_class_net): - assert not LinuxNetworking().is_physical("eth0") - - def test_returns_false_if_devname_exists_but_not_physical( - self, sys_class_net - ): - devname = "eth0" - sys_class_net.join(devname).mkdir() - assert not LinuxNetworking().is_physical(devname) - - def test_returns_true_if_device_is_physical(self, sys_class_net): - devname = "eth0" - device_dir = sys_class_net.join(devname) - device_dir.mkdir() - device_dir.join("device").write("") - - assert LinuxNetworking().is_physical(devname) - - -class TestBSDNetworkingTrySetLinkUp: - def test_raises_notimplementederror(self): - with pytest.raises(NotImplementedError): - BSDNetworking().try_set_link_up("eth0") - - -@mock.patch("cloudinit.net.is_up") -@mock.patch("cloudinit.distros.networking.subp.subp") -class TestLinuxNetworkingTrySetLinkUp: - def test_calls_subp_return_true(self, m_subp, m_is_up): - devname = "eth0" - m_is_up.return_value = True - is_success = LinuxNetworking().try_set_link_up(devname) - - assert (mock.call(['ip', 'link', 'set', devname, 'up']) == - m_subp.call_args_list[-1]) - assert is_success - - def test_calls_subp_return_false(self, m_subp, m_is_up): - devname = "eth0" - m_is_up.return_value = False - is_success = LinuxNetworking().try_set_link_up(devname) - - assert (mock.call(['ip', 'link', 'set', devname, 'up']) == - m_subp.call_args_list[-1]) - assert not is_success - - -class TestBSDNetworkingSettle: - def test_settle_doesnt_error(self): - # This also implicitly tests that it doesn't use subp.subp - BSDNetworking().settle() - - -@pytest.mark.usefixtures("sys_class_net") -@mock.patch("cloudinit.distros.networking.util.udevadm_settle", autospec=True) -class TestLinuxNetworkingSettle: - def test_no_arguments(self, m_udevadm_settle): - LinuxNetworking().settle() - - assert [mock.call(exists=None)] == m_udevadm_settle.call_args_list - - def test_exists_argument(self, m_udevadm_settle): - LinuxNetworking().settle(exists="ens3") - - expected_path = net.sys_dev_path("ens3") - assert [ - mock.call(exists=expected_path) - ] == m_udevadm_settle.call_args_list - - -class TestNetworkingWaitForPhysDevs: - @pytest.fixture - def wait_for_physdevs_netcfg(self): - """This config is shared across all the tests in this class.""" - - def ethernet(mac, name, driver=None, device_id=None): - v2_cfg = {"set-name": name, "match": {"macaddress": mac}} - if driver: - v2_cfg["match"].update({"driver": driver}) - if device_id: - v2_cfg["match"].update({"device_id": device_id}) - - return v2_cfg - - physdevs = [ - ["aa:bb:cc:dd:ee:ff", "eth0", "virtio", "0x1000"], - 
["00:11:22:33:44:55", "ens3", "e1000", "0x1643"], - ] - netcfg = { - "version": 2, - "ethernets": {args[1]: ethernet(*args) for args in physdevs}, - } - return netcfg - - def test_skips_settle_if_all_present( - self, generic_networking_cls, wait_for_physdevs_netcfg, - ): - networking = generic_networking_cls() - with mock.patch.object( - networking, "get_interfaces_by_mac" - ) as m_get_interfaces_by_mac: - m_get_interfaces_by_mac.side_effect = iter( - [{"aa:bb:cc:dd:ee:ff": "eth0", "00:11:22:33:44:55": "ens3"}] - ) - with mock.patch.object( - networking, "settle", autospec=True - ) as m_settle: - networking.wait_for_physdevs(wait_for_physdevs_netcfg) - assert 0 == m_settle.call_count - - def test_calls_udev_settle_on_missing( - self, generic_networking_cls, wait_for_physdevs_netcfg, - ): - networking = generic_networking_cls() - with mock.patch.object( - networking, "get_interfaces_by_mac" - ) as m_get_interfaces_by_mac: - m_get_interfaces_by_mac.side_effect = iter( - [ - { - "aa:bb:cc:dd:ee:ff": "eth0" - }, # first call ens3 is missing - { - "aa:bb:cc:dd:ee:ff": "eth0", - "00:11:22:33:44:55": "ens3", - }, # second call has both - ] - ) - with mock.patch.object( - networking, "settle", autospec=True - ) as m_settle: - networking.wait_for_physdevs(wait_for_physdevs_netcfg) - m_settle.assert_called_with(exists="ens3") - - @pytest.mark.parametrize( - "strict,expectation", - [(True, pytest.raises(RuntimeError)), (False, does_not_raise())], - ) - def test_retrying_and_strict_behaviour( - self, - strict, - expectation, - generic_networking_cls, - wait_for_physdevs_netcfg, - ): - networking = generic_networking_cls() - with mock.patch.object( - networking, "get_interfaces_by_mac" - ) as m_get_interfaces_by_mac: - m_get_interfaces_by_mac.return_value = {} - - with mock.patch.object( - networking, "settle", autospec=True - ) as m_settle: - with expectation: - networking.wait_for_physdevs( - wait_for_physdevs_netcfg, strict=strict - ) - - assert ( - 5 * len(wait_for_physdevs_netcfg["ethernets"]) - == m_settle.call_count - ) diff --git a/cloudinit/net/tests/__init__.py b/cloudinit/net/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py deleted file mode 100644 index 28b4ecf7..00000000 --- a/cloudinit/net/tests/test_dhcp.py +++ /dev/null @@ -1,647 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
- -import httpretty -import os -import signal -from textwrap import dedent - -import cloudinit.net as net -from cloudinit.net.dhcp import ( - InvalidDHCPLeaseFileError, maybe_perform_dhcp_discovery, - parse_dhcp_lease_file, dhcp_discovery, networkd_load_leases, - parse_static_routes) -from cloudinit.util import ensure_file, write_file -from cloudinit.tests.helpers import ( - CiTestCase, HttprettyTestCase, mock, populate_dir, wrap_and_call) - - -class TestParseDHCPLeasesFile(CiTestCase): - - def test_parse_empty_lease_file_errors(self): - """parse_dhcp_lease_file errors when file content is empty.""" - empty_file = self.tmp_path('leases') - ensure_file(empty_file) - with self.assertRaises(InvalidDHCPLeaseFileError) as context_manager: - parse_dhcp_lease_file(empty_file) - error = context_manager.exception - self.assertIn('Cannot parse empty dhcp lease file', str(error)) - - def test_parse_malformed_lease_file_content_errors(self): - """parse_dhcp_lease_file errors when file content isn't dhcp leases.""" - non_lease_file = self.tmp_path('leases') - write_file(non_lease_file, 'hi mom.') - with self.assertRaises(InvalidDHCPLeaseFileError) as context_manager: - parse_dhcp_lease_file(non_lease_file) - error = context_manager.exception - self.assertIn('Cannot parse dhcp lease file', str(error)) - - def test_parse_multiple_leases(self): - """parse_dhcp_lease_file returns a list of all leases within.""" - lease_file = self.tmp_path('leases') - content = dedent(""" - lease { - interface "wlp3s0"; - fixed-address 192.168.2.74; - filename "http://192.168.2.50/boot.php?mac=${netX}"; - option subnet-mask 255.255.255.0; - option routers 192.168.2.1; - renew 4 2017/07/27 18:02:30; - expire 5 2017/07/28 07:08:15; - } - lease { - interface "wlp3s0"; - fixed-address 192.168.2.74; - filename "http://192.168.2.50/boot.php?mac=${netX}"; - option subnet-mask 255.255.255.0; - option routers 192.168.2.1; - } - """) - expected = [ - {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', - 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', - 'renew': '4 2017/07/27 18:02:30', - 'expire': '5 2017/07/28 07:08:15', - 'filename': 'http://192.168.2.50/boot.php?mac=${netX}'}, - {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', - 'filename': 'http://192.168.2.50/boot.php?mac=${netX}', - 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}] - write_file(lease_file, content) - self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file)) - - -class TestDHCPRFC3442(CiTestCase): - - def test_parse_lease_finds_rfc3442_classless_static_routes(self): - """parse_dhcp_lease_file returns rfc3442-classless-static-routes.""" - lease_file = self.tmp_path('leases') - content = dedent(""" - lease { - interface "wlp3s0"; - fixed-address 192.168.2.74; - option subnet-mask 255.255.255.0; - option routers 192.168.2.1; - option rfc3442-classless-static-routes 0,130,56,240,1; - renew 4 2017/07/27 18:02:30; - expire 5 2017/07/28 07:08:15; - } - """) - expected = [ - {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', - 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', - 'rfc3442-classless-static-routes': '0,130,56,240,1', - 'renew': '4 2017/07/27 18:02:30', - 'expire': '5 2017/07/28 07:08:15'}] - write_file(lease_file, content) - self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file)) - - def test_parse_lease_finds_classless_static_routes(self): - """ - parse_dhcp_lease_file returns classless-static-routes - for Centos lease format. 
- """ - lease_file = self.tmp_path('leases') - content = dedent(""" - lease { - interface "wlp3s0"; - fixed-address 192.168.2.74; - option subnet-mask 255.255.255.0; - option routers 192.168.2.1; - option classless-static-routes 0 130.56.240.1; - renew 4 2017/07/27 18:02:30; - expire 5 2017/07/28 07:08:15; - } - """) - expected = [ - {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', - 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', - 'classless-static-routes': '0 130.56.240.1', - 'renew': '4 2017/07/27 18:02:30', - 'expire': '5 2017/07/28 07:08:15'}] - write_file(lease_file, content) - self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file)) - - @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') - @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') - def test_obtain_lease_parses_static_routes(self, m_maybe, m_ipv4): - """EphemeralDHPCv4 parses rfc3442 routes for EphemeralIPv4Network""" - lease = [ - {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', - 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', - 'rfc3442-classless-static-routes': '0,130,56,240,1', - 'renew': '4 2017/07/27 18:02:30', - 'expire': '5 2017/07/28 07:08:15'}] - m_maybe.return_value = lease - eph = net.dhcp.EphemeralDHCPv4() - eph.obtain_lease() - expected_kwargs = { - 'interface': 'wlp3s0', - 'ip': '192.168.2.74', - 'prefix_or_mask': '255.255.255.0', - 'broadcast': '192.168.2.255', - 'static_routes': [('0.0.0.0/0', '130.56.240.1')], - 'router': '192.168.2.1'} - m_ipv4.assert_called_with(**expected_kwargs) - - @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') - @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') - def test_obtain_centos_lease_parses_static_routes(self, m_maybe, m_ipv4): - """ - EphemeralDHPCv4 parses rfc3442 routes for EphemeralIPv4Network - for Centos Lease format - """ - lease = [ - {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', - 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', - 'classless-static-routes': '0 130.56.240.1', - 'renew': '4 2017/07/27 18:02:30', - 'expire': '5 2017/07/28 07:08:15'}] - m_maybe.return_value = lease - eph = net.dhcp.EphemeralDHCPv4() - eph.obtain_lease() - expected_kwargs = { - 'interface': 'wlp3s0', - 'ip': '192.168.2.74', - 'prefix_or_mask': '255.255.255.0', - 'broadcast': '192.168.2.255', - 'static_routes': [('0.0.0.0/0', '130.56.240.1')], - 'router': '192.168.2.1'} - m_ipv4.assert_called_with(**expected_kwargs) - - -class TestDHCPParseStaticRoutes(CiTestCase): - - with_logs = True - - def parse_static_routes_empty_string(self): - self.assertEqual([], parse_static_routes("")) - - def test_parse_static_routes_invalid_input_returns_empty_list(self): - rfc3442 = "32,169,254,169,254,130,56,248" - self.assertEqual([], parse_static_routes(rfc3442)) - - def test_parse_static_routes_bogus_width_returns_empty_list(self): - rfc3442 = "33,169,254,169,254,130,56,248" - self.assertEqual([], parse_static_routes(rfc3442)) - - def test_parse_static_routes_single_ip(self): - rfc3442 = "32,169,254,169,254,130,56,248,255" - self.assertEqual([('169.254.169.254/32', '130.56.248.255')], - parse_static_routes(rfc3442)) - - def test_parse_static_routes_single_ip_handles_trailing_semicolon(self): - rfc3442 = "32,169,254,169,254,130,56,248,255;" - self.assertEqual([('169.254.169.254/32', '130.56.248.255')], - parse_static_routes(rfc3442)) - - def test_parse_static_routes_default_route(self): - rfc3442 = "0,130,56,240,1" - self.assertEqual([('0.0.0.0/0', '130.56.240.1')], - parse_static_routes(rfc3442)) - - 
def test_unspecified_gateway(self): - rfc3442 = "32,169,254,169,254,0,0,0,0" - self.assertEqual([('169.254.169.254/32', '0.0.0.0')], - parse_static_routes(rfc3442)) - - def test_parse_static_routes_class_c_b_a(self): - class_c = "24,192,168,74,192,168,0,4" - class_b = "16,172,16,172,16,0,4" - class_a = "8,10,10,0,0,4" - rfc3442 = ",".join([class_c, class_b, class_a]) - self.assertEqual(sorted([ - ("192.168.74.0/24", "192.168.0.4"), - ("172.16.0.0/16", "172.16.0.4"), - ("10.0.0.0/8", "10.0.0.4") - ]), sorted(parse_static_routes(rfc3442))) - - def test_parse_static_routes_logs_error_truncated(self): - bad_rfc3442 = { - "class_c": "24,169,254,169,10", - "class_b": "16,172,16,10", - "class_a": "8,10,10", - "gateway": "0,0", - "netlen": "33,0", - } - for rfc3442 in bad_rfc3442.values(): - self.assertEqual([], parse_static_routes(rfc3442)) - - logs = self.logs.getvalue() - self.assertEqual(len(bad_rfc3442.keys()), len(logs.splitlines())) - - def test_parse_static_routes_returns_valid_routes_until_parse_err(self): - class_c = "24,192,168,74,192,168,0,4" - class_b = "16,172,16,172,16,0,4" - class_a_error = "8,10,10,0,0" - rfc3442 = ",".join([class_c, class_b, class_a_error]) - self.assertEqual(sorted([ - ("192.168.74.0/24", "192.168.0.4"), - ("172.16.0.0/16", "172.16.0.4"), - ]), sorted(parse_static_routes(rfc3442))) - - logs = self.logs.getvalue() - self.assertIn(rfc3442, logs.splitlines()[0]) - - def test_redhat_format(self): - redhat_format = "24.191.168.128 192.168.128.1,0 192.168.128.1" - self.assertEqual(sorted([ - ("191.168.128.0/24", "192.168.128.1"), - ("0.0.0.0/0", "192.168.128.1") - ]), sorted(parse_static_routes(redhat_format))) - - def test_redhat_format_with_a_space_too_much_after_comma(self): - redhat_format = "24.191.168.128 192.168.128.1, 0 192.168.128.1" - self.assertEqual(sorted([ - ("191.168.128.0/24", "192.168.128.1"), - ("0.0.0.0/0", "192.168.128.1") - ]), sorted(parse_static_routes(redhat_format))) - - -class TestDHCPDiscoveryClean(CiTestCase): - with_logs = True - - @mock.patch('cloudinit.net.dhcp.find_fallback_nic') - def test_no_fallback_nic_found(self, m_fallback_nic): - """Log and do nothing when nic is absent and no fallback is found.""" - m_fallback_nic.return_value = None # No fallback nic found - self.assertEqual([], maybe_perform_dhcp_discovery()) - self.assertIn( - 'Skip dhcp_discovery: Unable to find fallback nic.', - self.logs.getvalue()) - - def test_provided_nic_does_not_exist(self): - """When the provided nic doesn't exist, log a message and no-op.""" - self.assertEqual([], maybe_perform_dhcp_discovery('idontexist')) - self.assertIn( - 'Skip dhcp_discovery: nic idontexist not found in get_devicelist.', - self.logs.getvalue()) - - @mock.patch('cloudinit.net.dhcp.subp.which') - @mock.patch('cloudinit.net.dhcp.find_fallback_nic') - def test_absent_dhclient_command(self, m_fallback, m_which): - """When dhclient doesn't exist in the OS, log the issue and no-op.""" - m_fallback.return_value = 'eth9' - m_which.return_value = None # dhclient isn't found - self.assertEqual([], maybe_perform_dhcp_discovery()) - self.assertIn( - 'Skip dhclient configuration: No dhclient command found.', - self.logs.getvalue()) - - @mock.patch('cloudinit.temp_utils.os.getuid') - @mock.patch('cloudinit.net.dhcp.dhcp_discovery') - @mock.patch('cloudinit.net.dhcp.subp.which') - @mock.patch('cloudinit.net.dhcp.find_fallback_nic') - def test_dhclient_run_with_tmpdir(self, m_fback, m_which, m_dhcp, m_uid): - """maybe_perform_dhcp_discovery passes tmpdir to dhcp_discovery.""" - 
m_uid.return_value = 0 # Fake root user for tmpdir - m_fback.return_value = 'eth9' - m_which.return_value = '/sbin/dhclient' - m_dhcp.return_value = {'address': '192.168.2.2'} - retval = wrap_and_call( - 'cloudinit.temp_utils', - {'_TMPDIR': {'new': None}, - 'os.getuid': 0}, - maybe_perform_dhcp_discovery) - self.assertEqual({'address': '192.168.2.2'}, retval) - self.assertEqual( - 1, m_dhcp.call_count, 'dhcp_discovery not called once') - call = m_dhcp.call_args_list[0] - self.assertEqual('/sbin/dhclient', call[0][0]) - self.assertEqual('eth9', call[0][1]) - self.assertIn('/var/tmp/cloud-init/cloud-init-dhcp-', call[0][2]) - - @mock.patch('time.sleep', mock.MagicMock()) - @mock.patch('cloudinit.net.dhcp.os.kill') - @mock.patch('cloudinit.net.dhcp.subp.subp') - def test_dhcp_discovery_run_in_sandbox_warns_invalid_pid(self, m_subp, - m_kill): - """dhcp_discovery logs a warning when pidfile contains invalid content. - - Lease processing still occurs and no proc kill is attempted. - """ - m_subp.return_value = ('', '') - tmpdir = self.tmp_dir() - dhclient_script = os.path.join(tmpdir, 'dhclient.orig') - script_content = '#!/bin/bash\necho fake-dhclient' - write_file(dhclient_script, script_content, mode=0o755) - write_file(self.tmp_path('dhclient.pid', tmpdir), '') # Empty pid '' - lease_content = dedent(""" - lease { - interface "eth9"; - fixed-address 192.168.2.74; - option subnet-mask 255.255.255.0; - option routers 192.168.2.1; - } - """) - write_file(self.tmp_path('dhcp.leases', tmpdir), lease_content) - - self.assertCountEqual( - [{'interface': 'eth9', 'fixed-address': '192.168.2.74', - 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}], - dhcp_discovery(dhclient_script, 'eth9', tmpdir)) - self.assertIn( - "dhclient(pid=, parentpid=unknown) failed " - "to daemonize after 10.0 seconds", - self.logs.getvalue()) - m_kill.assert_not_called() - - @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') - @mock.patch('cloudinit.net.dhcp.os.kill') - @mock.patch('cloudinit.net.dhcp.util.wait_for_files') - @mock.patch('cloudinit.net.dhcp.subp.subp') - def test_dhcp_discovery_run_in_sandbox_waits_on_lease_and_pid(self, - m_subp, - m_wait, - m_kill, - m_getppid): - """dhcp_discovery waits for the presence of pidfile and dhcp.leases.""" - m_subp.return_value = ('', '') - tmpdir = self.tmp_dir() - dhclient_script = os.path.join(tmpdir, 'dhclient.orig') - script_content = '#!/bin/bash\necho fake-dhclient' - write_file(dhclient_script, script_content, mode=0o755) - # Don't create pid or leases file - pidfile = self.tmp_path('dhclient.pid', tmpdir) - leasefile = self.tmp_path('dhcp.leases', tmpdir) - m_wait.return_value = [pidfile] # Return the missing pidfile wait for - m_getppid.return_value = 1 # Indicate that dhclient has daemonized - self.assertEqual([], dhcp_discovery(dhclient_script, 'eth9', tmpdir)) - self.assertEqual( - mock.call([pidfile, leasefile], maxwait=5, naplen=0.01), - m_wait.call_args_list[0]) - self.assertIn( - 'WARNING: dhclient did not produce expected files: dhclient.pid', - self.logs.getvalue()) - m_kill.assert_not_called() - - @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') - @mock.patch('cloudinit.net.dhcp.os.kill') - @mock.patch('cloudinit.net.dhcp.subp.subp') - def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill, m_getppid): - """dhcp_discovery brings up the interface and runs dhclient. - - It also returns the parsed dhcp.leases file generated in the sandbox. 
-        """
-        m_subp.return_value = ('', '')
-        tmpdir = self.tmp_dir()
-        dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
-        script_content = '#!/bin/bash\necho fake-dhclient'
-        write_file(dhclient_script, script_content, mode=0o755)
-        lease_content = dedent("""
-            lease {
-              interface "eth9";
-              fixed-address 192.168.2.74;
-              option subnet-mask 255.255.255.0;
-              option routers 192.168.2.1;
-            }
-        """)
-        lease_file = os.path.join(tmpdir, 'dhcp.leases')
-        write_file(lease_file, lease_content)
-        pid_file = os.path.join(tmpdir, 'dhclient.pid')
-        my_pid = 1
-        write_file(pid_file, "%d\n" % my_pid)
-        m_getppid.return_value = 1  # Indicate that dhclient has daemonized
-
-        self.assertCountEqual(
-            [{'interface': 'eth9', 'fixed-address': '192.168.2.74',
-              'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}],
-            dhcp_discovery(dhclient_script, 'eth9', tmpdir))
-        # dhclient script got copied
-        with open(os.path.join(tmpdir, 'dhclient')) as stream:
-            self.assertEqual(script_content, stream.read())
-        # Interface was brought up before dhclient called from sandbox
-        m_subp.assert_has_calls([
-            mock.call(
-                ['ip', 'link', 'set', 'dev', 'eth9', 'up'], capture=True),
-            mock.call(
-                [os.path.join(tmpdir, 'dhclient'), '-1', '-v', '-lf',
-                 lease_file, '-pf', os.path.join(tmpdir, 'dhclient.pid'),
-                 'eth9', '-sf', '/bin/true'], capture=True)])
-        m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)])
-
-    @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid')
-    @mock.patch('cloudinit.net.dhcp.os.kill')
-    @mock.patch('cloudinit.net.dhcp.subp.subp')
-    def test_dhcp_discovery_outside_sandbox(self, m_subp, m_kill, m_getppid):
-        """dhcp_discovery brings up the interface and runs dhclient.
-
-        It also returns the parsed dhcp.leases file generated in the sandbox.
-        """
-        m_subp.return_value = ('', '')
-        tmpdir = self.tmp_dir()
-        dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
-        script_content = '#!/bin/bash\necho fake-dhclient'
-        write_file(dhclient_script, script_content, mode=0o755)
-        lease_content = dedent("""
-            lease {
-              interface "eth9";
-              fixed-address 192.168.2.74;
-              option subnet-mask 255.255.255.0;
-              option routers 192.168.2.1;
-            }
-        """)
-        lease_file = os.path.join(tmpdir, 'dhcp.leases')
-        write_file(lease_file, lease_content)
-        pid_file = os.path.join(tmpdir, 'dhclient.pid')
-        my_pid = 1
-        write_file(pid_file, "%d\n" % my_pid)
-        m_getppid.return_value = 1  # Indicate that dhclient has daemonized
-
-        with mock.patch('os.access', return_value=False):
-            self.assertCountEqual(
-                [{'interface': 'eth9', 'fixed-address': '192.168.2.74',
-                  'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}],
-                dhcp_discovery(dhclient_script, 'eth9', tmpdir))
-        # dhclient script got copied
-        with open(os.path.join(tmpdir, 'dhclient.orig')) as stream:
-            self.assertEqual(script_content, stream.read())
-        # Interface was brought up before dhclient was called outside sandbox
-        m_subp.assert_has_calls([
-            mock.call(
-                ['ip', 'link', 'set', 'dev', 'eth9', 'up'], capture=True),
-            mock.call(
-                [os.path.join(tmpdir, 'dhclient.orig'), '-1', '-v', '-lf',
-                 lease_file, '-pf', os.path.join(tmpdir, 'dhclient.pid'),
-                 'eth9', '-sf', '/bin/true'], capture=True)])
-        m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)])
-
-    @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid')
-    @mock.patch('cloudinit.net.dhcp.os.kill')
-    @mock.patch('cloudinit.net.dhcp.subp.subp')
-    def test_dhcp_output_error_stream(self, m_subp, m_kill, m_getppid):
-        """dhcp_log_func is called with the output and error streams of
-        dhclient when the callable is passed."""
-        dhclient_err = 'FAKE DHCLIENT ERROR'
-        dhclient_out = 'FAKE DHCLIENT OUT'
-        m_subp.return_value = (dhclient_out, dhclient_err)
-        tmpdir = self.tmp_dir()
-        dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
-        script_content = '#!/bin/bash\necho fake-dhclient'
-        write_file(dhclient_script, script_content, mode=0o755)
-        lease_content = dedent("""
-            lease {
-              interface "eth9";
-              fixed-address 192.168.2.74;
-              option subnet-mask 255.255.255.0;
-              option routers 192.168.2.1;
-            }
-        """)
-        lease_file = os.path.join(tmpdir, 'dhcp.leases')
-        write_file(lease_file, lease_content)
-        pid_file = os.path.join(tmpdir, 'dhclient.pid')
-        my_pid = 1
-        write_file(pid_file, "%d\n" % my_pid)
-        m_getppid.return_value = 1  # Indicate that dhclient has daemonized
-
-        def dhcp_log_func(out, err):
-            self.assertEqual(out, dhclient_out)
-            self.assertEqual(err, dhclient_err)
-
-        dhcp_discovery(
-            dhclient_script, 'eth9', tmpdir, dhcp_log_func=dhcp_log_func)
-
-
-class TestSystemdParseLeases(CiTestCase):
-
-    lxd_lease = dedent("""\
-        # This is private data. Do not parse.
-        ADDRESS=10.75.205.242
-        NETMASK=255.255.255.0
-        ROUTER=10.75.205.1
-        SERVER_ADDRESS=10.75.205.1
-        NEXT_SERVER=10.75.205.1
-        BROADCAST=10.75.205.255
-        T1=1580
-        T2=2930
-        LIFETIME=3600
-        DNS=10.75.205.1
-        DOMAINNAME=lxd
-        HOSTNAME=a1
-        CLIENTID=ffe617693400020000ab110c65a6a0866931c2
-        """)
-
-    lxd_parsed = {
-        'ADDRESS': '10.75.205.242',
-        'NETMASK': '255.255.255.0',
-        'ROUTER': '10.75.205.1',
-        'SERVER_ADDRESS': '10.75.205.1',
-        'NEXT_SERVER': '10.75.205.1',
-        'BROADCAST': '10.75.205.255',
-        'T1': '1580',
-        'T2': '2930',
-        'LIFETIME': '3600',
-        'DNS': '10.75.205.1',
-        'DOMAINNAME': 'lxd',
-        'HOSTNAME': 'a1',
-        'CLIENTID': 'ffe617693400020000ab110c65a6a0866931c2',
-    }
-
-    azure_lease = dedent("""\
-        # This is private data. Do not parse.
-        ADDRESS=10.132.0.5
-        NETMASK=255.255.255.255
-        ROUTER=10.132.0.1
-        SERVER_ADDRESS=169.254.169.254
-        NEXT_SERVER=10.132.0.1
-        MTU=1460
-        T1=43200
-        T2=75600
-        LIFETIME=86400
-        DNS=169.254.169.254
-        NTP=169.254.169.254
-        DOMAINNAME=c.ubuntu-foundations.internal
-        DOMAIN_SEARCH_LIST=c.ubuntu-foundations.internal google.internal
-        HOSTNAME=tribaal-test-171002-1349.c.ubuntu-foundations.internal
-        ROUTES=10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1
-        CLIENTID=ff405663a200020000ab11332859494d7a8b4c
-        OPTION_245=624c3620
-        """)
-
-    azure_parsed = {
-        'ADDRESS': '10.132.0.5',
-        'NETMASK': '255.255.255.255',
-        'ROUTER': '10.132.0.1',
-        'SERVER_ADDRESS': '169.254.169.254',
-        'NEXT_SERVER': '10.132.0.1',
-        'MTU': '1460',
-        'T1': '43200',
-        'T2': '75600',
-        'LIFETIME': '86400',
-        'DNS': '169.254.169.254',
-        'NTP': '169.254.169.254',
-        'DOMAINNAME': 'c.ubuntu-foundations.internal',
-        'DOMAIN_SEARCH_LIST': 'c.ubuntu-foundations.internal google.internal',
-        'HOSTNAME': 'tribaal-test-171002-1349.c.ubuntu-foundations.internal',
-        'ROUTES': '10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1',
-        'CLIENTID': 'ff405663a200020000ab11332859494d7a8b4c',
-        'OPTION_245': '624c3620'}
-
-    def setUp(self):
-        super(TestSystemdParseLeases, self).setUp()
-        self.lease_d = self.tmp_dir()
-
-    def test_no_leases_returns_empty_dict(self):
-        """A leases dir with no lease files should return empty dictionary."""
-        self.assertEqual({}, networkd_load_leases(self.lease_d))
-
-    def test_no_leases_dir_returns_empty_dict(self):
-        """A non-existing leases dir should return empty dict."""
-        enodir = os.path.join(self.lease_d, 'does-not-exist')
-        self.assertEqual({}, networkd_load_leases(enodir))
-
-    def test_single_leases_file(self):
-        """A leases dir with one leases file."""
-        populate_dir(self.lease_d, {'2': self.lxd_lease})
-        self.assertEqual(
-            {'2': self.lxd_parsed}, networkd_load_leases(self.lease_d))
-
-    def test_single_azure_leases_file(self):
-        """On Azure, option 245 should be present, verify it specifically."""
-        populate_dir(self.lease_d, {'1': self.azure_lease})
-        self.assertEqual(
-            {'1': self.azure_parsed}, networkd_load_leases(self.lease_d))
-
-    def test_multiple_files(self):
-        """Multiple leases files on azure with one found return that value."""
-        self.maxDiff = None
-        populate_dir(self.lease_d, {'1': self.azure_lease,
-                                    '9': self.lxd_lease})
-        self.assertEqual({'1': self.azure_parsed, '9': self.lxd_parsed},
-                         networkd_load_leases(self.lease_d))
-
-
-class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase):
-
-    @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
-    def test_ephemeral_dhcp_no_network_if_url_connectivity(self, m_dhcp):
-        """No EphemeralDhcp4 network setup when connectivity_url succeeds."""
-        url = 'http://example.org/index.html'
-
-        httpretty.register_uri(httpretty.GET, url)
-        with net.dhcp.EphemeralDHCPv4(
-            connectivity_url_data={'url': url},
-        ) as lease:
-            self.assertIsNone(lease)
-        # Ensure that no teardown happens:
-        m_dhcp.assert_not_called()
-
-    @mock.patch('cloudinit.net.dhcp.subp.subp')
-    @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
-    def test_ephemeral_dhcp_setup_network_if_url_connectivity(
-            self, m_dhcp, m_subp):
-        """EphemeralDhcp4 network setup occurs when connectivity_url fails."""
-        url = 'http://example.org/index.html'
-        fake_lease = {
-            'interface': 'eth9', 'fixed-address': '192.168.2.2',
-            'subnet-mask': '255.255.0.0'}
-        m_dhcp.return_value = [fake_lease]
-        m_subp.return_value = ('', '')
-
-        httpretty.register_uri(httpretty.GET, url, body={}, status=404)
-        with net.dhcp.EphemeralDHCPv4(
-            connectivity_url_data={'url': url},
-        ) as lease:
-            self.assertEqual(fake_lease, lease)
-        # Ensure that dhcp discovery occurs
-        m_dhcp.assert_called_once()
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
deleted file mode 100644
index f9102f7b..00000000
--- a/cloudinit/net/tests/test_init.py
+++ /dev/null
@@ -1,1402 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import copy
-import errno
-import ipaddress
-import os
-import textwrap
-from unittest import mock
-
-import httpretty
-import pytest
-import requests
-
-import cloudinit.net as net
-from cloudinit import safeyaml as yaml
-from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase
-from cloudinit.subp import ProcessExecutionError
-from cloudinit.util import ensure_file, write_file
-
-
-class TestSysDevPath(CiTestCase):
-
-    def test_sys_dev_path(self):
-        """sys_dev_path returns a path under SYS_CLASS_NET for a device."""
-        dev = 'something'
-        path = 'attribute'
-        expected = net.SYS_CLASS_NET + dev + '/' + path
-        self.assertEqual(expected, net.sys_dev_path(dev, path))
-
-    def test_sys_dev_path_without_path(self):
-        """When path param isn't provided it defaults to empty string."""
-        dev = 'something'
-        expected = net.SYS_CLASS_NET + dev + '/'
-        self.assertEqual(expected, net.sys_dev_path(dev))
-
-
-class TestReadSysNet(CiTestCase):
-    with_logs = True
-
-    def setUp(self):
-        super(TestReadSysNet, self).setUp()
-        sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
-        self.m_sys_path = sys_mock.start()
-        self.sysdir = self.tmp_dir() + '/'
-        self.m_sys_path.return_value = self.sysdir
-        self.addCleanup(sys_mock.stop)
-
-    def test_read_sys_net_strips_contents_of_sys_path(self):
-        """read_sys_net strips whitespace from the contents of a sys file."""
-        content = 'some stuff with trailing whitespace\t\r\n'
-        write_file(os.path.join(self.sysdir, 'dev', 'attr'), content)
-        self.assertEqual(content.strip(), net.read_sys_net('dev', 'attr'))
-
-    def test_read_sys_net_reraises_oserror(self):
-        """read_sys_net raises OSError/IOError when file doesn't exist."""
-        # Expect a generic Exception; OSError vs IOError varies by Python version.
-        with self.assertRaises(Exception) as context_manager:  # noqa: H202
-            net.read_sys_net('dev', 'attr')
-        error = context_manager.exception
-        self.assertIn('No such file or directory', str(error))
-
-    def test_read_sys_net_handles_error_with_on_enoent(self):
-        """read_sys_net handles OSError/IOError with on_enoent if provided."""
-        handled_errors = []
-
-        def on_enoent(e):
-            handled_errors.append(e)
-
-        net.read_sys_net('dev', 'attr', on_enoent=on_enoent)
-        error = handled_errors[0]
-        self.assertIsInstance(error, Exception)
-        self.assertIn('No such file or directory', str(error))
-
-    def test_read_sys_net_translates_content(self):
-        """read_sys_net translates content when translate dict is provided."""
-        content = "you're welcome\n"
-        write_file(os.path.join(self.sysdir, 'dev', 'attr'), content)
-        translate = {"you're welcome": 'de nada'}
-        self.assertEqual(
-            'de nada',
-            net.read_sys_net('dev', 'attr', translate=translate))
-
-    def test_read_sys_net_errors_on_translation_failures(self):
-        """read_sys_net raises a KeyError and logs details on failure."""
-        content = "you're welcome\n"
-        write_file(os.path.join(self.sysdir, 'dev', 'attr'), content)
-        with self.assertRaises(KeyError) as context_manager:
-            net.read_sys_net('dev', 'attr', translate={})
-        error = context_manager.exception
-        self.assertEqual('"you\'re welcome"', str(error))
-        self.assertIn(
-            "Found unexpected (not translatable) value 'you're welcome' in "
-            "'{0}dev/attr".format(self.sysdir),
-            self.logs.getvalue())
-
-    def test_read_sys_net_handles_handles_with_onkeyerror(self):
-        """read_sys_net handles translation errors calling on_keyerror."""
-        content = "you're welcome\n"
-        write_file(os.path.join(self.sysdir, 'dev', 'attr'), content)
-        handled_errors = []
-
-        def on_keyerror(e):
-            handled_errors.append(e)
-
-        net.read_sys_net('dev', 'attr', translate={}, on_keyerror=on_keyerror)
-        error = handled_errors[0]
-        self.assertIsInstance(error, KeyError)
-        self.assertEqual('"you\'re welcome"', str(error))
-
-    def test_read_sys_net_safe_false_on_translate_failure(self):
-        """read_sys_net_safe returns False on translation failures."""
-        content = "you're welcome\n"
-        write_file(os.path.join(self.sysdir, 'dev', 'attr'), content)
-        self.assertFalse(net.read_sys_net_safe('dev', 'attr', translate={}))
-
-    def test_read_sys_net_safe_returns_false_on_noent_failure(self):
-        """read_sys_net_safe returns False on file not found failures."""
-        self.assertFalse(net.read_sys_net_safe('dev', 'attr'))
-
-    def test_read_sys_net_int_returns_none_on_error(self):
-        """read_sys_net_int returns None on failures."""
-        self.assertFalse(net.read_sys_net_int('dev', 'attr'))
-
-    def test_read_sys_net_int_returns_none_on_valueerror(self):
-        """read_sys_net_int returns None when content is not an int."""
-        write_file(os.path.join(self.sysdir, 'dev', 'attr'), 'NOTINT\n')
-        self.assertFalse(net.read_sys_net_int('dev', 'attr'))
-
-    def test_read_sys_net_int_returns_integer_from_content(self):
-        """read_sys_net_int returns the integer parsed from file content."""
-        write_file(os.path.join(self.sysdir, 'dev', 'attr'), '1\n')
-        self.assertEqual(1, net.read_sys_net_int('dev', 'attr'))
-
-    def test_is_up_true(self):
-        """is_up is True if sys/net/devname/operstate is 'up' or 'unknown'."""
-        for state in ['up', 'unknown']:
-            write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state)
-            self.assertTrue(net.is_up('eth0'))
-
-    def test_is_up_false(self):
-        """is_up is False if sys/net/devname/operstate is 'down' or invalid."""
-        for state in ['down', 'incomprehensible']:
-            write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state)
-            self.assertFalse(net.is_up('eth0'))
-
-    def test_is_bridge(self):
-        """is_bridge is True when /sys/net/devname/bridge exists."""
-        self.assertFalse(net.is_bridge('eth0'))
-        ensure_file(os.path.join(self.sysdir, 'eth0', 'bridge'))
-        self.assertTrue(net.is_bridge('eth0'))
-
-    def test_is_bond(self):
-        """is_bond is True when /sys/net/devname/bonding exists."""
-        self.assertFalse(net.is_bond('eth0'))
-        ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding'))
-        self.assertTrue(net.is_bond('eth0'))
-
-    def test_get_master(self):
-        """get_master returns the path when /sys/net/devname/master exists."""
-        self.assertIsNone(net.get_master('enP1s1'))
-        master_path = os.path.join(self.sysdir, 'enP1s1', 'master')
-        ensure_file(master_path)
-        self.assertEqual(master_path, net.get_master('enP1s1'))
-
-    def test_master_is_bridge_or_bond(self):
-        bridge_mac = 'aa:bb:cc:aa:bb:cc'
-        bond_mac = 'cc:bb:aa:cc:bb:aa'
-
-        # No master => False
-        write_file(os.path.join(self.sysdir, 'eth1', 'address'), bridge_mac)
-        write_file(os.path.join(self.sysdir, 'eth2', 'address'), bond_mac)
-
-        self.assertFalse(net.master_is_bridge_or_bond('eth1'))
-        self.assertFalse(net.master_is_bridge_or_bond('eth2'))
-
-        # masters without bridge/bonding => False
-        write_file(os.path.join(self.sysdir, 'br0', 'address'), bridge_mac)
-        write_file(os.path.join(self.sysdir, 'bond0', 'address'), bond_mac)
-
-        os.symlink('../br0', os.path.join(self.sysdir, 'eth1', 'master'))
-        os.symlink('../bond0', os.path.join(self.sysdir, 'eth2', 'master'))
-
-        self.assertFalse(net.master_is_bridge_or_bond('eth1'))
-        self.assertFalse(net.master_is_bridge_or_bond('eth2'))
-
-        # masters with bridge/bonding => True
-        write_file(os.path.join(self.sysdir, 'br0', 'bridge'), '')
-        write_file(os.path.join(self.sysdir, 'bond0', 'bonding'), '')
-
-        self.assertTrue(net.master_is_bridge_or_bond('eth1'))
-        self.assertTrue(net.master_is_bridge_or_bond('eth2'))
-
-    def test_master_is_openvswitch(self):
-        ovs_mac = 'bb:cc:aa:bb:cc:aa'
-
-        # No master => False
-        write_file(os.path.join(self.sysdir, 'eth1', 'address'), ovs_mac)
-
-        self.assertFalse(net.master_is_bridge_or_bond('eth1'))
-
-        # masters without ovs-system => False
-        write_file(os.path.join(self.sysdir, 'ovs-system', 'address'), ovs_mac)
-
-        os.symlink('../ovs-system', os.path.join(self.sysdir, 'eth1',
-                                                 'master'))
-
-        self.assertFalse(net.master_is_openvswitch('eth1'))
-
-        # masters with ovs-system => True
-        os.symlink('../ovs-system', os.path.join(self.sysdir, 'eth1',
-                                                 'upper_ovs-system'))
-
-        self.assertTrue(net.master_is_openvswitch('eth1'))
-
-    def test_is_vlan(self):
-        """is_vlan is True when /sys/net/devname/uevent has DEVTYPE=vlan."""
-        ensure_file(os.path.join(self.sysdir, 'eth0', 'uevent'))
-        self.assertFalse(net.is_vlan('eth0'))
-        content = 'junk\nDEVTYPE=vlan\njunk\n'
-        write_file(os.path.join(self.sysdir, 'eth0', 'uevent'), content)
-        self.assertTrue(net.is_vlan('eth0'))
-
-
-class TestGenerateFallbackConfig(CiTestCase):
-
-    def setUp(self):
-        super(TestGenerateFallbackConfig, self).setUp()
-        sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
-        self.m_sys_path = sys_mock.start()
-        self.sysdir = self.tmp_dir() + '/'
-        self.m_sys_path.return_value = self.sysdir
-        self.addCleanup(sys_mock.stop)
-        self.add_patch('cloudinit.net.util.is_container', 'm_is_container',
-                       return_value=False)
-        self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle')
-        self.add_patch('cloudinit.net.is_netfailover', 'm_netfail',
-                       return_value=False)
-        self.add_patch('cloudinit.net.is_netfail_master', 'm_netfail_master',
-                       return_value=False)
-
-    def test_generate_fallback_finds_connected_eth_with_mac(self):
-        """generate_fallback_config finds any connected device with a mac."""
-        write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1')
-        write_file(os.path.join(self.sysdir, 'eth1', 'carrier'), '1')
-        mac = 'aa:bb:cc:aa:bb:cc'
-        write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac)
-        expected = {
-            'ethernets': {'eth1': {'match': {'macaddress': mac},
-                                   'dhcp4': True, 'set-name': 'eth1'}},
-            'version': 2}
-        self.assertEqual(expected, net.generate_fallback_config())
-
-    def test_generate_fallback_finds_dormant_eth_with_mac(self):
-        """generate_fallback_config finds any dormant device with a mac."""
-        write_file(os.path.join(self.sysdir, 'eth0', 'dormant'), '1')
-        mac = 'aa:bb:cc:aa:bb:cc'
-        write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
-        expected = {
-            'ethernets': {'eth0': {'match': {'macaddress': mac}, 'dhcp4': True,
-                                   'set-name': 'eth0'}},
-            'version': 2}
-        self.assertEqual(expected, net.generate_fallback_config())
-
-    def test_generate_fallback_finds_eth_by_operstate(self):
-        """generate_fallback_config finds a device with valid operstate."""
-        mac = 'aa:bb:cc:aa:bb:cc'
-        write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
-        expected = {
-            'ethernets': {
-                'eth0': {'dhcp4': True, 'match': {'macaddress': mac},
-                         'set-name': 'eth0'}},
-            'version': 2}
-        valid_operstates = ['dormant', 'down', 'lowerlayerdown', 'unknown']
-        for state in valid_operstates:
-            write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state)
-            self.assertEqual(expected, net.generate_fallback_config())
-        write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), 'noworky')
-        self.assertIsNone(net.generate_fallback_config())
-
-    def test_generate_fallback_config_skips_veth(self):
-        """generate_fallback_config will skip any veth interfaces."""
-        # A connected veth which gets ignored
-        write_file(os.path.join(self.sysdir, 'veth0', 'carrier'), '1')
-        self.assertIsNone(net.generate_fallback_config())
-
-    def test_generate_fallback_config_skips_bridges(self):
-        """generate_fallback_config will skip any bridge interfaces."""
-        # A connected bridge which gets ignored
-        write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1')
-        mac = 'aa:bb:cc:aa:bb:cc'
-        write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
-        ensure_file(os.path.join(self.sysdir, 'eth0', 'bridge'))
-        self.assertIsNone(net.generate_fallback_config())
-
-    def test_generate_fallback_config_skips_bonds(self):
-        """generate_fallback_config will skip any bonded interfaces."""
-        # A connected bond which gets ignored
-        write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1')
-        mac = 'aa:bb:cc:aa:bb:cc'
-        write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
-        ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding'))
-        self.assertIsNone(net.generate_fallback_config())
-
-    def test_generate_fallback_config_skips_netfail_devs(self):
-        """generate_fallback_config ignores netfail primary/standby devs."""
-        mac = 'aa:bb:cc:aa:bb:cc'  # netfailover devs share the same mac
-        for iface in ['ens3', 'ens3sby', 'enP0s1f3']:
-            write_file(os.path.join(self.sysdir, iface, 'carrier'), '1')
-            write_file(
-                os.path.join(self.sysdir, iface, 'addr_assign_type'), '0')
-            write_file(
-                os.path.join(self.sysdir, iface, 'address'), mac)
-
-        def is_netfail(iface, _driver=None):
-            # ens3 is the master
-            if iface == 'ens3':
-                return False
-            return True
-        self.m_netfail.side_effect = is_netfail
-
-        def is_netfail_master(iface, _driver=None):
-            # ens3 is the master
-            if iface == 'ens3':
-                return True
-            return False
-        self.m_netfail_master.side_effect = is_netfail_master
-        expected = {
-            'ethernets': {
-                'ens3': {'dhcp4': True, 'match': {'name': 'ens3'},
-                         'set-name': 'ens3'}},
-            'version': 2}
-        result = net.generate_fallback_config()
-        self.assertEqual(expected, result)
-
-
-class TestNetFindFallBackNic(CiTestCase):
-
-    def setUp(self):
-        super(TestNetFindFallBackNic, self).setUp()
-        sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
-        self.m_sys_path = sys_mock.start()
-        self.sysdir = self.tmp_dir() + '/'
-        self.m_sys_path.return_value = self.sysdir
-        self.addCleanup(sys_mock.stop)
-        self.add_patch('cloudinit.net.util.is_container', 'm_is_container',
-                       return_value=False)
-        self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle')
-
-    def test_generate_fallback_finds_first_connected_eth_with_mac(self):
-        """find_fallback_nic finds any connected device with a mac."""
-        write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1')
-        write_file(os.path.join(self.sysdir, 'eth1', 'carrier'), '1')
-        mac = 'aa:bb:cc:aa:bb:cc'
-        write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac)
-        self.assertEqual('eth1', net.find_fallback_nic())
-
-
-class TestGetDeviceList(CiTestCase):
-
-    def setUp(self):
-        super(TestGetDeviceList, self).setUp()
-        sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
-        self.m_sys_path = sys_mock.start()
-        self.sysdir = self.tmp_dir() + '/'
-        self.m_sys_path.return_value = self.sysdir
-        self.addCleanup(sys_mock.stop)
-
-    def test_get_devicelist_raise_oserror(self):
-        """get_devicelist raises any non-ENOENT OSError."""
-        error = OSError('Can not do it')
-        error.errno = errno.EPERM  # Set non-ENOENT
-        self.m_sys_path.side_effect = error
-        with self.assertRaises(OSError) as context_manager:
-            net.get_devicelist()
-        exception = context_manager.exception
-        self.assertEqual('Can not do it', str(exception))
-
-    def test_get_devicelist_empty_without_sys_net(self):
-        """get_devicelist returns empty list when missing SYS_CLASS_NET."""
-        self.m_sys_path.return_value = 'idontexist'
-        self.assertEqual([], net.get_devicelist())
-
-    def test_get_devicelist_empty_with_no_devices_in_sys_net(self):
-        """get_devicelist returns empty directory listing for SYS_CLASS_NET."""
-        self.assertEqual([], net.get_devicelist())
-
-    def test_get_devicelist_lists_any_subdirectories_in_sys_net(self):
-        """get_devicelist returns a directory listing for SYS_CLASS_NET."""
-        write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), 'up')
-        write_file(os.path.join(self.sysdir, 'eth1', 'operstate'), 'up')
-        self.assertCountEqual(['eth0', 'eth1'], net.get_devicelist())
-
-
-@mock.patch(
-    "cloudinit.net.is_openvswitch_internal_interface",
-    mock.Mock(return_value=False),
-)
-class TestGetInterfaceMAC(CiTestCase):
-
-    def setUp(self):
-        super(TestGetInterfaceMAC, self).setUp()
-        sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
-        self.m_sys_path = sys_mock.start()
-        self.sysdir = self.tmp_dir() + '/'
-        self.m_sys_path.return_value = self.sysdir
-        self.addCleanup(sys_mock.stop)
-
-    def test_get_interface_mac_false_with_no_mac(self):
-        """get_interface_mac returns False when no mac is reported."""
-        ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding'))
-        mac_path = os.path.join(self.sysdir, 'eth0', 'address')
-        self.assertFalse(os.path.exists(mac_path))
-        self.assertFalse(net.get_interface_mac('eth0'))
-
-    def test_get_interface_mac(self):
-        """get_interface_mac returns the mac from SYS_CLASS_NET/dev/address."""
-        mac = 'aa:bb:cc:aa:bb:cc'
-        write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac)
-        self.assertEqual(mac, net.get_interface_mac('eth1'))
-
-    def test_get_interface_mac_grabs_bonding_address(self):
-        """get_interface_mac returns the source device mac for bonded devs."""
-        source_dev_mac = 'aa:bb:cc:aa:bb:cc'
-        bonded_mac = 'dd:ee:ff:dd:ee:ff'
-        write_file(os.path.join(self.sysdir, 'eth1', 'address'), bonded_mac)
-        write_file(
-            os.path.join(self.sysdir, 'eth1', 'bonding_slave', 'perm_hwaddr'),
-            source_dev_mac)
-        self.assertEqual(source_dev_mac, net.get_interface_mac('eth1'))
-
-    def test_get_interfaces_empty_list_without_sys_net(self):
-        """get_interfaces returns an empty list when missing SYS_CLASS_NET."""
-        self.m_sys_path.return_value = 'idontexist'
-        self.assertEqual([], net.get_interfaces())
-
-    def test_get_interfaces_by_mac_skips_empty_mac(self):
-        """Ignore 00:00:00:00:00:00 addresses from get_interfaces_by_mac."""
-        empty_mac = '00:00:00:00:00:00'
-        mac = 'aa:bb:cc:aa:bb:cc'
-        write_file(os.path.join(self.sysdir, 'eth1', 'address'), empty_mac)
-        write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '0')
-        write_file(os.path.join(self.sysdir, 'eth2', 'addr_assign_type'), '0')
-        write_file(os.path.join(self.sysdir, 'eth2', 'address'), mac)
-        expected = [('eth2', 'aa:bb:cc:aa:bb:cc', None, None)]
-        self.assertEqual(expected, net.get_interfaces())
-
-    def test_get_interfaces_by_mac_skips_missing_mac(self):
-        """Ignore interfaces without an address from get_interfaces_by_mac."""
-        write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '0')
-        address_path = os.path.join(self.sysdir, 'eth1', 'address')
-        self.assertFalse(os.path.exists(address_path))
-        mac = 'aa:bb:cc:aa:bb:cc'
-        write_file(os.path.join(self.sysdir, 'eth2', 'addr_assign_type'), '0')
-        write_file(os.path.join(self.sysdir, 'eth2', 'address'), mac)
-        expected = [('eth2', 'aa:bb:cc:aa:bb:cc', None, None)]
-        self.assertEqual(expected, net.get_interfaces())
-
-    def test_get_interfaces_by_mac_skips_master_devs(self):
-        """Ignore interfaces with a master device which would have dup mac."""
-        mac1 = mac2 = 'aa:bb:cc:aa:bb:cc'
-        write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '0')
-        write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac1)
-        write_file(os.path.join(self.sysdir, 'eth1', 'master'), "blah")
-        write_file(os.path.join(self.sysdir, 'eth2', 'addr_assign_type'), '0')
-        write_file(os.path.join(self.sysdir, 'eth2', 'address'), mac2)
-        expected = [('eth2', mac2, None, None)]
-        self.assertEqual(expected, net.get_interfaces())
-
-    @mock.patch('cloudinit.net.is_netfailover')
-    def test_get_interfaces_by_mac_skips_netfailvoer(self, m_netfail):
-        """Ignore interfaces if netfailover primary or standby."""
-        mac = 'aa:bb:cc:aa:bb:cc'  # netfailover devs share the same mac
-        for iface in ['ens3', 'ens3sby', 'enP0s1f3']:
-            write_file(
-                os.path.join(self.sysdir, iface, 'addr_assign_type'), '0')
-            write_file(
-                os.path.join(self.sysdir, iface, 'address'), mac)
-
-        def is_netfail(iface, _driver=None):
-            # ens3 is the master
-            if iface == 'ens3':
-                return False
-            else:
-                return True
-        m_netfail.side_effect = is_netfail
-        expected = [('ens3', mac, None, None)]
-        self.assertEqual(expected, net.get_interfaces())
-
-    def test_get_interfaces_does_not_skip_phys_members_of_bridges_and_bonds(
-        self
-    ):
-        bridge_mac = 'aa:bb:cc:aa:bb:cc'
-        bond_mac = 'cc:bb:aa:cc:bb:aa'
-        ovs_mac = 'bb:cc:aa:bb:cc:aa'
-
-        write_file(os.path.join(self.sysdir, 'br0', 'address'), bridge_mac)
-        write_file(os.path.join(self.sysdir, 'br0', 'bridge'), '')
-
-        write_file(os.path.join(self.sysdir, 'bond0', 'address'), bond_mac)
-        write_file(os.path.join(self.sysdir, 'bond0', 'bonding'), '')
-
-        write_file(os.path.join(self.sysdir, 'ovs-system', 'address'),
-                   ovs_mac)
-
-        write_file(os.path.join(self.sysdir, 'eth1', 'address'), bridge_mac)
-        os.symlink('../br0', os.path.join(self.sysdir, 'eth1', 'master'))
-
-        write_file(os.path.join(self.sysdir, 'eth2', 'address'), bond_mac)
-        os.symlink('../bond0', os.path.join(self.sysdir, 'eth2', 'master'))
-
-        write_file(os.path.join(self.sysdir, 'eth3', 'address'), ovs_mac)
-        os.symlink('../ovs-system', os.path.join(self.sysdir, 'eth3',
-                                                 'master'))
-        os.symlink('../ovs-system', os.path.join(self.sysdir, 'eth3',
-                                                 'upper_ovs-system'))
-
-        interface_names = [interface[0] for interface in net.get_interfaces()]
-        self.assertEqual(['eth1', 'eth2', 'eth3', 'ovs-system'],
-                         sorted(interface_names))
-
-
-class TestInterfaceHasOwnMAC(CiTestCase):
-
-    def setUp(self):
-        super(TestInterfaceHasOwnMAC, self).setUp()
-        sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
-        self.m_sys_path = sys_mock.start()
-        self.sysdir = self.tmp_dir() + '/'
-        self.m_sys_path.return_value = self.sysdir
-        self.addCleanup(sys_mock.stop)
-
-    def test_interface_has_own_mac_false_when_stolen(self):
-        """Return False from interface_has_own_mac when address is stolen."""
-        write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '2')
-        self.assertFalse(net.interface_has_own_mac('eth1'))
-
-    def test_interface_has_own_mac_true_when_not_stolen(self):
-        """Return True from interface_has_own_mac when mac isn't stolen."""
-        valid_assign_types = ['0', '1', '3']
-        assign_path = os.path.join(self.sysdir, 'eth1', 'addr_assign_type')
-        for _type in valid_assign_types:
-            write_file(assign_path, _type)
-            self.assertTrue(net.interface_has_own_mac('eth1'))
-
-    def test_interface_has_own_mac_strict_errors_on_absent_assign_type(self):
-        """When addr_assign_type is absent, interface_has_own_mac errors."""
-        with self.assertRaises(ValueError):
-            net.interface_has_own_mac('eth1', strict=True)
-
-
-@mock.patch('cloudinit.net.subp.subp')
-class TestEphemeralIPV4Network(CiTestCase):
-
-    with_logs = True
-
-    def setUp(self):
-        super(TestEphemeralIPV4Network, self).setUp()
-        sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
-        self.m_sys_path = sys_mock.start()
-        self.sysdir = self.tmp_dir() + '/'
-        self.m_sys_path.return_value = self.sysdir
-        self.addCleanup(sys_mock.stop)
-
-    def test_ephemeral_ipv4_network_errors_on_missing_params(self, m_subp):
-        """No required params for EphemeralIPv4Network can be None."""
-        required_params = {
-            'interface': 'eth0', 'ip': '192.168.2.2',
-            'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255'}
-        for key in required_params.keys():
-            params = copy.deepcopy(required_params)
-            params[key] = None
-            with self.assertRaises(ValueError) as context_manager:
-                net.EphemeralIPv4Network(**params)
-            error = context_manager.exception
-            self.assertIn('Cannot init network on', str(error))
-            self.assertEqual(0, m_subp.call_count)
-
-    def test_ephemeral_ipv4_network_errors_invalid_mask_prefix(self, m_subp):
-        """Raise an error when prefix_or_mask is not a netmask or prefix."""
-        params = {
-            'interface': 'eth0', 'ip': '192.168.2.2',
-            'broadcast': '192.168.2.255'}
-        invalid_masks = ('invalid', 'invalid.', '123.123.123')
-        for error_val in invalid_masks:
-            params['prefix_or_mask'] = error_val
-            with self.assertRaises(ValueError) as context_manager:
-                with net.EphemeralIPv4Network(**params):
-                    pass
-            error = context_manager.exception
-            self.assertIn('Cannot setup network: netmask', str(error))
-            self.assertEqual(0, m_subp.call_count)
-
-    def test_ephemeral_ipv4_network_performs_teardown(self, m_subp):
-        """EphemeralIPv4Network performs teardown on the device if setup."""
-        expected_setup_calls = [
-            mock.call(
-                ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24',
-                 'broadcast', '192.168.2.255', 'dev', 'eth0'],
-                capture=True, update_env={'LANG': 'C'}),
-            mock.call(
-                ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'],
-                capture=True)]
-        expected_teardown_calls = [
-            mock.call(
-                ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0',
-                 'down'], capture=True),
-            mock.call(
-                ['ip', '-family', 'inet', 'addr', 'del', '192.168.2.2/24',
-                 'dev', 'eth0'], capture=True)]
-        params = {
-            'interface': 'eth0', 'ip': '192.168.2.2',
-            'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255'}
-        with net.EphemeralIPv4Network(**params):
-            self.assertEqual(expected_setup_calls, m_subp.call_args_list)
-        m_subp.assert_has_calls(expected_teardown_calls)
-
-    @mock.patch('cloudinit.net.readurl')
-    def test_ephemeral_ipv4_no_network_if_url_connectivity(
-            self, m_readurl, m_subp):
-        """No network setup is performed if we can successfully connect to
-        connectivity_url."""
-        params = {
-            'interface': 'eth0', 'ip': '192.168.2.2',
-            'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255',
-            'connectivity_url_data': {'url': 'http://example.org/index.html'}
-        }
-
-        with net.EphemeralIPv4Network(**params):
-            self.assertEqual(
-                [mock.call(url='http://example.org/index.html', timeout=5)],
-                m_readurl.call_args_list
-            )
-        # Ensure that no teardown happens:
-        m_subp.assert_has_calls([])
-
-    def test_ephemeral_ipv4_network_noop_when_configured(self, m_subp):
-        """EphemeralIPv4Network handles exception when address is setup.
-
-        It performs no cleanup as the interface was already setup.
-        """
-        params = {
-            'interface': 'eth0', 'ip': '192.168.2.2',
-            'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255'}
-        m_subp.side_effect = ProcessExecutionError(
-            '', 'RTNETLINK answers: File exists', 2)
-        expected_calls = [
-            mock.call(
-                ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24',
-                 'broadcast', '192.168.2.255', 'dev', 'eth0'],
-                capture=True, update_env={'LANG': 'C'})]
-        with net.EphemeralIPv4Network(**params):
-            pass
-        self.assertEqual(expected_calls, m_subp.call_args_list)
-        self.assertIn(
-            'Skip ephemeral network setup, eth0 already has address',
-            self.logs.getvalue())
-
-    def test_ephemeral_ipv4_network_with_prefix(self, m_subp):
-        """EphemeralIPv4Network takes a valid prefix to setup the network."""
-        params = {
-            'interface': 'eth0', 'ip': '192.168.2.2',
-            'prefix_or_mask': '24', 'broadcast': '192.168.2.255'}
-        for prefix_val in ['24', 16]:  # prefix can be int or string
-            params['prefix_or_mask'] = prefix_val
-            with net.EphemeralIPv4Network(**params):
-                pass
-        m_subp.assert_has_calls([mock.call(
-            ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24',
-             'broadcast', '192.168.2.255', 'dev', 'eth0'],
-            capture=True, update_env={'LANG': 'C'})])
-        m_subp.assert_has_calls([mock.call(
-            ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/16',
-             'broadcast', '192.168.2.255', 'dev', 'eth0'],
-            capture=True, update_env={'LANG': 'C'})])
-
-    def test_ephemeral_ipv4_network_with_new_default_route(self, m_subp):
-        """Add the route when router is set and no default route exists."""
-        params = {
-            'interface': 'eth0', 'ip': '192.168.2.2',
-            'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255',
-            'router': '192.168.2.1'}
-        m_subp.return_value = '', ''  # Empty response from ip route gw check
-        expected_setup_calls = [
-            mock.call(
-                ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24',
-                 'broadcast', '192.168.2.255', 'dev', 'eth0'],
-                capture=True, update_env={'LANG': 'C'}),
-            mock.call(
-                ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'],
-                capture=True),
-            mock.call(
-                ['ip', 'route', 'show', '0.0.0.0/0'], capture=True),
-            mock.call(['ip', '-4', 'route', 'add', '192.168.2.1',
-                       'dev', 'eth0', 'src', '192.168.2.2'], capture=True),
-            mock.call(
-                ['ip', '-4', 'route', 'add', 'default', 'via',
-                 '192.168.2.1', 'dev', 'eth0'], capture=True)]
-        expected_teardown_calls = [
-            mock.call(['ip', '-4', 'route', 'del', 'default', 'dev', 'eth0'],
-                      capture=True),
-            mock.call(['ip', '-4', 'route', 'del', '192.168.2.1',
-                       'dev', 'eth0', 'src', '192.168.2.2'], capture=True),
-        ]
-
-        with net.EphemeralIPv4Network(**params):
-            self.assertEqual(expected_setup_calls, m_subp.call_args_list)
-        m_subp.assert_has_calls(expected_teardown_calls)
-
-    def test_ephemeral_ipv4_network_with_rfc3442_static_routes(self, m_subp):
-        params = {
-            'interface': 'eth0', 'ip': '192.168.2.2',
-            'prefix_or_mask': '255.255.255.255', 'broadcast': '192.168.2.255',
-            'static_routes': [('192.168.2.1/32', '0.0.0.0'),
-                              ('169.254.169.254/32', '192.168.2.1'),
-                              ('0.0.0.0/0', '192.168.2.1')],
-            'router': '192.168.2.1'}
-        expected_setup_calls = [
-            mock.call(
-                ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/32',
-                 'broadcast', '192.168.2.255', 'dev', 'eth0'],
-                capture=True, update_env={'LANG': 'C'}),
-            mock.call(
-                ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'],
-                capture=True),
-            mock.call(
-                ['ip', '-4', 'route', 'add', '192.168.2.1/32',
-                 'dev', 'eth0'], capture=True),
-            mock.call(
-                ['ip', '-4', 'route', 'add', '169.254.169.254/32',
-                 'via', '192.168.2.1', 'dev', 'eth0'], capture=True),
-            mock.call(
-                ['ip', '-4', 'route', 'add', '0.0.0.0/0',
-                 'via', '192.168.2.1', 'dev', 'eth0'], capture=True)]
-        expected_teardown_calls = [
-            mock.call(
-                ['ip', '-4', 'route', 'del', '0.0.0.0/0',
-                 'via', '192.168.2.1', 'dev', 'eth0'], capture=True),
-            mock.call(
-                ['ip', '-4', 'route', 'del', '169.254.169.254/32',
-                 'via', '192.168.2.1', 'dev', 'eth0'], capture=True),
-            mock.call(
-                ['ip', '-4', 'route', 'del', '192.168.2.1/32',
-                 'dev', 'eth0'], capture=True),
-            mock.call(
-                ['ip', '-family', 'inet', 'link', 'set', 'dev',
-                 'eth0', 'down'], capture=True),
-            mock.call(
-                ['ip', '-family', 'inet', 'addr', 'del',
-                 '192.168.2.2/32', 'dev', 'eth0'], capture=True)
-        ]
-        with net.EphemeralIPv4Network(**params):
-            self.assertEqual(expected_setup_calls, m_subp.call_args_list)
-        m_subp.assert_has_calls(expected_setup_calls + expected_teardown_calls)
-
-
-class TestApplyNetworkCfgNames(CiTestCase):
-    V1_CONFIG = textwrap.dedent("""\
-        version: 1
-        config:
-            - type: physical
-              name: interface0
-              mac_address: "52:54:00:12:34:00"
-              subnets:
-                  - type: static
-                    address: 10.0.2.15
-                    netmask: 255.255.255.0
-                    gateway: 10.0.2.2
-        """)
-    V2_CONFIG = textwrap.dedent("""\
-        version: 2
-        ethernets:
-            interface0:
-                match:
-                    macaddress: "52:54:00:12:34:00"
-                addresses:
-                    - 10.0.2.15/24
-                gateway4: 10.0.2.2
-                set-name: interface0
-        """)
-
-    V2_CONFIG_NO_SETNAME = textwrap.dedent("""\
-        version: 2
-        ethernets:
-            interface0:
-                match:
-                    macaddress: "52:54:00:12:34:00"
-                addresses:
-                    - 10.0.2.15/24
-                gateway4: 10.0.2.2
-        """)
-
-    V2_CONFIG_NO_MAC = textwrap.dedent("""\
-        version: 2
-        ethernets:
-            interface0:
-                match:
-                    driver: virtio-net
-                addresses:
-                    - 10.0.2.15/24
-                gateway4: 10.0.2.2
-                set-name: interface0
-        """)
-
-    @mock.patch('cloudinit.net.device_devid')
-    @mock.patch('cloudinit.net.device_driver')
-    @mock.patch('cloudinit.net._rename_interfaces')
-    def test_apply_v1_renames(self, m_rename_interfaces, m_device_driver,
-                              m_device_devid):
-        m_device_driver.return_value = 'virtio_net'
-        m_device_devid.return_value = '0x15d8'
-
-        net.apply_network_config_names(yaml.load(self.V1_CONFIG))
-
-        call = ['52:54:00:12:34:00', 'interface0', 'virtio_net', '0x15d8']
-        m_rename_interfaces.assert_called_with([call])
-
-    @mock.patch('cloudinit.net.device_devid')
-    @mock.patch('cloudinit.net.device_driver')
-    @mock.patch('cloudinit.net._rename_interfaces')
-    def test_apply_v2_renames(self, m_rename_interfaces, m_device_driver,
-                              m_device_devid):
-        m_device_driver.return_value = 'virtio_net'
-        m_device_devid.return_value = '0x15d8'
-
-        net.apply_network_config_names(yaml.load(self.V2_CONFIG))
-
-        call = ['52:54:00:12:34:00', 'interface0', 'virtio_net', '0x15d8']
-        m_rename_interfaces.assert_called_with([call])
-
-    @mock.patch('cloudinit.net._rename_interfaces')
-    def test_apply_v2_renames_skips_without_setname(self, m_rename_interfaces):
-        net.apply_network_config_names(yaml.load(self.V2_CONFIG_NO_SETNAME))
-        m_rename_interfaces.assert_called_with([])
-
-    @mock.patch('cloudinit.net._rename_interfaces')
-    def test_apply_v2_renames_skips_without_mac(self, m_rename_interfaces):
-        net.apply_network_config_names(yaml.load(self.V2_CONFIG_NO_MAC))
-        m_rename_interfaces.assert_called_with([])
-
-    def test_apply_v2_renames_raises_runtime_error_on_unknown_version(self):
-        with self.assertRaises(RuntimeError):
-            net.apply_network_config_names(yaml.load("version: 3"))
-
-
-class TestHasURLConnectivity(HttprettyTestCase):
-
-    def setUp(self):
-        super(TestHasURLConnectivity, self).setUp()
-        self.url = 'http://fake/'
-        self.kwargs = {'allow_redirects': True, 'timeout': 5.0}
-
-    @mock.patch('cloudinit.net.readurl')
-    def test_url_timeout_on_connectivity_check(self, m_readurl):
-        """A timeout of 5 seconds is provided when reading a url."""
-        self.assertTrue(
-            net.has_url_connectivity({'url': self.url}),
-            'Expected True on url connect')
-
-    def test_true_on_url_connectivity_success(self):
-        httpretty.register_uri(httpretty.GET, self.url)
-        self.assertTrue(
-            net.has_url_connectivity({'url': self.url}),
-            'Expected True on url connect')
-
-    @mock.patch('requests.Session.request')
-    def test_true_on_url_connectivity_timeout(self, m_request):
-        """A timeout raised accessing the url will return False."""
-        m_request.side_effect = requests.Timeout('Fake Connection Timeout')
-        self.assertFalse(
-            net.has_url_connectivity({'url': self.url}),
-            'Expected False on url timeout')
-
-    def test_true_on_url_connectivity_failure(self):
-        httpretty.register_uri(httpretty.GET, self.url, body={}, status=404)
-        self.assertFalse(
-            net.has_url_connectivity({'url': self.url}),
-            'Expected False on url fail')
-
-
-def _mk_v1_phys(mac, name, driver, device_id):
-    v1_cfg = {'type': 'physical', 'name': name, 'mac_address': mac}
-    params = {}
-    if driver:
-        params.update({'driver': driver})
-    if device_id:
-        params.update({'device_id': device_id})
-
-    if params:
-        v1_cfg.update({'params': params})
-
-    return v1_cfg
-
-
-def _mk_v2_phys(mac, name, driver=None, device_id=None):
-    v2_cfg = {'set-name': name, 'match': {'macaddress': mac}}
-    if driver:
-        v2_cfg['match'].update({'driver': driver})
-    if device_id:
-        v2_cfg['match'].update({'device_id': device_id})
-
-    return v2_cfg
-
-
-class TestExtractPhysdevs(CiTestCase):
-
-    def setUp(self):
-        super(TestExtractPhysdevs, self).setUp()
-        self.add_patch('cloudinit.net.device_driver', 'm_driver')
-        self.add_patch('cloudinit.net.device_devid', 'm_devid')
-
-    def test_extract_physdevs_looks_up_driver_v1(self):
-        driver = 'virtio'
-        self.m_driver.return_value = driver
-        physdevs = [
-            ['aa:bb:cc:dd:ee:ff', 'eth0', None, '0x1000'],
-        ]
-        netcfg = {
-            'version': 1,
-            'config': [_mk_v1_phys(*args) for args in physdevs],
-        }
-        # insert the driver value for verification
-        physdevs[0][2] = driver
-        self.assertEqual(sorted(physdevs),
-                         sorted(net.extract_physdevs(netcfg)))
-        self.m_driver.assert_called_with('eth0')
-
-    def test_extract_physdevs_looks_up_driver_v2(self):
-        driver = 'virtio'
-        self.m_driver.return_value = driver
-        physdevs = [
-            ['aa:bb:cc:dd:ee:ff', 'eth0', None, '0x1000'],
-        ]
-        netcfg = {
-            'version': 2,
-            'ethernets': {args[1]: _mk_v2_phys(*args) for args in physdevs},
-        }
-        # insert the driver value for verification
-        physdevs[0][2] = driver
-        self.assertEqual(sorted(physdevs),
-                         sorted(net.extract_physdevs(netcfg)))
-        self.m_driver.assert_called_with('eth0')
-
-    def test_extract_physdevs_looks_up_devid_v1(self):
-        devid = '0x1000'
-        self.m_devid.return_value = devid
-        physdevs = [
-            ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', None],
-        ]
-        netcfg = {
-            'version': 1,
-            'config': [_mk_v1_phys(*args) for args in physdevs],
-        }
-        # insert the devid value for verification
-        physdevs[0][3] = devid
-        self.assertEqual(sorted(physdevs),
-                         sorted(net.extract_physdevs(netcfg)))
-        self.m_devid.assert_called_with('eth0')
-
-    def test_extract_physdevs_looks_up_devid_v2(self):
-        devid = '0x1000'
-        self.m_devid.return_value = devid
-        physdevs = [
-            ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', None],
-        ]
-        netcfg = {
-            'version': 2,
-            'ethernets': {args[1]: _mk_v2_phys(*args) for args in physdevs},
-        }
-        # insert the devid value for verification
-        physdevs[0][3] = devid
-        self.assertEqual(sorted(physdevs),
-                         sorted(net.extract_physdevs(netcfg)))
-        self.m_devid.assert_called_with('eth0')
-
-    def test_get_v1_type_physical(self):
-        physdevs = [
-            ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'],
-            ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'],
-            ['09:87:65:43:21:10', 'ens0p1', 'mlx4_core', '0:0:1000'],
-        ]
-        netcfg = {
-            'version': 1,
-            'config': [_mk_v1_phys(*args) for args in physdevs],
-        }
-        self.assertEqual(sorted(physdevs),
-                         sorted(net.extract_physdevs(netcfg)))
-
-    def test_get_v2_type_physical(self):
-        physdevs = [
-            ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'],
-            ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'],
-            ['09:87:65:43:21:10', 'ens0p1', 'mlx4_core', '0:0:1000'],
-        ]
-        netcfg = {
-            'version': 2,
-            'ethernets': {args[1]: _mk_v2_phys(*args) for args in physdevs},
-        }
-        self.assertEqual(sorted(physdevs),
-                         sorted(net.extract_physdevs(netcfg)))
-
-    def test_get_v2_type_physical_skips_if_no_set_name(self):
-        netcfg = {
-            'version': 2,
-            'ethernets': {
-                'ens3': {
-                    'match': {'macaddress': '00:11:22:33:44:55'},
-                }
-            }
-        }
-        self.assertEqual([], net.extract_physdevs(netcfg))
-
-    def test_runtime_error_on_unknown_netcfg_version(self):
-        with self.assertRaises(RuntimeError):
-            net.extract_physdevs({'version': 3, 'awesome_config': []})
-
-
-class TestNetFailOver(CiTestCase):
-
-    def setUp(self):
-        super(TestNetFailOver, self).setUp()
-        self.add_patch('cloudinit.net.util', 'm_util')
-        self.add_patch('cloudinit.net.read_sys_net', 'm_read_sys_net')
-        self.add_patch('cloudinit.net.device_driver', 'm_device_driver')
-
-    def test_get_dev_features(self):
-        devname = self.random_string()
-        features = self.random_string()
-        self.m_read_sys_net.return_value = features
-
-        self.assertEqual(features, net.get_dev_features(devname))
-        self.assertEqual(1, self.m_read_sys_net.call_count)
-        self.assertEqual(mock.call(devname, 'device/features'),
-                         self.m_read_sys_net.call_args_list[0])
-
-    def test_get_dev_features_none_returns_empty_string(self):
-        devname = self.random_string()
-        self.m_read_sys_net.side_effect = Exception('error')
-        self.assertEqual('', net.get_dev_features(devname))
-        self.assertEqual(1, self.m_read_sys_net.call_count)
-        self.assertEqual(mock.call(devname, 'device/features'),
-                         self.m_read_sys_net.call_args_list[0])
-
-    @mock.patch('cloudinit.net.get_dev_features')
-    def test_has_netfail_standby_feature(self, m_dev_features):
-        devname = self.random_string()
-        standby_features = ('0' * 62) + '1' + '0'
-        m_dev_features.return_value = standby_features
-        self.assertTrue(net.has_netfail_standby_feature(devname))
-
-    @mock.patch('cloudinit.net.get_dev_features')
-    def test_has_netfail_standby_feature_short_is_false(self, m_dev_features):
-        devname = self.random_string()
-        standby_features = self.random_string()
-        m_dev_features.return_value = standby_features
-        self.assertFalse(net.has_netfail_standby_feature(devname))
-
-    @mock.patch('cloudinit.net.get_dev_features')
-    def test_has_netfail_standby_feature_not_present_is_false(self,
-                                                              m_dev_features):
-        devname = self.random_string()
-        standby_features = '0' * 64
-        m_dev_features.return_value = standby_features
-        self.assertFalse(net.has_netfail_standby_feature(devname))
-
-    @mock.patch('cloudinit.net.get_dev_features')
-    def test_has_netfail_standby_feature_no_features_is_false(self,
-                                                              m_dev_features):
-        devname = self.random_string()
-        standby_features = None
-        m_dev_features.return_value = standby_features
-        self.assertFalse(net.has_netfail_standby_feature(devname))
-
-    @mock.patch('cloudinit.net.has_netfail_standby_feature')
-    @mock.patch('cloudinit.net.os.path.exists')
-    def test_is_netfail_master(self, m_exists, m_standby):
-        devname = self.random_string()
-        driver = 'virtio_net'
-        m_exists.return_value = False  # no master sysfs attr
-        m_standby.return_value = True  # has standby feature flag
-        self.assertTrue(net.is_netfail_master(devname, driver))
-
-    @mock.patch('cloudinit.net.sys_dev_path')
-    def test_is_netfail_master_checks_master_attr(self, m_sysdev):
-        devname = self.random_string()
-        driver = 'virtio_net'
-        m_sysdev.return_value = self.random_string()
-        self.assertFalse(net.is_netfail_master(devname, driver))
-        self.assertEqual(1, m_sysdev.call_count)
-        self.assertEqual(mock.call(devname, path='master'),
-                         m_sysdev.call_args_list[0])
-
-    @mock.patch('cloudinit.net.has_netfail_standby_feature')
-    @mock.patch('cloudinit.net.os.path.exists')
-    def test_is_netfail_master_wrong_driver(self, m_exists, m_standby):
-        devname = self.random_string()
-        driver = self.random_string()
-        self.assertFalse(net.is_netfail_master(devname, driver))
-
-    @mock.patch('cloudinit.net.has_netfail_standby_feature')
-    @mock.patch('cloudinit.net.os.path.exists')
-    def test_is_netfail_master_has_master_attr(self, m_exists, m_standby):
-        devname = self.random_string()
-        driver = 'virtio_net'
-        m_exists.return_value = True  # has master sysfs attr
-        self.assertFalse(net.is_netfail_master(devname, driver))
-
-    @mock.patch('cloudinit.net.has_netfail_standby_feature')
-    @mock.patch('cloudinit.net.os.path.exists')
-    def test_is_netfail_master_no_standby_feat(self, m_exists, m_standby):
-        devname = self.random_string()
-        driver = 'virtio_net'
-        m_exists.return_value = False  # no master sysfs attr
-        m_standby.return_value = False  # no standby feature flag
-        self.assertFalse(net.is_netfail_master(devname, driver))
-
-    @mock.patch('cloudinit.net.has_netfail_standby_feature')
-    @mock.patch('cloudinit.net.os.path.exists')
-    @mock.patch('cloudinit.net.sys_dev_path')
-    def test_is_netfail_primary(self, m_sysdev, m_exists, m_standby):
-        devname = self.random_string()
-        driver = self.random_string()  # device not virtio_net
-        master_devname = self.random_string()
-        m_sysdev.return_value = "%s/%s" % (self.random_string(),
-                                           master_devname)
-        m_exists.return_value = True  # has master sysfs attr
-        self.m_device_driver.return_value = 'virtio_net'  # master virtio_net
-        m_standby.return_value = True  # has standby feature flag
-        self.assertTrue(net.is_netfail_primary(devname, driver))
-        self.assertEqual(1, self.m_device_driver.call_count)
-        self.assertEqual(mock.call(master_devname),
-                         self.m_device_driver.call_args_list[0])
-        self.assertEqual(1, m_standby.call_count)
-        self.assertEqual(mock.call(master_devname),
-                         m_standby.call_args_list[0])
-
-    @mock.patch('cloudinit.net.has_netfail_standby_feature')
-    @mock.patch('cloudinit.net.os.path.exists')
-    @mock.patch('cloudinit.net.sys_dev_path')
-    def test_is_netfail_primary_wrong_driver(self, m_sysdev, m_exists,
-                                             m_standby):
-        devname = self.random_string()
-        driver = 'virtio_net'
-        self.assertFalse(net.is_netfail_primary(devname, driver))
-
-    @mock.patch('cloudinit.net.has_netfail_standby_feature')
-    @mock.patch('cloudinit.net.os.path.exists')
-    @mock.patch('cloudinit.net.sys_dev_path')
-    def test_is_netfail_primary_no_master(self, m_sysdev, m_exists, m_standby):
-        devname = self.random_string()
-        driver = self.random_string()  # device not virtio_net
-        m_exists.return_value = False  # no master sysfs attr
-        self.assertFalse(net.is_netfail_primary(devname, driver))
-
-    @mock.patch('cloudinit.net.has_netfail_standby_feature')
-    @mock.patch('cloudinit.net.os.path.exists')
-    @mock.patch('cloudinit.net.sys_dev_path')
-    def test_is_netfail_primary_bad_master(self, m_sysdev, m_exists,
-                                           m_standby):
-        devname = self.random_string()
-        driver = self.random_string()  # device not virtio_net
-        master_devname = self.random_string()
-        m_sysdev.return_value = "%s/%s" % (self.random_string(),
-                                           master_devname)
-        m_exists.return_value = True  # has master sysfs attr
-        self.m_device_driver.return_value = 'XXXX'  # master not virtio_net
-        self.assertFalse(net.is_netfail_primary(devname, driver))
-
-    @mock.patch('cloudinit.net.has_netfail_standby_feature')
-    @mock.patch('cloudinit.net.os.path.exists')
-    @mock.patch('cloudinit.net.sys_dev_path')
-    def test_is_netfail_primary_no_standby(self, m_sysdev, m_exists,
-                                           m_standby):
-        devname = self.random_string()
-        driver = self.random_string()  # device not virtio_net
-        master_devname = self.random_string()
-        m_sysdev.return_value = "%s/%s" % (self.random_string(),
-                                           master_devname)
-        m_exists.return_value = True  # has master sysfs attr
-        self.m_device_driver.return_value = 'virtio_net'  # master virtio_net
-        m_standby.return_value = False  # master has no standby feature flag
-        self.assertFalse(net.is_netfail_primary(devname, driver))
-
-    @mock.patch('cloudinit.net.has_netfail_standby_feature')
-    @mock.patch('cloudinit.net.os.path.exists')
-    def test_is_netfail_standby(self, m_exists, m_standby):
-        devname = self.random_string()
-        driver = 'virtio_net'
-        m_exists.return_value = True  # has master sysfs attr
-        m_standby.return_value = True  # has standby feature flag
-        self.assertTrue(net.is_netfail_standby(devname, driver))
-
-    @mock.patch('cloudinit.net.has_netfail_standby_feature')
-    @mock.patch('cloudinit.net.os.path.exists')
-    def test_is_netfail_standby_wrong_driver(self, m_exists, m_standby):
-        devname = self.random_string()
-        driver = self.random_string()
-        self.assertFalse(net.is_netfail_standby(devname, driver))
-
-    @mock.patch('cloudinit.net.has_netfail_standby_feature')
-    @mock.patch('cloudinit.net.os.path.exists')
-    def test_is_netfail_standby_no_master(self, m_exists, m_standby):
-        devname = self.random_string()
-        driver = 'virtio_net'
-        m_exists.return_value = False  # no master sysfs attr
-        self.assertFalse(net.is_netfail_standby(devname, driver))
-
-    @mock.patch('cloudinit.net.has_netfail_standby_feature')
-    @mock.patch('cloudinit.net.os.path.exists')
-    def test_is_netfail_standby_no_standby_feature(self, m_exists, m_standby):
-        devname = self.random_string()
-        driver = 'virtio_net'
-        m_exists.return_value = True  # has master sysfs attr
-        m_standby.return_value = False  # no standby feature flag
-        self.assertFalse(net.is_netfail_standby(devname, driver))
-
-    @mock.patch('cloudinit.net.is_netfail_standby')
-    @mock.patch('cloudinit.net.is_netfail_primary')
-    def test_is_netfailover_primary(self, m_primary, m_standby):
-        devname = self.random_string()
-        driver = self.random_string()
-        m_primary.return_value = True
-        m_standby.return_value = False
-        self.assertTrue(net.is_netfailover(devname, driver))
-
-    @mock.patch('cloudinit.net.is_netfail_standby')
-    @mock.patch('cloudinit.net.is_netfail_primary')
-    def test_is_netfailover_standby(self, m_primary, m_standby):
-        devname = self.random_string()
-        driver = self.random_string()
-        m_primary.return_value = False
-        m_standby.return_value = True
-        self.assertTrue(net.is_netfailover(devname, driver))
-
-    @mock.patch('cloudinit.net.is_netfail_standby')
-    @mock.patch('cloudinit.net.is_netfail_primary')
-    def test_is_netfailover_returns_false(self, m_primary, m_standby):
-        devname = self.random_string()
-        driver = self.random_string()
-        m_primary.return_value = False
-        m_standby.return_value = False
-        self.assertFalse(net.is_netfailover(devname, driver))
-
-
-class TestOpenvswitchIsInstalled:
-    """Test cloudinit.net.openvswitch_is_installed.
-
-    Uses the ``clear_lru_cache`` local autouse fixture to allow us to test
-    despite the ``lru_cache`` decorator on the unit under test.
-    """
-
-    @pytest.fixture(autouse=True)
-    def clear_lru_cache(self):
-        net.openvswitch_is_installed.cache_clear()
-
-    @pytest.mark.parametrize(
-        "expected,which_return", [(True, "/some/path"), (False, None)]
-    )
-    @mock.patch("cloudinit.net.subp.which")
-    def test_mirrors_which_result(self, m_which, expected, which_return):
-        m_which.return_value = which_return
-        assert expected == net.openvswitch_is_installed()
-
-    @mock.patch("cloudinit.net.subp.which")
-    def test_only_calls_which_once(self, m_which):
-        net.openvswitch_is_installed()
-        net.openvswitch_is_installed()
-        assert 1 == m_which.call_count
-
-
-@mock.patch("cloudinit.net.subp.subp", return_value=("", ""))
-class TestGetOVSInternalInterfaces:
-    """Test cloudinit.net.get_ovs_internal_interfaces.
-
-    Uses the ``clear_lru_cache`` local autouse fixture to allow us to test
-    despite the ``lru_cache`` decorator on the unit under test.
-    """
-    @pytest.fixture(autouse=True)
-    def clear_lru_cache(self):
-        net.get_ovs_internal_interfaces.cache_clear()
-
-    def test_command_used(self, m_subp):
-        """Test we use the correct command when we call subp"""
-        net.get_ovs_internal_interfaces()
-
-        assert [
-            mock.call(net.OVS_INTERNAL_INTERFACE_LOOKUP_CMD)
-        ] == m_subp.call_args_list
-
-    def test_subp_contents_split_and_returned(self, m_subp):
-        """Test that the command output is appropriately mangled."""
-        stdout = "iface1\niface2\niface3\n"
-        m_subp.return_value = (stdout, "")
-
-        assert [
-            "iface1",
-            "iface2",
-            "iface3",
-        ] == net.get_ovs_internal_interfaces()
-
-    def test_database_connection_error_handled_gracefully(self, m_subp):
-        """Test that the error indicating OVS is down is handled gracefully."""
-        m_subp.side_effect = ProcessExecutionError(
-            stderr="database connection failed"
-        )
-
-        assert [] == net.get_ovs_internal_interfaces()
-
-    def test_other_errors_raised(self, m_subp):
-        """Test that only database connection errors are handled."""
-        m_subp.side_effect = ProcessExecutionError()
-
-        with pytest.raises(ProcessExecutionError):
-            net.get_ovs_internal_interfaces()
-
-    def test_only_runs_once(self, m_subp):
-        """Test that we cache the value."""
-        net.get_ovs_internal_interfaces()
-        net.get_ovs_internal_interfaces()
-
-        assert 1 == m_subp.call_count
-
-
-@mock.patch("cloudinit.net.get_ovs_internal_interfaces")
-@mock.patch("cloudinit.net.openvswitch_is_installed")
-class TestIsOpenVSwitchInternalInterface:
-    def test_false_if_ovs_not_installed(
-        self, m_openvswitch_is_installed, _m_get_ovs_internal_interfaces
-    ):
-        """Test that OVS' absence returns False."""
-        m_openvswitch_is_installed.return_value = False
-
-        assert not net.is_openvswitch_internal_interface("devname")
-
-    @pytest.mark.parametrize(
-        "detected_interfaces,devname,expected_return",
-        [
-            ([], "devname", False),
-            (["notdevname"], "devname", False),
-            (["devname"], "devname", True),
-            (["some", "other", "devices", "and", "ours"], "ours", True),
-        ],
-    )
-    def test_return_value_based_on_detected_interfaces(
-        self,
-        m_openvswitch_is_installed,
-        m_get_ovs_internal_interfaces,
-        detected_interfaces,
-        devname,
-        expected_return,
-    ):
-        """Test that the detected interfaces are used correctly."""
-        m_openvswitch_is_installed.return_value = True
-        m_get_ovs_internal_interfaces.return_value = detected_interfaces
-        assert expected_return == net.is_openvswitch_internal_interface(
-            devname
-        )
-
-
-class TestIsIpAddress:
-    """Tests for net.is_ip_address.
-
-    Instead of testing with values we rely on the ipaddress stdlib module to
-    handle all values correctly, so simply test that is_ip_address defers to
-    the ipaddress module correctly.
-    """
-
-    @pytest.mark.parametrize('ip_address_side_effect,expected_return', (
-        (ValueError, False),
-        (lambda _: ipaddress.IPv4Address('192.168.0.1'), True),
-        (lambda _: ipaddress.IPv6Address('2001:db8::'), True),
-    ))
-    def test_is_ip_address(self, ip_address_side_effect, expected_return):
-        with mock.patch('cloudinit.net.ipaddress.ip_address',
-                        side_effect=ip_address_side_effect) as m_ip_address:
-            ret = net.is_ip_address(mock.sentinel.ip_address_in)
-        assert expected_return == ret
-        expected_call = mock.call(mock.sentinel.ip_address_in)
-        assert [expected_call] == m_ip_address.call_args_list
-
-
-class TestIsIpv4Address:
-    """Tests for net.is_ipv4_address.
-
-    Instead of testing with values we rely on the ipaddress stdlib module to
-    handle all values correctly, so simply test that is_ipv4_address defers to
-    the ipaddress module correctly.
-    """
-
-    @pytest.mark.parametrize('ipv4address_mock,expected_return', (
-        (mock.Mock(side_effect=ValueError), False),
-        (mock.Mock(return_value=ipaddress.IPv4Address('192.168.0.1')), True),
-    ))
-    def test_is_ip_address(self, ipv4address_mock, expected_return):
-        with mock.patch('cloudinit.net.ipaddress.IPv4Address',
-                        ipv4address_mock) as m_ipv4address:
-            ret = net.is_ipv4_address(mock.sentinel.ip_address_in)
-        assert expected_return == ret
-        expected_call = mock.call(mock.sentinel.ip_address_in)
-        assert [expected_call] == m_ipv4address.call_args_list
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/net/tests/test_network_state.py b/cloudinit/net/tests/test_network_state.py
deleted file mode 100644
index 45e99171..00000000
--- a/cloudinit/net/tests/test_network_state.py
+++ /dev/null
@@ -1,164 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
- -from unittest import mock - -import pytest - -from cloudinit import safeyaml -from cloudinit.net import network_state -from cloudinit.tests.helpers import CiTestCase - -netstate_path = 'cloudinit.net.network_state' - - -_V1_CONFIG_NAMESERVERS = """\ -network: - version: 1 - config: - - type: nameserver - interface: {iface} - address: - - 192.168.1.1 - - 8.8.8.8 - search: - - spam.local - - type: nameserver - address: - - 192.168.1.0 - - 4.4.4.4 - search: - - eggs.local - - type: physical - name: eth0 - mac_address: '00:11:22:33:44:55' - - type: physical - name: eth1 - mac_address: '66:77:88:99:00:11' -""" - -V1_CONFIG_NAMESERVERS_VALID = _V1_CONFIG_NAMESERVERS.format(iface='eth1') -V1_CONFIG_NAMESERVERS_INVALID = _V1_CONFIG_NAMESERVERS.format(iface='eth90') - -V2_CONFIG_NAMESERVERS = """\ -network: - version: 2 - ethernets: - eth0: - match: - macaddress: '00:11:22:33:44:55' - nameservers: - search: [spam.local, eggs.local] - addresses: [8.8.8.8] - eth1: - match: - macaddress: '66:77:88:99:00:11' - set-name: "ens92" - nameservers: - search: [foo.local, bar.local] - addresses: [4.4.4.4] -""" - - -class TestNetworkStateParseConfig(CiTestCase): - - def setUp(self): - super(TestNetworkStateParseConfig, self).setUp() - nsi_path = netstate_path + '.NetworkStateInterpreter' - self.add_patch(nsi_path, 'm_nsi') - - def test_missing_version_returns_none(self): - ncfg = {} - with self.assertRaises(RuntimeError): - network_state.parse_net_config_data(ncfg) - - def test_unknown_versions_returns_none(self): - ncfg = {'version': 13.2} - with self.assertRaises(RuntimeError): - network_state.parse_net_config_data(ncfg) - - def test_version_2_passes_self_as_config(self): - ncfg = {'version': 2, 'otherconfig': {}, 'somemore': [1, 2, 3]} - network_state.parse_net_config_data(ncfg) - self.assertEqual([mock.call(version=2, config=ncfg)], - self.m_nsi.call_args_list) - - def test_valid_config_gets_network_state(self): - ncfg = {'version': 2, 'otherconfig': {}, 'somemore': [1, 2, 3]} - result = network_state.parse_net_config_data(ncfg) - self.assertNotEqual(None, result) - - def test_empty_v1_config_gets_network_state(self): - ncfg = {'version': 1, 'config': []} - result = network_state.parse_net_config_data(ncfg) - self.assertNotEqual(None, result) - - def test_empty_v2_config_gets_network_state(self): - ncfg = {'version': 2} - result = network_state.parse_net_config_data(ncfg) - self.assertNotEqual(None, result) - - -class TestNetworkStateParseConfigV2(CiTestCase): - - def test_version_2_ignores_renderer_key(self): - ncfg = {'version': 2, 'renderer': 'networkd', 'ethernets': {}} - nsi = network_state.NetworkStateInterpreter(version=ncfg['version'], - config=ncfg) - nsi.parse_config(skip_broken=False) - self.assertEqual(ncfg, nsi.as_dict()['config']) - - -class TestNetworkStateParseNameservers: - def _parse_network_state_from_config(self, config): - yaml = safeyaml.load(config) - return network_state.parse_net_config_data(yaml['network']) - - def test_v1_nameservers_valid(self): - config = self._parse_network_state_from_config( - V1_CONFIG_NAMESERVERS_VALID) - - # If an interface was specified, DNS shouldn't be in the global list - assert ['192.168.1.0', '4.4.4.4'] == sorted( - config.dns_nameservers) - assert ['eggs.local'] == config.dns_searchdomains - - # If an interface was specified, DNS should be part of the interface - for iface in config.iter_interfaces(): - if iface['name'] == 'eth1': - assert iface['dns']['addresses'] == ['192.168.1.1', '8.8.8.8'] - assert iface['dns']['search'] == ['spam.local'] - 
else: - assert 'dns' not in iface - - def test_v1_nameservers_invalid(self): - with pytest.raises(ValueError): - self._parse_network_state_from_config( - V1_CONFIG_NAMESERVERS_INVALID) - - def test_v2_nameservers(self): - config = self._parse_network_state_from_config(V2_CONFIG_NAMESERVERS) - - # Ensure DNS defined on interface exists on interface - for iface in config.iter_interfaces(): - if iface['name'] == 'eth0': - assert iface['dns'] == { - 'nameservers': ['8.8.8.8'], - 'search': ['spam.local', 'eggs.local'], - } - else: - assert iface['dns'] == { - 'nameservers': ['4.4.4.4'], - 'search': ['foo.local', 'bar.local'] - } - - # Ensure DNS defined on interface also exists globally (since there - # is no global DNS definitions in v2) - assert ['4.4.4.4', '8.8.8.8'] == sorted(config.dns_nameservers) - assert [ - 'bar.local', - 'eggs.local', - 'foo.local', - 'spam.local', - ] == sorted(config.dns_searchdomains) - -# vi: ts=4 expandtab diff --git a/cloudinit/net/tests/test_networkd.py b/cloudinit/net/tests/test_networkd.py deleted file mode 100644 index 8dc90b48..00000000 --- a/cloudinit/net/tests/test_networkd.py +++ /dev/null @@ -1,64 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit import safeyaml -from cloudinit.net import networkd, network_state - -V2_CONFIG_SET_NAME = """\ -network: - version: 2 - ethernets: - eth0: - match: - macaddress: '00:11:22:33:44:55' - nameservers: - search: [spam.local, eggs.local] - addresses: [8.8.8.8] - eth1: - match: - macaddress: '66:77:88:99:00:11' - set-name: "ens92" - nameservers: - search: [foo.local, bar.local] - addresses: [4.4.4.4] -""" - -V2_CONFIG_SET_NAME_RENDERED_ETH0 = """[Match] -MACAddress=00:11:22:33:44:55 -Name=eth0 - -[Network] -DHCP=no -DNS=8.8.8.8 -Domains=spam.local eggs.local - -""" - -V2_CONFIG_SET_NAME_RENDERED_ETH1 = """[Match] -MACAddress=66:77:88:99:00:11 -Name=ens92 - -[Network] -DHCP=no -DNS=4.4.4.4 -Domains=foo.local bar.local - -""" - - -class TestNetworkdRenderState: - def _parse_network_state_from_config(self, config): - yaml = safeyaml.load(config) - return network_state.parse_net_config_data(yaml["network"]) - - def test_networkd_render_with_set_name(self): - ns = self._parse_network_state_from_config(V2_CONFIG_SET_NAME) - renderer = networkd.Renderer() - rendered_content = renderer._render_content(ns) - - assert "eth0" in rendered_content - assert rendered_content["eth0"] == V2_CONFIG_SET_NAME_RENDERED_ETH0 - assert "ens92" in rendered_content - assert rendered_content["ens92"] == V2_CONFIG_SET_NAME_RENDERED_ETH1 - - -# vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/tests/test_netlink.py b/cloudinit/sources/helpers/tests/test_netlink.py deleted file mode 100644 index cafe3961..00000000 --- a/cloudinit/sources/helpers/tests/test_netlink.py +++ /dev/null @@ -1,480 +0,0 @@ -# Author: Tamilmani Manoharan -# -# This file is part of cloud-init. See LICENSE file for license information. 
- -from cloudinit.tests.helpers import CiTestCase, mock -import socket -import struct -import codecs -from cloudinit.sources.helpers.netlink import ( - NetlinkCreateSocketError, create_bound_netlink_socket, read_netlink_socket, - read_rta_oper_state, unpack_rta_attr, wait_for_media_disconnect_connect, - wait_for_nic_attach_event, wait_for_nic_detach_event, - OPER_DOWN, OPER_UP, OPER_DORMANT, OPER_LOWERLAYERDOWN, OPER_NOTPRESENT, - OPER_TESTING, OPER_UNKNOWN, RTATTR_START_OFFSET, RTM_NEWLINK, RTM_DELLINK, - RTM_SETLINK, RTM_GETLINK, MAX_SIZE) - - -def int_to_bytes(i): - '''convert integer to binary: eg: 1 to \x01''' - hex_value = '{0:x}'.format(i) - hex_value = '0' * (len(hex_value) % 2) + hex_value - return codecs.decode(hex_value, 'hex_codec') - - -class TestCreateBoundNetlinkSocket(CiTestCase): - - @mock.patch('cloudinit.sources.helpers.netlink.socket.socket') - def test_socket_error_on_create(self, m_socket): - '''create_bound_netlink_socket catches socket creation exception''' - - """NetlinkCreateSocketError is raised when socket creation errors.""" - m_socket.side_effect = socket.error("Fake socket failure") - with self.assertRaises(NetlinkCreateSocketError) as ctx_mgr: - create_bound_netlink_socket() - self.assertEqual( - 'Exception during netlink socket create: Fake socket failure', - str(ctx_mgr.exception)) - - -class TestReadNetlinkSocket(CiTestCase): - - @mock.patch('cloudinit.sources.helpers.netlink.socket.socket') - @mock.patch('cloudinit.sources.helpers.netlink.select.select') - def test_read_netlink_socket(self, m_select, m_socket): - '''read_netlink_socket able to receive data''' - data = 'netlinktest' - m_select.return_value = [m_socket], None, None - m_socket.recv.return_value = data - recv_data = read_netlink_socket(m_socket, 2) - m_select.assert_called_with([m_socket], [], [], 2) - m_socket.recv.assert_called_with(MAX_SIZE) - self.assertIsNotNone(recv_data) - self.assertEqual(recv_data, data) - - @mock.patch('cloudinit.sources.helpers.netlink.socket.socket') - @mock.patch('cloudinit.sources.helpers.netlink.select.select') - def test_netlink_read_timeout(self, m_select, m_socket): - '''read_netlink_socket should timeout if nothing to read''' - m_select.return_value = [], None, None - data = read_netlink_socket(m_socket, 1) - m_select.assert_called_with([m_socket], [], [], 1) - self.assertEqual(m_socket.recv.call_count, 0) - self.assertIsNone(data) - - def test_read_invalid_socket(self): - '''read_netlink_socket raises assert error if socket is invalid''' - socket = None - with self.assertRaises(AssertionError) as context: - read_netlink_socket(socket, 1) - self.assertTrue('netlink socket is none' in str(context.exception)) - - -class TestParseNetlinkMessage(CiTestCase): - - def test_read_rta_oper_state(self): - '''read_rta_oper_state could parse netlink message and extract data''' - ifname = "eth0" - bytes = ifname.encode("utf-8") - buf = bytearray(48) - struct.pack_into("HH4sHHc", buf, RTATTR_START_OFFSET, 8, 3, bytes, 5, - 16, int_to_bytes(OPER_DOWN)) - interface_state = read_rta_oper_state(buf) - self.assertEqual(interface_state.ifname, ifname) - self.assertEqual(interface_state.operstate, OPER_DOWN) - - def test_read_none_data(self): - '''read_rta_oper_state raises assert error if data is none''' - data = None - with self.assertRaises(AssertionError) as context: - read_rta_oper_state(data) - self.assertEqual('data is none', str(context.exception)) - - def test_read_invalid_rta_operstate_none(self): - '''read_rta_oper_state returns none if operstate is none''' - ifname 
= "eth0" - buf = bytearray(40) - bytes = ifname.encode("utf-8") - struct.pack_into("HH4s", buf, RTATTR_START_OFFSET, 8, 3, bytes) - interface_state = read_rta_oper_state(buf) - self.assertIsNone(interface_state) - - def test_read_invalid_rta_ifname_none(self): - '''read_rta_oper_state returns none if ifname is none''' - buf = bytearray(40) - struct.pack_into("HHc", buf, RTATTR_START_OFFSET, 5, 16, - int_to_bytes(OPER_DOWN)) - interface_state = read_rta_oper_state(buf) - self.assertIsNone(interface_state) - - def test_read_invalid_data_len(self): - '''raise assert error if data size is smaller than required size''' - buf = bytearray(32) - with self.assertRaises(AssertionError) as context: - read_rta_oper_state(buf) - self.assertTrue('length of data is smaller than RTATTR_START_OFFSET' in - str(context.exception)) - - def test_unpack_rta_attr_none_data(self): - '''unpack_rta_attr raises assert error if data is none''' - data = None - with self.assertRaises(AssertionError) as context: - unpack_rta_attr(data, RTATTR_START_OFFSET) - self.assertTrue('data is none' in str(context.exception)) - - def test_unpack_rta_attr_invalid_offset(self): - '''unpack_rta_attr raises assert error if offset is invalid''' - data = bytearray(48) - with self.assertRaises(AssertionError) as context: - unpack_rta_attr(data, "offset") - self.assertTrue('offset is not integer' in str(context.exception)) - with self.assertRaises(AssertionError) as context: - unpack_rta_attr(data, 31) - self.assertTrue('rta offset is less than expected length' in - str(context.exception)) - - -@mock.patch('cloudinit.sources.helpers.netlink.socket.socket') -@mock.patch('cloudinit.sources.helpers.netlink.read_netlink_socket') -class TestNicAttachDetach(CiTestCase): - with_logs = True - - def _media_switch_data(self, ifname, msg_type, operstate): - '''construct netlink data with specified fields''' - if ifname and operstate is not None: - data = bytearray(48) - bytes = ifname.encode("utf-8") - struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3, - bytes, 5, 16, int_to_bytes(operstate)) - elif ifname: - data = bytearray(40) - bytes = ifname.encode("utf-8") - struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes) - elif operstate: - data = bytearray(40) - struct.pack_into("HHc", data, RTATTR_START_OFFSET, 5, 16, - int_to_bytes(operstate)) - struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0) - return data - - def test_nic_attached_oper_down(self, m_read_netlink_socket, m_socket): - '''Test for a new nic attached''' - ifname = "eth0" - data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) - m_read_netlink_socket.side_effect = [data_op_down] - ifread = wait_for_nic_attach_event(m_socket, []) - self.assertEqual(m_read_netlink_socket.call_count, 1) - self.assertEqual(ifname, ifread) - - def test_nic_attached_oper_up(self, m_read_netlink_socket, m_socket): - '''Test for a new nic attached''' - ifname = "eth0" - data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) - m_read_netlink_socket.side_effect = [data_op_up] - ifread = wait_for_nic_attach_event(m_socket, []) - self.assertEqual(m_read_netlink_socket.call_count, 1) - self.assertEqual(ifname, ifread) - - def test_nic_attach_ignore_existing(self, m_read_netlink_socket, m_socket): - '''Test that we read only the interfaces we are interested in.''' - data_eth0 = self._media_switch_data("eth0", RTM_NEWLINK, OPER_DOWN) - data_eth1 = self._media_switch_data("eth1", RTM_NEWLINK, OPER_DOWN) - m_read_netlink_socket.side_effect = [data_eth0, 
data_eth1] - ifread = wait_for_nic_attach_event(m_socket, ["eth0"]) - self.assertEqual(m_read_netlink_socket.call_count, 2) - self.assertEqual("eth1", ifread) - - def test_nic_attach_read_first(self, m_read_netlink_socket, m_socket): - '''Test that we read only the interfaces we are interested in.''' - data_eth0 = self._media_switch_data("eth0", RTM_NEWLINK, OPER_DOWN) - data_eth1 = self._media_switch_data("eth1", RTM_NEWLINK, OPER_DOWN) - m_read_netlink_socket.side_effect = [data_eth0, data_eth1] - ifread = wait_for_nic_attach_event(m_socket, ["eth1"]) - self.assertEqual(m_read_netlink_socket.call_count, 1) - self.assertEqual("eth0", ifread) - - def test_nic_detached(self, m_read_netlink_socket, m_socket): - '''Test for an existing nic detached''' - ifname = "eth0" - data_op_down = self._media_switch_data(ifname, RTM_DELLINK, OPER_DOWN) - m_read_netlink_socket.side_effect = [data_op_down] - ifread = wait_for_nic_detach_event(m_socket) - self.assertEqual(m_read_netlink_socket.call_count, 1) - self.assertEqual(ifname, ifread) - - -@mock.patch('cloudinit.sources.helpers.netlink.socket.socket') -@mock.patch('cloudinit.sources.helpers.netlink.read_netlink_socket') -class TestWaitForMediaDisconnectConnect(CiTestCase): - with_logs = True - - def _media_switch_data(self, ifname, msg_type, operstate): - '''construct netlink data with specified fields''' - if ifname and operstate is not None: - data = bytearray(48) - bytes = ifname.encode("utf-8") - struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3, - bytes, 5, 16, int_to_bytes(operstate)) - elif ifname: - data = bytearray(40) - bytes = ifname.encode("utf-8") - struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes) - elif operstate: - data = bytearray(40) - struct.pack_into("HHc", data, RTATTR_START_OFFSET, 5, 16, - int_to_bytes(operstate)) - struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0) - return data - - def test_media_down_up_scenario(self, m_read_netlink_socket, - m_socket): - '''Test for media down up sequence for required interface name''' - ifname = "eth0" - # construct data for Oper State down - data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) - # construct data for Oper State up - data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) - m_read_netlink_socket.side_effect = [data_op_down, data_op_up] - wait_for_media_disconnect_connect(m_socket, ifname) - self.assertEqual(m_read_netlink_socket.call_count, 2) - - def test_wait_for_media_switch_diff_interface(self, m_read_netlink_socket, - m_socket): - '''wait_for_media_disconnect_connect ignores unexpected interfaces. - - The first two messages are for other interfaces and last two are for - expected interface. 
So the function exits only after receiving the last - 2 messages and therefore the call count for m_read_netlink_socket - has to be 4 - ''' - other_ifname = "eth1" - expected_ifname = "eth0" - data_op_down_eth1 = self._media_switch_data( - other_ifname, RTM_NEWLINK, OPER_DOWN - ) - data_op_up_eth1 = self._media_switch_data( - other_ifname, RTM_NEWLINK, OPER_UP - ) - data_op_down_eth0 = self._media_switch_data( - expected_ifname, RTM_NEWLINK, OPER_DOWN - ) - data_op_up_eth0 = self._media_switch_data( - expected_ifname, RTM_NEWLINK, OPER_UP) - m_read_netlink_socket.side_effect = [ - data_op_down_eth1, - data_op_up_eth1, - data_op_down_eth0, - data_op_up_eth0 - ] - wait_for_media_disconnect_connect(m_socket, expected_ifname) - self.assertIn('Ignored netlink event on interface %s' % other_ifname, - self.logs.getvalue()) - self.assertEqual(m_read_netlink_socket.call_count, 4) - - def test_invalid_msgtype_getlink(self, m_read_netlink_socket, m_socket): - '''wait_for_media_disconnect_connect ignores GETLINK events. - - The first two messages are for oper down and up for RTM_GETLINK type - which the netlink module will ignore. The last 2 messages are RTM_NEWLINK - with oper state down and up messages. Therefore the call count for - m_read_netlink_socket has to be 4, ignoring the first 2 messages - of RTM_GETLINK - ''' - ifname = "eth0" - data_getlink_down = self._media_switch_data( - ifname, RTM_GETLINK, OPER_DOWN - ) - data_getlink_up = self._media_switch_data( - ifname, RTM_GETLINK, OPER_UP - ) - data_newlink_down = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_DOWN - ) - data_newlink_up = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_UP - ) - m_read_netlink_socket.side_effect = [ - data_getlink_down, - data_getlink_up, - data_newlink_down, - data_newlink_up - ] - wait_for_media_disconnect_connect(m_socket, ifname) - self.assertEqual(m_read_netlink_socket.call_count, 4) - - def test_invalid_msgtype_setlink(self, m_read_netlink_socket, m_socket): - '''wait_for_media_disconnect_connect ignores SETLINK events. - - The first two messages are for oper down and up for RTM_SETLINK type - which it will ignore. 3rd and 4th messages are RTM_NEWLINK with down - and up messages. This function should exit after the 4th message since it - sees the down->up scenario. 
So the call count for m_read_netlink_socket - has to be 4, ignoring the first 2 messages of RTM_SETLINK and - last 2 messages of RTM_NEWLINK - ''' - ifname = "eth0" - data_setlink_down = self._media_switch_data( - ifname, RTM_SETLINK, OPER_DOWN - ) - data_setlink_up = self._media_switch_data( - ifname, RTM_SETLINK, OPER_UP - ) - data_newlink_down = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_DOWN - ) - data_newlink_up = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_UP - ) - m_read_netlink_socket.side_effect = [ - data_setlink_down, - data_setlink_up, - data_newlink_down, - data_newlink_up, - data_newlink_down, - data_newlink_up - ] - wait_for_media_disconnect_connect(m_socket, ifname) - self.assertEqual(m_read_netlink_socket.call_count, 4) - - def test_netlink_invalid_switch_scenario(self, m_read_netlink_socket, - m_socket): - '''returns only if it receives UP event after a DOWN event''' - ifname = "eth0" - data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) - data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) - data_op_dormant = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_DORMANT - ) - data_op_notpresent = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_NOTPRESENT - ) - data_op_lowerdown = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_LOWERLAYERDOWN - ) - data_op_testing = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_TESTING - ) - data_op_unknown = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_UNKNOWN - ) - m_read_netlink_socket.side_effect = [ - data_op_up, data_op_up, - data_op_dormant, data_op_up, - data_op_notpresent, data_op_up, - data_op_lowerdown, data_op_up, - data_op_testing, data_op_up, - data_op_unknown, data_op_up, - data_op_down, data_op_up - ] - wait_for_media_disconnect_connect(m_socket, ifname) - self.assertEqual(m_read_netlink_socket.call_count, 14) - - def test_netlink_valid_inbetween_transitions(self, m_read_netlink_socket, - m_socket): - '''wait_for_media_disconnect_connect handles in-between transitions''' - ifname = "eth0" - data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) - data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) - data_op_dormant = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_DORMANT) - data_op_unknown = self._media_switch_data( - ifname, RTM_NEWLINK, OPER_UNKNOWN) - m_read_netlink_socket.side_effect = [ - data_op_down, data_op_dormant, - data_op_unknown, data_op_up - ] - wait_for_media_disconnect_connect(m_socket, ifname) - self.assertEqual(m_read_netlink_socket.call_count, 4) - - def test_netlink_invalid_operstate(self, m_read_netlink_socket, m_socket): - '''wait_for_media_disconnect_connect should handle invalid operstates. - - The function should not fail, and should still return, even if it receives - invalid operstates. It should always wait for a down-up sequence. 
- ''' - ifname = "eth0" - data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) - data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) - data_op_invalid = self._media_switch_data(ifname, RTM_NEWLINK, 7) - m_read_netlink_socket.side_effect = [ - data_op_invalid, data_op_up, - data_op_down, data_op_invalid, - data_op_up - ] - wait_for_media_disconnect_connect(m_socket, ifname) - self.assertEqual(m_read_netlink_socket.call_count, 5) - - def test_wait_invalid_socket(self, m_read_netlink_socket, m_socket): - '''wait_for_media_disconnect_connect handle none netlink socket.''' - socket = None - ifname = "eth0" - with self.assertRaises(AssertionError) as context: - wait_for_media_disconnect_connect(socket, ifname) - self.assertTrue('netlink socket is none' in str(context.exception)) - - def test_wait_invalid_ifname(self, m_read_netlink_socket, m_socket): - '''wait_for_media_disconnect_connect handle none interface name''' - ifname = None - with self.assertRaises(AssertionError) as context: - wait_for_media_disconnect_connect(m_socket, ifname) - self.assertTrue('interface name is none' in str(context.exception)) - ifname = "" - with self.assertRaises(AssertionError) as context: - wait_for_media_disconnect_connect(m_socket, ifname) - self.assertTrue('interface name cannot be empty' in - str(context.exception)) - - def test_wait_invalid_rta_attr(self, m_read_netlink_socket, m_socket): - ''' wait_for_media_disconnect_connect handles invalid rta data''' - ifname = "eth0" - data_invalid1 = self._media_switch_data(None, RTM_NEWLINK, OPER_DOWN) - data_invalid2 = self._media_switch_data(ifname, RTM_NEWLINK, None) - data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) - data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) - m_read_netlink_socket.side_effect = [ - data_invalid1, data_invalid2, data_op_down, data_op_up - ] - wait_for_media_disconnect_connect(m_socket, ifname) - self.assertEqual(m_read_netlink_socket.call_count, 4) - - def test_read_multiple_netlink_msgs(self, m_read_netlink_socket, m_socket): - '''Read multiple messages in single receive call''' - ifname = "eth0" - bytes = ifname.encode("utf-8") - data = bytearray(96) - struct.pack_into("=LHHLL", data, 0, 48, RTM_NEWLINK, 0, 0, 0) - struct.pack_into( - "HH4sHHc", data, RTATTR_START_OFFSET, 8, 3, - bytes, 5, 16, int_to_bytes(OPER_DOWN) - ) - struct.pack_into("=LHHLL", data, 48, 48, RTM_NEWLINK, 0, 0, 0) - struct.pack_into( - "HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8, - 3, bytes, 5, 16, int_to_bytes(OPER_UP) - ) - m_read_netlink_socket.return_value = data - wait_for_media_disconnect_connect(m_socket, ifname) - self.assertEqual(m_read_netlink_socket.call_count, 1) - - def test_read_partial_netlink_msgs(self, m_read_netlink_socket, m_socket): - '''Read partial messages in receive call''' - ifname = "eth0" - bytes = ifname.encode("utf-8") - data1 = bytearray(112) - data2 = bytearray(32) - struct.pack_into("=LHHLL", data1, 0, 48, RTM_NEWLINK, 0, 0, 0) - struct.pack_into( - "HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3, - bytes, 5, 16, int_to_bytes(OPER_DOWN) - ) - struct.pack_into("=LHHLL", data1, 48, 48, RTM_NEWLINK, 0, 0, 0) - struct.pack_into( - "HH4sHHc", data1, 80, 8, 3, bytes, 5, 16, int_to_bytes(OPER_DOWN) - ) - struct.pack_into("=LHHLL", data1, 96, 48, RTM_NEWLINK, 0, 0, 0) - struct.pack_into( - "HH4sHHc", data2, 16, 8, 3, bytes, 5, 16, int_to_bytes(OPER_UP) - ) - m_read_netlink_socket.side_effect = [data1, data2] - wait_for_media_disconnect_connect(m_socket, ifname) - 
self.assertEqual(m_read_netlink_socket.call_count, 2) diff --git a/cloudinit/sources/helpers/tests/test_openstack.py b/cloudinit/sources/helpers/tests/test_openstack.py deleted file mode 100644 index 95fb9743..00000000 --- a/cloudinit/sources/helpers/tests/test_openstack.py +++ /dev/null @@ -1,49 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. -# ./cloudinit/sources/helpers/tests/test_openstack.py -from unittest import mock - -from cloudinit.sources.helpers import openstack -from cloudinit.tests import helpers as test_helpers - - -@mock.patch( - "cloudinit.net.is_openvswitch_internal_interface", - mock.Mock(return_value=False) -) -class TestConvertNetJson(test_helpers.CiTestCase): - - def test_phy_types(self): - """Verify the different known physical types are handled.""" - # network_data.json example from - # https://docs.openstack.org/nova/latest/user/metadata.html - mac0 = "fa:16:3e:9c:bf:3d" - net_json = { - "links": [ - {"ethernet_mac_address": mac0, "id": "tapcd9f6d46-4a", - "mtu": None, "type": "bridge", - "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc"} - ], - "networks": [ - {"id": "network0", "link": "tapcd9f6d46-4a", - "network_id": "99e88329-f20d-4741-9593-25bf07847b16", - "type": "ipv4_dhcp"} - ], - "services": [{"address": "8.8.8.8", "type": "dns"}] - } - macs = {mac0: 'eth0'} - - expected = { - 'version': 1, - 'config': [ - {'mac_address': 'fa:16:3e:9c:bf:3d', - 'mtu': None, 'name': 'eth0', - 'subnets': [{'type': 'dhcp4'}], - 'type': 'physical'}, - {'address': '8.8.8.8', 'type': 'nameserver'}]} - - for t in openstack.KNOWN_PHYSICAL_TYPES: - net_json["links"][0]["type"] = t - self.assertEqual( - expected, - openstack.convert_net_json(network_json=net_json, - known_macs=macs)) diff --git a/cloudinit/sources/tests/__init__.py b/cloudinit/sources/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py deleted file mode 100644 index ae09cb17..00000000 --- a/cloudinit/sources/tests/test_init.py +++ /dev/null @@ -1,771 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
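The DataSource tests removed below all exercise one contract: a subclass implements _get_data(), populates metadata and userdata_raw, and returns a bool indicating whether the source was found. A toy subclass makes that shape explicit; DataSourceExample and its hard-coded values are purely illustrative, mirroring the DataSourceTestSubclassNet helper that follows:

    from cloudinit.sources import DataSource

    class DataSourceExample(DataSource):
        dsname = 'Example'  # keys the ds_cfg lookup in sys_cfg

        def _get_cloud_name(self):
            return 'examplecloud'

        def _get_data(self):
            # Real datasources crawl a metadata service here; hard-coded
            # values keep the sketch self-contained.
            self.metadata = {'instance-id': 'iid-example',
                             'local-hostname': 'example-host'}
            self.userdata_raw = '#cloud-config\n{}\n'
            return True  # False would mean "datasource not present"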
- -import copy -import inspect -import os -import stat - -from cloudinit.event import EventScope, EventType -from cloudinit.helpers import Paths -from cloudinit import importer -from cloudinit.sources import ( - EXPERIMENTAL_TEXT, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, - METADATA_UNKNOWN, REDACT_SENSITIVE_VALUE, UNSET, DataSource, - canonical_cloud_id, redact_sensitive_keys) -from cloudinit.tests.helpers import CiTestCase, mock -from cloudinit.user_data import UserDataProcessor -from cloudinit import util - - -class DataSourceTestSubclassNet(DataSource): - - dsname = 'MyTestSubclass' - url_max_wait = 55 - - def __init__(self, sys_cfg, distro, paths, custom_metadata=None, - custom_userdata=None, get_data_retval=True): - super(DataSourceTestSubclassNet, self).__init__( - sys_cfg, distro, paths) - self._custom_userdata = custom_userdata - self._custom_metadata = custom_metadata - self._get_data_retval = get_data_retval - - def _get_cloud_name(self): - return 'SubclassCloudName' - - def _get_data(self): - if self._custom_metadata: - self.metadata = self._custom_metadata - else: - self.metadata = {'availability_zone': 'myaz', - 'local-hostname': 'test-subclass-hostname', - 'region': 'myregion'} - if self._custom_userdata: - self.userdata_raw = self._custom_userdata - else: - self.userdata_raw = 'userdata_raw' - self.vendordata_raw = 'vendordata_raw' - return self._get_data_retval - - -class InvalidDataSourceTestSubclassNet(DataSource): - pass - - -class TestDataSource(CiTestCase): - - with_logs = True - maxDiff = None - - def setUp(self): - super(TestDataSource, self).setUp() - self.sys_cfg = {'datasource': {'_undef': {'key1': False}}} - self.distro = 'distrotest' # generally should be a Distro object - self.paths = Paths({}) - self.datasource = DataSource(self.sys_cfg, self.distro, self.paths) - - def test_datasource_init(self): - """DataSource initializes metadata attributes, ds_cfg and ud_proc.""" - self.assertEqual(self.paths, self.datasource.paths) - self.assertEqual(self.sys_cfg, self.datasource.sys_cfg) - self.assertEqual(self.distro, self.datasource.distro) - self.assertIsNone(self.datasource.userdata) - self.assertEqual({}, self.datasource.metadata) - self.assertIsNone(self.datasource.userdata_raw) - self.assertIsNone(self.datasource.vendordata) - self.assertIsNone(self.datasource.vendordata_raw) - self.assertEqual({'key1': False}, self.datasource.ds_cfg) - self.assertIsInstance(self.datasource.ud_proc, UserDataProcessor) - - def test_datasource_init_gets_ds_cfg_using_dsname(self): - """Init uses DataSource.dsname for sourcing ds_cfg.""" - sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} - distro = 'distrotest' # generally should be a Distro object - datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths) - self.assertEqual({'key2': False}, datasource.ds_cfg) - - def test_str_is_classname(self): - """The string representation of the datasource is the classname.""" - self.assertEqual('DataSource', str(self.datasource)) - self.assertEqual( - 'DataSourceTestSubclassNet', - str(DataSourceTestSubclassNet('', '', self.paths))) - - def test_datasource_get_url_params_defaults(self): - """get_url_params default url config settings for the datasource.""" - params = self.datasource.get_url_params() - self.assertEqual(params.max_wait_seconds, self.datasource.url_max_wait) - self.assertEqual(params.timeout_seconds, self.datasource.url_timeout) - self.assertEqual(params.num_retries, self.datasource.url_retries) - self.assertEqual(params.sec_between_retries, - 
self.datasource.url_sec_between_retries) - - def test_datasource_get_url_params_subclassed(self): - """Subclasses can override get_url_params defaults.""" - sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} - distro = 'distrotest' # generally should be a Distro object - datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths) - expected = (datasource.url_max_wait, datasource.url_timeout, - datasource.url_retries, datasource.url_sec_between_retries) - url_params = datasource.get_url_params() - self.assertNotEqual(self.datasource.get_url_params(), url_params) - self.assertEqual(expected, url_params) - - def test_datasource_get_url_params_ds_config_override(self): - """Datasource configuration options can override url param defaults.""" - sys_cfg = { - 'datasource': { - 'MyTestSubclass': { - 'max_wait': '1', 'timeout': '2', - 'retries': '3', 'sec_between_retries': 4 - }}} - datasource = DataSourceTestSubclassNet( - sys_cfg, self.distro, self.paths) - expected = (1, 2, 3, 4) - url_params = datasource.get_url_params() - self.assertNotEqual( - (datasource.url_max_wait, datasource.url_timeout, - datasource.url_retries, datasource.url_sec_between_retries), - url_params) - self.assertEqual(expected, url_params) - - def test_datasource_get_url_params_is_zero_or_greater(self): - """get_url_params ignores timeouts with a value below 0.""" - # Set an override that is below 0 which gets ignored. - sys_cfg = {'datasource': {'_undef': {'timeout': '-1'}}} - datasource = DataSource(sys_cfg, self.distro, self.paths) - (_max_wait, timeout, _retries, - _sec_between_retries) = datasource.get_url_params() - self.assertEqual(0, timeout) - - def test_datasource_get_url_uses_defaults_on_errors(self): - """On invalid system config values for url_params defaults are used.""" - # All invalid values should be logged - sys_cfg = {'datasource': { - '_undef': { - 'max_wait': 'nope', 'timeout': 'bug', 'retries': 'nonint'}}} - datasource = DataSource(sys_cfg, self.distro, self.paths) - url_params = datasource.get_url_params() - expected = (datasource.url_max_wait, datasource.url_timeout, - datasource.url_retries, datasource.url_sec_between_retries) - self.assertEqual(expected, url_params) - logs = self.logs.getvalue() - expected_logs = [ - "Config max_wait 'nope' is not an int, using default '-1'", - "Config timeout 'bug' is not an int, using default '10'", - "Config retries 'nonint' is not an int, using default '5'", - ] - for log in expected_logs: - self.assertIn(log, logs) - - @mock.patch('cloudinit.sources.net.find_fallback_nic') - def test_fallback_interface_is_discovered(self, m_get_fallback_nic): - """The fallback_interface is discovered via find_fallback_nic.""" - m_get_fallback_nic.return_value = 'nic9' - self.assertEqual('nic9', self.datasource.fallback_interface) - - @mock.patch('cloudinit.sources.net.find_fallback_nic') - def test_fallback_interface_logs_undiscovered(self, m_get_fallback_nic): - """Log a warning when fallback_interface can not discover the nic.""" - self.datasource._cloud_name = 'MySupahCloud' - m_get_fallback_nic.return_value = None # Couldn't discover nic - self.assertIsNone(self.datasource.fallback_interface) - self.assertEqual( - 'WARNING: Did not find a fallback interface on MySupahCloud.\n', - self.logs.getvalue()) - - @mock.patch('cloudinit.sources.net.find_fallback_nic') - def test_wb_fallback_interface_is_cached(self, m_get_fallback_nic): - """The fallback_interface is cached and won't be rediscovered.""" - self.datasource._fallback_interface = 'nic10' - 
self.assertEqual('nic10', self.datasource.fallback_interface) - m_get_fallback_nic.assert_not_called() - - def test__get_data_unimplemented(self): - """Raise an error when _get_data is not implemented.""" - with self.assertRaises(NotImplementedError) as context_manager: - self.datasource.get_data() - self.assertIn( - 'Subclasses of DataSource must implement _get_data', - str(context_manager.exception)) - datasource2 = InvalidDataSourceTestSubclassNet( - self.sys_cfg, self.distro, self.paths) - with self.assertRaises(NotImplementedError) as context_manager: - datasource2.get_data() - self.assertIn( - 'Subclasses of DataSource must implement _get_data', - str(context_manager.exception)) - - def test_get_data_calls_subclass__get_data(self): - """Datasource.get_data uses the subclass' version of _get_data.""" - tmp = self.tmp_dir() - datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp})) - self.assertTrue(datasource.get_data()) - self.assertEqual( - {'availability_zone': 'myaz', - 'local-hostname': 'test-subclass-hostname', - 'region': 'myregion'}, - datasource.metadata) - self.assertEqual('userdata_raw', datasource.userdata_raw) - self.assertEqual('vendordata_raw', datasource.vendordata_raw) - - def test_get_hostname_strips_local_hostname_without_domain(self): - """Datasource.get_hostname strips metadata local-hostname of domain.""" - tmp = self.tmp_dir() - datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp})) - self.assertTrue(datasource.get_data()) - self.assertEqual( - 'test-subclass-hostname', datasource.metadata['local-hostname']) - self.assertEqual('test-subclass-hostname', datasource.get_hostname()) - datasource.metadata['local-hostname'] = 'hostname.my.domain.com' - self.assertEqual('hostname', datasource.get_hostname()) - - def test_get_hostname_with_fqdn_returns_local_hostname_with_domain(self): - """Datasource.get_hostname with fqdn set gets qualified hostname.""" - tmp = self.tmp_dir() - datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp})) - self.assertTrue(datasource.get_data()) - datasource.metadata['local-hostname'] = 'hostname.my.domain.com' - self.assertEqual( - 'hostname.my.domain.com', datasource.get_hostname(fqdn=True)) - - def test_get_hostname_without_metadata_uses_system_hostname(self): - """Datasource.get_hostname runs util.get_hostname when no metadata.""" - tmp = self.tmp_dir() - datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp})) - self.assertEqual({}, datasource.metadata) - mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' - with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: - with mock.patch(mock_fqdn) as m_fqdn: - m_gethost.return_value = 'systemhostname.domain.com' - m_fqdn.return_value = None # No matching fqdn in /etc/hosts - self.assertEqual('systemhostname', datasource.get_hostname()) - self.assertEqual( - 'systemhostname.domain.com', - datasource.get_hostname(fqdn=True)) - - def test_get_hostname_without_metadata_returns_none(self): - """Datasource.get_hostname returns None when metadata_only and no MD.""" - tmp = self.tmp_dir() - datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp})) - self.assertEqual({}, datasource.metadata) - mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' - with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: - with mock.patch(mock_fqdn) as m_fqdn: - 
self.assertIsNone(datasource.get_hostname(metadata_only=True)) - self.assertIsNone( - datasource.get_hostname(fqdn=True, metadata_only=True)) - self.assertEqual([], m_gethost.call_args_list) - self.assertEqual([], m_fqdn.call_args_list) - - def test_get_hostname_without_metadata_prefers_etc_hosts(self): - """Datasource.gethostname prefers /etc/hosts to util.get_hostname.""" - tmp = self.tmp_dir() - datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp})) - self.assertEqual({}, datasource.metadata) - mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' - with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: - with mock.patch(mock_fqdn) as m_fqdn: - m_gethost.return_value = 'systemhostname.domain.com' - m_fqdn.return_value = 'fqdnhostname.domain.com' - self.assertEqual('fqdnhostname', datasource.get_hostname()) - self.assertEqual('fqdnhostname.domain.com', - datasource.get_hostname(fqdn=True)) - - def test_get_data_does_not_write_instance_data_on_failure(self): - """get_data does not write INSTANCE_JSON_FILE on get_data False.""" - tmp = self.tmp_dir() - datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp}), - get_data_retval=False) - self.assertFalse(datasource.get_data()) - json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) - self.assertFalse( - os.path.exists(json_file), 'Found unexpected file %s' % json_file) - - def test_get_data_writes_json_instance_data_on_success(self): - """get_data writes INSTANCE_JSON_FILE to run_dir as world readable.""" - tmp = self.tmp_dir() - datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp})) - sys_info = { - "python": "3.7", - "platform": - "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal", - "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah", - "x86_64"], - "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]} - with mock.patch("cloudinit.util.system_info", return_value=sys_info): - datasource.get_data() - json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) - content = util.load_file(json_file) - expected = { - 'base64_encoded_keys': [], - 'merged_cfg': REDACT_SENSITIVE_VALUE, - 'sensitive_keys': ['merged_cfg'], - 'sys_info': sys_info, - 'v1': { - '_beta_keys': ['subplatform'], - 'availability-zone': 'myaz', - 'availability_zone': 'myaz', - 'cloud-name': 'subclasscloudname', - 'cloud_name': 'subclasscloudname', - 'distro': 'ubuntu', - 'distro_release': 'focal', - 'distro_version': '20.04', - 'instance-id': 'iid-datasource', - 'instance_id': 'iid-datasource', - 'local-hostname': 'test-subclass-hostname', - 'local_hostname': 'test-subclass-hostname', - 'kernel_release': '5.4.0-24-generic', - 'machine': 'x86_64', - 'platform': 'mytestsubclass', - 'public_ssh_keys': [], - 'python_version': '3.7', - 'region': 'myregion', - 'system_platform': - 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal', - 'subplatform': 'unknown', - 'variant': 'ubuntu'}, - 'ds': { - - '_doc': EXPERIMENTAL_TEXT, - 'meta_data': {'availability_zone': 'myaz', - 'local-hostname': 'test-subclass-hostname', - 'region': 'myregion'}}} - self.assertEqual(expected, util.load_json(content)) - file_stat = os.stat(json_file) - self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode)) - self.assertEqual(expected, util.load_json(content)) - - def test_get_data_writes_redacted_public_json_instance_data(self): - """get_data writes redacted content to public INSTANCE_JSON_FILE.""" - tmp = self.tmp_dir() - datasource = DataSourceTestSubclassNet( - 
self.sys_cfg, self.distro, Paths({'run_dir': tmp}), - custom_metadata={ - 'availability_zone': 'myaz', - 'local-hostname': 'test-subclass-hostname', - 'region': 'myregion', - 'some': {'security-credentials': { - 'cred1': 'sekret', 'cred2': 'othersekret'}}}) - self.assertCountEqual( - ('merged_cfg', 'security-credentials',), - datasource.sensitive_metadata_keys) - sys_info = { - "python": "3.7", - "platform": - "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal", - "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah", - "x86_64"], - "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]} - with mock.patch("cloudinit.util.system_info", return_value=sys_info): - datasource.get_data() - json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) - redacted = util.load_json(util.load_file(json_file)) - expected = { - 'base64_encoded_keys': [], - 'merged_cfg': REDACT_SENSITIVE_VALUE, - 'sensitive_keys': [ - 'ds/meta_data/some/security-credentials', 'merged_cfg'], - 'sys_info': sys_info, - 'v1': { - '_beta_keys': ['subplatform'], - 'availability-zone': 'myaz', - 'availability_zone': 'myaz', - 'cloud-name': 'subclasscloudname', - 'cloud_name': 'subclasscloudname', - 'distro': 'ubuntu', - 'distro_release': 'focal', - 'distro_version': '20.04', - 'instance-id': 'iid-datasource', - 'instance_id': 'iid-datasource', - 'local-hostname': 'test-subclass-hostname', - 'local_hostname': 'test-subclass-hostname', - 'kernel_release': '5.4.0-24-generic', - 'machine': 'x86_64', - 'platform': 'mytestsubclass', - 'public_ssh_keys': [], - 'python_version': '3.7', - 'region': 'myregion', - 'system_platform': - 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal', - 'subplatform': 'unknown', - 'variant': 'ubuntu'}, - 'ds': { - '_doc': EXPERIMENTAL_TEXT, - 'meta_data': { - 'availability_zone': 'myaz', - 'local-hostname': 'test-subclass-hostname', - 'region': 'myregion', - 'some': {'security-credentials': REDACT_SENSITIVE_VALUE}}} - } - self.assertCountEqual(expected, redacted) - file_stat = os.stat(json_file) - self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode)) - - def test_get_data_writes_json_instance_data_sensitive(self): - """ - get_data writes unmodified data to sensitive file as root-readonly. 
- """ - tmp = self.tmp_dir() - datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp}), - custom_metadata={ - 'availability_zone': 'myaz', - 'local-hostname': 'test-subclass-hostname', - 'region': 'myregion', - 'some': {'security-credentials': { - 'cred1': 'sekret', 'cred2': 'othersekret'}}}) - sys_info = { - "python": "3.7", - "platform": - "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal", - "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah", - "x86_64"], - "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]} - - self.assertCountEqual( - ('merged_cfg', 'security-credentials',), - datasource.sensitive_metadata_keys) - with mock.patch("cloudinit.util.system_info", return_value=sys_info): - datasource.get_data() - sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp) - content = util.load_file(sensitive_json_file) - expected = { - 'base64_encoded_keys': [], - 'merged_cfg': { - '_doc': ( - 'Merged cloud-init system config from ' - '/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/' - ), - 'datasource': {'_undef': {'key1': False}}}, - 'sensitive_keys': [ - 'ds/meta_data/some/security-credentials', 'merged_cfg'], - 'sys_info': sys_info, - 'v1': { - '_beta_keys': ['subplatform'], - 'availability-zone': 'myaz', - 'availability_zone': 'myaz', - 'cloud-name': 'subclasscloudname', - 'cloud_name': 'subclasscloudname', - 'distro': 'ubuntu', - 'distro_release': 'focal', - 'distro_version': '20.04', - 'instance-id': 'iid-datasource', - 'instance_id': 'iid-datasource', - 'kernel_release': '5.4.0-24-generic', - 'local-hostname': 'test-subclass-hostname', - 'local_hostname': 'test-subclass-hostname', - 'machine': 'x86_64', - 'platform': 'mytestsubclass', - 'public_ssh_keys': [], - 'python_version': '3.7', - 'region': 'myregion', - 'subplatform': 'unknown', - 'system_platform': - 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal', - 'variant': 'ubuntu'}, - 'ds': { - '_doc': EXPERIMENTAL_TEXT, - 'meta_data': { - 'availability_zone': 'myaz', - 'local-hostname': 'test-subclass-hostname', - 'region': 'myregion', - 'some': { - 'security-credentials': - {'cred1': 'sekret', 'cred2': 'othersekret'}}}} - } - self.assertCountEqual(expected, util.load_json(content)) - file_stat = os.stat(sensitive_json_file) - self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode)) - self.assertEqual(expected, util.load_json(content)) - - def test_get_data_handles_redacted_unserializable_content(self): - """get_data warns unserializable content in INSTANCE_JSON_FILE.""" - tmp = self.tmp_dir() - datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp}), - custom_metadata={'key1': 'val1', 'key2': {'key2.1': self.paths}}) - datasource.get_data() - json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) - content = util.load_file(json_file) - expected_metadata = { - 'key1': 'val1', - 'key2': { - 'key2.1': "Warning: redacted unserializable type "}} - instance_json = util.load_json(content) - self.assertEqual( - expected_metadata, instance_json['ds']['meta_data']) - - def test_persist_instance_data_writes_ec2_metadata_when_set(self): - """When ec2_metadata class attribute is set, persist to json.""" - tmp = self.tmp_dir() - datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp})) - datasource.ec2_metadata = UNSET - datasource.get_data() - json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) - instance_data = util.load_json(util.load_file(json_file)) - self.assertNotIn('ec2_metadata', 
instance_data['ds']) - datasource.ec2_metadata = {'ec2stuff': 'is good'} - datasource.persist_instance_data() - instance_data = util.load_json(util.load_file(json_file)) - self.assertEqual( - {'ec2stuff': 'is good'}, - instance_data['ds']['ec2_metadata']) - - def test_persist_instance_data_writes_network_json_when_set(self): - """When network_data.json class attribute is set, persist to json.""" - tmp = self.tmp_dir() - datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp})) - datasource.get_data() - json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) - instance_data = util.load_json(util.load_file(json_file)) - self.assertNotIn('network_json', instance_data['ds']) - datasource.network_json = {'network_json': 'is good'} - datasource.persist_instance_data() - instance_data = util.load_json(util.load_file(json_file)) - self.assertEqual( - {'network_json': 'is good'}, - instance_data['ds']['network_json']) - - def test_get_data_base64encodes_unserializable_bytes(self): - """On py3, get_data base64encodes any unserializable content.""" - tmp = self.tmp_dir() - datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp}), - custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}}) - self.assertTrue(datasource.get_data()) - json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) - content = util.load_file(json_file) - instance_json = util.load_json(content) - self.assertCountEqual( - ['ds/meta_data/key2/key2.1'], - instance_json['base64_encoded_keys']) - self.assertEqual( - {'key1': 'val1', 'key2': {'key2.1': 'EjM='}}, - instance_json['ds']['meta_data']) - - def test_get_hostname_subclass_support(self): - """Validate get_hostname signature on all subclasses of DataSource.""" - base_args = inspect.getfullargspec(DataSource.get_hostname) - # Import all DataSource subclasses so we can inspect them. 
- modules = util.find_modules(os.path.dirname(os.path.dirname(__file__))) - for _loc, name in modules.items(): - mod_locs, _ = importer.find_module(name, ['cloudinit.sources'], []) - if mod_locs: - importer.import_module(mod_locs[0]) - for child in DataSource.__subclasses__(): - if 'Test' in child.dsname: - continue - self.assertEqual( - base_args, - inspect.getfullargspec(child.get_hostname), - '%s does not implement DataSource.get_hostname params' - % child) - for grandchild in child.__subclasses__(): - self.assertEqual( - base_args, - inspect.getfullargspec(grandchild.get_hostname), - '%s does not implement DataSource.get_hostname params' - % grandchild) - - def test_clear_cached_attrs_resets_cached_attr_class_attributes(self): - """Class attributes listed in cached_attr_defaults are reset.""" - count = 0 - # Setup values for all cached class attributes - for attr, value in self.datasource.cached_attr_defaults: - setattr(self.datasource, attr, count) - count += 1 - self.datasource._dirty_cache = True - self.datasource.clear_cached_attrs() - for attr, value in self.datasource.cached_attr_defaults: - self.assertEqual(value, getattr(self.datasource, attr)) - - def test_clear_cached_attrs_noops_on_clean_cache(self): - """Class attributes listed in cached_attr_defaults are reset.""" - count = 0 - # Setup values for all cached class attributes - for attr, _ in self.datasource.cached_attr_defaults: - setattr(self.datasource, attr, count) - count += 1 - self.datasource._dirty_cache = False # Fake clean cache - self.datasource.clear_cached_attrs() - count = 0 - for attr, _ in self.datasource.cached_attr_defaults: - self.assertEqual(count, getattr(self.datasource, attr)) - count += 1 - - def test_clear_cached_attrs_skips_non_attr_class_attributes(self): - """Skip any cached_attr_defaults which aren't class attributes.""" - self.datasource._dirty_cache = True - self.datasource.clear_cached_attrs() - for attr in ('ec2_metadata', 'network_json'): - self.assertFalse(hasattr(self.datasource, attr)) - - def test_clear_cached_attrs_of_custom_attrs(self): - """Custom attr_values can be passed to clear_cached_attrs.""" - self.datasource._dirty_cache = True - cached_attr_name = self.datasource.cached_attr_defaults[0][0] - setattr(self.datasource, cached_attr_name, 'himom') - self.datasource.myattr = 'orig' - self.datasource.clear_cached_attrs( - attr_defaults=(('myattr', 'updated'),)) - self.assertEqual('himom', getattr(self.datasource, cached_attr_name)) - self.assertEqual('updated', self.datasource.myattr) - - @mock.patch.dict(DataSource.default_update_events, { - EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) - @mock.patch.dict(DataSource.supported_update_events, { - EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) - def test_update_metadata_only_acts_on_supported_update_events(self): - """update_metadata_if_supported wont get_data on unsupported events.""" - self.assertEqual( - {EventScope.NETWORK: set([EventType.BOOT_NEW_INSTANCE])}, - self.datasource.default_update_events - ) - - def fake_get_data(): - raise Exception('get_data should not be called') - - self.datasource.get_data = fake_get_data - self.assertFalse( - self.datasource.update_metadata_if_supported( - source_event_types=[EventType.BOOT])) - - @mock.patch.dict(DataSource.supported_update_events, { - EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) - def test_update_metadata_returns_true_on_supported_update_event(self): - """update_metadata_if_supported returns get_data on supported events""" - def fake_get_data(): - return True 
- - self.datasource.get_data = fake_get_data - self.datasource._network_config = 'something' - self.datasource._dirty_cache = True - self.assertTrue( - self.datasource.update_metadata_if_supported( - source_event_types=[ - EventType.BOOT, EventType.BOOT_NEW_INSTANCE])) - self.assertEqual(UNSET, self.datasource._network_config) - - self.assertIn( - "DEBUG: Update datasource metadata and network config due to" - " events: boot-new-instance", - self.logs.getvalue() - ) - - -class TestRedactSensitiveData(CiTestCase): - - def test_redact_sensitive_data_noop_when_no_sensitive_keys_present(self): - """When sensitive_keys is absent or empty from metadata do nothing.""" - md = {'my': 'data'} - self.assertEqual( - md, redact_sensitive_keys(md, redact_value='redacted')) - md['sensitive_keys'] = [] - self.assertEqual( - md, redact_sensitive_keys(md, redact_value='redacted')) - - def test_redact_sensitive_data_redacts_exact_match_name(self): - """Only exact matched sensitive_keys are redacted from metadata.""" - md = {'sensitive_keys': ['md/secure'], - 'md': {'secure': 's3kr1t', 'insecure': 'publik'}} - secure_md = copy.deepcopy(md) - secure_md['md']['secure'] = 'redacted' - self.assertEqual( - secure_md, - redact_sensitive_keys(md, redact_value='redacted')) - - def test_redact_sensitive_data_does_redacts_with_default_string(self): - """When redact_value is absent, REDACT_SENSITIVE_VALUE is used.""" - md = {'sensitive_keys': ['md/secure'], - 'md': {'secure': 's3kr1t', 'insecure': 'publik'}} - secure_md = copy.deepcopy(md) - secure_md['md']['secure'] = 'redacted for non-root user' - self.assertEqual( - secure_md, - redact_sensitive_keys(md)) - - -class TestCanonicalCloudID(CiTestCase): - - def test_cloud_id_returns_platform_on_unknowns(self): - """When region and cloud_name are unknown, return platform.""" - self.assertEqual( - 'platform', - canonical_cloud_id(cloud_name=METADATA_UNKNOWN, - region=METADATA_UNKNOWN, - platform='platform')) - - def test_cloud_id_returns_platform_on_none(self): - """When region and cloud_name are None, return platform.""" - self.assertEqual( - 'platform', - canonical_cloud_id(cloud_name=None, - region=None, - platform='platform')) - - def test_cloud_id_returns_cloud_name_on_unknown_region(self): - """When region is unknown, return cloud_name.""" - for region in (None, METADATA_UNKNOWN): - self.assertEqual( - 'cloudname', - canonical_cloud_id(cloud_name='cloudname', - region=region, - platform='platform')) - - def test_cloud_id_returns_platform_on_unknown_cloud_name(self): - """When region is set but cloud_name is unknown, return platform.""" - self.assertEqual( - 'platform', - canonical_cloud_id(cloud_name=METADATA_UNKNOWN, - region='region', - platform='platform')) - - def test_cloud_id_aws_based_on_region_and_cloud_name(self): - """When cloud_name is aws, return proper cloud-id based on region.""" - self.assertEqual( - 'aws-china', - canonical_cloud_id(cloud_name='aws', - region='cn-north-1', - platform='platform')) - self.assertEqual( - 'aws', - canonical_cloud_id(cloud_name='aws', - region='us-east-1', - platform='platform')) - self.assertEqual( - 'aws-gov', - canonical_cloud_id(cloud_name='aws', - region='us-gov-1', - platform='platform')) - self.assertEqual( # Overridden non-aws cloud_name is returned - '!aws', - canonical_cloud_id(cloud_name='!aws', - region='us-gov-1', - platform='platform')) - - def test_cloud_id_azure_based_on_region_and_cloud_name(self): - """Report cloud-id when cloud_name is azure and region is in China.""" - self.assertEqual( - 
'azure-china', - canonical_cloud_id(cloud_name='azure', - region='chinaeast', - platform='platform')) - self.assertEqual( - 'azure', - canonical_cloud_id(cloud_name='azure', - region='!chinaeast', - platform='platform')) - -# vi: ts=4 expandtab diff --git a/cloudinit/sources/tests/test_lxd.py b/cloudinit/sources/tests/test_lxd.py deleted file mode 100644 index a6e51f3b..00000000 --- a/cloudinit/sources/tests/test_lxd.py +++ /dev/null @@ -1,376 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from collections import namedtuple -from copy import deepcopy -import json -import re -import stat -from unittest import mock -import yaml - -import pytest - -from cloudinit.sources import ( - DataSourceLXD as lxd, InvalidMetaDataException, UNSET -) -DS_PATH = "cloudinit.sources.DataSourceLXD." - - -LStatResponse = namedtuple("lstatresponse", "st_mode") - - -NETWORK_V1 = { - "version": 1, - "config": [ - { - "type": "physical", "name": "eth0", - "subnets": [{"type": "dhcp", "control": "auto"}] - } - ] -} - - -def _add_network_v1_device(devname) -> dict: - """Helper to inject device name into default network v1 config.""" - network_cfg = deepcopy(NETWORK_V1) - network_cfg["config"][0]["name"] = devname - return network_cfg - - -LXD_V1_METADATA = { - "meta-data": "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n", - "network-config": NETWORK_V1, - "user-data": "#cloud-config\npackages: [sl]\n", - "vendor-data": "#cloud-config\nruncmd: ['echo vendor-data']\n", - "config": { - "user.user-data": - "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n", - "user.vendor-data": - "#cloud-config\nruncmd: ['echo vendor-data']\n", - "user.network-config": yaml.safe_dump(NETWORK_V1), - } -} - - -@pytest.fixture -def lxd_metadata(): - return LXD_V1_METADATA - - -@pytest.yield_fixture -def lxd_ds(request, paths, lxd_metadata): - """ - Return an instantiated DataSourceLXD. 
- - This also performs the mocking required for the default test case: - * ``is_platform_viable`` returns True, - * ``read_metadata`` returns ``LXD_V1_METADATA`` - - (This uses the paths fixture for the required helpers.Paths object) - """ - with mock.patch(DS_PATH + "is_platform_viable", return_value=True): - with mock.patch(DS_PATH + "read_metadata", return_value=lxd_metadata): - yield lxd.DataSourceLXD( - sys_cfg={}, distro=mock.Mock(), paths=paths - ) - - -class TestGenerateFallbackNetworkConfig: - - @pytest.mark.parametrize( - "uname_machine,systemd_detect_virt,expected", ( - # When systemd-detect-virt is unavailable (None), the - # default NETWORK_V1 config is returned - ({}, None, NETWORK_V1), - ({}, None, NETWORK_V1), - ("anything", "lxc\n", NETWORK_V1), - # `uname -m` on kvm determines devname - ("x86_64", "kvm\n", _add_network_v1_device("enp5s0")), - ("ppc64le", "kvm\n", _add_network_v1_device("enp0s5")), - ("s390x", "kvm\n", _add_network_v1_device("enc9")) - ) - ) - @mock.patch(DS_PATH + "util.system_info") - @mock.patch(DS_PATH + "subp.subp") - @mock.patch(DS_PATH + "subp.which") - def test_net_v2_based_on_network_mode_virt_type_and_uname_machine( - self, - m_which, - m_subp, - m_system_info, - uname_machine, - systemd_detect_virt, - expected, - ): - """Return fallback network config v1 based on uname -m and systemd-detect-virt.""" - if systemd_detect_virt is None: - m_which.return_value = None - m_system_info.return_value = {"uname": ["", "", "", "", uname_machine]} - m_subp.return_value = (systemd_detect_virt, "") - assert expected == lxd.generate_fallback_network_config() - if systemd_detect_virt is None: - assert 0 == m_subp.call_count - assert 0 == m_system_info.call_count - else: - assert [ - mock.call(["systemd-detect-virt"]) - ] == m_subp.call_args_list - if systemd_detect_virt != "kvm\n": - assert 0 == m_system_info.call_count - else: - assert 1 == m_system_info.call_count - - -class TestDataSourceLXD: - def test_platform_info(self, lxd_ds): - assert "LXD" == lxd_ds.dsname - assert "lxd" == lxd_ds.cloud_name - assert "lxd" == lxd_ds.platform_type - - def test_subplatform(self, lxd_ds): - assert "LXD socket API v.
1.0 (/dev/lxd/sock)" == lxd_ds.subplatform - - def test__get_data(self, lxd_ds): - """get_data calls read_metadata, setting appropriate instance attrs.""" - assert UNSET == lxd_ds._crawled_metadata - assert UNSET == lxd_ds._network_config - assert None is lxd_ds.userdata_raw - assert True is lxd_ds._get_data() - assert LXD_V1_METADATA == lxd_ds._crawled_metadata - # network-config is dumped from YAML - assert NETWORK_V1 == lxd_ds._network_config - # Any user-data and vendor-data are saved as raw - assert LXD_V1_METADATA["user-data"] == lxd_ds.userdata_raw - assert LXD_V1_METADATA["vendor-data"] == lxd_ds.vendordata_raw - - -class TestIsPlatformViable: - @pytest.mark.parametrize( - "exists,lstat_mode,expected", ( - (False, None, False), - (True, stat.S_IFREG, False), - (True, stat.S_IFSOCK, True), - ) - ) - @mock.patch(DS_PATH + "os.lstat") - @mock.patch(DS_PATH + "os.path.exists") - def test_expected_viable( - self, m_exists, m_lstat, exists, lstat_mode, expected - ): - """Return True only when LXD_SOCKET_PATH exists and is a socket.""" - m_exists.return_value = exists - m_lstat.return_value = LStatResponse(lstat_mode) - assert expected is lxd.is_platform_viable() - m_exists.assert_has_calls([mock.call(lxd.LXD_SOCKET_PATH)]) - if exists: - m_lstat.assert_has_calls([mock.call(lxd.LXD_SOCKET_PATH)]) - else: - assert 0 == m_lstat.call_count - - -class TestReadMetadata: - @pytest.mark.parametrize( - "url_responses,expected,logs", ( - ( # Assert non-JSON format from config route - { - "http://lxd/1.0/meta-data": "local-hostname: md\n", - "http://lxd/1.0/config": "[NOT_JSON", - }, - InvalidMetaDataException( - "Unable to determine cloud-init config from" - " http://lxd/1.0/config. Expected JSON but found:" - " [NOT_JSON"), - ["[GET] [HTTP:200] http://lxd/1.0/meta-data", - "[GET] [HTTP:200] http://lxd/1.0/config"], - ), - ( # Assert success on just meta-data - { - "http://lxd/1.0/meta-data": "local-hostname: md\n", - "http://lxd/1.0/config": "[]", - }, - { - "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, - "config": {}, "meta-data": "local-hostname: md\n" - }, - ["[GET] [HTTP:200] http://lxd/1.0/meta-data", - "[GET] [HTTP:200] http://lxd/1.0/config"], - ), - ( # Assert 404s for config routes log skipping - { - "http://lxd/1.0/meta-data": "local-hostname: md\n", - "http://lxd/1.0/config": - '["/1.0/config/user.custom1",' - ' "/1.0/config/user.meta-data",' - ' "/1.0/config/user.network-config",' - ' "/1.0/config/user.user-data",' - ' "/1.0/config/user.vendor-data"]', - "http://lxd/1.0/config/user.custom1": "custom1", - "http://lxd/1.0/config/user.meta-data": "", # 404 - "http://lxd/1.0/config/user.network-config": "net-config", - "http://lxd/1.0/config/user.user-data": "", # 404 - "http://lxd/1.0/config/user.vendor-data": "", # 404 - }, - { - "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, - "config": { - "user.custom1": "custom1", # Not promoted - "user.network-config": "net-config", - }, - "meta-data": "local-hostname: md\n", - "network-config": "net-config", - }, - [ - "Skipping http://lxd/1.0/config/user.vendor-data on" - " [HTTP:404]", - "Skipping http://lxd/1.0/config/user.meta-data on" - " [HTTP:404]", - "Skipping http://lxd/1.0/config/user.user-data on" - " [HTTP:404]", - "[GET] [HTTP:200] http://lxd/1.0/config", - "[GET] [HTTP:200] http://lxd/1.0/config/user.custom1", - "[GET] [HTTP:200]" - " http://lxd/1.0/config/user.network-config", - ], - ), - ( # Assert all CONFIG_KEY_ALIASES promoted to top-level keys - { - "http://lxd/1.0/meta-data": "local-hostname: md\n", - 
"http://lxd/1.0/config": - '["/1.0/config/user.custom1",' - ' "/1.0/config/user.meta-data",' - ' "/1.0/config/user.network-config",' - ' "/1.0/config/user.user-data",' - ' "/1.0/config/user.vendor-data"]', - "http://lxd/1.0/config/user.custom1": "custom1", - "http://lxd/1.0/config/user.meta-data": "meta-data", - "http://lxd/1.0/config/user.network-config": "net-config", - "http://lxd/1.0/config/user.user-data": "user-data", - "http://lxd/1.0/config/user.vendor-data": "vendor-data", - }, - { - "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, - "config": { - "user.custom1": "custom1", # Not promoted - "user.meta-data": "meta-data", - "user.network-config": "net-config", - "user.user-data": "user-data", - "user.vendor-data": "vendor-data", - }, - "meta-data": "local-hostname: md\n", - "network-config": "net-config", - "user-data": "user-data", - "vendor-data": "vendor-data", - }, - [ - "[GET] [HTTP:200] http://lxd/1.0/meta-data", - "[GET] [HTTP:200] http://lxd/1.0/config", - "[GET] [HTTP:200] http://lxd/1.0/config/user.custom1", - "[GET] [HTTP:200] http://lxd/1.0/config/user.meta-data", - "[GET] [HTTP:200]" - " http://lxd/1.0/config/user.network-config", - "[GET] [HTTP:200] http://lxd/1.0/config/user.user-data", - "[GET] [HTTP:200] http://lxd/1.0/config/user.vendor-data", - ], - ), - ( # Assert cloud-init.* config key values prefered over user.* - { - "http://lxd/1.0/meta-data": "local-hostname: md\n", - "http://lxd/1.0/config": - '["/1.0/config/user.meta-data",' - ' "/1.0/config/user.network-config",' - ' "/1.0/config/user.user-data",' - ' "/1.0/config/user.vendor-data",' - ' "/1.0/config/cloud-init.network-config",' - ' "/1.0/config/cloud-init.user-data",' - ' "/1.0/config/cloud-init.vendor-data"]', - "http://lxd/1.0/config/user.meta-data": "user.meta-data", - "http://lxd/1.0/config/user.network-config": - "user.network-config", - "http://lxd/1.0/config/user.user-data": "user.user-data", - "http://lxd/1.0/config/user.vendor-data": - "user.vendor-data", - "http://lxd/1.0/config/cloud-init.meta-data": - "cloud-init.meta-data", - "http://lxd/1.0/config/cloud-init.network-config": - "cloud-init.network-config", - "http://lxd/1.0/config/cloud-init.user-data": - "cloud-init.user-data", - "http://lxd/1.0/config/cloud-init.vendor-data": - "cloud-init.vendor-data", - }, - { - "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, - "config": { - "user.meta-data": "user.meta-data", - "user.network-config": "user.network-config", - "user.user-data": "user.user-data", - "user.vendor-data": "user.vendor-data", - "cloud-init.network-config": - "cloud-init.network-config", - "cloud-init.user-data": "cloud-init.user-data", - "cloud-init.vendor-data": - "cloud-init.vendor-data", - }, - "meta-data": "local-hostname: md\n", - "network-config": "cloud-init.network-config", - "user-data": "cloud-init.user-data", - "vendor-data": "cloud-init.vendor-data", - }, - [ - "[GET] [HTTP:200] http://lxd/1.0/meta-data", - "[GET] [HTTP:200] http://lxd/1.0/config", - "[GET] [HTTP:200] http://lxd/1.0/config/user.meta-data", - "[GET] [HTTP:200]" - " http://lxd/1.0/config/user.network-config", - "[GET] [HTTP:200] http://lxd/1.0/config/user.user-data", - "[GET] [HTTP:200] http://lxd/1.0/config/user.vendor-data", - "[GET] [HTTP:200]" - " http://lxd/1.0/config/cloud-init.network-config", - "[GET] [HTTP:200]" - " http://lxd/1.0/config/cloud-init.user-data", - "[GET] [HTTP:200]" - " http://lxd/1.0/config/cloud-init.vendor-data", - "Ignoring LXD config user.user-data in favor of" - " cloud-init.user-data value.", - "Ignoring LXD 
config user.network-config in favor of" - " cloud-init.network-config value.", - "Ignoring LXD config user.vendor-data in favor of" - " cloud-init.vendor-data value.", - ], - ), - ) - ) - @mock.patch.object(lxd.requests.Session, 'get') - def test_read_metadata_handles_unexpected_content_or_http_status( - self, session_get, url_responses, expected, logs, caplog - ): - """read_metadata handles valid and invalid content and status codes.""" - - def fake_get(url): - """Mock Response json, ok, status_code, text from url_responses.""" - m_resp = mock.MagicMock() - content = url_responses.get(url, '') - m_resp.json.side_effect = lambda: json.loads(content) - if content: - mock_ok = mock.PropertyMock(return_value=True) - mock_status_code = mock.PropertyMock(return_value=200) - else: - mock_ok = mock.PropertyMock(return_value=False) - mock_status_code = mock.PropertyMock(return_value=404) - type(m_resp).ok = mock_ok - type(m_resp).status_code = mock_status_code - mock_text = mock.PropertyMock(return_value=content) - type(m_resp).text = mock_text - return m_resp - - session_get.side_effect = fake_get - - if isinstance(expected, Exception): - with pytest.raises(type(expected), match=re.escape(str(expected))): - lxd.read_metadata() - else: - assert expected == lxd.read_metadata() - caplogs = caplog.text - for log in logs: - assert log in caplogs - -# vi: ts=4 expandtab diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py deleted file mode 100644 index 5f608cbb..00000000 --- a/cloudinit/sources/tests/test_oracle.py +++ /dev/null @@ -1,797 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import base64 -import copy -import json -from contextlib import ExitStack -from unittest import mock - -import pytest - -from cloudinit.sources import DataSourceOracle as oracle -from cloudinit.sources import NetworkConfigSource -from cloudinit.sources.DataSourceOracle import OpcMetadata -from cloudinit.tests import helpers as test_helpers -from cloudinit.url_helper import UrlError - -DS_PATH = "cloudinit.sources.DataSourceOracle" - -# `curl -L http://169.254.169.254/opc/v1/vnics/` on an Oracle Bare Metal Machine -# with a secondary VNIC attached (vnicId truncated for Python line length) -OPC_BM_SECONDARY_VNIC_RESPONSE = """\ -[ { - "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtyvcucqkhdqmgjszebxe4hrb!!TRUNCATED||", - "privateIp" : "10.0.0.8", - "vlanTag" : 0, - "macAddr" : "90:e2:ba:d4:f1:68", - "virtualRouterIp" : "10.0.0.1", - "subnetCidrBlock" : "10.0.0.0/24", - "nicIndex" : 0 -}, { - "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtfmkxjdy2sqidndiwrsg63zf!!TRUNCATED||", - "privateIp" : "10.0.4.5", - "vlanTag" : 1, - "macAddr" : "02:00:17:05:CF:51", - "virtualRouterIp" : "10.0.4.1", - "subnetCidrBlock" : "10.0.4.0/24", - "nicIndex" : 0 -} ]""" - -# `curl -L http://169.254.169.254/opc/v1/vnics/` on an Oracle Virtual Machine -# with a secondary VNIC attached -OPC_VM_SECONDARY_VNIC_RESPONSE = """\ -[ { - "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtch72z5pd76cc2636qeqh7z_truncated", - "privateIp" : "10.0.0.230", - "vlanTag" : 1039, - "macAddr" : "02:00:17:05:D1:DB", - "virtualRouterIp" : "10.0.0.1", - "subnetCidrBlock" : "10.0.0.0/24" -}, { - "vnicId" : "ocid1.vnic.oc1.phx.abyhqljt4iew3gwmvrwrhhf3bp5drj_truncated", - "privateIp" : "10.0.0.231", - "vlanTag" : 1041, - "macAddr" : "00:00:17:02:2B:B1", - "virtualRouterIp" : "10.0.0.1", - "subnetCidrBlock" : "10.0.0.0/24" -} ]""" - - -# Fetched with `curl http://169.254.169.254/opc/v1/instance/` (and then -# truncated 
for line length) -OPC_V2_METADATA = """\ -{ - "availabilityDomain" : "qIZq:PHX-AD-1", - "faultDomain" : "FAULT-DOMAIN-2", - "compartmentId" : "ocid1.tenancy.oc1..aaaaaaaao7f7cccogqrg5emjxkxmTRUNCATED", - "displayName" : "instance-20200320-1400", - "hostname" : "instance-20200320-1400", - "id" : "ocid1.instance.oc1.phx.anyhqljtniwq6syc3nex55sep5w34qbwmw6TRUNCATED", - "image" : "ocid1.image.oc1.phx.aaaaaaaagmkn4gdhvvx24kiahh2b2qchsicTRUNCATED", - "metadata" : { - "ssh_authorized_keys" : "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ truncated", - "user_data" : "IyEvYmluL3NoCnRvdWNoIC90bXAvZm9v" - }, - "region" : "phx", - "canonicalRegionName" : "us-phoenix-1", - "ociAdName" : "phx-ad-3", - "shape" : "VM.Standard2.1", - "state" : "Running", - "timeCreated" : 1584727285318, - "agentConfig" : { - "monitoringDisabled" : true, - "managementDisabled" : true - } -}""" - -# Just a small meaningless change to differentiate the two metadata versions -OPC_V1_METADATA = OPC_V2_METADATA.replace("ocid1.instance", "ocid2.instance") - - -@pytest.fixture -def metadata_version(): - return 2 - - -@pytest.yield_fixture -def oracle_ds(request, fixture_utils, paths, metadata_version): - """ - Return an instantiated DataSourceOracle. - - This also performs the mocking required for the default test case: - * ``_read_system_uuid`` returns something, - * ``_is_platform_viable`` returns True, - * ``_is_iscsi_root`` returns True (the simpler code path), - * ``read_opc_metadata`` returns ``OPC_V2_METADATA`` - - (This uses the paths fixture for the required helpers.Paths object, and the - fixture_utils fixture for fetching markers.) - """ - sys_cfg = fixture_utils.closest_marker_first_arg_or( - request, "ds_sys_cfg", mock.MagicMock() - ) - metadata = OpcMetadata(metadata_version, json.loads(OPC_V2_METADATA), None) - with mock.patch(DS_PATH + "._read_system_uuid", return_value="someuuid"): - with mock.patch(DS_PATH + "._is_platform_viable", return_value=True): - with mock.patch(DS_PATH + "._is_iscsi_root", return_value=True): - with mock.patch( - DS_PATH + ".read_opc_metadata", - return_value=metadata, - ): - yield oracle.DataSourceOracle( - sys_cfg=sys_cfg, distro=mock.Mock(), paths=paths, - ) - - -class TestDataSourceOracle: - def test_platform_info(self, oracle_ds): - assert "oracle" == oracle_ds.cloud_name - assert "oracle" == oracle_ds.platform_type - - def test_subplatform_before_fetch(self, oracle_ds): - assert 'unknown' == oracle_ds.subplatform - - def test_platform_info_after_fetch(self, oracle_ds): - oracle_ds._get_data() - assert 'metadata (http://169.254.169.254/opc/v2/)' == \ - oracle_ds.subplatform - - @pytest.mark.parametrize('metadata_version', [1]) - def test_v1_platform_info_after_fetch(self, oracle_ds): - oracle_ds._get_data() - assert 'metadata (http://169.254.169.254/opc/v1/)' == \ - oracle_ds.subplatform - - def test_secondary_nics_disabled_by_default(self, oracle_ds): - assert not oracle_ds.ds_cfg["configure_secondary_nics"] - - @pytest.mark.ds_sys_cfg( - {"datasource": {"Oracle": {"configure_secondary_nics": True}}} - ) - def test_sys_cfg_can_enable_configure_secondary_nics(self, oracle_ds): - assert oracle_ds.ds_cfg["configure_secondary_nics"] - - -class TestIsPlatformViable(test_helpers.CiTestCase): - @mock.patch(DS_PATH + ".dmi.read_dmi_data", - return_value=oracle.CHASSIS_ASSET_TAG) - def test_expected_viable(self, m_read_dmi_data): - """System with known chassis tag is viable.""" - self.assertTrue(oracle._is_platform_viable()) - m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')]) - - 
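Together with the two not-viable cases that follow, these tests pin _is_platform_viable() to a single DMI lookup. A minimal sketch of the check being exercised, assuming only the names already referenced by the mocks (an illustration, not the verbatim DataSourceOracle code):

    # Hedged sketch of the viability check described by these tests;
    # the real implementation may differ in detail.
    from cloudinit import dmi
    from cloudinit.sources.DataSourceOracle import CHASSIS_ASSET_TAG

    def _is_platform_viable() -> bool:
        # Oracle instances expose a fixed chassis asset tag via DMI.
        return dmi.read_dmi_data('chassis-asset-tag') == CHASSIS_ASSET_TAG
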
@mock.patch(DS_PATH + ".dmi.read_dmi_data", return_value=None) - def test_expected_not_viable_dmi_data_none(self, m_read_dmi_data): - """System without known chassis tag is not viable.""" - self.assertFalse(oracle._is_platform_viable()) - m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')]) - - @mock.patch(DS_PATH + ".dmi.read_dmi_data", return_value="LetsGoCubs") - def test_expected_not_viable_other(self, m_read_dmi_data): - """System with unnown chassis tag is not viable.""" - self.assertFalse(oracle._is_platform_viable()) - m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')]) - - -@mock.patch( - "cloudinit.net.is_openvswitch_internal_interface", - mock.Mock(return_value=False) -) -class TestNetworkConfigFromOpcImds: - def test_no_secondary_nics_does_not_mutate_input(self, oracle_ds): - oracle_ds._vnics_data = [{}] - # We test this by using in a non-dict to ensure that no dict - # operations are used; failure would be seen as exceptions - oracle_ds._network_config = object() - oracle_ds._add_network_config_from_opc_imds() - - def test_bare_metal_machine_skipped(self, oracle_ds, caplog): - # nicIndex in the first entry indicates a bare metal machine - oracle_ds._vnics_data = json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE) - # We test this by using a non-dict to ensure that no dict - # operations are used - oracle_ds._network_config = object() - oracle_ds._add_network_config_from_opc_imds() - assert 'bare metal machine' in caplog.text - - def test_missing_mac_skipped(self, oracle_ds, caplog): - oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE) - - oracle_ds._network_config = { - 'version': 1, 'config': [{'primary': 'nic'}] - } - with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}): - oracle_ds._add_network_config_from_opc_imds() - - assert 1 == len(oracle_ds.network_config['config']) - assert 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping' in \ - caplog.text - - def test_missing_mac_skipped_v2(self, oracle_ds, caplog): - oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE) - - oracle_ds._network_config = { - 'version': 2, 'ethernets': {'primary': {'nic': {}}} - } - with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}): - oracle_ds._add_network_config_from_opc_imds() - - assert 1 == len(oracle_ds.network_config['ethernets']) - assert 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping' in \ - caplog.text - - def test_secondary_nic(self, oracle_ds): - oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE) - oracle_ds._network_config = { - 'version': 1, 'config': [{'primary': 'nic'}] - } - mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' - with mock.patch(DS_PATH + ".get_interfaces_by_mac", - return_value={mac_addr: nic_name}): - oracle_ds._add_network_config_from_opc_imds() - - # The input is mutated - assert 2 == len(oracle_ds.network_config['config']) - - secondary_nic_cfg = oracle_ds.network_config['config'][1] - assert nic_name == secondary_nic_cfg['name'] - assert 'physical' == secondary_nic_cfg['type'] - assert mac_addr == secondary_nic_cfg['mac_address'] - assert 9000 == secondary_nic_cfg['mtu'] - - assert 1 == len(secondary_nic_cfg['subnets']) - subnet_cfg = secondary_nic_cfg['subnets'][0] - # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE - assert '10.0.0.231' == subnet_cfg['address'] - - def test_secondary_nic_v2(self, oracle_ds): - oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE) - oracle_ds._network_config = { - 'version': 2, 
'ethernets': {'primary': {'nic': {}}} - } - mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' - with mock.patch(DS_PATH + ".get_interfaces_by_mac", - return_value={mac_addr: nic_name}): - oracle_ds._add_network_config_from_opc_imds() - - # The input is mutated - assert 2 == len(oracle_ds.network_config['ethernets']) - - secondary_nic_cfg = oracle_ds.network_config['ethernets']['ens3'] - assert secondary_nic_cfg['dhcp4'] is False - assert secondary_nic_cfg['dhcp6'] is False - assert mac_addr == secondary_nic_cfg['match']['macaddress'] - assert 9000 == secondary_nic_cfg['mtu'] - - assert 1 == len(secondary_nic_cfg['addresses']) - # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE - assert '10.0.0.231' == secondary_nic_cfg['addresses'][0] - - -class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase): - - def setUp(self): - super(TestNetworkConfigFiltersNetFailover, self).setUp() - self.add_patch(DS_PATH + '.get_interfaces_by_mac', - 'm_get_interfaces_by_mac') - self.add_patch(DS_PATH + '.is_netfail_master', 'm_netfail_master') - - def test_ignore_bogus_network_config(self): - netcfg = {'something': 'here'} - passed_netcfg = copy.copy(netcfg) - oracle._ensure_netfailover_safe(passed_netcfg) - self.assertEqual(netcfg, passed_netcfg) - - def test_ignore_network_config_unknown_versions(self): - netcfg = {'something': 'here', 'version': 3} - passed_netcfg = copy.copy(netcfg) - oracle._ensure_netfailover_safe(passed_netcfg) - self.assertEqual(netcfg, passed_netcfg) - - def test_checks_v1_type_physical_interfaces(self): - mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' - self.m_get_interfaces_by_mac.return_value = { - mac_addr: nic_name, - } - netcfg = {'version': 1, 'config': [ - {'type': 'physical', 'name': nic_name, 'mac_address': mac_addr, - 'subnets': [{'type': 'dhcp4'}]}]} - passed_netcfg = copy.copy(netcfg) - self.m_netfail_master.return_value = False - oracle._ensure_netfailover_safe(passed_netcfg) - self.assertEqual(netcfg, passed_netcfg) - self.assertEqual([mock.call(nic_name)], - self.m_netfail_master.call_args_list) - - def test_checks_v1_skips_non_phys_interfaces(self): - mac_addr, nic_name = '00:00:17:02:2b:b1', 'bond0' - self.m_get_interfaces_by_mac.return_value = { - mac_addr: nic_name, - } - netcfg = {'version': 1, 'config': [ - {'type': 'bond', 'name': nic_name, 'mac_address': mac_addr, - 'subnets': [{'type': 'dhcp4'}]}]} - passed_netcfg = copy.copy(netcfg) - oracle._ensure_netfailover_safe(passed_netcfg) - self.assertEqual(netcfg, passed_netcfg) - self.assertEqual(0, self.m_netfail_master.call_count) - - def test_removes_master_mac_property_v1(self): - nic_master, mac_master = 'ens3', self.random_string() - nic_other, mac_other = 'ens7', self.random_string() - nic_extra, mac_extra = 'enp0s1f2', self.random_string() - self.m_get_interfaces_by_mac.return_value = { - mac_master: nic_master, - mac_other: nic_other, - mac_extra: nic_extra, - } - netcfg = {'version': 1, 'config': [ - {'type': 'physical', 'name': nic_master, - 'mac_address': mac_master}, - {'type': 'physical', 'name': nic_other, 'mac_address': mac_other}, - {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra}, - ]} - - def _is_netfail_master(iface): - if iface == 'ens3': - return True - return False - self.m_netfail_master.side_effect = _is_netfail_master - expected_cfg = {'version': 1, 'config': [ - {'type': 'physical', 'name': nic_master}, - {'type': 'physical', 'name': nic_other, 'mac_address': mac_other}, - {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra}, - ]} - 
oracle._ensure_netfailover_safe(netcfg) - self.assertEqual(expected_cfg, netcfg) - - def test_checks_v2_type_ethernet_interfaces(self): - mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' - self.m_get_interfaces_by_mac.return_value = { - mac_addr: nic_name, - } - netcfg = {'version': 2, 'ethernets': { - nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name, - 'match': {'macaddress': mac_addr}}}} - passed_netcfg = copy.copy(netcfg) - self.m_netfail_master.return_value = False - oracle._ensure_netfailover_safe(passed_netcfg) - self.assertEqual(netcfg, passed_netcfg) - self.assertEqual([mock.call(nic_name)], - self.m_netfail_master.call_args_list) - - def test_skips_v2_non_ethernet_interfaces(self): - mac_addr, nic_name = '00:00:17:02:2b:b1', 'wlps0' - self.m_get_interfaces_by_mac.return_value = { - mac_addr: nic_name, - } - netcfg = {'version': 2, 'wifis': { - nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name, - 'match': {'macaddress': mac_addr}}}} - passed_netcfg = copy.copy(netcfg) - oracle._ensure_netfailover_safe(passed_netcfg) - self.assertEqual(netcfg, passed_netcfg) - self.assertEqual(0, self.m_netfail_master.call_count) - - def test_removes_master_mac_property_v2(self): - nic_master, mac_master = 'ens3', self.random_string() - nic_other, mac_other = 'ens7', self.random_string() - nic_extra, mac_extra = 'enp0s1f2', self.random_string() - self.m_get_interfaces_by_mac.return_value = { - mac_master: nic_master, - mac_other: nic_other, - mac_extra: nic_extra, - } - netcfg = {'version': 2, 'ethernets': { - nic_extra: {'dhcp4': True, 'set-name': nic_extra, - 'match': {'macaddress': mac_extra}}, - nic_other: {'dhcp4': True, 'set-name': nic_other, - 'match': {'macaddress': mac_other}}, - nic_master: {'dhcp4': True, 'set-name': nic_master, - 'match': {'macaddress': mac_master}}, - }} - - def _is_netfail_master(iface): - if iface == 'ens3': - return True - return False - self.m_netfail_master.side_effect = _is_netfail_master - - expected_cfg = {'version': 2, 'ethernets': { - nic_master: {'dhcp4': True, 'match': {'name': nic_master}}, - nic_extra: {'dhcp4': True, 'set-name': nic_extra, - 'match': {'macaddress': mac_extra}}, - nic_other: {'dhcp4': True, 'set-name': nic_other, - 'match': {'macaddress': mac_other}}, - }} - oracle._ensure_netfailover_safe(netcfg) - self.assertEqual(expected_cfg, netcfg) - - -def _mock_v2_urls(httpretty): - def instance_callback(request, uri, response_headers): - assert request.headers.get("Authorization") == "Bearer Oracle" - return [200, response_headers, OPC_V2_METADATA] - - def vnics_callback(request, uri, response_headers): - assert request.headers.get("Authorization") == "Bearer Oracle" - return [200, response_headers, OPC_BM_SECONDARY_VNIC_RESPONSE] - - httpretty.register_uri( - httpretty.GET, - "http://169.254.169.254/opc/v2/instance/", - body=instance_callback - ) - httpretty.register_uri( - httpretty.GET, - "http://169.254.169.254/opc/v2/vnics/", - body=vnics_callback - ) - - -def _mock_no_v2_urls(httpretty): - httpretty.register_uri( - httpretty.GET, - "http://169.254.169.254/opc/v2/instance/", - status=404, - ) - httpretty.register_uri( - httpretty.GET, - "http://169.254.169.254/opc/v1/instance/", - body=OPC_V1_METADATA - ) - httpretty.register_uri( - httpretty.GET, - "http://169.254.169.254/opc/v1/vnics/", - body=OPC_BM_SECONDARY_VNIC_RESPONSE - ) - - -class TestReadOpcMetadata: - 
# See https://docs.pytest.org/en/stable/example - # /parametrize.html#parametrizing-conditional-raising - does_not_raise = ExitStack - - @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None) - @pytest.mark.parametrize( - 'version,setup_urls,instance_data,fetch_vnics,vnics_data', [ - (2, _mock_v2_urls, json.loads(OPC_V2_METADATA), True, - json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)), - (2, _mock_v2_urls, json.loads(OPC_V2_METADATA), False, None), - (1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), True, - json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)), - (1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), False, None), - ] - ) - def test_metadata_returned( - self, version, setup_urls, instance_data, - fetch_vnics, vnics_data, httpretty - ): - setup_urls(httpretty) - metadata = oracle.read_opc_metadata(fetch_vnics_data=fetch_vnics) - - assert version == metadata.version - assert instance_data == metadata.instance_data - assert vnics_data == metadata.vnics_data - - # No need to actually wait between retries in the tests - @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None) - @pytest.mark.parametrize( - "v2_failure_count,v1_failure_count,expected_body,expectation", - [ - (1, 0, json.loads(OPC_V2_METADATA), does_not_raise()), - (2, 0, json.loads(OPC_V2_METADATA), does_not_raise()), - (3, 0, json.loads(OPC_V1_METADATA), does_not_raise()), - (3, 1, json.loads(OPC_V1_METADATA), does_not_raise()), - (3, 2, json.loads(OPC_V1_METADATA), does_not_raise()), - (3, 3, None, pytest.raises(UrlError)), - ] - ) - def test_retries(self, v2_failure_count, v1_failure_count, - expected_body, expectation, httpretty): - v2_responses = [httpretty.Response("", status=404)] * v2_failure_count - v2_responses.append(httpretty.Response(OPC_V2_METADATA)) - v1_responses = [httpretty.Response("", status=404)] * v1_failure_count - v1_responses.append(httpretty.Response(OPC_V1_METADATA)) - - httpretty.register_uri( - httpretty.GET, - "http://169.254.169.254/opc/v1/instance/", - responses=v1_responses, - ) - httpretty.register_uri( - httpretty.GET, - "http://169.254.169.254/opc/v2/instance/", - responses=v2_responses, - ) - with expectation: - assert expected_body == oracle.read_opc_metadata().instance_data - - -class TestCommon_GetDataBehaviour: - """This test class tests behaviour common to iSCSI and non-iSCSI root. - - It defines a fixture, parameterized_oracle_ds, which is used in all the - tests herein to test that the commonly expected behaviour is the same with - iSCSI root and without. - - (As non-iSCSI root behaviour is a superset of iSCSI root behaviour this - class is implicitly also testing all iSCSI root behaviour so there is no - separate class for that case.) 
- """ - - @pytest.yield_fixture(params=[True, False]) - def parameterized_oracle_ds(self, request, oracle_ds): - """oracle_ds parameterized for iSCSI and non-iSCSI root respectively""" - is_iscsi_root = request.param - with ExitStack() as stack: - stack.enter_context( - mock.patch( - DS_PATH + "._is_iscsi_root", return_value=is_iscsi_root - ) - ) - if not is_iscsi_root: - stack.enter_context( - mock.patch(DS_PATH + ".net.find_fallback_nic") - ) - stack.enter_context( - mock.patch(DS_PATH + ".dhcp.EphemeralDHCPv4") - ) - yield oracle_ds - - @mock.patch( - DS_PATH + "._is_platform_viable", mock.Mock(return_value=False) - ) - def test_false_if_platform_not_viable( - self, parameterized_oracle_ds, - ): - assert not parameterized_oracle_ds._get_data() - - @pytest.mark.parametrize( - "keyname,expected_value", - ( - ("availability-zone", "phx-ad-3"), - ("launch-index", 0), - ("local-hostname", "instance-20200320-1400"), - ( - "instance-id", - "ocid1.instance.oc1.phx" - ".anyhqljtniwq6syc3nex55sep5w34qbwmw6TRUNCATED", - ), - ("name", "instance-20200320-1400"), - ( - "public_keys", - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ truncated", - ), - ), - ) - def test_metadata_keys_set_correctly( - self, keyname, expected_value, parameterized_oracle_ds, - ): - assert parameterized_oracle_ds._get_data() - assert expected_value == parameterized_oracle_ds.metadata[keyname] - - @pytest.mark.parametrize( - "attribute_name,expected_value", - [ - ("_crawled_metadata", json.loads(OPC_V2_METADATA)), - ( - "userdata_raw", - base64.b64decode(b"IyEvYmluL3NoCnRvdWNoIC90bXAvZm9v"), - ), - ("system_uuid", "my-test-uuid"), - ], - ) - @mock.patch( - DS_PATH + "._read_system_uuid", mock.Mock(return_value="my-test-uuid") - ) - def test_attributes_set_correctly( - self, attribute_name, expected_value, parameterized_oracle_ds, - ): - assert parameterized_oracle_ds._get_data() - assert expected_value == getattr( - parameterized_oracle_ds, attribute_name - ) - - @pytest.mark.parametrize( - "ssh_keys,expected_value", - [ - # No SSH keys in metadata => no keys detected - (None, []), - # Empty SSH keys in metadata => no keys detected - ("", []), - # Single SSH key in metadata => single key detected - ("ssh-rsa ... test@test", ["ssh-rsa ... test@test"]), - # Multiple SSH keys in metadata => multiple keys detected - ( - "ssh-rsa ... test@test\nssh-rsa ... test2@test2", - ["ssh-rsa ... test@test", "ssh-rsa ... 
test2@test2"], - ), - ], - ) - def test_public_keys_handled_correctly( - self, ssh_keys, expected_value, parameterized_oracle_ds - ): - instance_data = json.loads(OPC_V1_METADATA) - if ssh_keys is None: - del instance_data["metadata"]["ssh_authorized_keys"] - else: - instance_data["metadata"]["ssh_authorized_keys"] = ssh_keys - metadata = OpcMetadata(None, instance_data, None) - with mock.patch( - DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata), - ): - assert parameterized_oracle_ds._get_data() - assert ( - expected_value == parameterized_oracle_ds.get_public_ssh_keys() - ) - - def test_missing_user_data_handled_gracefully( - self, parameterized_oracle_ds - ): - instance_data = json.loads(OPC_V1_METADATA) - del instance_data["metadata"]["user_data"] - metadata = OpcMetadata(None, instance_data, None) - with mock.patch( - DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata), - ): - assert parameterized_oracle_ds._get_data() - - assert parameterized_oracle_ds.userdata_raw is None - - def test_missing_metadata_handled_gracefully( - self, parameterized_oracle_ds - ): - instance_data = json.loads(OPC_V1_METADATA) - del instance_data["metadata"] - metadata = OpcMetadata(None, instance_data, None) - with mock.patch( - DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata), - ): - assert parameterized_oracle_ds._get_data() - - assert parameterized_oracle_ds.userdata_raw is None - assert [] == parameterized_oracle_ds.get_public_ssh_keys() - - -@mock.patch(DS_PATH + "._is_iscsi_root", lambda: False) -class TestNonIscsiRoot_GetDataBehaviour: - @mock.patch(DS_PATH + ".dhcp.EphemeralDHCPv4") - @mock.patch(DS_PATH + ".net.find_fallback_nic") - def test_read_opc_metadata_called_with_ephemeral_dhcp( - self, m_find_fallback_nic, m_EphemeralDHCPv4, oracle_ds - ): - in_context_manager = False - - def enter_context_manager(): - nonlocal in_context_manager - in_context_manager = True - - def exit_context_manager(*args): - nonlocal in_context_manager - in_context_manager = False - - m_EphemeralDHCPv4.return_value.__enter__.side_effect = ( - enter_context_manager - ) - m_EphemeralDHCPv4.return_value.__exit__.side_effect = ( - exit_context_manager - ) - - def assert_in_context_manager(**kwargs): - assert in_context_manager - return mock.MagicMock() - - with mock.patch( - DS_PATH + ".read_opc_metadata", - mock.Mock(side_effect=assert_in_context_manager), - ): - assert oracle_ds._get_data() - - assert [ - mock.call( - iface=m_find_fallback_nic.return_value, - connectivity_url_data={ - 'headers': { - 'Authorization': 'Bearer Oracle' - }, - 'url': 'http://169.254.169.254/opc/v2/instance/' - } - ) - ] == m_EphemeralDHCPv4.call_args_list - - -@mock.patch(DS_PATH + ".get_interfaces_by_mac", lambda: {}) -@mock.patch(DS_PATH + ".cmdline.read_initramfs_config") -class TestNetworkConfig: - def test_network_config_cached(self, m_read_initramfs_config, oracle_ds): - """.network_config should be cached""" - assert 0 == m_read_initramfs_config.call_count - oracle_ds.network_config # pylint: disable=pointless-statement - assert 1 == m_read_initramfs_config.call_count - oracle_ds.network_config # pylint: disable=pointless-statement - assert 1 == m_read_initramfs_config.call_count - - def test_network_cmdline(self, m_read_initramfs_config, oracle_ds): - """network_config should prefer initramfs config over fallback""" - ncfg = {"version": 1, "config": [{"a": "b"}]} - m_read_initramfs_config.return_value = copy.deepcopy(ncfg) - - assert ncfg == oracle_ds.network_config - assert 0 == 
oracle_ds.distro.generate_fallback_config.call_count - - def test_network_fallback(self, m_read_initramfs_config, oracle_ds): - """network_config should use the fallback config when no initramfs config exists""" - ncfg = {"version": 1, "config": [{"a": "b"}]} - - m_read_initramfs_config.return_value = None - oracle_ds.distro.generate_fallback_config.return_value = copy.deepcopy( - ncfg - ) - - assert ncfg == oracle_ds.network_config - - @pytest.mark.parametrize( - "configure_secondary_nics,expect_secondary_nics", - [(True, True), (False, False), (None, False)], - ) - def test_secondary_nic_addition( - self, - m_read_initramfs_config, - configure_secondary_nics, - expect_secondary_nics, - oracle_ds, - ): - """Test that _add_network_config_from_opc_imds is called as expected - - (configure_secondary_nics=None is used to test the default behaviour.) - """ - m_read_initramfs_config.return_value = {"version": 1, "config": []} - - if configure_secondary_nics is not None: - oracle_ds.ds_cfg[ - "configure_secondary_nics" - ] = configure_secondary_nics - - def side_effect(self): - self._network_config["secondary_added"] = mock.sentinel.needle - - oracle_ds._vnics_data = 'DummyData' - with mock.patch.object( - oracle.DataSourceOracle, "_add_network_config_from_opc_imds", - new=side_effect, - ): - was_secondary_added = "secondary_added" in oracle_ds.network_config - assert expect_secondary_nics == was_secondary_added - - def test_secondary_nic_failure_isnt_blocking( - self, - m_read_initramfs_config, - caplog, - oracle_ds, - ): - oracle_ds.ds_cfg["configure_secondary_nics"] = True - oracle_ds._vnics_data = "DummyData" - - with mock.patch.object( - oracle.DataSourceOracle, "_add_network_config_from_opc_imds", - side_effect=Exception() - ): - network_config = oracle_ds.network_config - assert network_config == m_read_initramfs_config.return_value - assert "Failed to parse secondary network configuration" in caplog.text - - def test_ds_network_cfg_preferred_over_initramfs(self, _m): - """Ensure that DS net config is preferred over initramfs config""" - config_sources = oracle.DataSourceOracle.network_config_sources - ds_idx = config_sources.index(NetworkConfigSource.ds) - initramfs_idx = config_sources.index(NetworkConfigSource.initramfs) - assert ds_idx < initramfs_idx - - -# vi: ts=4 expandtab diff --git a/cloudinit/tests/__init__.py b/cloudinit/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py deleted file mode 100644 index ccd56793..00000000 --- a/cloudinit/tests/helpers.py +++ /dev/null @@ -1,507 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
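The deleted helpers module below centers on re-rooting path-taking calls into a temporary directory so tests never touch the real filesystem. A usage sketch of the rebase_path() behavior defined further down (paths illustrative; the import reflects the file's new tests/unittests/helpers.py location per this commit):

    from tests.unittests.helpers import rebase_path

    # An absolute path is re-targeted under the new test root:
    rebase_path('/etc/hosts', '/tmp/ci-root')  # -> '/tmp/ci-root/etc/hosts'
    # A path already under the new root is returned unchanged:
    rebase_path('/tmp/ci-root/etc/hosts', '/tmp/ci-root')
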
- -import functools -import httpretty -import io -import logging -import os -import random -import shutil -import string -import sys -import tempfile -import time -import unittest -from contextlib import ExitStack, contextmanager -from unittest import mock -from unittest.util import strclass - -from cloudinit.config.schema import ( - SchemaValidationError, validate_cloudconfig_schema) -from cloudinit import cloud -from cloudinit import distros -from cloudinit import helpers as ch -from cloudinit.sources import DataSourceNone -from cloudinit.templater import JINJA_AVAILABLE -from cloudinit import subp -from cloudinit import util - -_real_subp = subp.subp - -# Used for skipping tests -SkipTest = unittest.SkipTest -skipIf = unittest.skipIf - - -# Makes the old path start -# with new base instead of whatever -# it previously had -def rebase_path(old_path, new_base): - if old_path.startswith(new_base): - # Already handled... - return old_path - # Retarget the base of that path - # to the new base instead of the - # old one... - path = os.path.join(new_base, old_path.lstrip("/")) - path = os.path.abspath(path) - return path - - -# Can work on anything that takes a path as arguments -def retarget_many_wrapper(new_base, am, old_func): - def wrapper(*args, **kwds): - n_args = list(args) - nam = am - if am == -1: - nam = len(n_args) - for i in range(0, nam): - path = args[i] - # patchOS() wraps various os and os.path functions, however in - # Python 3 some of these now accept file-descriptors (integers). - # That breaks rebase_path() so in lieu of a better solution, just - # don't rebase if we get a fd. - if isinstance(path, str): - n_args[i] = rebase_path(path, new_base) - return old_func(*n_args, **kwds) - return wrapper - - -class TestCase(unittest.TestCase): - - def reset_global_state(self): - """Reset any global state to its original settings. - - cloudinit caches some values in cloudinit.util. Unit tests that - involved those cached paths were then subject to failure if the order - of invocation changed (LP: #1703697). - - This function resets any of these global state variables to their - initial state. - - In the future this should really be done with some registry that - can then be cleaned in a more obvious way. - """ - util.PROC_CMDLINE = None - util._DNS_REDIRECT_IP = None - util._LSB_RELEASE = {} - - def setUp(self): - super(TestCase, self).setUp() - self.reset_global_state() - - def shortDescription(self): - return strclass(self.__class__) + '.' + self._testMethodName - - def add_patch(self, target, attr, *args, **kwargs): - """Patches specified target object and sets it as attr on test - instance also schedules cleanup""" - if 'autospec' not in kwargs: - kwargs['autospec'] = True - m = mock.patch(target, *args, **kwargs) - p = m.start() - self.addCleanup(m.stop) - setattr(self, attr, p) - - -class CiTestCase(TestCase): - """This is the preferred test case base class unless user - needs other test case classes below.""" - - # Subclass overrides for specific test behavior - # Whether or not a unit test needs logfile setup - with_logs = False - allowed_subp = False - SUBP_SHELL_TRUE = "shell=true" - - @contextmanager - def allow_subp(self, allowed_subp): - orig = self.allowed_subp - try: - self.allowed_subp = allowed_subp - yield - finally: - self.allowed_subp = orig - - def setUp(self): - super(CiTestCase, self).setUp() - if self.with_logs: - # Create a log handler so unit tests can search expected logs. 
- self.logger = logging.getLogger() - self.logs = io.StringIO() - formatter = logging.Formatter('%(levelname)s: %(message)s') - handler = logging.StreamHandler(self.logs) - handler.setFormatter(formatter) - self.old_handlers = self.logger.handlers - self.logger.handlers = [handler] - if self.allowed_subp is True: - subp.subp = _real_subp - else: - subp.subp = self._fake_subp - - def _fake_subp(self, *args, **kwargs): - if 'args' in kwargs: - cmd = kwargs['args'] - else: - if not args: - raise TypeError( - "subp() missing 1 required positional argument: 'args'") - cmd = args[0] - - if not isinstance(cmd, str): - cmd = cmd[0] - pass_through = False - if not isinstance(self.allowed_subp, (list, bool)): - raise TypeError("self.allowed_subp supports list or bool.") - if isinstance(self.allowed_subp, bool): - pass_through = self.allowed_subp - else: - pass_through = ( - (cmd in self.allowed_subp) or - (self.SUBP_SHELL_TRUE in self.allowed_subp and - kwargs.get('shell'))) - if pass_through: - return _real_subp(*args, **kwargs) - raise Exception( - "called subp. set self.allowed_subp=True to allow\n subp(%s)" % - ', '.join([str(repr(a)) for a in args] + - ["%s=%s" % (k, repr(v)) for k, v in kwargs.items()])) - - def tearDown(self): - if self.with_logs: - # Remove the handler we setup - logging.getLogger().handlers = self.old_handlers - logging.getLogger().setLevel(logging.NOTSET) - subp.subp = _real_subp - super(CiTestCase, self).tearDown() - - def tmp_dir(self, dir=None, cleanup=True): - # return a full path to a temporary directory that will be cleaned up. - if dir is None: - tmpd = tempfile.mkdtemp( - prefix="ci-%s." % self.__class__.__name__) - else: - tmpd = tempfile.mkdtemp(dir=dir) - self.addCleanup( - functools.partial(shutil.rmtree, tmpd, ignore_errors=True)) - return tmpd - - def tmp_path(self, path, dir=None): - # return an absolute path to 'path' under dir. - # if dir is None, one will be created with tmp_dir() - # the file is not created or modified. - if dir is None: - dir = self.tmp_dir() - return os.path.normpath(os.path.abspath(os.path.join(dir, path))) - - def tmp_cloud(self, distro, sys_cfg=None, metadata=None): - """Create a cloud with tmp working directory paths. - - @param distro: Name of the distro to attach to the cloud. - @param metadata: Optional metadata to set on the datasource. - - @return: The built cloud instance. 
- """ - self.new_root = self.tmp_dir() - if not sys_cfg: - sys_cfg = {} - tmp_paths = {} - for var in ['templates_dir', 'run_dir', 'cloud_dir']: - tmp_paths[var] = self.tmp_path(var, dir=self.new_root) - util.ensure_dir(tmp_paths[var]) - self.paths = ch.Paths(tmp_paths) - cls = distros.fetch(distro) - mydist = cls(distro, sys_cfg, self.paths) - myds = DataSourceNone.DataSourceNone(sys_cfg, mydist, self.paths) - if metadata: - myds.metadata.update(metadata) - return cloud.Cloud(myds, self.paths, sys_cfg, mydist, None) - - @classmethod - def random_string(cls, length=8): - """ return a random lowercase string with default length of 8""" - return ''.join( - random.choice(string.ascii_lowercase) for _ in range(length)) - - -class ResourceUsingTestCase(CiTestCase): - - def setUp(self): - super(ResourceUsingTestCase, self).setUp() - self.resource_path = None - - def getCloudPaths(self, ds=None): - tmpdir = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, tmpdir) - cp = ch.Paths({'cloud_dir': tmpdir, - 'templates_dir': resourceLocation()}, - ds=ds) - return cp - - -class FilesystemMockingTestCase(ResourceUsingTestCase): - - def setUp(self): - super(FilesystemMockingTestCase, self).setUp() - self.patched_funcs = ExitStack() - - def tearDown(self): - self.patched_funcs.close() - ResourceUsingTestCase.tearDown(self) - - def replicateTestRoot(self, example_root, target_root): - real_root = resourceLocation() - real_root = os.path.join(real_root, 'roots', example_root) - for (dir_path, _dirnames, filenames) in os.walk(real_root): - real_path = dir_path - make_path = rebase_path(real_path[len(real_root):], target_root) - util.ensure_dir(make_path) - for f in filenames: - real_path = util.abs_join(real_path, f) - make_path = util.abs_join(make_path, f) - shutil.copy(real_path, make_path) - - def patchUtils(self, new_root): - patch_funcs = { - util: [('write_file', 1), - ('append_file', 1), - ('load_file', 1), - ('ensure_dir', 1), - ('chmod', 1), - ('delete_dir_contents', 1), - ('del_file', 1), - ('sym_link', -1), - ('copy', -1)], - } - for (mod, funcs) in patch_funcs.items(): - for (f, am) in funcs: - func = getattr(mod, f) - trap_func = retarget_many_wrapper(new_root, am, func) - self.patched_funcs.enter_context( - mock.patch.object(mod, f, trap_func)) - - # Handle subprocess calls - func = getattr(subp, 'subp') - - def nsubp(*_args, **_kwargs): - return ('', '') - - self.patched_funcs.enter_context( - mock.patch.object(subp, 'subp', nsubp)) - - def null_func(*_args, **_kwargs): - return None - - for f in ['chownbyid', 'chownbyname']: - self.patched_funcs.enter_context( - mock.patch.object(util, f, null_func)) - - def patchOS(self, new_root): - patch_funcs = { - os.path: [('isfile', 1), ('exists', 1), - ('islink', 1), ('isdir', 1), ('lexists', 1)], - os: [('listdir', 1), ('mkdir', 1), - ('lstat', 1), ('symlink', 2), - ('stat', 1)] - } - - if hasattr(os, 'scandir'): - # py27 does not have scandir - patch_funcs[os].append(('scandir', 1)) - - for (mod, funcs) in patch_funcs.items(): - for f, nargs in funcs: - func = getattr(mod, f) - trap_func = retarget_many_wrapper(new_root, nargs, func) - self.patched_funcs.enter_context( - mock.patch.object(mod, f, trap_func)) - - def patchOpen(self, new_root): - trap_func = retarget_many_wrapper(new_root, 1, open) - self.patched_funcs.enter_context( - mock.patch('builtins.open', trap_func) - ) - - def patchStdoutAndStderr(self, stdout=None, stderr=None): - if stdout is not None: - self.patched_funcs.enter_context( - mock.patch.object(sys, 'stdout', stdout)) - if 
stderr is not None: - self.patched_funcs.enter_context( - mock.patch.object(sys, 'stderr', stderr)) - - def reRoot(self, root=None): - if root is None: - root = self.tmp_dir() - self.patchUtils(root) - self.patchOS(root) - self.patchOpen(root) - return root - - @contextmanager - def reRooted(self, root=None): - try: - yield self.reRoot(root) - finally: - self.patched_funcs.close() - - -class HttprettyTestCase(CiTestCase): - # necessary as http_proxy gets in the way of httpretty - # https://github.com/gabrielfalcao/HTTPretty/issues/122 - # Also make sure that allow_net_connect is set to False. - # And make sure reset and enable/disable are done. - - def setUp(self): - self.restore_proxy = os.environ.get('http_proxy') - if self.restore_proxy is not None: - del os.environ['http_proxy'] - super(HttprettyTestCase, self).setUp() - httpretty.HTTPretty.allow_net_connect = False - httpretty.reset() - httpretty.enable() - # Stop the logging from HttpPretty so our logs don't get mixed - # up with its logs - logging.getLogger('httpretty.core').setLevel(logging.CRITICAL) - - def tearDown(self): - httpretty.disable() - httpretty.reset() - if self.restore_proxy: - os.environ['http_proxy'] = self.restore_proxy - super(HttprettyTestCase, self).tearDown() - - -class SchemaTestCaseMixin(unittest.TestCase): - - def assertSchemaValid(self, cfg, msg="Valid Schema failed validation."): - """Assert the config is valid per self.schema. - - If there is only one top level key in the schema properties, then - the cfg will be put under that key.""" - props = list(self.schema.get('properties')) - # put cfg under top level key if there is only one in the schema - if len(props) == 1: - cfg = {props[0]: cfg} - try: - validate_cloudconfig_schema(cfg, self.schema, strict=True) - except SchemaValidationError: - self.fail(msg) - - -def populate_dir(path, files): - if not os.path.exists(path): - os.makedirs(path) - ret = [] - for (name, content) in files.items(): - p = os.path.sep.join([path, name]) - util.ensure_dir(os.path.dirname(p)) - with open(p, "wb") as fp: - if isinstance(content, bytes): - fp.write(content) - else: - fp.write(content.encode('utf-8')) - fp.close() - ret.append(p) - - return ret - - -def populate_dir_with_ts(path, data): - """data is {'file': ('contents', mtime)}. mtime relative to now.""" - populate_dir(path, dict((k, v[0]) for k, v in data.items())) - btime = time.time() - for fpath, (_contents, mtime) in data.items(): - ts = btime + mtime if mtime else btime - os.utime(os.path.sep.join((path, fpath)), (ts, ts)) - - -def dir2dict(startdir, prefix=None): - flist = {} - if prefix is None: - prefix = startdir - for root, _dirs, files in os.walk(startdir): - for fname in files: - fpath = os.path.join(root, fname) - key = fpath[len(prefix):] - flist[key] = util.load_file(fpath) - return flist - - -def wrap_and_call(prefix, mocks, func, *args, **kwargs): - """ - call func(*args, **kwargs) with mocks applied, then unapply the mocks; - nicer to read than repeating decorators on each function - - prefix: prefix for mock names (e.g. 'cloudinit.stages.util') or None - mocks: dictionary of names (under 'prefix') to mock and either - a return value or a dictionary to pass to the mock.patch call - func: function to call with mocks applied - *args,**kwargs: arguments for 'func' - - return_value: return from 'func' - """ - delim = '.' 
- if prefix is None: - prefix = '' - prefix = prefix.rstrip(delim) - unwraps = [] - for fname, kw in mocks.items(): - if prefix: - fname = delim.join((prefix, fname)) - if not isinstance(kw, dict): - kw = {'return_value': kw} - p = mock.patch(fname, **kw) - p.start() - unwraps.append(p) - try: - return func(*args, **kwargs) - finally: - for p in unwraps: - p.stop() - - -def resourceLocation(subname=None): - path = os.path.join('tests', 'data') - if not subname: - return path - return os.path.join(path, subname) - - -def readResource(name, mode='r'): - with open(resourceLocation(name), mode) as fh: - return fh.read() - - -try: - import jsonschema - assert jsonschema # avoid pyflakes error F401: import unused - _missing_jsonschema_dep = False -except ImportError: - _missing_jsonschema_dep = True - - -def skipUnlessJsonSchema(): - return skipIf( - _missing_jsonschema_dep, "No python-jsonschema dependency present.") - - -def skipUnlessJinja(): - return skipIf(not JINJA_AVAILABLE, "No jinja dependency present.") - - -def skipIfJinja(): - return skipIf(JINJA_AVAILABLE, "Jinja dependency present.") - - -# older versions of mock do not have the useful 'assert_not_called' -if not hasattr(mock.Mock, 'assert_not_called'): - def __mock_assert_not_called(mmock): - if mmock.call_count != 0: - msg = ("[citest] Expected '%s' to not have been called. " - "Called %s times." % - (mmock._mock_name or 'mock', mmock.call_count)) - raise AssertionError(msg) - mock.Mock.assert_not_called = __mock_assert_not_called - -# vi: ts=4 expandtab diff --git a/cloudinit/tests/test_conftest.py b/cloudinit/tests/test_conftest.py deleted file mode 100644 index 6f1263a5..00000000 --- a/cloudinit/tests/test_conftest.py +++ /dev/null @@ -1,65 +0,0 @@ -import pytest - -from cloudinit import subp -from cloudinit.tests.helpers import CiTestCase - - -class TestDisableSubpUsage: - """Test that the disable_subp_usage fixture behaves as expected.""" - - def test_using_subp_raises_assertion_error(self): - with pytest.raises(AssertionError): - subp.subp(["some", "args"]) - - def test_typeerrors_on_incorrect_usage(self): - with pytest.raises(TypeError): - # We are intentionally passing no value for a parameter, so: - # pylint: disable=no-value-for-parameter - subp.subp() - - @pytest.mark.allow_all_subp - def test_subp_usage_can_be_reenabled(self): - subp.subp(['whoami']) - - @pytest.mark.allow_subp_for("whoami") - def test_subp_usage_can_be_conditionally_reenabled(self): - # The two parameters test each potential invocation with a single - # argument - with pytest.raises(AssertionError) as excinfo: - subp.subp(["some", "args"]) - assert "allowed: whoami" in str(excinfo.value) - subp.subp(['whoami']) - - @pytest.mark.allow_subp_for("whoami", "bash") - def test_subp_usage_can_be_conditionally_reenabled_for_multiple_cmds(self): - with pytest.raises(AssertionError) as excinfo: - subp.subp(["some", "args"]) - assert "allowed: whoami,bash" in str(excinfo.value) - subp.subp(['bash', '-c', 'true']) - subp.subp(['whoami']) - - @pytest.mark.allow_all_subp - @pytest.mark.allow_subp_for("bash") - def test_both_marks_raise_an_error(self): - with pytest.raises(AssertionError, match="marked both"): - subp.subp(["bash"]) - - -class TestDisableSubpUsageInTestSubclass(CiTestCase): - """Test that disable_subp_usage doesn't impact CiTestCase's subp logic.""" - - def test_using_subp_raises_exception(self): - with pytest.raises(Exception): - subp.subp(["some", "args"]) - - def test_typeerrors_on_incorrect_usage(self): - with pytest.raises(TypeError): - 
subp.subp() - - def test_subp_usage_can_be_reenabled(self): - _old_allowed_subp = self.allowed_subp - self.allowed_subp = True - try: - subp.subp(['bash', '-c', 'true']) - finally: - self.allowed_subp = _old_allowed_subp diff --git a/cloudinit/tests/test_dhclient_hook.py b/cloudinit/tests/test_dhclient_hook.py deleted file mode 100644 index eadae81c..00000000 --- a/cloudinit/tests/test_dhclient_hook.py +++ /dev/null @@ -1,105 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""Tests for cloudinit.dhclient_hook.""" - -from cloudinit import dhclient_hook as dhc -from cloudinit.tests.helpers import CiTestCase, dir2dict, populate_dir - -import argparse -import json -import os -from unittest import mock - - -class TestDhclientHook(CiTestCase): - - ex_env = { - 'interface': 'eth0', - 'new_dhcp_lease_time': '3600', - 'new_host_name': 'x1', - 'new_ip_address': '10.145.210.163', - 'new_subnet_mask': '255.255.255.0', - 'old_host_name': 'x1', - 'PATH': '/usr/sbin:/usr/bin:/sbin:/bin', - 'pid': '614', - 'reason': 'BOUND', - } - - # some older versions of dhclient put the same content, - # but in upper case with DHCP4_ instead of new_ - ex_env_dhcp4 = { - 'REASON': 'BOUND', - 'DHCP4_dhcp_lease_time': '3600', - 'DHCP4_host_name': 'x1', - 'DHCP4_ip_address': '10.145.210.163', - 'DHCP4_subnet_mask': '255.255.255.0', - 'INTERFACE': 'eth0', - 'PATH': '/usr/sbin:/usr/bin:/sbin:/bin', - 'pid': '614', - } - - expected = { - 'dhcp_lease_time': '3600', - 'host_name': 'x1', - 'ip_address': '10.145.210.163', - 'subnet_mask': '255.255.255.0'} - - def setUp(self): - super(TestDhclientHook, self).setUp() - self.tmp = self.tmp_dir() - - def test_handle_args(self): - """quick test of call to handle_args.""" - nic = 'eth0' - args = argparse.Namespace(event=dhc.UP, interface=nic) - with mock.patch.dict("os.environ", clear=True, values=self.ex_env): - dhc.handle_args(dhc.NAME, args, data_d=self.tmp) - found = dir2dict(self.tmp + os.path.sep) - self.assertEqual([nic + ".json"], list(found.keys())) - self.assertEqual(self.expected, json.loads(found[nic + ".json"])) - - def test_run_hook_up_creates_dir(self): - """If dir does not exist, run_hook should create it.""" - subd = self.tmp_path("subdir", self.tmp) - nic = 'eth1' - dhc.run_hook(nic, 'up', data_d=subd, env=self.ex_env) - self.assertEqual( - set([nic + ".json"]), set(dir2dict(subd + os.path.sep))) - - def test_run_hook_up(self): - """Test expected use of run_hook_up.""" - nic = 'eth0' - dhc.run_hook(nic, 'up', data_d=self.tmp, env=self.ex_env) - found = dir2dict(self.tmp + os.path.sep) - self.assertEqual([nic + ".json"], list(found.keys())) - self.assertEqual(self.expected, json.loads(found[nic + ".json"])) - - def test_run_hook_up_dhcp4_prefix(self): - """Test run_hook filters correctly with older DHCP4_ data.""" - nic = 'eth0' - dhc.run_hook(nic, 'up', data_d=self.tmp, env=self.ex_env_dhcp4) - found = dir2dict(self.tmp + os.path.sep) - self.assertEqual([nic + ".json"], list(found.keys())) - self.assertEqual(self.expected, json.loads(found[nic + ".json"])) - - def test_run_hook_down_deletes(self): - """down should delete the created json file.""" - nic = 'eth1' - populate_dir( - self.tmp, {nic + ".json": "{'abcd'}", 'myfile.txt': 'text'}) - dhc.run_hook(nic, 'down', data_d=self.tmp, env={'old_host_name': 'x1'}) - self.assertEqual( - set(['myfile.txt']), - set(dir2dict(self.tmp + os.path.sep))) - - def test_get_parser(self): - """Smoke test creation of get_parser.""" - # cloud-init main uses 'action'. 
- event, interface = (dhc.UP, 'mynic0') - self.assertEqual( - argparse.Namespace(event=event, interface=interface, - action=(dhc.NAME, dhc.handle_args)), - dhc.get_parser().parse_args([event, interface])) - - -# vi: ts=4 expandtab diff --git a/cloudinit/tests/test_dmi.py b/cloudinit/tests/test_dmi.py deleted file mode 100644 index 78a72122..00000000 --- a/cloudinit/tests/test_dmi.py +++ /dev/null @@ -1,154 +0,0 @@ -from cloudinit.tests import helpers -from cloudinit import dmi -from cloudinit import util -from cloudinit import subp - -import os -import tempfile -import shutil -from unittest import mock - - -class TestReadDMIData(helpers.FilesystemMockingTestCase): - - def setUp(self): - super(TestReadDMIData, self).setUp() - self.new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.new_root) - self.reRoot(self.new_root) - p = mock.patch("cloudinit.dmi.is_container", return_value=False) - self.addCleanup(p.stop) - self._m_is_container = p.start() - p = mock.patch("cloudinit.dmi.is_FreeBSD", return_value=False) - self.addCleanup(p.stop) - self._m_is_FreeBSD = p.start() - - def _create_sysfs_parent_directory(self): - util.ensure_dir(os.path.join('sys', 'class', 'dmi', 'id')) - - def _create_sysfs_file(self, key, content): - """Mocks the sys path found on Linux systems.""" - self._create_sysfs_parent_directory() - dmi_key = "/sys/class/dmi/id/{0}".format(key) - util.write_file(dmi_key, content) - - def _configure_dmidecode_return(self, key, content, error=None): - """ - In order to test a missing sys path and call outs to dmidecode, this - function fakes the results of dmidecode to test the results. - """ - def _dmidecode_subp(cmd): - if cmd[-1] != key: - raise subp.ProcessExecutionError() - return (content, error) - - self.patched_funcs.enter_context( - mock.patch("cloudinit.dmi.subp.which", side_effect=lambda _: True)) - self.patched_funcs.enter_context( - mock.patch("cloudinit.dmi.subp.subp", side_effect=_dmidecode_subp)) - - def _configure_kenv_return(self, key, content, error=None): - """ - In order to test a FreeBSD system call outs to kenv, this - function fakes the results of kenv to test the results. 
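- The fake only answers when asked for the key's freebsd name from - DMIDECODE_TO_KERNEL; any other lookup raises ProcessExecutionError, - approximating a failed kenv query.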
- """ - def _kenv_subp(cmd): - if cmd[-1] != dmi.DMIDECODE_TO_KERNEL[key].freebsd: - raise subp.ProcessExecutionError() - return (content, error) - - self.patched_funcs.enter_context( - mock.patch("cloudinit.dmi.subp.subp", side_effect=_kenv_subp)) - - def patch_mapping(self, new_mapping): - self.patched_funcs.enter_context( - mock.patch('cloudinit.dmi.DMIDECODE_TO_KERNEL', - new_mapping)) - - def test_sysfs_used_with_key_in_mapping_and_file_on_disk(self): - self.patch_mapping({'mapped-key': dmi.kdmi('mapped-value', None)}) - expected_dmi_value = 'sys-used-correctly' - self._create_sysfs_file('mapped-value', expected_dmi_value) - self._configure_dmidecode_return('mapped-key', 'wrong-wrong-wrong') - self.assertEqual(expected_dmi_value, dmi.read_dmi_data('mapped-key')) - - def test_dmidecode_used_if_no_sysfs_file_on_disk(self): - self.patch_mapping({}) - self._create_sysfs_parent_directory() - expected_dmi_value = 'dmidecode-used' - self._configure_dmidecode_return('use-dmidecode', expected_dmi_value) - with mock.patch("cloudinit.util.os.uname") as m_uname: - m_uname.return_value = ('x-sysname', 'x-nodename', - 'x-release', 'x-version', 'x86_64') - self.assertEqual(expected_dmi_value, - dmi.read_dmi_data('use-dmidecode')) - - def test_dmidecode_not_used_on_arm(self): - self.patch_mapping({}) - print("current =%s", subp) - self._create_sysfs_parent_directory() - dmi_val = 'from-dmidecode' - dmi_name = 'use-dmidecode' - self._configure_dmidecode_return(dmi_name, dmi_val) - print("now =%s", subp) - - expected = {'armel': None, 'aarch64': dmi_val, 'x86_64': dmi_val} - found = {} - # we do not run the 'dmi-decode' binary on some arches - # verify that anything requested that is not in the sysfs dir - # will return None on those arches. - with mock.patch("cloudinit.util.os.uname") as m_uname: - for arch in expected: - m_uname.return_value = ('x-sysname', 'x-nodename', - 'x-release', 'x-version', arch) - print("now2 =%s", subp) - found[arch] = dmi.read_dmi_data(dmi_name) - self.assertEqual(expected, found) - - def test_none_returned_if_neither_source_has_data(self): - self.patch_mapping({}) - self._configure_dmidecode_return('key', 'value') - self.assertIsNone(dmi.read_dmi_data('expect-fail')) - - def test_none_returned_if_dmidecode_not_in_path(self): - self.patched_funcs.enter_context( - mock.patch.object(subp, 'which', lambda _: False)) - self.patch_mapping({}) - self.assertIsNone(dmi.read_dmi_data('expect-fail')) - - def test_empty_string_returned_instead_of_foxfox(self): - # uninitialized dmi values show as \xff, return empty string - my_len = 32 - dmi_value = b'\xff' * my_len + b'\n' - expected = "" - dmi_key = 'system-product-name' - sysfs_key = 'product_name' - self._create_sysfs_file(sysfs_key, dmi_value) - self.assertEqual(expected, dmi.read_dmi_data(dmi_key)) - - def test_container_returns_none(self): - """In a container read_dmi_data should always return None.""" - - # first verify we get the value if not in container - self._m_is_container.return_value = False - key, val = ("system-product-name", "my_product") - self._create_sysfs_file('product_name', val) - self.assertEqual(val, dmi.read_dmi_data(key)) - - # then verify in container returns None - self._m_is_container.return_value = True - self.assertIsNone(dmi.read_dmi_data(key)) - - def test_container_returns_none_on_unknown(self): - """In a container even bogus keys return None.""" - self._m_is_container.return_value = True - self._create_sysfs_file('product_name', "should-be-ignored") - self.assertIsNone(dmi.read_dmi_data("bogus")) 
- self.assertIsNone(dmi.read_dmi_data("system-product-name")) - - def test_freebsd_uses_kenv(self): - """On a FreeBSD system, kenv is called.""" - self._m_is_FreeBSD.return_value = True - key, val = ("system-product-name", "my_product") - self._configure_kenv_return(key, val) - self.assertEqual(dmi.read_dmi_data(key), val) diff --git a/cloudinit/tests/test_event.py b/cloudinit/tests/test_event.py deleted file mode 100644 index 3da4c70c..00000000 --- a/cloudinit/tests/test_event.py +++ /dev/null @@ -1,26 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. -"""Tests related to cloudinit.event module.""" -from cloudinit.event import EventType, EventScope, userdata_to_events - - -class TestEvent: - def test_userdata_to_events(self): - userdata = {'network': {'when': ['boot']}} - expected = {EventScope.NETWORK: {EventType.BOOT}} - assert expected == userdata_to_events(userdata) - - def test_invalid_scope(self, caplog): - userdata = {'networkasdfasdf': {'when': ['boot']}} - userdata_to_events(userdata) - assert ( - "'networkasdfasdf' is not a valid EventScope! Update data " - "will be ignored for 'networkasdfasdf' scope" - ) in caplog.text - - def test_invalid_event(self, caplog): - userdata = {'network': {'when': ['bootasdfasdf']}} - userdata_to_events(userdata) - assert ( - "'bootasdfasdf' is not a valid EventType! Update data " - "will be ignored for 'network' scope" - ) in caplog.text diff --git a/cloudinit/tests/test_features.py b/cloudinit/tests/test_features.py deleted file mode 100644 index d7a7226d..00000000 --- a/cloudinit/tests/test_features.py +++ /dev/null @@ -1,60 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. -# pylint: disable=no-member,no-name-in-module -""" -This file is for testing the feature flag functionality itself, -NOT for testing any individual feature flag -""" -import pytest -import sys -from pathlib import Path - -import cloudinit - - -@pytest.yield_fixture() -def create_override(request): - """ - Create a feature overrides file and do some module wizardry to make - it seem like we're importing the features file for the first time. - - After creating the override file with the values passed by the test, - we need to reload cloudinit.features - to get all of the current features (including the overridden ones). - Once the test is complete, we remove the file we created and set - features and feature_overrides modules to how they were before - the test started - """ - override_path = Path(cloudinit.__file__).parent / 'feature_overrides.py' - if override_path.exists(): - raise Exception("feature_overrides.py unexpectedly exists! 
" - "Remove it to run this test.") - with override_path.open('w') as f: - for key, value in request.param.items(): - f.write('{} = {}\n'.format(key, value)) - - sys.modules.pop('cloudinit.features', None) - - yield - - override_path.unlink() - sys.modules.pop('cloudinit.feature_overrides', None) - - -class TestFeatures: - def test_feature_without_override(self): - from cloudinit.features import ERROR_ON_USER_DATA_FAILURE - assert ERROR_ON_USER_DATA_FAILURE is True - - @pytest.mark.parametrize('create_override', - [{'ERROR_ON_USER_DATA_FAILURE': False}], - indirect=True) - def test_feature_with_override(self, create_override): - from cloudinit.features import ERROR_ON_USER_DATA_FAILURE - assert ERROR_ON_USER_DATA_FAILURE is False - - @pytest.mark.parametrize('create_override', - [{'SPAM': True}], - indirect=True) - def test_feature_only_in_override(self, create_override): - from cloudinit.features import SPAM - assert SPAM is True diff --git a/cloudinit/tests/test_gpg.py b/cloudinit/tests/test_gpg.py deleted file mode 100644 index 311dfad6..00000000 --- a/cloudinit/tests/test_gpg.py +++ /dev/null @@ -1,55 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. -"""Test gpg module.""" - -from unittest import mock - -from cloudinit import gpg -from cloudinit import subp -from cloudinit.tests.helpers import CiTestCase - - -@mock.patch("cloudinit.gpg.time.sleep") -@mock.patch("cloudinit.gpg.subp.subp") -class TestReceiveKeys(CiTestCase): - """Test the recv_key method.""" - - def test_retries_on_subp_exc(self, m_subp, m_sleep): - """retry should be done on gpg receive keys failure.""" - retries = (1, 2, 4) - my_exc = subp.ProcessExecutionError( - stdout='', stderr='', exit_code=2, cmd=['mycmd']) - m_subp.side_effect = (my_exc, my_exc, ('', '')) - gpg.recv_key("ABCD", "keyserver.example.com", retries=retries) - self.assertEqual([mock.call(1), mock.call(2)], m_sleep.call_args_list) - - def test_raises_error_after_retries(self, m_subp, m_sleep): - """If the final run fails, error should be raised.""" - naplen = 1 - keyid, keyserver = ("ABCD", "keyserver.example.com") - m_subp.side_effect = subp.ProcessExecutionError( - stdout='', stderr='', exit_code=2, cmd=['mycmd']) - with self.assertRaises(ValueError) as rcm: - gpg.recv_key(keyid, keyserver, retries=(naplen,)) - self.assertIn(keyid, str(rcm.exception)) - self.assertIn(keyserver, str(rcm.exception)) - m_sleep.assert_called_with(naplen) - - def test_no_retries_on_none(self, m_subp, m_sleep): - """retry should not be done if retries is None.""" - m_subp.side_effect = subp.ProcessExecutionError( - stdout='', stderr='', exit_code=2, cmd=['mycmd']) - with self.assertRaises(ValueError): - gpg.recv_key("ABCD", "keyserver.example.com", retries=None) - m_sleep.assert_not_called() - - def test_expected_gpg_command(self, m_subp, m_sleep): - """Verify gpg is called with expected args.""" - key, keyserver = ("DEADBEEF", "keyserver.example.com") - retries = (1, 2, 4) - m_subp.return_value = ('', '') - gpg.recv_key(key, keyserver, retries=retries) - m_subp.assert_called_once_with( - ['gpg', '--no-tty', - '--keyserver=%s' % keyserver, '--recv-keys', key], - capture=True) - m_sleep.assert_not_called() diff --git a/cloudinit/tests/test_netinfo.py b/cloudinit/tests/test_netinfo.py deleted file mode 100644 index e44b16d8..00000000 --- a/cloudinit/tests/test_netinfo.py +++ /dev/null @@ -1,181 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
- -"""Tests netinfo module functions and classes.""" - -from copy import copy - -from cloudinit.netinfo import netdev_info, netdev_pformat, route_pformat -from cloudinit.tests.helpers import CiTestCase, mock, readResource - - -# Example ifconfig and route output -SAMPLE_OLD_IFCONFIG_OUT = readResource("netinfo/old-ifconfig-output") -SAMPLE_NEW_IFCONFIG_OUT = readResource("netinfo/new-ifconfig-output") -SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output") -SAMPLE_IPADDRSHOW_OUT = readResource("netinfo/sample-ipaddrshow-output") -SAMPLE_ROUTE_OUT_V4 = readResource("netinfo/sample-route-output-v4") -SAMPLE_ROUTE_OUT_V6 = readResource("netinfo/sample-route-output-v6") -SAMPLE_IPROUTE_OUT_V4 = readResource("netinfo/sample-iproute-output-v4") -SAMPLE_IPROUTE_OUT_V6 = readResource("netinfo/sample-iproute-output-v6") -NETDEV_FORMATTED_OUT = readResource("netinfo/netdev-formatted-output") -ROUTE_FORMATTED_OUT = readResource("netinfo/route-formatted-output") -FREEBSD_NETDEV_OUT = readResource("netinfo/freebsd-netdev-formatted-output") - - -class TestNetInfo(CiTestCase): - - maxDiff = None - with_logs = True - - @mock.patch('cloudinit.netinfo.subp.which') - @mock.patch('cloudinit.netinfo.subp.subp') - def test_netdev_old_nettools_pformat(self, m_subp, m_which): - """netdev_pformat properly rendering old nettools info.""" - m_subp.return_value = (SAMPLE_OLD_IFCONFIG_OUT, '') - m_which.side_effect = lambda x: x if x == 'ifconfig' else None - content = netdev_pformat() - self.assertEqual(NETDEV_FORMATTED_OUT, content) - - @mock.patch('cloudinit.netinfo.subp.which') - @mock.patch('cloudinit.netinfo.subp.subp') - def test_netdev_new_nettools_pformat(self, m_subp, m_which): - """netdev_pformat properly rendering netdev new nettools info.""" - m_subp.return_value = (SAMPLE_NEW_IFCONFIG_OUT, '') - m_which.side_effect = lambda x: x if x == 'ifconfig' else None - content = netdev_pformat() - self.assertEqual(NETDEV_FORMATTED_OUT, content) - - @mock.patch('cloudinit.netinfo.subp.which') - @mock.patch('cloudinit.netinfo.subp.subp') - def test_netdev_freebsd_nettools_pformat(self, m_subp, m_which): - """netdev_pformat properly rendering netdev new nettools info.""" - m_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, '') - m_which.side_effect = lambda x: x if x == 'ifconfig' else None - content = netdev_pformat() - print() - print(content) - print() - self.assertEqual(FREEBSD_NETDEV_OUT, content) - - @mock.patch('cloudinit.netinfo.subp.which') - @mock.patch('cloudinit.netinfo.subp.subp') - def test_netdev_iproute_pformat(self, m_subp, m_which): - """netdev_pformat properly rendering ip route info.""" - m_subp.return_value = (SAMPLE_IPADDRSHOW_OUT, '') - m_which.side_effect = lambda x: x if x == 'ip' else None - content = netdev_pformat() - new_output = copy(NETDEV_FORMATTED_OUT) - # ip route show describes global scopes on ipv4 addresses - # whereas ifconfig does not. Add proper global/host scope to output. - new_output = new_output.replace('| . | 50:7b', '| global | 50:7b') - new_output = new_output.replace( - '255.0.0.0 | . 
|', '255.0.0.0 | host |') - self.assertEqual(new_output, content) - - @mock.patch('cloudinit.netinfo.subp.which') - @mock.patch('cloudinit.netinfo.subp.subp') - def test_netdev_warn_on_missing_commands(self, m_subp, m_which): - """netdev_pformat warns when missing both 'ip' and 'ifconfig'.""" - m_which.return_value = None # Neither ip nor ifconfig found - content = netdev_pformat() - self.assertEqual('\n', content) - self.assertEqual( - "WARNING: Could not print networks: missing 'ip' and 'ifconfig'" - " commands\n", - self.logs.getvalue()) - m_subp.assert_not_called() - - @mock.patch('cloudinit.netinfo.subp.which') - @mock.patch('cloudinit.netinfo.subp.subp') - def test_netdev_info_nettools_down(self, m_subp, m_which): - """test netdev_info using nettools and down interfaces.""" - m_subp.return_value = ( - readResource("netinfo/new-ifconfig-output-down"), "") - m_which.side_effect = lambda x: x if x == 'ifconfig' else None - self.assertEqual( - {'eth0': {'ipv4': [], 'ipv6': [], - 'hwaddr': '00:16:3e:de:51:a6', 'up': False}, - 'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0'}], - 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}], - 'hwaddr': '.', 'up': True}}, - netdev_info(".")) - - @mock.patch('cloudinit.netinfo.subp.which') - @mock.patch('cloudinit.netinfo.subp.subp') - def test_netdev_info_iproute_down(self, m_subp, m_which): - """Test netdev_info with ip and down interfaces.""" - m_subp.return_value = ( - readResource("netinfo/sample-ipaddrshow-output-down"), "") - m_which.side_effect = lambda x: x if x == 'ip' else None - self.assertEqual( - {'lo': {'ipv4': [{'ip': '127.0.0.1', 'bcast': '.', - 'mask': '255.0.0.0', 'scope': 'host'}], - 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}], - 'hwaddr': '.', 'up': True}, - 'eth0': {'ipv4': [], 'ipv6': [], - 'hwaddr': '00:16:3e:de:51:a6', 'up': False}}, - netdev_info(".")) - - @mock.patch('cloudinit.netinfo.netdev_info') - def test_netdev_pformat_with_down(self, m_netdev_info): - """test netdev_pformat when netdev_info returns 'down' interfaces.""" - m_netdev_info.return_value = ( - {'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0', - 'scope': 'host'}], - 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}], - 'hwaddr': '.', 'up': True}, - 'eth0': {'ipv4': [], 'ipv6': [], - 'hwaddr': '00:16:3e:de:51:a6', 'up': False}}) - self.assertEqual( - readResource("netinfo/netdev-formatted-output-down"), - netdev_pformat()) - - @mock.patch('cloudinit.netinfo.subp.which') - @mock.patch('cloudinit.netinfo.subp.subp') - def test_route_nettools_pformat(self, m_subp, m_which): - """route_pformat properly rendering nettools route info.""" - - def subp_netstat_route_selector(*args, **kwargs): - if args[0] == ['netstat', '--route', '--numeric', '--extend']: - return (SAMPLE_ROUTE_OUT_V4, '') - if args[0] == ['netstat', '-A', 'inet6', '--route', '--numeric']: - return (SAMPLE_ROUTE_OUT_V6, '') - raise Exception('Unexpected subp call %s' % args[0]) - - m_subp.side_effect = subp_netstat_route_selector - m_which.side_effect = lambda x: x if x == 'netstat' else None - content = route_pformat() - self.assertEqual(ROUTE_FORMATTED_OUT, content) - - @mock.patch('cloudinit.netinfo.subp.which') - @mock.patch('cloudinit.netinfo.subp.subp') - def test_route_iproute_pformat(self, m_subp, m_which): - """route_pformat properly rendering ip route info.""" - - def subp_iproute_selector(*args, **kwargs): - if ['ip', '-o', 'route', 'list'] == args[0]: - return (SAMPLE_IPROUTE_OUT_V4, '') - v6cmd = ['ip', '--oneline', '-6', 'route', 'list', 'table', 'all'] - if v6cmd == args[0]: - 
return (SAMPLE_IPROUTE_OUT_V6, '') - raise Exception('Unexpected subp call %s' % args[0]) - - m_subp.side_effect = subp_iproute_selector - m_which.side_effect = lambda x: x if x == 'ip' else None - content = route_pformat() - self.assertEqual(ROUTE_FORMATTED_OUT, content) - - @mock.patch('cloudinit.netinfo.subp.which') - @mock.patch('cloudinit.netinfo.subp.subp') - def test_route_warn_on_missing_commands(self, m_subp, m_which): - """route_pformat warns when missing both ip and 'netstat'.""" - m_which.return_value = None # Neither ip nor netstat found - content = route_pformat() - self.assertEqual('\n', content) - self.assertEqual( - "WARNING: Could not print routes: missing 'ip' and 'netstat'" - " commands\n", - self.logs.getvalue()) - m_subp.assert_not_called() - -# vi: ts=4 expandtab diff --git a/cloudinit/tests/test_persistence.py b/cloudinit/tests/test_persistence.py deleted file mode 100644 index ec1152a9..00000000 --- a/cloudinit/tests/test_persistence.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (C) 2020 Canonical Ltd. -# -# Author: Daniel Watkins -# -# This file is part of cloud-init. See LICENSE file for license information. -""" -Tests for cloudinit.persistence. - -Per https://docs.python.org/3/library/pickle.html, only "classes that are -defined at the top level of a module" can be pickled. This means that all of -our ``CloudInitPickleMixin`` subclasses for testing must be defined at -module-level (rather than being defined inline or dynamically in the body of -test methods, as we would do without this constraint). - -``TestPickleMixin.test_subclasses`` iterates over a list of all of these -classes, and tests that they round-trip through a pickle dump/load. As the -interface we're testing is that ``_unpickle`` is called appropriately on -subclasses, our subclasses define their assertions in their ``_unpickle`` -implementation. (This means that the assertions will not be executed if -``_unpickle`` is not called at all; we have -``TestPickleMixin.test_unpickle_called`` to ensure it is called.) - -To avoid manually maintaining a list of classes for parametrization we use a -simple metaclass, ``_Collector``, to gather them up. -""" - -import pickle -from unittest import mock - -import pytest - -from cloudinit.persistence import CloudInitPickleMixin - - -class _Collector(type): - """Any class using this as a metaclass will be stored in test_classes.""" - - test_classes = [] - - def __new__(cls, *args): - new_cls = super().__new__(cls, *args) - _Collector.test_classes.append(new_cls) - return new_cls - - -class InstanceVersionNotUsed(CloudInitPickleMixin, metaclass=_Collector): - """Test that the class version is used over one set in instance state.""" - - _ci_pkl_version = 1 - - def __init__(self): - self._ci_pkl_version = 2 - - def _unpickle(self, ci_pkl_version: int) -> None: - assert 1 == ci_pkl_version - - -class MissingVersionHandled(CloudInitPickleMixin, metaclass=_Collector): - """Test that pickles without ``_ci_pkl_version`` are handled gracefully. - - This is tested by overriding ``__getstate__`` so the dumped pickle of this - class will not have ``_ci_pkl_version`` included. 
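- CloudInitPickleMixin should then fall back to version 0, which the - assertion in ``_unpickle`` below checks.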
- """ - - def __getstate__(self): - return self.__dict__ - - def _unpickle(self, ci_pkl_version: int) -> None: - assert 0 == ci_pkl_version - - -class OverridenVersionHonored(CloudInitPickleMixin, metaclass=_Collector): - """Test that the subclass's version is used.""" - - _ci_pkl_version = 1 - - def _unpickle(self, ci_pkl_version: int) -> None: - assert 1 == ci_pkl_version - - -class StateIsRestored(CloudInitPickleMixin, metaclass=_Collector): - """Instance state should be restored before ``_unpickle`` is called.""" - - def __init__(self): - self.some_state = "some state" - - def _unpickle(self, ci_pkl_version: int) -> None: - assert "some state" == self.some_state - - -class UnpickleCanBeUnoverriden(CloudInitPickleMixin, metaclass=_Collector): - """Subclasses should not need to override ``_unpickle``.""" - - -class VersionDefaultsToZero(CloudInitPickleMixin, metaclass=_Collector): - """Test that the default version is 0.""" - - def _unpickle(self, ci_pkl_version: int) -> None: - assert 0 == ci_pkl_version - - -class VersionIsPoppedFromState(CloudInitPickleMixin, metaclass=_Collector): - """Test _ci_pkl_version is popped from state before being restored.""" - - def _unpickle(self, ci_pkl_version: int) -> None: - # `self._ci_pkl_version` returns the type's _ci_pkl_version if it isn't - # in instance state, so we need to explicitly check self.__dict__. - assert "_ci_pkl_version" not in self.__dict__ - - -class TestPickleMixin: - def test_unpickle_called(self): - """Test that self._unpickle is called on unpickle.""" - with mock.patch.object( - CloudInitPickleMixin, "_unpickle" - ) as m_unpickle: - pickle.loads(pickle.dumps(CloudInitPickleMixin())) - assert 1 == m_unpickle.call_count - - @pytest.mark.parametrize("cls", _Collector.test_classes) - def test_subclasses(self, cls): - """For each collected class, round-trip through pickle dump/load. - - Assertions are implemented in ``cls._unpickle``, and so are evoked as - part of the pickle load. - """ - pickle.loads(pickle.dumps(cls())) diff --git a/cloudinit/tests/test_simpletable.py b/cloudinit/tests/test_simpletable.py deleted file mode 100644 index a12a62a0..00000000 --- a/cloudinit/tests/test_simpletable.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright (C) 2017 Amazon.com, Inc. or its affiliates -# -# Author: Andrew Jorgensen -# -# This file is part of cloud-init. See LICENSE file for license information. -"""Tests that SimpleTable works just like PrettyTable for cloud-init. - -Not all possible PrettyTable cases are tested because we're not trying to -reimplement the entire library, only the minimal parts we actually use. -""" - -from cloudinit.simpletable import SimpleTable -from cloudinit.tests.helpers import CiTestCase - -# Examples rendered by cloud-init using PrettyTable -NET_DEVICE_FIELDS = ( - 'Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address') -NET_DEVICE_ROWS = ( - ('ens3', True, '172.31.4.203', '255.255.240.0', '.', '0a:1f:07:15:98:70'), - ('ens3', True, 'fe80::81f:7ff:fe15:9870/64', '.', 'link', - '0a:1f:07:15:98:70'), - ('lo', True, '127.0.0.1', '255.0.0.0', '.', '.'), - ('lo', True, '::1/128', '.', 'host', '.'), -) -NET_DEVICE_TABLE = """\ -+--------+------+----------------------------+---------------+-------+-------------------+ -| Device | Up | Address | Mask | Scope | Hw-Address | -+--------+------+----------------------------+---------------+-------+-------------------+ -| ens3 | True | 172.31.4.203 | 255.255.240.0 | . | 0a:1f:07:15:98:70 | -| ens3 | True | fe80::81f:7ff:fe15:9870/64 | . 
| link | 0a:1f:07:15:98:70 | -| lo | True | 127.0.0.1 | 255.0.0.0 | . | . | -| lo | True | ::1/128 | . | host | . | -+--------+------+----------------------------+---------------+-------+-------------------+""" # noqa: E501 -ROUTE_IPV4_FIELDS = ( - 'Route', 'Destination', 'Gateway', 'Genmask', 'Interface', 'Flags') -ROUTE_IPV4_ROWS = ( - ('0', '0.0.0.0', '172.31.0.1', '0.0.0.0', 'ens3', 'UG'), - ('1', '169.254.0.0', '0.0.0.0', '255.255.0.0', 'ens3', 'U'), - ('2', '172.31.0.0', '0.0.0.0', '255.255.240.0', 'ens3', 'U'), -) -ROUTE_IPV4_TABLE = """\ -+-------+-------------+------------+---------------+-----------+-------+ -| Route | Destination | Gateway | Genmask | Interface | Flags | -+-------+-------------+------------+---------------+-----------+-------+ -| 0 | 0.0.0.0 | 172.31.0.1 | 0.0.0.0 | ens3 | UG | -| 1 | 169.254.0.0 | 0.0.0.0 | 255.255.0.0 | ens3 | U | -| 2 | 172.31.0.0 | 0.0.0.0 | 255.255.240.0 | ens3 | U | -+-------+-------------+------------+---------------+-----------+-------+""" - -AUTHORIZED_KEYS_FIELDS = ( - 'Keytype', 'Fingerprint (md5)', 'Options', 'Comment') -AUTHORIZED_KEYS_ROWS = ( - ('ssh-rsa', '24:c7:41:49:47:12:31:a0:de:6f:62:79:9b:13:06:36', '-', - 'ajorgens'), -) -AUTHORIZED_KEYS_TABLE = """\ -+---------+-------------------------------------------------+---------+----------+ -| Keytype | Fingerprint (md5) | Options | Comment | -+---------+-------------------------------------------------+---------+----------+ -| ssh-rsa | 24:c7:41:49:47:12:31:a0:de:6f:62:79:9b:13:06:36 | - | ajorgens | -+---------+-------------------------------------------------+---------+----------+""" # noqa: E501 - -# from prettytable import PrettyTable -# pt = PrettyTable(('HEADER',)) -# print(pt) -NO_ROWS_FIELDS = ('HEADER',) -NO_ROWS_TABLE = """\ -+--------+ -| HEADER | -+--------+ -+--------+""" - - -class TestSimpleTable(CiTestCase): - - def test_no_rows(self): - """An empty table is rendered as PrettyTable would have done it.""" - table = SimpleTable(NO_ROWS_FIELDS) - self.assertEqual(str(table), NO_ROWS_TABLE) - - def test_net_dev(self): - """Net device info is rendered as it was with PrettyTable.""" - table = SimpleTable(NET_DEVICE_FIELDS) - for row in NET_DEVICE_ROWS: - table.add_row(row) - self.assertEqual(str(table), NET_DEVICE_TABLE) - - def test_route_ipv4(self): - """Route IPv4 info is rendered as it was with PrettyTable.""" - table = SimpleTable(ROUTE_IPV4_FIELDS) - for row in ROUTE_IPV4_ROWS: - table.add_row(row) - self.assertEqual(str(table), ROUTE_IPV4_TABLE) - - def test_authorized_keys(self): - """SSH authorized keys are rendered as they were with PrettyTable.""" - table = SimpleTable(AUTHORIZED_KEYS_FIELDS) - for row in AUTHORIZED_KEYS_ROWS: - table.add_row(row) - self.assertEqual(str(table), AUTHORIZED_KEYS_TABLE) - - def test_get_string(self): - """get_string() method returns the same content as str().""" - table = SimpleTable(AUTHORIZED_KEYS_FIELDS) - for row in AUTHORIZED_KEYS_ROWS: - table.add_row(row) - self.assertEqual(table.get_string(), str(table)) diff --git a/cloudinit/tests/test_stages.py b/cloudinit/tests/test_stages.py deleted file mode 100644 index a50836a4..00000000 --- a/cloudinit/tests/test_stages.py +++ /dev/null @@ -1,478 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
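-# These tests pin the default config-source precedence of -# Init._find_networking_config: kernel cmdline, then initramfs, then system -# config, then the datasource, then generated fallback config; a datasource -# may reorder this via network_config_sources.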
- -"""Tests related to cloudinit.stages module.""" -import os -import stat - -import pytest - -from cloudinit import stages -from cloudinit import sources -from cloudinit.sources import NetworkConfigSource - -from cloudinit.event import EventScope, EventType -from cloudinit.util import write_file - -from cloudinit.tests.helpers import CiTestCase, mock - -TEST_INSTANCE_ID = 'i-testing' - - -class FakeDataSource(sources.DataSource): - - def __init__(self, paths=None, userdata=None, vendordata=None, - network_config=''): - super(FakeDataSource, self).__init__({}, None, paths=paths) - self.metadata = {'instance-id': TEST_INSTANCE_ID} - self.userdata_raw = userdata - self.vendordata_raw = vendordata - self._network_config = None - if network_config: # Permit for None value to setup attribute - self._network_config = network_config - - @property - def network_config(self): - return self._network_config - - def _get_data(self): - return True - - -class TestInit(CiTestCase): - with_logs = True - allowed_subp = False - - def setUp(self): - super(TestInit, self).setUp() - self.tmpdir = self.tmp_dir() - self.init = stages.Init() - # Setup fake Paths for Init to reference - self.init._cfg = {'system_info': { - 'distro': 'ubuntu', 'paths': {'cloud_dir': self.tmpdir, - 'run_dir': self.tmpdir}}} - self.init.datasource = FakeDataSource(paths=self.init.paths) - self._real_is_new_instance = self.init.is_new_instance - self.init.is_new_instance = mock.Mock(return_value=True) - - def test_wb__find_networking_config_disabled(self): - """find_networking_config returns no config when disabled.""" - disable_file = os.path.join( - self.init.paths.get_cpath('data'), 'upgraded-network') - write_file(disable_file, '') - self.assertEqual( - (None, disable_file), - self.init._find_networking_config()) - - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_disabled_by_kernel( - self, m_cmdline, m_initramfs): - """find_networking_config returns when disabled by kernel cmdline.""" - m_cmdline.return_value = {'config': 'disabled'} - m_initramfs.return_value = {'config': ['fake_initrd']} - self.assertEqual( - (None, NetworkConfigSource.cmdline), - self.init._find_networking_config()) - self.assertEqual('DEBUG: network config disabled by cmdline\n', - self.logs.getvalue()) - - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_disabled_by_initrd( - self, m_cmdline, m_initramfs): - """find_networking_config returns when disabled by kernel cmdline.""" - m_cmdline.return_value = {} - m_initramfs.return_value = {'config': 'disabled'} - self.assertEqual( - (None, NetworkConfigSource.initramfs), - self.init._find_networking_config()) - self.assertEqual('DEBUG: network config disabled by initramfs\n', - self.logs.getvalue()) - - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_disabled_by_datasrc( - self, m_cmdline, m_initramfs): - """find_networking_config returns when disabled by datasource cfg.""" - m_cmdline.return_value = {} # Kernel doesn't disable networking - m_initramfs.return_value = {} # initramfs doesn't disable networking - self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, - 'network': {}} # system config doesn't disable - - self.init.datasource = 
FakeDataSource( - network_config={'config': 'disabled'}) - self.assertEqual( - (None, NetworkConfigSource.ds), - self.init._find_networking_config()) - self.assertEqual('DEBUG: network config disabled by ds\n', - self.logs.getvalue()) - - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_disabled_by_sysconfig( - self, m_cmdline, m_initramfs): - """find_networking_config returns when disabled by system config.""" - m_cmdline.return_value = {} # Kernel doesn't disable networking - m_initramfs.return_value = {} # initramfs doesn't disable networking - self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, - 'network': {'config': 'disabled'}} - self.assertEqual( - (None, NetworkConfigSource.system_cfg), - self.init._find_networking_config()) - self.assertEqual('DEBUG: network config disabled by system_cfg\n', - self.logs.getvalue()) - - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test__find_networking_config_uses_datasrc_order( - self, m_cmdline, m_initramfs): - """find_networking_config should check sources in DS defined order""" - # cmdline and initramfs, which would normally be preferred over other - # sources, disable networking; in this case, though, the DS moves them - # later so its own config is preferred - m_cmdline.return_value = {'config': 'disabled'} - m_initramfs.return_value = {'config': 'disabled'} - - ds_net_cfg = {'config': {'needle': True}} - self.init.datasource = FakeDataSource(network_config=ds_net_cfg) - self.init.datasource.network_config_sources = [ - NetworkConfigSource.ds, NetworkConfigSource.system_cfg, - NetworkConfigSource.cmdline, NetworkConfigSource.initramfs] - - self.assertEqual( - (ds_net_cfg, NetworkConfigSource.ds), - self.init._find_networking_config()) - - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test__find_networking_config_warns_if_datasrc_uses_invalid_src( - self, m_cmdline, m_initramfs): - """find_networking_config should check sources in DS defined order""" - ds_net_cfg = {'config': {'needle': True}} - self.init.datasource = FakeDataSource(network_config=ds_net_cfg) - self.init.datasource.network_config_sources = [ - 'invalid_src', NetworkConfigSource.ds] - - self.assertEqual( - (ds_net_cfg, NetworkConfigSource.ds), - self.init._find_networking_config()) - self.assertIn('WARNING: data source specifies an invalid network' - ' cfg_source: invalid_src', - self.logs.getvalue()) - - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test__find_networking_config_warns_if_datasrc_uses_unavailable_src( - self, m_cmdline, m_initramfs): - """find_networking_config should check sources in DS defined order""" - ds_net_cfg = {'config': {'needle': True}} - self.init.datasource = FakeDataSource(network_config=ds_net_cfg) - self.init.datasource.network_config_sources = [ - NetworkConfigSource.fallback, NetworkConfigSource.ds] - - self.assertEqual( - (ds_net_cfg, NetworkConfigSource.ds), - self.init._find_networking_config()) - self.assertIn('WARNING: data source specifies an unavailable network' - ' cfg_source: fallback', - self.logs.getvalue()) - - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - 
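- # (stacked mock.patch decorators apply bottom-up, so the kernel-cmdline - # patch binds to m_cmdline and the initramfs patch to m_initramfs)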
@mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_returns_kernel( - self, m_cmdline, m_initramfs): - """find_networking_config returns kernel cmdline config if present.""" - expected_cfg = {'config': ['fakekernel']} - m_cmdline.return_value = expected_cfg - m_initramfs.return_value = {'config': ['fake_initrd']} - self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, - 'network': {'config': ['fakesys_config']}} - self.init.datasource = FakeDataSource( - network_config={'config': ['fakedatasource']}) - self.assertEqual( - (expected_cfg, NetworkConfigSource.cmdline), - self.init._find_networking_config()) - - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_returns_initramfs( - self, m_cmdline, m_initramfs): - """find_networking_config returns initramfs config if present.""" - expected_cfg = {'config': ['fake_initrd']} - m_cmdline.return_value = {} - m_initramfs.return_value = expected_cfg - self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, - 'network': {'config': ['fakesys_config']}} - self.init.datasource = FakeDataSource( - network_config={'config': ['fakedatasource']}) - self.assertEqual( - (expected_cfg, NetworkConfigSource.initramfs), - self.init._find_networking_config()) - - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_returns_system_cfg( - self, m_cmdline, m_initramfs): - """find_networking_config returns system config when present.""" - m_cmdline.return_value = {} # No kernel network config - m_initramfs.return_value = {} # no initramfs network config - expected_cfg = {'config': ['fakesys_config']} - self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, - 'network': expected_cfg} - self.init.datasource = FakeDataSource( - network_config={'config': ['fakedatasource']}) - self.assertEqual( - (expected_cfg, NetworkConfigSource.system_cfg), - self.init._find_networking_config()) - - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_returns_datasrc_cfg( - self, m_cmdline, m_initramfs): - """find_networking_config returns datasource net config if present.""" - m_cmdline.return_value = {} # No kernel network config - m_initramfs.return_value = {} # no initramfs network config - # No system config for network in setUp - expected_cfg = {'config': ['fakedatasource']} - self.init.datasource = FakeDataSource(network_config=expected_cfg) - self.assertEqual( - (expected_cfg, NetworkConfigSource.ds), - self.init._find_networking_config()) - - @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') - @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_returns_fallback( - self, m_cmdline, m_initramfs): - """find_networking_config returns fallback config if not defined.""" - m_cmdline.return_value = {} # Kernel doesn't disable networking - m_initramfs.return_value = {} # no initramfs network config - # Neither datasource nor system_info disable or provide network - - fake_cfg = {'config': [{'type': 'physical', 'name': 'eth9'}], - 'version': 1} - - def fake_generate_fallback(): - return fake_cfg - - # Monkey patch distro which gets cached on self.init - distro = 
self.init.distro - distro.generate_fallback_config = fake_generate_fallback - self.assertEqual( - (fake_cfg, NetworkConfigSource.fallback), - self.init._find_networking_config()) - self.assertNotIn('network config disabled', self.logs.getvalue()) - - def test_apply_network_config_disabled(self): - """Log when network is disabled by upgraded-network.""" - disable_file = os.path.join( - self.init.paths.get_cpath('data'), 'upgraded-network') - - def fake_network_config(): - return (None, disable_file) - - self.init._find_networking_config = fake_network_config - - self.init.apply_network_config(True) - self.assertIn( - 'INFO: network config is disabled by %s' % disable_file, - self.logs.getvalue()) - - @mock.patch('cloudinit.net.get_interfaces_by_mac') - @mock.patch('cloudinit.distros.ubuntu.Distro') - def test_apply_network_on_new_instance(self, m_ubuntu, m_macs): - """Call distro apply_network_config methods on is_new_instance.""" - net_cfg = { - 'version': 1, 'config': [ - {'subnets': [{'type': 'dhcp'}], 'type': 'physical', - 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} - - def fake_network_config(): - return net_cfg, NetworkConfigSource.fallback - - m_macs.return_value = {'42:42:42:42:42:42': 'eth9'} - - self.init._find_networking_config = fake_network_config - - self.init.apply_network_config(True) - self.init.distro.apply_network_config_names.assert_called_with(net_cfg) - self.init.distro.apply_network_config.assert_called_with( - net_cfg, bring_up=True) - - @mock.patch('cloudinit.distros.ubuntu.Distro') - def test_apply_network_on_same_instance_id(self, m_ubuntu): - """Only call distro.apply_network_config_names on same instance id.""" - self.init.is_new_instance = self._real_is_new_instance - old_instance_id = os.path.join( - self.init.paths.get_cpath('data'), 'instance-id') - write_file(old_instance_id, TEST_INSTANCE_ID) - net_cfg = { - 'version': 1, 'config': [ - {'subnets': [{'type': 'dhcp'}], 'type': 'physical', - 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} - - def fake_network_config(): - return net_cfg, NetworkConfigSource.fallback - - self.init._find_networking_config = fake_network_config - - self.init.apply_network_config(True) - self.init.distro.apply_network_config_names.assert_called_with(net_cfg) - self.init.distro.apply_network_config.assert_not_called() - assert ( - "No network config applied. 
Neither a new instance nor datasource " - "network update allowed" - ) in self.logs.getvalue() - - # CiTestCase doesn't work with pytest.mark.parametrize, and moving this - # functionality to a separate class is more cumbersome than it'd be worth - # at the moment, so use this as a simple setup - def _apply_network_setup(self, m_macs): - old_instance_id = os.path.join( - self.init.paths.get_cpath('data'), 'instance-id') - write_file(old_instance_id, TEST_INSTANCE_ID) - net_cfg = { - 'version': 1, 'config': [ - {'subnets': [{'type': 'dhcp'}], 'type': 'physical', - 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} - - def fake_network_config(): - return net_cfg, NetworkConfigSource.fallback - - m_macs.return_value = {'42:42:42:42:42:42': 'eth9'} - - self.init._find_networking_config = fake_network_config - self.init.datasource = FakeDataSource(paths=self.init.paths) - self.init.is_new_instance = mock.Mock(return_value=False) - return net_cfg - - @mock.patch('cloudinit.net.get_interfaces_by_mac') - @mock.patch('cloudinit.distros.ubuntu.Distro') - @mock.patch.dict(sources.DataSource.default_update_events, { - EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}}) - def test_apply_network_allowed_when_default_boot( - self, m_ubuntu, m_macs - ): - """Apply network if datasource permits BOOT event.""" - net_cfg = self._apply_network_setup(m_macs) - - self.init.apply_network_config(True) - assert mock.call( - net_cfg - ) == self.init.distro.apply_network_config_names.call_args_list[-1] - assert mock.call( - net_cfg, bring_up=True - ) == self.init.distro.apply_network_config.call_args_list[-1] - - @mock.patch('cloudinit.net.get_interfaces_by_mac') - @mock.patch('cloudinit.distros.ubuntu.Distro') - @mock.patch.dict(sources.DataSource.default_update_events, { - EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) - def test_apply_network_disabled_when_no_default_boot( - self, m_ubuntu, m_macs - ): - """Don't apply network if datasource has no BOOT event.""" - self._apply_network_setup(m_macs) - self.init.apply_network_config(True) - self.init.distro.apply_network_config.assert_not_called() - assert ( - "No network config applied. Neither a new instance nor datasource " - "network update allowed" - ) in self.logs.getvalue() - - @mock.patch('cloudinit.net.get_interfaces_by_mac') - @mock.patch('cloudinit.distros.ubuntu.Distro') - @mock.patch.dict(sources.DataSource.default_update_events, { - EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) - def test_apply_network_allowed_with_userdata_overrides( - self, m_ubuntu, m_macs - ): - """Apply network if userdata overrides default config""" - net_cfg = self._apply_network_setup(m_macs) - self.init._cfg = {'updates': {'network': {'when': ['boot']}}} - self.init.apply_network_config(True) - self.init.distro.apply_network_config_names.assert_called_with( - net_cfg) - self.init.distro.apply_network_config.assert_called_with( - net_cfg, bring_up=True) - - @mock.patch('cloudinit.net.get_interfaces_by_mac') - @mock.patch('cloudinit.distros.ubuntu.Distro') - @mock.patch.dict(sources.DataSource.supported_update_events, { - EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) - def test_apply_network_disabled_when_unsupported( - self, m_ubuntu, m_macs - ): - """Don't apply network config if unsupported. 
- - Shouldn't work even when specified as userdata - """ - self._apply_network_setup(m_macs) - - self.init._cfg = {'updates': {'network': {'when': ['boot']}}} - self.init.apply_network_config(True) - self.init.distro.apply_network_config.assert_not_called() - assert ( - "No network config applied. Neither a new instance nor datasource " - "network update allowed" - ) in self.logs.getvalue() - - -class TestInit_InitializeFilesystem: - """Tests for cloudinit.stages.Init._initialize_filesystem. - - TODO: Expand these tests to cover all of _initialize_filesystem's behavior. - """ - - @pytest.yield_fixture - def init(self, paths): - """A fixture which yields a stages.Init instance with paths and cfg set - - As it is replaced with a mock, consumers of this fixture can set - `init._cfg` if the default empty dict configuration is not appropriate. - """ - with mock.patch("cloudinit.stages.util.ensure_dirs"): - init = stages.Init() - init._cfg = {} - init._paths = paths - yield init - - @mock.patch("cloudinit.stages.util.ensure_file") - def test_ensure_file_not_called_if_no_log_file_configured( - self, m_ensure_file, init - ): - """If no log file is configured, we should not ensure its existence.""" - init._cfg = {} - - init._initialize_filesystem() - - assert 0 == m_ensure_file.call_count - - def test_log_files_existence_is_ensured_if_configured(self, init, tmpdir): - """If a log file is configured, we should ensure its existence.""" - log_file = tmpdir.join("cloud-init.log") - init._cfg = {"def_log_file": str(log_file)} - - init._initialize_filesystem() - - assert log_file.exists() - # Assert we create it 0o640 by default if it doesn't already exist - assert 0o640 == stat.S_IMODE(log_file.stat().mode) - - def test_existing_file_permissions_are_not_modified(self, init, tmpdir): - """If the log file already exists, we should not modify its permissions - - See https://bugs.launchpad.net/cloud-init/+bug/1900837. - """ - # Use a mode that will never be made the default so this test will - # always be valid - mode = 0o606 - log_file = tmpdir.join("cloud-init.log") - log_file.ensure() - log_file.chmod(mode) - init._cfg = {"def_log_file": str(log_file)} - - init._initialize_filesystem() - - assert mode == stat.S_IMODE(log_file.stat().mode) - -# vi: ts=4 expandtab diff --git a/cloudinit/tests/test_subp.py b/cloudinit/tests/test_subp.py deleted file mode 100644 index 515d5d64..00000000 --- a/cloudinit/tests/test_subp.py +++ /dev/null @@ -1,286 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""Tests for cloudinit.subp utility functions""" - -import json -import os -import sys -import stat - -from unittest import mock - -from cloudinit import subp, util -from cloudinit.tests.helpers import CiTestCase - - -BASH = subp.which('bash') -BOGUS_COMMAND = 'this-is-not-expected-to-be-a-program-name' - - -class TestPrependBaseCommands(CiTestCase): - - with_logs = True - - def test_prepend_base_command_errors_on_neither_string_nor_list(self): - """Raise an error for each command which is not a string or list.""" - orig_commands = ['ls', 1, {'not': 'gonna work'}, ['basecmd', 'list']] - with self.assertRaises(TypeError) as context_manager: - subp.prepend_base_command( - base_command='basecmd', commands=orig_commands) - self.assertEqual( - "Invalid basecmd config. 
These commands are not a string or" - " list:\n1\n{'not': 'gonna work'}", - str(context_manager.exception)) - - def test_prepend_base_command_warns_on_non_base_string_commands(self): - """Warn on each non-base for commands of type string.""" - orig_commands = [ - 'ls', 'basecmd list', 'touch /blah', 'basecmd install x'] - fixed_commands = subp.prepend_base_command( - base_command='basecmd', commands=orig_commands) - self.assertEqual( - 'WARNING: Non-basecmd commands in basecmd config:\n' - 'ls\ntouch /blah\n', - self.logs.getvalue()) - self.assertEqual(orig_commands, fixed_commands) - - def test_prepend_base_command_prepends_on_non_base_list_commands(self): - """Prepend 'basecmd' for each non-basecmd command of type list.""" - orig_commands = [['ls'], ['basecmd', 'list'], ['basecmda', '/blah'], - ['basecmd', 'install', 'x']] - expected = [['basecmd', 'ls'], ['basecmd', 'list'], - ['basecmd', 'basecmda', '/blah'], - ['basecmd', 'install', 'x']] - fixed_commands = subp.prepend_base_command( - base_command='basecmd', commands=orig_commands) - self.assertEqual('', self.logs.getvalue()) - self.assertEqual(expected, fixed_commands) - - def test_prepend_base_command_removes_first_item_when_none(self): - """Remove the first element of a non-basecmd when it is None.""" - orig_commands = [[None, 'ls'], ['basecmd', 'list'], - [None, 'touch', '/blah'], - ['basecmd', 'install', 'x']] - expected = [['ls'], ['basecmd', 'list'], - ['touch', '/blah'], - ['basecmd', 'install', 'x']] - fixed_commands = subp.prepend_base_command( - base_command='basecmd', commands=orig_commands) - self.assertEqual('', self.logs.getvalue()) - self.assertEqual(expected, fixed_commands) - - -class TestSubp(CiTestCase): - allowed_subp = [BASH, 'cat', CiTestCase.SUBP_SHELL_TRUE, - BOGUS_COMMAND, sys.executable] - - stdin2err = [BASH, '-c', 'cat >&2'] - stdin2out = ['cat'] - utf8_invalid = b'ab\xaadef' - utf8_valid = b'start \xc3\xa9 end' - utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7' - printenv = [BASH, '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--'] - - def printf_cmd(self, *args): - # bash's printf supports \xaa. So does /usr/bin/printf - # but by using bash, we remove dependency on another program. - return([BASH, '-c', 'printf "$@"', 'printf'] + list(args)) - - def test_subp_handles_bytestrings(self): - """subp can run a bytestring command if shell is True.""" - tmp_file = self.tmp_path('test.out') - cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file) - (out, _err) = subp.subp(cmd.encode('utf-8'), shell=True) - self.assertEqual('', out) - self.assertEqual('', _err) - self.assertEqual('HI MOM\n', util.load_file(tmp_file)) - - def test_subp_handles_strings(self): - """subp can run a string command if shell is True.""" - tmp_file = self.tmp_path('test.out') - cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file) - (out, _err) = subp.subp(cmd, shell=True) - self.assertEqual('', out) - self.assertEqual('', _err) - self.assertEqual('HI MOM\n', util.load_file(tmp_file)) - - def test_subp_handles_utf8(self): - # The given bytes contain utf-8 accented characters as seen in e.g. - # the "deja dup" package in Ubuntu. 
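- # (utf8_valid_2 is b'd\xc3\xa9j\xc8\xa7', which decodes to 'déjȧ'.)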
- cmd = self.printf_cmd(self.utf8_valid_2) - (out, _err) = subp.subp(cmd, capture=True) - self.assertEqual(out, self.utf8_valid_2.decode('utf-8')) - - def test_subp_respects_decode_false(self): - (out, err) = subp.subp(self.stdin2out, capture=True, decode=False, - data=self.utf8_valid) - self.assertTrue(isinstance(out, bytes)) - self.assertTrue(isinstance(err, bytes)) - self.assertEqual(out, self.utf8_valid) - - def test_subp_decode_ignore(self): - # this executes a string that writes invalid utf-8 to stdout - (out, _err) = subp.subp(self.printf_cmd('abc\\xaadef'), - capture=True, decode='ignore') - self.assertEqual(out, 'abcdef') - - def test_subp_decode_strict_valid_utf8(self): - (out, _err) = subp.subp(self.stdin2out, capture=True, - decode='strict', data=self.utf8_valid) - self.assertEqual(out, self.utf8_valid.decode('utf-8')) - - def test_subp_decode_invalid_utf8_replaces(self): - (out, _err) = subp.subp(self.stdin2out, capture=True, - data=self.utf8_invalid) - expected = self.utf8_invalid.decode('utf-8', 'replace') - self.assertEqual(out, expected) - - def test_subp_decode_strict_raises(self): - args = [] - kwargs = {'args': self.stdin2out, 'capture': True, - 'decode': 'strict', 'data': self.utf8_invalid} - self.assertRaises(UnicodeDecodeError, subp.subp, *args, **kwargs) - - def test_subp_capture_stderr(self): - data = b'hello world' - (out, err) = subp.subp(self.stdin2err, capture=True, - decode=False, data=data, - update_env={'LC_ALL': 'C'}) - self.assertEqual(err, data) - self.assertEqual(out, b'') - - def test_subp_reads_env(self): - with mock.patch.dict("os.environ", values={'FOO': 'BAR'}): - out, _err = subp.subp(self.printenv + ['FOO'], capture=True) - self.assertEqual('FOO=BAR', out.splitlines()[0]) - - def test_subp_env_and_update_env(self): - out, _err = subp.subp( - self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True, - env={'FOO': 'BAR'}, - update_env={'HOME': '/myhome', 'K2': 'V2'}) - self.assertEqual( - ['FOO=BAR', 'HOME=/myhome', 'K1=', 'K2=V2'], out.splitlines()) - - def test_subp_update_env(self): - extra = {'FOO': 'BAR', 'HOME': '/root', 'K1': 'V1'} - with mock.patch.dict("os.environ", values=extra): - out, _err = subp.subp( - self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True, - update_env={'HOME': '/myhome', 'K2': 'V2'}) - - self.assertEqual( - ['FOO=BAR', 'HOME=/myhome', 'K1=V1', 'K2=V2'], out.splitlines()) - - def test_subp_warn_missing_shebang(self): - """Warn on no #! in script""" - noshebang = self.tmp_path('noshebang') - util.write_file(noshebang, 'true\n') - - print("os is %s" % os) - os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC) - with self.allow_subp([noshebang]): - self.assertRaisesRegex(subp.ProcessExecutionError, - r'Missing #! 
in script\?', - subp.subp, (noshebang,)) - - def test_subp_combined_stderr_stdout(self): - """Providing combine_capture as True redirects stderr to stdout.""" - data = b'hello world' - (out, err) = subp.subp(self.stdin2err, capture=True, - combine_capture=True, decode=False, data=data) - self.assertEqual(b'', err) - self.assertEqual(data, out) - - def test_returns_none_if_no_capture(self): - (out, err) = subp.subp(self.stdin2out, data=b'', capture=False) - self.assertIsNone(err) - self.assertIsNone(out) - - def test_exception_has_out_err_are_bytes_if_decode_false(self): - """Raised exc should have stderr, stdout as bytes if no decode.""" - with self.assertRaises(subp.ProcessExecutionError) as cm: - subp.subp([BOGUS_COMMAND], decode=False) - self.assertTrue(isinstance(cm.exception.stdout, bytes)) - self.assertTrue(isinstance(cm.exception.stderr, bytes)) - - def test_exception_has_out_err_are_strings_if_decode_true(self): - """Raised exc should have stderr, stdout as strings when decode is True.""" - with self.assertRaises(subp.ProcessExecutionError) as cm: - subp.subp([BOGUS_COMMAND], decode=True) - self.assertTrue(isinstance(cm.exception.stdout, str)) - self.assertTrue(isinstance(cm.exception.stderr, str)) - - def test_bunch_of_slashes_in_path(self): - self.assertEqual("/target/my/path/", - subp.target_path("/target/", "//my/path/")) - self.assertEqual("/target/my/path/", - subp.target_path("/target/", "///my/path/")) - - def test_c_lang_can_take_utf8_args(self): - """Independent of system LC_CTYPE, args can contain utf-8 strings. - - When python starts up, its default encoding gets set based on - the value of LC_CTYPE. If no system locale is set, the default - encoding for both python2 and python3 in some paths will end up - being ascii. - - Attempts to use setlocale or patching (or changing) os.environ - in the current environment seem to not be effective. - - This test starts up a python with LC_CTYPE set to C so that - the default encoding will be set to ascii. In such an environment - Popen(['command', 'non-ascii-arg']) would cause a UnicodeDecodeError. 
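- (On Python 3.7+, PEP 538 locale coercion usually upgrades a plain C - locale to C.UTF-8 when available, so the ascii default mainly affects - older interpreters.)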
- """ - python_prog = '\n'.join([ - 'import json, sys', - 'from cloudinit.subp import subp', - 'data = sys.stdin.read()', - 'cmd = json.loads(data)', - 'subp(cmd, capture=False)', - '']) - cmd = [BASH, '-c', 'echo -n "$@"', '--', - self.utf8_valid.decode("utf-8")] - python_subp = [sys.executable, '-c', python_prog] - - out, _err = subp.subp( - python_subp, update_env={'LC_CTYPE': 'C'}, - data=json.dumps(cmd).encode("utf-8"), - decode=False) - self.assertEqual(self.utf8_valid, out) - - def test_bogus_command_logs_status_messages(self): - """status_cb gets status messages logs on bogus commands provided.""" - logs = [] - - def status_cb(log): - logs.append(log) - - with self.assertRaises(subp.ProcessExecutionError): - subp.subp([BOGUS_COMMAND], status_cb=status_cb) - - expected = [ - 'Begin run command: {cmd}\n'.format(cmd=BOGUS_COMMAND), - 'ERROR: End run command: invalid command provided\n'] - self.assertEqual(expected, logs) - - def test_command_logs_exit_codes_to_status_cb(self): - """status_cb gets status messages containing command exit code.""" - logs = [] - - def status_cb(log): - logs.append(log) - - with self.assertRaises(subp.ProcessExecutionError): - subp.subp([BASH, '-c', 'exit 2'], status_cb=status_cb) - subp.subp([BASH, '-c', 'exit 0'], status_cb=status_cb) - - expected = [ - 'Begin run command: %s -c exit 2\n' % BASH, - 'ERROR: End run command: exit(2)\n', - 'Begin run command: %s -c exit 0\n' % BASH, - 'End run command: exit(0)\n'] - self.assertEqual(expected, logs) - - -# vi: ts=4 expandtab diff --git a/cloudinit/tests/test_temp_utils.py b/cloudinit/tests/test_temp_utils.py deleted file mode 100644 index 4a52ef89..00000000 --- a/cloudinit/tests/test_temp_utils.py +++ /dev/null @@ -1,117 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
- -"""Tests for cloudinit.temp_utils""" - -from cloudinit.temp_utils import mkdtemp, mkstemp, tempdir -from cloudinit.tests.helpers import CiTestCase, wrap_and_call -import os - - -class TestTempUtils(CiTestCase): - - def test_mkdtemp_default_non_root(self): - """mkdtemp creates a dir under /tmp for the unprivileged.""" - calls = [] - - def fake_mkdtemp(*args, **kwargs): - calls.append(kwargs) - return '/fake/return/path' - - retval = wrap_and_call( - 'cloudinit.temp_utils', - {'os.getuid': 1000, - 'tempfile.mkdtemp': {'side_effect': fake_mkdtemp}, - '_TMPDIR': {'new': None}, - 'os.path.isdir': True}, - mkdtemp) - self.assertEqual('/fake/return/path', retval) - self.assertEqual([{'dir': '/tmp'}], calls) - - def test_mkdtemp_default_non_root_needs_exe(self): - """mkdtemp creates a dir under /var/tmp/cloud-init when needs_exe.""" - calls = [] - - def fake_mkdtemp(*args, **kwargs): - calls.append(kwargs) - return '/fake/return/path' - - retval = wrap_and_call( - 'cloudinit.temp_utils', - {'os.getuid': 1000, - 'tempfile.mkdtemp': {'side_effect': fake_mkdtemp}, - '_TMPDIR': {'new': None}, - 'os.path.isdir': True}, - mkdtemp, needs_exe=True) - self.assertEqual('/fake/return/path', retval) - self.assertEqual([{'dir': '/var/tmp/cloud-init'}], calls) - - def test_mkdtemp_default_root(self): - """mkdtemp creates a dir under /run/cloud-init for the privileged.""" - calls = [] - - def fake_mkdtemp(*args, **kwargs): - calls.append(kwargs) - return '/fake/return/path' - - retval = wrap_and_call( - 'cloudinit.temp_utils', - {'os.getuid': 0, - 'tempfile.mkdtemp': {'side_effect': fake_mkdtemp}, - '_TMPDIR': {'new': None}, - 'os.path.isdir': True}, - mkdtemp) - self.assertEqual('/fake/return/path', retval) - self.assertEqual([{'dir': '/run/cloud-init/tmp'}], calls) - - def test_mkstemp_default_non_root(self): - """mkstemp creates secure tempfile under /tmp for the unprivileged.""" - calls = [] - - def fake_mkstemp(*args, **kwargs): - calls.append(kwargs) - return '/fake/return/path' - - retval = wrap_and_call( - 'cloudinit.temp_utils', - {'os.getuid': 1000, - 'tempfile.mkstemp': {'side_effect': fake_mkstemp}, - '_TMPDIR': {'new': None}, - 'os.path.isdir': True}, - mkstemp) - self.assertEqual('/fake/return/path', retval) - self.assertEqual([{'dir': '/tmp'}], calls) - - def test_mkstemp_default_root(self): - """mkstemp creates a secure tempfile in /run/cloud-init for root.""" - calls = [] - - def fake_mkstemp(*args, **kwargs): - calls.append(kwargs) - return '/fake/return/path' - - retval = wrap_and_call( - 'cloudinit.temp_utils', - {'os.getuid': 0, - 'tempfile.mkstemp': {'side_effect': fake_mkstemp}, - '_TMPDIR': {'new': None}, - 'os.path.isdir': True}, - mkstemp) - self.assertEqual('/fake/return/path', retval) - self.assertEqual([{'dir': '/run/cloud-init/tmp'}], calls) - - def test_tempdir_error_suppression(self): - """test tempdir suppresses errors during directory removal.""" - - with self.assertRaises(OSError): - with tempdir(prefix='cloud-init-dhcp-') as tdir: - os.rmdir(tdir) - # As a result, the directory is already gone, - # so shutil.rmtree should raise OSError - - with tempdir(rmtree_ignore_errors=True, - prefix='cloud-init-dhcp-') as tdir: - os.rmdir(tdir) - # Since the directory is already gone, shutil.rmtree would raise - # OSError, but we suppress that - -# vi: ts=4 expandtab diff --git a/cloudinit/tests/test_upgrade.py b/cloudinit/tests/test_upgrade.py deleted file mode 100644 index da3ab23b..00000000 --- a/cloudinit/tests/test_upgrade.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (C) 2020 
Canonical Ltd. -# -# Author: Daniel Watkins -# -# This file is part of cloud-init. See LICENSE file for license information. - -"""Upgrade testing for cloud-init. - -This module tests cloud-init's behaviour across upgrades. Specifically, it -specifies a set of invariants that the current codebase expects to be true (as -tests in ``TestUpgrade``) and then checks that these hold true after unpickling -``obj.pkl``s from previous versions of cloud-init; those pickles are stored in -``tests/data/old_pickles/``. -""" - -import operator -import pathlib - -import pytest - -from cloudinit.stages import _pkl_load -from cloudinit.tests.helpers import resourceLocation - - -class TestUpgrade: - @pytest.fixture( - params=pathlib.Path(resourceLocation("old_pickles")).glob("*.pkl"), - scope="class", - ids=operator.attrgetter("name"), - ) - def previous_obj_pkl(self, request): - """Load each pickle to memory once, then run all tests against it. - - Test implementations _must not_ modify the ``previous_obj_pkl`` which - they are passed, as that will affect tests that run after them. - """ - return _pkl_load(str(request.param)) - - def test_networking_set_on_distro(self, previous_obj_pkl): - """We always expect to have ``.networking`` on ``Distro`` objects.""" - assert previous_obj_pkl.distro.networking is not None - - def test_blacklist_drivers_set_on_networking(self, previous_obj_pkl): - """We always expect Networking.blacklist_drivers to be initialised.""" - assert previous_obj_pkl.distro.networking.blacklist_drivers is None - - def test_paths_has_run_dir_attribute(self, previous_obj_pkl): - assert previous_obj_pkl.paths.run_dir is not None - - def test_vendordata_exists(self, previous_obj_pkl): - assert previous_obj_pkl.vendordata2 is None - assert previous_obj_pkl.vendordata2_raw is None diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py deleted file mode 100644 index c3918f80..00000000 --- a/cloudinit/tests/test_url_helper.py +++ /dev/null @@ -1,178 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit.url_helper import ( - NOT_FOUND, UrlError, REDACTED, oauth_headers, read_file_or_url, - retry_on_url_exc) -from cloudinit.tests.helpers import CiTestCase, mock, skipIf -from cloudinit import util -from cloudinit import version - -import httpretty -import logging -import requests - - -try: - import oauthlib - assert oauthlib # avoid pyflakes error F401: import unused - _missing_oauthlib_dep = False -except ImportError: - _missing_oauthlib_dep = True - - -M_PATH = 'cloudinit.url_helper.' 
- - -class TestOAuthHeaders(CiTestCase): - - def test_oauth_headers_raises_not_implemented_when_oathlib_missing(self): - """oauth_headers raises NotImplementedError when oauthlib is absent.""" - with mock.patch.dict('sys.modules', {'oauthlib': None}): - with self.assertRaises(NotImplementedError) as context_manager: - oauth_headers(1, 2, 3, 4, 5) - self.assertEqual( - 'oauth support is not available', - str(context_manager.exception)) - - @skipIf(_missing_oauthlib_dep, "No python-oauthlib dependency") - @mock.patch('oauthlib.oauth1.Client') - def test_oauth_headers_calls_oathlibclient_when_available(self, m_client): - """oauth_headers calls oauth1.Client.sign with the provided url.""" - class fakeclient(object): - def sign(self, url): - # The first and third items of the client.sign tuple are ignored - return ('junk', url, 'junk2') - - m_client.return_value = fakeclient() - - return_value = oauth_headers( - 'url', 'consumer_key', 'token_key', 'token_secret', - 'consumer_secret') - self.assertEqual('url', return_value) - - -class TestReadFileOrUrl(CiTestCase): - - with_logs = True - - def test_read_file_or_url_str_from_file(self): - """Test that str(result.contents) on file is text version of contents. - It should not be "b'data'", but just "'data'" """ - tmpf = self.tmp_path("myfile1") - data = b'This is my file content\n' - util.write_file(tmpf, data, omode="wb") - result = read_file_or_url("file://%s" % tmpf) - self.assertEqual(result.contents, data) - self.assertEqual(str(result), data.decode('utf-8')) - - @httpretty.activate - def test_read_file_or_url_str_from_url(self): - """Test that str(result.contents) on url is text version of contents. - It should not be "b'data'", but just "'data'" """ - url = 'http://hostname/path' - data = b'This is my url content\n' - httpretty.register_uri(httpretty.GET, url, data) - result = read_file_or_url(url) - self.assertEqual(result.contents, data) - self.assertEqual(str(result), data.decode('utf-8')) - - @httpretty.activate - def test_read_file_or_url_str_from_url_redacting_headers_from_logs(self): - """Headers are redacted from logs but unredacted in requests.""" - url = 'http://hostname/path' - headers = {'sensitive': 'sekret', 'server': 'blah'} - httpretty.register_uri(httpretty.GET, url) - # By default, httpretty will log our request along with the header, - # so if we don't change this the secret will show up in the logs - logging.getLogger('httpretty.core').setLevel(logging.CRITICAL) - - read_file_or_url(url, headers=headers, headers_redact=['sensitive']) - logs = self.logs.getvalue() - for k in headers.keys(): - self.assertEqual(headers[k], httpretty.last_request().headers[k]) - self.assertIn(REDACTED, logs) - self.assertNotIn('sekret', logs) - - @httpretty.activate - def test_read_file_or_url_str_from_url_redacts_noheaders(self): - """When no headers_redact, header values are in logs and requests.""" - url = 'http://hostname/path' - headers = {'sensitive': 'sekret', 'server': 'blah'} - httpretty.register_uri(httpretty.GET, url) - - read_file_or_url(url, headers=headers) - for k in headers.keys(): - self.assertEqual(headers[k], httpretty.last_request().headers[k]) - logs = self.logs.getvalue() - self.assertNotIn(REDACTED, logs) - self.assertIn('sekret', logs) - - @mock.patch(M_PATH + 'readurl') - def test_read_file_or_url_passes_params_to_readurl(self, m_readurl): - """read_file_or_url passes all params through to readurl.""" - url = 'http://hostname/path' - response = 'This is my url content\n' - m_readurl.return_value = response - params =
{'url': url, 'timeout': 1, 'retries': 2, - 'headers': {'somehdr': 'val'}, - 'data': 'data', 'sec_between': 1, - 'ssl_details': {'cert_file': '/path/cert.pem'}, - 'headers_cb': 'headers_cb', 'exception_cb': 'exception_cb'} - self.assertEqual(response, read_file_or_url(**params)) - params.pop('url') # url is passed in as a positional arg - self.assertEqual([mock.call(url, **params)], m_readurl.call_args_list) - - def test_wb_read_url_defaults_honored_by_read_file_or_url_callers(self): - """Readurl param defaults used when unspecified by read_file_or_url - - Param defaults tested are as follows: - retries: 0, additional headers None beyond default, method: GET, - data: None, check_status: True and allow_redirects: True - """ - url = 'http://hostname/path' - - m_response = mock.MagicMock() - - class FakeSession(requests.Session): - @classmethod - def request(cls, **kwargs): - self.assertEqual( - {'url': url, 'allow_redirects': True, 'method': 'GET', - 'headers': { - 'User-Agent': 'Cloud-Init/%s' % ( - version.version_string())}}, - kwargs) - return m_response - - with mock.patch(M_PATH + 'requests.Session') as m_session: - error = requests.exceptions.HTTPError('broke') - m_session.side_effect = [error, FakeSession()] - # assert no retries and check_status == True - with self.assertRaises(UrlError) as context_manager: - response = read_file_or_url(url) - self.assertEqual('broke', str(context_manager.exception)) - # assert default headers, method, url and allow_redirects True - # Success on 2nd call with FakeSession - response = read_file_or_url(url) - self.assertEqual(m_response, response._response) - - -class TestRetryOnUrlExc(CiTestCase): - - def test_do_not_retry_non_urlerror(self): - """When exception is not UrlError return False.""" - myerror = IOError('something unexpected') - self.assertFalse(retry_on_url_exc(msg='', exc=myerror)) - - def test_perform_retries_on_not_found(self): - """When exception is UrlError with a 404 status code return True.""" - myerror = UrlError(cause=RuntimeError( - 'something was not found'), code=NOT_FOUND) - self.assertTrue(retry_on_url_exc(msg='', exc=myerror)) - - def test_perform_retries_on_timeout(self): - """When exception is a requests.Timeout return True.""" - myerror = UrlError(cause=requests.Timeout('something timed out')) - self.assertTrue(retry_on_url_exc(msg='', exc=myerror)) - -# vi: ts=4 expandtab diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py deleted file mode 100644 index 7a3175f3..00000000 --- a/cloudinit/tests/test_util.py +++ /dev/null @@ -1,1187 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information.
- -"""Tests for cloudinit.util""" - -import base64 -import logging -import json -import platform -import pytest - -import cloudinit.util as util -from cloudinit import subp - -from cloudinit.tests.helpers import CiTestCase, mock -from textwrap import dedent - -LOG = logging.getLogger(__name__) - -MOUNT_INFO = [ - '68 0 8:3 / / ro,relatime shared:1 - btrfs /dev/sda1 ro,attr2,inode64', - '153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2' -] - -OS_RELEASE_SLES = dedent("""\ - NAME="SLES" - VERSION="12-SP3" - VERSION_ID="12.3" - PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3" - ID="sles" - ANSI_COLOR="0;32" - CPE_NAME="cpe:/o:suse:sles:12:sp3" -""") - -OS_RELEASE_OPENSUSE = dedent("""\ - NAME="openSUSE Leap" - VERSION="42.3" - ID=opensuse - ID_LIKE="suse" - VERSION_ID="42.3" - PRETTY_NAME="openSUSE Leap 42.3" - ANSI_COLOR="0;32" - CPE_NAME="cpe:/o:opensuse:leap:42.3" - BUG_REPORT_URL="https://bugs.opensuse.org" - HOME_URL="https://www.opensuse.org/" -""") - -OS_RELEASE_OPENSUSE_L15 = dedent("""\ - NAME="openSUSE Leap" - VERSION="15.0" - ID="opensuse-leap" - ID_LIKE="suse opensuse" - VERSION_ID="15.0" - PRETTY_NAME="openSUSE Leap 15.0" - ANSI_COLOR="0;32" - CPE_NAME="cpe:/o:opensuse:leap:15.0" - BUG_REPORT_URL="https://bugs.opensuse.org" - HOME_URL="https://www.opensuse.org/" -""") - -OS_RELEASE_OPENSUSE_TW = dedent("""\ - NAME="openSUSE Tumbleweed" - ID="opensuse-tumbleweed" - ID_LIKE="opensuse suse" - VERSION_ID="20180920" - PRETTY_NAME="openSUSE Tumbleweed" - ANSI_COLOR="0;32" - CPE_NAME="cpe:/o:opensuse:tumbleweed:20180920" - BUG_REPORT_URL="https://bugs.opensuse.org" - HOME_URL="https://www.opensuse.org/" -""") - -OS_RELEASE_CENTOS = dedent("""\ - NAME="CentOS Linux" - VERSION="7 (Core)" - ID="centos" - ID_LIKE="rhel fedora" - VERSION_ID="7" - PRETTY_NAME="CentOS Linux 7 (Core)" - ANSI_COLOR="0;31" - CPE_NAME="cpe:/o:centos:centos:7" - HOME_URL="https://www.centos.org/" - BUG_REPORT_URL="https://bugs.centos.org/" - - CENTOS_MANTISBT_PROJECT="CentOS-7" - CENTOS_MANTISBT_PROJECT_VERSION="7" - REDHAT_SUPPORT_PRODUCT="centos" - REDHAT_SUPPORT_PRODUCT_VERSION="7" -""") - -OS_RELEASE_REDHAT_7 = dedent("""\ - NAME="Red Hat Enterprise Linux Server" - VERSION="7.5 (Maipo)" - ID="rhel" - ID_LIKE="fedora" - VARIANT="Server" - VARIANT_ID="server" - VERSION_ID="7.5" - PRETTY_NAME="Red Hat" - ANSI_COLOR="0;31" - CPE_NAME="cpe:/o:redhat:enterprise_linux:7.5:GA:server" - HOME_URL="https://www.redhat.com/" - BUG_REPORT_URL="https://bugzilla.redhat.com/" - - REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7" - REDHAT_BUGZILLA_PRODUCT_VERSION=7.5 - REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux" - REDHAT_SUPPORT_PRODUCT_VERSION="7.5" -""") - -OS_RELEASE_ALMALINUX_8 = dedent("""\ - NAME="AlmaLinux" - VERSION="8.3 (Purple Manul)" - ID="almalinux" - ID_LIKE="rhel centos fedora" - VERSION_ID="8.3" - PLATFORM_ID="platform:el8" - PRETTY_NAME="AlmaLinux 8.3 (Purple Manul)" - ANSI_COLOR="0;34" - CPE_NAME="cpe:/o:almalinux:almalinux:8.3:GA" - HOME_URL="https://almalinux.org/" - BUG_REPORT_URL="https://bugs.almalinux.org/" - - ALMALINUX_MANTISBT_PROJECT="AlmaLinux-8" - ALMALINUX_MANTISBT_PROJECT_VERSION="8.3" -""") - -OS_RELEASE_EUROLINUX_7 = dedent("""\ - VERSION="7.9 (Minsk)" - ID="eurolinux" - ID_LIKE="rhel scientific centos fedora" - VERSION_ID="7.9" - PRETTY_NAME="EuroLinux 7.9 (Minsk)" - ANSI_COLOR="0;31" - CPE_NAME="cpe:/o:eurolinux:eurolinux:7.9:GA" - HOME_URL="http://www.euro-linux.com/" - BUG_REPORT_URL="mailto:support@euro-linux.com" - REDHAT_BUGZILLA_PRODUCT="EuroLinux 7" - 
REDHAT_BUGZILLA_PRODUCT_VERSION=7.9 - REDHAT_SUPPORT_PRODUCT="EuroLinux" - REDHAT_SUPPORT_PRODUCT_VERSION="7.9" -""") - -OS_RELEASE_EUROLINUX_8 = dedent("""\ - NAME="EuroLinux" - VERSION="8.4 (Vaduz)" - ID="eurolinux" - ID_LIKE="rhel fedora centos" - VERSION_ID="8.4" - PLATFORM_ID="platform:el8" - PRETTY_NAME="EuroLinux 8.4 (Vaduz)" - ANSI_COLOR="0;34" - CPE_NAME="cpe:/o:eurolinux:eurolinux:8" - HOME_URL="https://www.euro-linux.com/" - BUG_REPORT_URL="https://github.com/EuroLinux/eurolinux-distro-bugs-and-rfc/" - REDHAT_SUPPORT_PRODUCT="EuroLinux" - REDHAT_SUPPORT_PRODUCT_VERSION="8" -""") - -OS_RELEASE_ROCKY_8 = dedent("""\ - NAME="Rocky Linux" - VERSION="8.3 (Green Obsidian)" - ID="rocky" - ID_LIKE="rhel fedora" - VERSION_ID="8.3" - PLATFORM_ID="platform:el8" - PRETTY_NAME="Rocky Linux 8.3 (Green Obsidian)" - ANSI_COLOR="0;31" - CPE_NAME="cpe:/o:rocky:rocky:8" - HOME_URL="https://rockylinux.org/" - BUG_REPORT_URL="https://bugs.rockylinux.org/" - ROCKY_SUPPORT_PRODUCT="Rocky Linux" - ROCKY_SUPPORT_PRODUCT_VERSION="8" -""") - -OS_RELEASE_VIRTUOZZO_8 = dedent("""\ - NAME="Virtuozzo Linux" - VERSION="8" - ID="virtuozzo" - ID_LIKE="rhel fedora" - VERSION_ID="8" - PLATFORM_ID="platform:el8" - PRETTY_NAME="Virtuozzo Linux" - ANSI_COLOR="0;31" - CPE_NAME="cpe:/o:virtuozzoproject:vzlinux:8" - HOME_URL="https://www.vzlinux.org" - BUG_REPORT_URL="https://bugs.openvz.org" -""") - -OS_RELEASE_CLOUDLINUX_8 = dedent("""\ - NAME="CloudLinux" - VERSION="8.4 (Valery Rozhdestvensky)" - ID="cloudlinux" - ID_LIKE="rhel fedora centos" - VERSION_ID="8.4" - PLATFORM_ID="platform:el8" - PRETTY_NAME="CloudLinux 8.4 (Valery Rozhdestvensky)" - ANSI_COLOR="0;31" - CPE_NAME="cpe:/o:cloudlinux:cloudlinux:8.4:GA:server" - HOME_URL="https://www.cloudlinux.com/" - BUG_REPORT_URL="https://www.cloudlinux.com/support" -""") - -OS_RELEASE_OPENEULER_20 = dedent("""\ - NAME="openEuler" - VERSION="20.03 (LTS-SP2)" - ID="openEuler" - VERSION_ID="20.03" - PRETTY_NAME="openEuler 20.03 (LTS-SP2)" - ANSI_COLOR="0;31" -""") - -REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)" -REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)" -REDHAT_RELEASE_REDHAT_6 = ( - "Red Hat Enterprise Linux Server release 6.10 (Santiago)") -REDHAT_RELEASE_REDHAT_7 = ( - "Red Hat Enterprise Linux Server release 7.5 (Maipo)") -REDHAT_RELEASE_ALMALINUX_8 = ( - "AlmaLinux release 8.3 (Purple Manul)") -REDHAT_RELEASE_EUROLINUX_7 = "EuroLinux release 7.9 (Minsk)" -REDHAT_RELEASE_EUROLINUX_8 = "EuroLinux release 8.4 (Vaduz)" -REDHAT_RELEASE_ROCKY_8 = ( - "Rocky Linux release 8.3 (Green Obsidian)") -REDHAT_RELEASE_VIRTUOZZO_8 = ( - "Virtuozzo Linux release 8") -REDHAT_RELEASE_CLOUDLINUX_8 = ( - "CloudLinux release 8.4 (Valery Rozhdestvensky)") -OS_RELEASE_DEBIAN = dedent("""\ - PRETTY_NAME="Debian GNU/Linux 9 (stretch)" - NAME="Debian GNU/Linux" - VERSION_ID="9" - VERSION="9 (stretch)" - ID=debian - HOME_URL="https://www.debian.org/" - SUPPORT_URL="https://www.debian.org/support" - BUG_REPORT_URL="https://bugs.debian.org/" -""") - -OS_RELEASE_UBUNTU = dedent("""\ - NAME="Ubuntu"\n - # comment test - VERSION="16.04.3 LTS (Xenial Xerus)"\n - ID=ubuntu\n - ID_LIKE=debian\n - PRETTY_NAME="Ubuntu 16.04.3 LTS"\n - VERSION_ID="16.04"\n - HOME_URL="http://www.ubuntu.com/"\n - SUPPORT_URL="http://help.ubuntu.com/"\n - BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"\n - VERSION_CODENAME=xenial\n - UBUNTU_CODENAME=xenial\n -""") - -OS_RELEASE_PHOTON = ("""\ - NAME="VMware Photon OS" - VERSION="4.0" - ID=photon - VERSION_ID=4.0 - PRETTY_NAME="VMware 
Photon OS/Linux" - ANSI_COLOR="1;34" - HOME_URL="https://vmware.github.io/photon/" - BUG_REPORT_URL="https://github.com/vmware/photon/issues" -""") - - -class FakeCloud(object): - - def __init__(self, hostname, fqdn): - self.hostname = hostname - self.fqdn = fqdn - self.calls = [] - - def get_hostname(self, fqdn=None, metadata_only=None): - myargs = {} - if fqdn is not None: - myargs['fqdn'] = fqdn - if metadata_only is not None: - myargs['metadata_only'] = metadata_only - self.calls.append(myargs) - if fqdn: - return self.fqdn - return self.hostname - - -class TestUtil(CiTestCase): - - def test_parse_mount_info_no_opts_no_arg(self): - result = util.parse_mount_info('/home', MOUNT_INFO, LOG) - self.assertEqual(('/dev/sda2', 'xfs', '/home'), result) - - def test_parse_mount_info_no_opts_arg(self): - result = util.parse_mount_info('/home', MOUNT_INFO, LOG, False) - self.assertEqual(('/dev/sda2', 'xfs', '/home'), result) - - def test_parse_mount_info_with_opts(self): - result = util.parse_mount_info('/', MOUNT_INFO, LOG, True) - self.assertEqual( - ('/dev/sda1', 'btrfs', '/', 'ro,relatime'), - result - ) - - @mock.patch('cloudinit.util.get_mount_info') - def test_mount_is_rw(self, m_mount_info): - m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'rw,relatime') - is_rw = util.mount_is_read_write('/') - self.assertEqual(is_rw, True) - - @mock.patch('cloudinit.util.get_mount_info') - def test_mount_is_ro(self, m_mount_info): - m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'ro,relatime') - is_rw = util.mount_is_read_write('/') - self.assertEqual(is_rw, False) - - -class TestUptime(CiTestCase): - - @mock.patch('cloudinit.util.boottime') - @mock.patch('cloudinit.util.os.path.exists') - @mock.patch('cloudinit.util.time.time') - def test_uptime_non_linux_path(self, m_time, m_exists, m_boottime): - boottime = 1000.0 - uptime = 10.0 - m_boottime.return_value = boottime - m_time.return_value = boottime + uptime - m_exists.return_value = False - result = util.uptime() - self.assertEqual(str(uptime), result) - - -class TestShellify(CiTestCase): - - def test_input_dict_raises_type_error(self): - self.assertRaisesRegex( - TypeError, 'Input.*was.*dict.*xpected', - util.shellify, {'mykey': 'myval'}) - - def test_input_str_raises_type_error(self): - self.assertRaisesRegex( - TypeError, 'Input.*was.*str.*xpected', util.shellify, "foobar") - - def test_value_with_int_raises_type_error(self): - self.assertRaisesRegex( - TypeError, 'shellify.*int', util.shellify, ["foo", 1]) - - def test_supports_strings_and_lists(self): - self.assertEqual( - '\n'.join(["#!/bin/sh", "echo hi mom", "'echo' 'hi dad'", - "'echo' 'hi' 'sis'", ""]), - util.shellify(["echo hi mom", ["echo", "hi dad"], - ('echo', 'hi', 'sis')])) - - def test_supports_comments(self): - self.assertEqual( - '\n'.join(["#!/bin/sh", "echo start", "echo end", ""]), - util.shellify(["echo start", None, "echo end"])) - - -class TestGetHostnameFqdn(CiTestCase): - - def test_get_hostname_fqdn_from_only_cfg_fqdn(self): - """When cfg only has the fqdn key, derive hostname and fqdn from it.""" - hostname, fqdn = util.get_hostname_fqdn( - cfg={'fqdn': 'myhost.domain.com'}, cloud=None) - self.assertEqual('myhost', hostname) - self.assertEqual('myhost.domain.com', fqdn) - - def test_get_hostname_fqdn_from_cfg_fqdn_and_hostname(self): - """When cfg has both fqdn and hostname keys, return them.""" - hostname, fqdn = util.get_hostname_fqdn( - cfg={'fqdn': 'myhost.domain.com', 'hostname': 'other'}, cloud=None) - self.assertEqual('other', hostname) - 
self.assertEqual('myhost.domain.com', fqdn) - - def test_get_hostname_fqdn_from_cfg_hostname_with_domain(self): - """When cfg has only hostname key which represents a fqdn, use that.""" - hostname, fqdn = util.get_hostname_fqdn( - cfg={'hostname': 'myhost.domain.com'}, cloud=None) - self.assertEqual('myhost', hostname) - self.assertEqual('myhost.domain.com', fqdn) - - def test_get_hostname_fqdn_from_cfg_hostname_without_domain(self): - """When cfg has a hostname without a '.' query cloud.get_hostname.""" - mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') - hostname, fqdn = util.get_hostname_fqdn( - cfg={'hostname': 'myhost'}, cloud=mycloud) - self.assertEqual('myhost', hostname) - self.assertEqual('cloudhost.mycloud.com', fqdn) - self.assertEqual( - [{'fqdn': True, 'metadata_only': False}], mycloud.calls) - - def test_get_hostname_fqdn_from_without_fqdn_or_hostname(self): - """When cfg has neither hostname nor fqdn cloud.get_hostname.""" - mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') - hostname, fqdn = util.get_hostname_fqdn(cfg={}, cloud=mycloud) - self.assertEqual('cloudhost', hostname) - self.assertEqual('cloudhost.mycloud.com', fqdn) - self.assertEqual( - [{'fqdn': True, 'metadata_only': False}, - {'metadata_only': False}], mycloud.calls) - - def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self): - """Calls to cloud.get_hostname pass the metadata_only parameter.""" - mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') - _hn, _fqdn = util.get_hostname_fqdn( - cfg={}, cloud=mycloud, metadata_only=True) - self.assertEqual( - [{'fqdn': True, 'metadata_only': True}, - {'metadata_only': True}], mycloud.calls) - - -class TestBlkid(CiTestCase): - ids = { - "id01": "1111-1111", - "id02": "22222222-2222", - "id03": "33333333-3333", - "id04": "44444444-4444", - "id05": "55555555-5555-5555-5555-555555555555", - "id06": "66666666-6666-6666-6666-666666666666", - "id07": "52894610484658920398", - "id08": "86753098675309867530", - "id09": "99999999-9999-9999-9999-999999999999", - } - - blkid_out = dedent("""\ - /dev/loop0: TYPE="squashfs" - /dev/loop1: TYPE="squashfs" - /dev/loop2: TYPE="squashfs" - /dev/loop3: TYPE="squashfs" - /dev/sda1: UUID="{id01}" TYPE="vfat" PARTUUID="{id02}" - /dev/sda2: UUID="{id03}" TYPE="ext4" PARTUUID="{id04}" - /dev/sda3: UUID="{id05}" TYPE="ext4" PARTUUID="{id06}" - /dev/sda4: LABEL="default" UUID="{id07}" UUID_SUB="{id08}" """ - """TYPE="zfs_member" PARTUUID="{id09}" - /dev/loop4: TYPE="squashfs" - """) - - maxDiff = None - - def _get_expected(self): - return ({ - "/dev/loop0": {"DEVNAME": "/dev/loop0", "TYPE": "squashfs"}, - "/dev/loop1": {"DEVNAME": "/dev/loop1", "TYPE": "squashfs"}, - "/dev/loop2": {"DEVNAME": "/dev/loop2", "TYPE": "squashfs"}, - "/dev/loop3": {"DEVNAME": "/dev/loop3", "TYPE": "squashfs"}, - "/dev/loop4": {"DEVNAME": "/dev/loop4", "TYPE": "squashfs"}, - "/dev/sda1": {"DEVNAME": "/dev/sda1", "TYPE": "vfat", - "UUID": self.ids["id01"], - "PARTUUID": self.ids["id02"]}, - "/dev/sda2": {"DEVNAME": "/dev/sda2", "TYPE": "ext4", - "UUID": self.ids["id03"], - "PARTUUID": self.ids["id04"]}, - "/dev/sda3": {"DEVNAME": "/dev/sda3", "TYPE": "ext4", - "UUID": self.ids["id05"], - "PARTUUID": self.ids["id06"]}, - "/dev/sda4": {"DEVNAME": "/dev/sda4", "TYPE": "zfs_member", - "LABEL": "default", - "UUID": self.ids["id07"], - "UUID_SUB": self.ids["id08"], - "PARTUUID": self.ids["id09"]}, - }) - - @mock.patch("cloudinit.subp.subp") - def test_functional_blkid(self, m_subp): - m_subp.return_value = ( - 
self.blkid_out.format(**self.ids), "") - self.assertEqual(self._get_expected(), util.blkid()) - m_subp.assert_called_with(["blkid", "-o", "full"], capture=True, - decode="replace") - - @mock.patch("cloudinit.subp.subp") - def test_blkid_no_cache_uses_no_cache(self, m_subp): - """blkid should turn off cache if disable_cache is true.""" - m_subp.return_value = ( - self.blkid_out.format(**self.ids), "") - self.assertEqual(self._get_expected(), - util.blkid(disable_cache=True)) - m_subp.assert_called_with(["blkid", "-o", "full", "-c", "/dev/null"], - capture=True, decode="replace") - - -@mock.patch('cloudinit.subp.subp') -class TestUdevadmSettle(CiTestCase): - def test_with_no_params(self, m_subp): - """called with no parameters.""" - util.udevadm_settle() - m_subp.assert_called_once_with(['udevadm', 'settle']) - - def test_with_exists_and_not_exists(self, m_subp): - """with exists=file where file does not exist should invoke subp.""" - mydev = self.tmp_path("mydev") - util.udevadm_settle(exists=mydev) - m_subp.assert_called_once_with( - ['udevadm', 'settle', '--exit-if-exists=%s' % mydev]) - - def test_with_exists_and_file_exists(self, m_subp): - """with exists=file where file does exist should not invoke subp.""" - mydev = self.tmp_path("mydev") - util.write_file(mydev, "foo\n") - util.udevadm_settle(exists=mydev) - self.assertIsNone(m_subp.call_args) - - def test_with_timeout_int(self, m_subp): - """timeout can be an integer.""" - timeout = 9 - util.udevadm_settle(timeout=timeout) - m_subp.assert_called_once_with( - ['udevadm', 'settle', '--timeout=%s' % timeout]) - - def test_with_timeout_string(self, m_subp): - """timeout can be a string.""" - timeout = "555" - util.udevadm_settle(timeout=timeout) - m_subp.assert_called_once_with( - ['udevadm', 'settle', '--timeout=%s' % timeout]) - - def test_with_exists_and_timeout(self, m_subp): - """test call with both exists and timeout.""" - mydev = self.tmp_path("mydev") - timeout = "3" - util.udevadm_settle(exists=mydev, timeout=timeout) - m_subp.assert_called_once_with( - ['udevadm', 'settle', '--exit-if-exists=%s' % mydev, - '--timeout=%s' % timeout]) - - def test_subp_exception_raises_to_caller(self, m_subp): - m_subp.side_effect = subp.ProcessExecutionError("BOOM") - self.assertRaises(subp.ProcessExecutionError, util.udevadm_settle) - - -@mock.patch('os.path.exists') -class TestGetLinuxDistro(CiTestCase): - - def setUp(self): - # python2 has no lru_cache, and therefore, no cache_clear() - if hasattr(util.get_linux_distro, "cache_clear"): - util.get_linux_distro.cache_clear() - - @classmethod - def os_release_exists(self, path): - """Side effect function""" - if path == '/etc/os-release': - return 1 - - @classmethod - def redhat_release_exists(self, path): - """Side effect function""" - if path == '/etc/redhat-release': - return 1 - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists): - """Verify we get the correct name if the os-release file has - the distro name in quotes""" - m_os_release.return_value = OS_RELEASE_SLES - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('sles', '12.3', platform.machine()), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_distro_bare_name(self, m_os_release, m_path_exists): - """Verify we get the correct name if the os-release file does not - have the distro name in quotes""" - m_os_release.return_value = OS_RELEASE_UBUNTU - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
- dist = util.get_linux_distro() - self.assertEqual(('ubuntu', '16.04', 'xenial'), dist) - - @mock.patch('platform.system') - @mock.patch('platform.release') - @mock.patch('cloudinit.util._parse_redhat_release') - def test_get_linux_freebsd(self, m_parse_redhat_release, - m_platform_release, - m_platform_system, m_path_exists): - """Verify we get the correct name and release name on FreeBSD.""" - m_path_exists.return_value = False - m_platform_release.return_value = '12.0-RELEASE-p10' - m_platform_system.return_value = 'FreeBSD' - m_parse_redhat_release.return_value = {} - util.is_BSD.cache_clear() - dist = util.get_linux_distro() - self.assertEqual(('freebsd', '12.0-RELEASE-p10', ''), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_centos6(self, m_os_release, m_path_exists): - """Verify we get the correct name and release name on CentOS 6.""" - m_os_release.return_value = REDHAT_RELEASE_CENTOS_6 - m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists - dist = util.get_linux_distro() - self.assertEqual(('centos', '6.10', 'Final'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_centos7_redhat_release(self, m_os_release, m_exists): - """Verify the correct release info on CentOS 7 without os-release.""" - m_os_release.return_value = REDHAT_RELEASE_CENTOS_7 - m_exists.side_effect = TestGetLinuxDistro.redhat_release_exists - dist = util.get_linux_distro() - self.assertEqual(('centos', '7.5.1804', 'Core'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_redhat7_osrelease(self, m_os_release, m_path_exists): - """Verify redhat 7 read from os-release.""" - m_os_release.return_value = OS_RELEASE_REDHAT_7 - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('redhat', '7.5', 'Maipo'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_redhat7_rhrelease(self, m_os_release, m_path_exists): - """Verify redhat 7 read from redhat-release.""" - m_os_release.return_value = REDHAT_RELEASE_REDHAT_7 - m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists - dist = util.get_linux_distro() - self.assertEqual(('redhat', '7.5', 'Maipo'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_redhat6_rhrelease(self, m_os_release, m_path_exists): - """Verify redhat 6 read from redhat-release.""" - m_os_release.return_value = REDHAT_RELEASE_REDHAT_6 - m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists - dist = util.get_linux_distro() - self.assertEqual(('redhat', '6.10', 'Santiago'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_copr_centos(self, m_os_release, m_path_exists): - """Verify we get the correct name and release name on COPR CentOS.""" - m_os_release.return_value = OS_RELEASE_CENTOS - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('centos', '7', 'Core'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_almalinux8_rhrelease(self, m_os_release, m_path_exists): - """Verify almalinux 8 read from redhat-release.""" - m_os_release.return_value = REDHAT_RELEASE_ALMALINUX_8 - m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists - dist = util.get_linux_distro() - self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_almalinux8_osrelease(self, m_os_release, m_path_exists): - """Verify almalinux 8 
read from os-release.""" - m_os_release.return_value = OS_RELEASE_ALMALINUX_8 - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_eurolinux7_rhrelease(self, m_os_release, m_path_exists): - """Verify eurolinux 7 read from redhat-release.""" - m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_7 - m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists - dist = util.get_linux_distro() - self.assertEqual(('eurolinux', '7.9', 'Minsk'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_eurolinux7_osrelease(self, m_os_release, m_path_exists): - """Verify eurolinux 7 read from os-release.""" - m_os_release.return_value = OS_RELEASE_EUROLINUX_7 - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('eurolinux', '7.9', 'Minsk'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_eurolinux8_rhrelease(self, m_os_release, m_path_exists): - """Verify eurolinux 8 read from redhat-release.""" - m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_8 - m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists - dist = util.get_linux_distro() - self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_eurolinux8_osrelease(self, m_os_release, m_path_exists): - """Verify eurolinux 8 read from os-release.""" - m_os_release.return_value = OS_RELEASE_EUROLINUX_8 - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists): - """Verify rocky linux 8 read from redhat-release.""" - m_os_release.return_value = REDHAT_RELEASE_ROCKY_8 - m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists - dist = util.get_linux_distro() - self.assertEqual(('rocky', '8.3', 'Green Obsidian'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_rocky8_osrelease(self, m_os_release, m_path_exists): - """Verify rocky linux 8 read from os-release.""" - m_os_release.return_value = OS_RELEASE_ROCKY_8 - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('rocky', '8.3', 'Green Obsidian'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_virtuozzo8_rhrelease(self, m_os_release, m_path_exists): - """Verify virtuozzo linux 8 read from redhat-release.""" - m_os_release.return_value = REDHAT_RELEASE_VIRTUOZZO_8 - m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists - dist = util.get_linux_distro() - self.assertEqual(('virtuozzo', '8', 'Virtuozzo Linux'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_virtuozzo8_osrelease(self, m_os_release, m_path_exists): - """Verify virtuozzo linux 8 read from os-release.""" - m_os_release.return_value = OS_RELEASE_VIRTUOZZO_8 - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('virtuozzo', '8', 'Virtuozzo Linux'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_cloud8_rhrelease(self, m_os_release, m_path_exists): - """Verify cloudlinux 8 read from redhat-release.""" - m_os_release.return_value 
= REDHAT_RELEASE_CLOUDLINUX_8 - m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists - dist = util.get_linux_distro() - self.assertEqual(('cloudlinux', '8.4', 'Valery Rozhdestvensky'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_cloud8_osrelease(self, m_os_release, m_path_exists): - """Verify cloudlinux 8 read from os-release.""" - m_os_release.return_value = OS_RELEASE_CLOUDLINUX_8 - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('cloudlinux', '8.4', 'Valery Rozhdestvensky'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_debian(self, m_os_release, m_path_exists): - """Verify we get the correct name and release name on Debian.""" - m_os_release.return_value = OS_RELEASE_DEBIAN - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('debian', '9', 'stretch'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_openeuler(self, m_os_release, m_path_exists): - """Verify get the correct name and release name on Openeuler.""" - m_os_release.return_value = OS_RELEASE_OPENEULER_20 - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('openEuler', '20.03', 'LTS-SP2'), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_opensuse(self, m_os_release, m_path_exists): - """Verify we get the correct name and machine arch on openSUSE - prior to openSUSE Leap 15. - """ - m_os_release.return_value = OS_RELEASE_OPENSUSE - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('opensuse', '42.3', platform.machine()), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_opensuse_l15(self, m_os_release, m_path_exists): - """Verify we get the correct name and machine arch on openSUSE - for openSUSE Leap 15.0 and later. 
- """ - m_os_release.return_value = OS_RELEASE_OPENSUSE_L15 - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual(('opensuse-leap', '15.0', platform.machine()), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_opensuse_tw(self, m_os_release, m_path_exists): - """Verify we get the correct name and machine arch on openSUSE - for openSUSE Tumbleweed - """ - m_os_release.return_value = OS_RELEASE_OPENSUSE_TW - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual( - ('opensuse-tumbleweed', '20180920', platform.machine()), dist) - - @mock.patch('cloudinit.util.load_file') - def test_get_linux_photon_os_release(self, m_os_release, m_path_exists): - """Verify we get the correct name and machine arch on PhotonOS""" - m_os_release.return_value = OS_RELEASE_PHOTON - m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists - dist = util.get_linux_distro() - self.assertEqual( - ('photon', '4.0', 'VMware Photon OS/Linux'), dist) - - @mock.patch('platform.system') - @mock.patch('platform.dist', create=True) - def test_get_linux_distro_no_data(self, m_platform_dist, - m_platform_system, m_path_exists): - """Verify we get no information if os-release does not exist""" - m_platform_dist.return_value = ('', '', '') - m_platform_system.return_value = "Linux" - m_path_exists.return_value = 0 - dist = util.get_linux_distro() - self.assertEqual(('', '', ''), dist) - - @mock.patch('platform.system') - @mock.patch('platform.dist', create=True) - def test_get_linux_distro_no_impl(self, m_platform_dist, - m_platform_system, m_path_exists): - """Verify we get an empty tuple when no information exists and - Exceptions are not propagated""" - m_platform_dist.side_effect = Exception() - m_platform_system.return_value = "Linux" - m_path_exists.return_value = 0 - dist = util.get_linux_distro() - self.assertEqual(('', '', ''), dist) - - @mock.patch('platform.system') - @mock.patch('platform.dist', create=True) - def test_get_linux_distro_plat_data(self, m_platform_dist, - m_platform_system, m_path_exists): - """Verify we get the correct platform information""" - m_platform_dist.return_value = ('foo', '1.1', 'aarch64') - m_platform_system.return_value = "Linux" - m_path_exists.return_value = 0 - dist = util.get_linux_distro() - self.assertEqual(('foo', '1.1', 'aarch64'), dist) - - -class TestGetVariant: - @pytest.mark.parametrize('info, expected_variant', [ - ({'system': 'Linux', 'dist': ('almalinux',)}, 'almalinux'), - ({'system': 'linux', 'dist': ('alpine',)}, 'alpine'), - ({'system': 'linux', 'dist': ('arch',)}, 'arch'), - ({'system': 'linux', 'dist': ('centos',)}, 'centos'), - ({'system': 'linux', 'dist': ('cloudlinux',)}, 'cloudlinux'), - ({'system': 'linux', 'dist': ('debian',)}, 'debian'), - ({'system': 'linux', 'dist': ('eurolinux',)}, 'eurolinux'), - ({'system': 'linux', 'dist': ('fedora',)}, 'fedora'), - ({'system': 'linux', 'dist': ('openEuler',)}, 'openeuler'), - ({'system': 'linux', 'dist': ('photon',)}, 'photon'), - ({'system': 'linux', 'dist': ('rhel',)}, 'rhel'), - ({'system': 'linux', 'dist': ('rocky',)}, 'rocky'), - ({'system': 'linux', 'dist': ('suse',)}, 'suse'), - ({'system': 'linux', 'dist': ('virtuozzo',)}, 'virtuozzo'), - ({'system': 'linux', 'dist': ('ubuntu',)}, 'ubuntu'), - ({'system': 'linux', 'dist': ('linuxmint',)}, 'ubuntu'), - ({'system': 'linux', 'dist': ('mint',)}, 'ubuntu'), - ({'system': 'linux', 'dist': ('redhat',)}, 'rhel'), - 
({'system': 'linux', 'dist': ('opensuse',)}, 'suse'), - ({'system': 'linux', 'dist': ('opensuse-tumbleweed',)}, 'suse'), - ({'system': 'linux', 'dist': ('opensuse-leap',)}, 'suse'), - ({'system': 'linux', 'dist': ('sles',)}, 'suse'), - ({'system': 'linux', 'dist': ('sle_hpc',)}, 'suse'), - ({'system': 'linux', 'dist': ('my_distro',)}, 'linux'), - ({'system': 'Windows', 'dist': ('dontcare',)}, 'windows'), - ({'system': 'Darwin', 'dist': ('dontcare',)}, 'darwin'), - ({'system': 'Freebsd', 'dist': ('dontcare',)}, 'freebsd'), - ({'system': 'Netbsd', 'dist': ('dontcare',)}, 'netbsd'), - ({'system': 'Openbsd', 'dist': ('dontcare',)}, 'openbsd'), - ({'system': 'Dragonfly', 'dist': ('dontcare',)}, 'dragonfly'), - ]) - def test_get_variant(self, info, expected_variant): - """Verify we get the correct variant name""" - assert util._get_variant(info) == expected_variant - - -class TestJsonDumps(CiTestCase): - def test_is_str(self): - """json_dumps should return a string.""" - self.assertTrue(isinstance(util.json_dumps({'abc': '123'}), str)) - - def test_utf8(self): - smiley = '\\ud83d\\ude03' - self.assertEqual( - {'smiley': smiley}, - json.loads(util.json_dumps({'smiley': smiley}))) - - def test_non_utf8(self): - blob = b'\xba\x03Qx-#y\xea' - self.assertEqual( - {'blob': 'ci-b64:' + base64.b64encode(blob).decode('utf-8')}, - json.loads(util.json_dumps({'blob': blob}))) - - -@mock.patch('os.path.exists') -class TestIsLXD(CiTestCase): - - def test_is_lxd_true_on_sock_device(self, m_exists): - """When lxd's /dev/lxd/sock exists, is_lxd returns true.""" - m_exists.return_value = True - self.assertTrue(util.is_lxd()) - m_exists.assert_called_once_with('/dev/lxd/sock') - - def test_is_lxd_false_when_sock_device_absent(self, m_exists): - """When lxd's /dev/lxd/sock is absent, is_lxd returns false.""" - m_exists.return_value = False - self.assertFalse(util.is_lxd()) - m_exists.assert_called_once_with('/dev/lxd/sock') - - -class TestReadCcFromCmdline: - - @pytest.mark.parametrize( - "cmdline,expected_cfg", - [ - # Return None if cmdline has no cc:end_cc content. - (CiTestCase.random_string(), None), - # Return None if YAML content is empty string. - ('foo cc: end_cc bar', None), - # Return expected dictionary without trailing end_cc marker. - ('foo cc: ssh_pwauth: true', {'ssh_pwauth': True}), - # Return expected dictionary w escaped newline and no end_cc. - ('foo cc: ssh_pwauth: true\\n', {'ssh_pwauth': True}), - # Return expected dictionary of yaml between cc: and end_cc. - ('foo cc: ssh_pwauth: true end_cc bar', {'ssh_pwauth': True}), - # Return dict with list value w escaped newline, no end_cc. - ( - 'cc: ssh_import_id: [smoser, kirkland]\\n', - {'ssh_import_id': ['smoser', 'kirkland']} - ), - # Parse urlencoded brackets in yaml content. - ( - 'cc: ssh_import_id: %5Bsmoser, kirkland%5D end_cc', - {'ssh_import_id': ['smoser', 'kirkland']} - ), - # Parse complete urlencoded yaml content. - ( - 'cc: ssh_import_id%3A%20%5Buser1%2C%20user2%5D end_cc', - {'ssh_import_id': ['user1', 'user2']} - ), - # Parse nested dictionary in yaml content. - ( - 'cc: ntp: {enabled: true, ntp_client: myclient} end_cc', - {'ntp': {'enabled': True, 'ntp_client': 'myclient'}} - ), - # Parse single mapping value in yaml content. - ('cc: ssh_import_id: smoser end_cc', {'ssh_import_id': 'smoser'}), - # Parse multiline content with multiple mapping and nested lists. 
- ( - ('cc: ssh_import_id: [smoser, bob]\\n' - 'runcmd: [ [ ls, -l ], echo hi ] end_cc'), - {'ssh_import_id': ['smoser', 'bob'], - 'runcmd': [['ls', '-l'], 'echo hi']} - ), - # Parse multiline encoded content w/ mappings and nested lists. - ( - ('cc: ssh_import_id: %5Bsmoser, bob%5D\\n' - 'runcmd: [ [ ls, -l ], echo hi ] end_cc'), - {'ssh_import_id': ['smoser', 'bob'], - 'runcmd': [['ls', '-l'], 'echo hi']} - ), - # test encoded escaped newlines work. - # - # unquote(encoded_content) - # 'ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ]' - ( - ('cc: ' + - ('ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%5Cn' - 'runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C' - '%20echo%20hi%20%5D') + ' end_cc'), - {'ssh_import_id': ['smoser', 'bob'], - 'runcmd': [['ls', '-l'], 'echo hi']} - ), - # test encoded newlines work. - # - # unquote(encoded_content) - # 'ssh_import_id: [smoser, bob]\nruncmd: [ [ ls, -l ], echo hi ]' - ( - ("cc: " + - ('ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%0A' - 'runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C' - '%20echo%20hi%20%5D') + ' end_cc'), - {'ssh_import_id': ['smoser', 'bob'], - 'runcmd': [['ls', '-l'], 'echo hi']} - ), - # Parse and merge multiple yaml content sections. - ( - ('cc:ssh_import_id: [smoser, bob] end_cc ' - 'cc: runcmd: [ [ ls, -l ] ] end_cc'), - {'ssh_import_id': ['smoser', 'bob'], - 'runcmd': [['ls', '-l']]} - ), - # Parse and merge multiple encoded yaml content sections. - ( - ('cc:ssh_import_id%3A%20%5Bsmoser%5D end_cc ' - 'cc:runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%20%5D end_cc'), - {'ssh_import_id': ['smoser'], 'runcmd': [['ls', '-l']]} - ), - ] - ) - def test_read_conf_from_cmdline_config(self, expected_cfg, cmdline): - assert expected_cfg == util.read_conf_from_cmdline(cmdline=cmdline) - - -class TestMountCb: - """Tests for ``util.mount_cb``. - - These tests consider the "unit" under test to be ``util.mount_cb`` and - ``util.unmounter``, which is only used by ``mount_cb``. 
- - TODO: Test default mtype determination - TODO: Test the if/else branch that actually performs the mounting operation - """ - - @pytest.yield_fixture - def already_mounted_device_and_mountdict(self): - """Mock an already-mounted device, and yield (device, mount dict)""" - device = "/dev/fake0" - mountpoint = "/mnt/fake" - with mock.patch("cloudinit.util.subp.subp"): - with mock.patch("cloudinit.util.mounts") as m_mounts: - mounts = {device: {"mountpoint": mountpoint}} - m_mounts.return_value = mounts - yield device, mounts[device] - - @pytest.fixture - def already_mounted_device(self, already_mounted_device_and_mountdict): - """already_mounted_device_and_mountdict, but return only the device""" - return already_mounted_device_and_mountdict[0] - - @pytest.mark.parametrize( - "mtype,expected", - [ - # While the filesystem is called iso9660, the mount type is cd9660 - ("iso9660", "cd9660"), - # vfat is generally called "msdos" on BSD - ("vfat", "msdos"), - # judging from man pages, only FreeBSD has this alias - ("msdosfs", "msdos"), - # Test happy path - ("ufs", "ufs") - ], - ) - @mock.patch("cloudinit.util.is_Linux", autospec=True) - @mock.patch("cloudinit.util.is_BSD", autospec=True) - @mock.patch("cloudinit.util.subp.subp") - @mock.patch("cloudinit.temp_utils.tempdir", autospec=True) - def test_normalize_mtype_on_bsd( - self, m_tmpdir, m_subp, m_is_BSD, m_is_Linux, mtype, expected - ): - m_is_BSD.return_value = True - m_is_Linux.return_value = False - m_tmpdir.return_value.__enter__ = mock.Mock( - autospec=True, return_value="/tmp/fake" - ) - m_tmpdir.return_value.__exit__ = mock.Mock( - autospec=True, return_value=True - ) - callback = mock.Mock(autospec=True) - - util.mount_cb('/dev/fake0', callback, mtype=mtype) - assert mock.call( - ["mount", "-o", "ro", "-t", expected, "/dev/fake0", "/tmp/fake"], - update_env=None) in m_subp.call_args_list - - @pytest.mark.parametrize("invalid_mtype", [int(0), float(0.0), dict()]) - def test_typeerror_raised_for_invalid_mtype(self, invalid_mtype): - with pytest.raises(TypeError): - util.mount_cb(mock.Mock(), mock.Mock(), mtype=invalid_mtype) - - @mock.patch("cloudinit.util.subp.subp") - def test_already_mounted_does_not_mount_or_umount_anything( - self, m_subp, already_mounted_device - ): - util.mount_cb(already_mounted_device, mock.Mock()) - - assert 0 == m_subp.call_count - - @pytest.mark.parametrize("trailing_slash_in_mounts", ["/", ""]) - def test_already_mounted_calls_callback( - self, trailing_slash_in_mounts, already_mounted_device_and_mountdict - ): - device, mount_dict = already_mounted_device_and_mountdict - mountpoint = mount_dict["mountpoint"] - mount_dict["mountpoint"] += trailing_slash_in_mounts - - callback = mock.Mock() - util.mount_cb(device, callback) - - # The mountpoint passed to callback should always have a trailing - # slash, regardless of the input - assert [mock.call(mountpoint + "/")] == callback.call_args_list - - def test_already_mounted_calls_callback_with_data( - self, already_mounted_device - ): - callback = mock.Mock() - util.mount_cb( - already_mounted_device, callback, data=mock.sentinel.data - ) - - assert [ - mock.call(mock.ANY, mock.sentinel.data) - ] == callback.call_args_list - - -@mock.patch("cloudinit.util.write_file") -class TestEnsureFile: - """Tests for ``cloudinit.util.ensure_file``.""" - - def test_parameters_passed_through(self, m_write_file): - """Test the parameters in the signature are passed to write_file.""" - util.ensure_file( - mock.sentinel.path, - mode=mock.sentinel.mode, - 
preserve_mode=mock.sentinel.preserve_mode, - ) - - assert 1 == m_write_file.call_count - args, kwargs = m_write_file.call_args - assert (mock.sentinel.path,) == args - assert mock.sentinel.mode == kwargs["mode"] - assert mock.sentinel.preserve_mode == kwargs["preserve_mode"] - - @pytest.mark.parametrize( - "kwarg,expected", - [ - # Files should be world-readable by default - ("mode", 0o644), - # The previous behaviour of not preserving mode should be retained - ("preserve_mode", False), - ], - ) - def test_defaults(self, m_write_file, kwarg, expected): - """Test that ensure_file defaults appropriately.""" - util.ensure_file(mock.sentinel.path) - - assert 1 == m_write_file.call_count - _args, kwargs = m_write_file.call_args - assert expected == kwargs[kwarg] - - def test_static_parameters_are_passed(self, m_write_file): - """Test that the static write_file parameters are passed correctly.""" - util.ensure_file(mock.sentinel.path) - - assert 1 == m_write_file.call_count - _args, kwargs = m_write_file.call_args - assert "" == kwargs["content"] - assert "ab" == kwargs["omode"] - - -@mock.patch("cloudinit.util.grp.getgrnam") -@mock.patch("cloudinit.util.os.setgid") -@mock.patch("cloudinit.util.os.umask") -class TestRedirectOutputPreexecFn: - """This tests specifically the preexec_fn used in redirect_output.""" - - @pytest.fixture(params=["outfmt", "errfmt"]) - def preexec_fn(self, request): - """A fixture to gather the preexec_fn used by redirect_output. - - This enables simpler direct testing of it, and parameterises any tests - using it to cover both the stdout and stderr code paths. - """ - test_string = "| piped output to invoke subprocess" - if request.param == "outfmt": - args = (test_string, None) - elif request.param == "errfmt": - args = (None, test_string) - with mock.patch("cloudinit.util.subprocess.Popen") as m_popen: - util.redirect_output(*args) - - assert 1 == m_popen.call_count - _args, kwargs = m_popen.call_args - assert "preexec_fn" in kwargs, "preexec_fn not passed to Popen" - return kwargs["preexec_fn"] - - def test_preexec_fn_sets_umask( - self, m_os_umask, _m_setgid, _m_getgrnam, preexec_fn - ): - """preexec_fn should set a mask that avoids world-readable files.""" - preexec_fn() - - assert [mock.call(0o037)] == m_os_umask.call_args_list - - def test_preexec_fn_sets_group_id_if_adm_group_present( - self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn - ): - """We should setgid to adm if present, so files are owned by them.""" - fake_group = mock.Mock(gr_gid=mock.sentinel.gr_gid) - m_getgrnam.return_value = fake_group - - preexec_fn() - - assert [mock.call("adm")] == m_getgrnam.call_args_list - assert [mock.call(mock.sentinel.gr_gid)] == m_setgid.call_args_list - - def test_preexec_fn_handles_absent_adm_group_gracefully( - self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn - ): - """We should handle an absent adm group gracefully.""" - m_getgrnam.side_effect = KeyError("getgrnam(): name not found: 'adm'") - - preexec_fn() - - assert 0 == m_setgid.call_count - -# vi: ts=4 expandtab diff --git a/cloudinit/tests/test_version.py b/cloudinit/tests/test_version.py deleted file mode 100644 index 778a762c..00000000 --- a/cloudinit/tests/test_version.py +++ /dev/null @@ -1,31 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information.
- -from unittest import mock - -from cloudinit.tests.helpers import CiTestCase -from cloudinit import version - - -class TestExportsFeatures(CiTestCase): - def test_has_network_config_v1(self): - self.assertIn('NETWORK_CONFIG_V1', version.FEATURES) - - def test_has_network_config_v2(self): - self.assertIn('NETWORK_CONFIG_V2', version.FEATURES) - - -class TestVersionString(CiTestCase): - @mock.patch("cloudinit.version._PACKAGED_VERSION", - "17.2-3-gb05b9972-0ubuntu1") - def test_package_version_respected(self): - """If _PACKAGED_VERSION is filled in, then it should be returned.""" - self.assertEqual("17.2-3-gb05b9972-0ubuntu1", version.version_string()) - - @mock.patch("cloudinit.version._PACKAGED_VERSION", "@@PACKAGED_VERSION@@") - @mock.patch("cloudinit.version.__VERSION__", "17.2") - def test_package_version_skipped(self): - """If _PACKAGED_VERSION is not modified, then return __VERSION__.""" - self.assertEqual("17.2", version.version_string()) - - -# vi: ts=4 expandtab diff --git a/doc/rtd/topics/testing.rst b/doc/rtd/topics/testing.rst index d882e036..7a1e3eec 100644 --- a/doc/rtd/topics/testing.rst +++ b/doc/rtd/topics/testing.rst @@ -3,8 +3,7 @@ Testing ******* cloud-init has both unit tests and integration tests. Unit tests can -be found in-tree alongside the source code, as well as -at ``tests/unittests``. Integration tests can be found at +be found at ``tests/unittests``. Integration tests can be found at ``tests/integration_tests``. Documentation specifically for integration tests can be found on the :ref:`integration_tests` page, but the guidelines specified below apply to both types of tests. @@ -36,6 +35,16 @@ Test Layout subclass (indirectly) from ``TestCase`` (e.g. `TestPrependBaseCommands`_) +* Unit tests and integration tests are located under cloud-init/tests + + * For consistency, unit test files should have a matching name and + directory location under ``tests/unittests`` + + * For example: the expected test file for code in + ``cloudinit/path/to/file.py`` is + ``tests/unittests/path/to/test_file.py`` + + ``pytest`` Tests ---------------- diff --git a/setup.py b/setup.py index 58fddf0f..100b07fe 100755 --- a/setup.py +++ b/setup.py @@ -291,7 +291,7 @@ setuptools.setup( author='Scott Moser', author_email='scott.moser@canonical.com', url='http://launchpad.net/cloud-init/', - packages=setuptools.find_packages(exclude=['tests.*', '*.tests', 'tests']), + packages=setuptools.find_packages(exclude=['tests.*', 'tests']), scripts=['tools/cloud-init-per'], license='Dual-licensed under GPLv3 or Apache 2.0', data_files=data_files, diff --git a/tests/unittests/analyze/test_boot.py b/tests/unittests/analyze/test_boot.py new file mode 100644 index 00000000..fd878b44 --- /dev/null +++ b/tests/unittests/analyze/test_boot.py @@ -0,0 +1,161 @@ +import os +from cloudinit.analyze.__main__ import (analyze_boot, get_parser) +from tests.unittests.helpers import CiTestCase, mock +from cloudinit.analyze.show import dist_check_timestamp, SystemctlReader, \ + FAIL_CODE, CONTAINER_CODE + +err_code = (FAIL_CODE, -1, -1, -1) + + +class TestDistroChecker(CiTestCase): + + def test_blank_distro(self): + self.assertEqual(err_code, dist_check_timestamp()) + + @mock.patch('cloudinit.util.is_FreeBSD', return_value=True) + def test_freebsd_gentoo_cant_find(self, m_is_FreeBSD): + self.assertEqual(err_code, dist_check_timestamp()) + + @mock.patch('cloudinit.subp.subp', return_value=(0, 1)) + def test_subp_fails(self, m_subp): + self.assertEqual(err_code, dist_check_timestamp()) + + +class
TestSystemCtlReader(CiTestCase): + + def test_systemctl_invalid_property(self): + reader = SystemctlReader('dummyProperty') + with self.assertRaises(RuntimeError): + reader.parse_epoch_as_float() + + def test_systemctl_invalid_parameter(self): + reader = SystemctlReader('dummyProperty', 'dummyParameter') + with self.assertRaises(RuntimeError): + reader.parse_epoch_as_float() + + @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None)) + def test_systemctl_works_correctly_threshold(self, m_subp): + reader = SystemctlReader('dummyProperty', 'dummyParameter') + self.assertEqual(1.0, reader.parse_epoch_as_float()) + thresh = 1.0 - reader.parse_epoch_as_float() + self.assertTrue(thresh < 1e-6) + self.assertTrue(thresh > (-1 * 1e-6)) + + @mock.patch('cloudinit.subp.subp', return_value=('U=0', None)) + def test_systemctl_succeed_zero(self, m_subp): + reader = SystemctlReader('dummyProperty', 'dummyParameter') + self.assertEqual(0.0, reader.parse_epoch_as_float()) + + @mock.patch('cloudinit.subp.subp', return_value=('U=1', None)) + def test_systemctl_succeed_distinct(self, m_subp): + reader = SystemctlReader('dummyProperty', 'dummyParameter') + val1 = reader.parse_epoch_as_float() + m_subp.return_value = ('U=2', None) + reader2 = SystemctlReader('dummyProperty', 'dummyParameter') + val2 = reader2.parse_epoch_as_float() + self.assertNotEqual(val1, val2) + + @mock.patch('cloudinit.subp.subp', return_value=('100', None)) + def test_systemctl_epoch_not_splittable(self, m_subp): + reader = SystemctlReader('dummyProperty', 'dummyParameter') + with self.assertRaises(IndexError): + reader.parse_epoch_as_float() + + @mock.patch('cloudinit.subp.subp', return_value=('U=foobar', None)) + def test_systemctl_cannot_convert_epoch_to_float(self, m_subp): + reader = SystemctlReader('dummyProperty', 'dummyParameter') + with self.assertRaises(ValueError): + reader.parse_epoch_as_float() + + +class TestAnalyzeBoot(CiTestCase): + + def set_up_dummy_file_ci(self, path, log_path): + infh = open(path, 'w+') + infh.write('2019-07-08 17:40:49,601 - util.py[DEBUG]: Cloud-init v. ' + '19.1-1-gbaa47854-0ubuntu1~18.04.1 running \'init-local\' ' + 'at Mon, 08 Jul 2019 17:40:49 +0000. 
Up 18.84 seconds.') + infh.close() + outfh = open(log_path, 'w+') + outfh.close() + + def set_up_dummy_file(self, path, log_path): + infh = open(path, 'w+') + infh.write('dummy data') + infh.close() + outfh = open(log_path, 'w+') + outfh.close() + + def remove_dummy_file(self, path, log_path): + if os.path.isfile(path): + os.remove(path) + if os.path.isfile(log_path): + os.remove(log_path) + + @mock.patch('cloudinit.analyze.show.dist_check_timestamp', + return_value=err_code) + def test_boot_invalid_distro(self, m_dist_check_timestamp): + + path = os.path.dirname(os.path.abspath(__file__)) + log_path = path + '/boot-test.log' + path += '/dummy.log' + self.set_up_dummy_file(path, log_path) + + parser = get_parser() + args = parser.parse_args(args=['boot', '-i', path, '-o', + log_path]) + name_default = '' + analyze_boot(name_default, args) + # now args have been tested, go into outfile and make sure error + # message is in the outfile + outfh = open(args.outfile, 'r') + data = outfh.read() + err_string = 'Your Linux distro or container does not support this ' \ + 'functionality.\nYou must be running a Kernel ' \ + 'Telemetry supported distro.\nPlease check ' \ + 'https://cloudinit.readthedocs.io/en/latest/topics' \ + '/analyze.html for more information on supported ' \ + 'distros.\n' + + self.remove_dummy_file(path, log_path) + self.assertEqual(err_string, data) + + @mock.patch("cloudinit.util.is_container", return_value=True) + @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None)) + def test_container_no_ci_log_line(self, m_is_container, m_subp): + path = os.path.dirname(os.path.abspath(__file__)) + log_path = path + '/boot-test.log' + path += '/dummy.log' + self.set_up_dummy_file(path, log_path) + + parser = get_parser() + args = parser.parse_args(args=['boot', '-i', path, '-o', + log_path]) + name_default = '' + + finish_code = analyze_boot(name_default, args) + + self.remove_dummy_file(path, log_path) + self.assertEqual(FAIL_CODE, finish_code) + + @mock.patch("cloudinit.util.is_container", return_value=True) + @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None)) + @mock.patch('cloudinit.analyze.__main__._get_events', return_value=[{ + 'name': 'init-local', 'description': 'starting search', 'timestamp': + 100000}]) + @mock.patch('cloudinit.analyze.show.dist_check_timestamp', + return_value=(CONTAINER_CODE, 1, 1, 1)) + def test_container_ci_log_line(self, m_is_container, m_subp, m_get, m_g): + path = os.path.dirname(os.path.abspath(__file__)) + log_path = path + '/boot-test.log' + path += '/dummy.log' + self.set_up_dummy_file_ci(path, log_path) + + parser = get_parser() + args = parser.parse_args(args=['boot', '-i', path, '-o', + log_path]) + name_default = '' + finish_code = analyze_boot(name_default, args) + + self.remove_dummy_file(path, log_path) + self.assertEqual(CONTAINER_CODE, finish_code) diff --git a/tests/unittests/analyze/test_dump.py b/tests/unittests/analyze/test_dump.py new file mode 100644 index 00000000..e3683bbf --- /dev/null +++ b/tests/unittests/analyze/test_dump.py @@ -0,0 +1,208 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
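+#
+# The tests below repeatedly convert a parsed datetime to a float epoch
+# via dt.strftime('%s.%f'). A minimal sketch of that idiom (illustrative
+# only, not part of the module under test; note '%s' is a platform-
+# dependent strftime extension rather than portable Python):
+#
+#     from datetime import datetime
+#     dt = datetime.strptime('2016-09-12 14:39:20,839',
+#                            '%Y-%m-%d %H:%M:%S,%f')
+#     epoch = float(dt.strftime('%s.%f'))  # seconds.microseconds
+#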
+ +from datetime import datetime +from textwrap import dedent + +from cloudinit.analyze.dump import ( + dump_events, parse_ci_logline, parse_timestamp) +from cloudinit.util import write_file +from cloudinit.subp import which +from tests.unittests.helpers import CiTestCase, mock, skipIf + + +class TestParseTimestamp(CiTestCase): + + def test_parse_timestamp_handles_cloud_init_default_format(self): + """Logs with cloud-init detailed formats will be properly parsed.""" + trusty_fmt = '%Y-%m-%d %H:%M:%S,%f' + trusty_stamp = '2016-09-12 14:39:20,839' + dt = datetime.strptime(trusty_stamp, trusty_fmt) + self.assertEqual( + float(dt.strftime('%s.%f')), parse_timestamp(trusty_stamp)) + + def test_parse_timestamp_handles_syslog_adding_year(self): + """Syslog timestamps lack a year. Add year and properly parse.""" + syslog_fmt = '%b %d %H:%M:%S %Y' + syslog_stamp = 'Aug 08 15:12:51' + + # convert stamp ourselves by adding the missing year value + year = datetime.now().year + dt = datetime.strptime(syslog_stamp + " " + str(year), syslog_fmt) + self.assertEqual( + float(dt.strftime('%s.%f')), + parse_timestamp(syslog_stamp)) + + def test_parse_timestamp_handles_journalctl_format_adding_year(self): + """Journalctl precise timestamps lack a year. Add year and parse.""" + journal_fmt = '%b %d %H:%M:%S.%f %Y' + journal_stamp = 'Aug 08 17:15:50.606811' + + # convert stamp ourselves by adding the missing year value + year = datetime.now().year + dt = datetime.strptime(journal_stamp + " " + str(year), journal_fmt) + self.assertEqual( + float(dt.strftime('%s.%f')), parse_timestamp(journal_stamp)) + + @skipIf(not which("date"), "'date' command not available.") + def test_parse_unexpected_timestamp_format_with_date_command(self): + """Dump sends unexpected timestamp formats to date for processing.""" + new_fmt = '%H:%M %m/%d %Y' + new_stamp = '17:15 08/08' + # convert stamp ourselves by adding the missing year value + year = datetime.now().year + dt = datetime.strptime(new_stamp + " " + str(year), new_fmt) + + # use date(1) + with self.allow_subp(["date"]): + self.assertEqual( + float(dt.strftime('%s.%f')), parse_timestamp(new_stamp)) + + +class TestParseCILogLine(CiTestCase): + + def test_parse_logline_returns_none_without_separators(self): + """When no separators are found, parse_ci_logline returns None.""" + expected_parse_ignores = [ + '', '-', 'adsf-asdf', '2017-05-22 18:02:01,088', 'CLOUDINIT'] + for parse_ignores in expected_parse_ignores: + self.assertIsNone(parse_ci_logline(parse_ignores)) + + def test_parse_logline_returns_event_for_cloud_init_logs(self): + """parse_ci_logline returns an event parse from cloud-init format.""" + line = ( + "2017-08-08 20:05:07,147 - util.py[DEBUG]: Cloud-init v. 0.7.9" + " running 'init-local' at Tue, 08 Aug 2017 20:05:07 +0000. Up" + " 6.26 seconds.") + dt = datetime.strptime( + '2017-08-08 20:05:07,147', '%Y-%m-%d %H:%M:%S,%f') + timestamp = float(dt.strftime('%s.%f')) + expected = { + 'description': 'starting search for local datasources', + 'event_type': 'start', + 'name': 'init-local', + 'origin': 'cloudinit', + 'timestamp': timestamp} + self.assertEqual(expected, parse_ci_logline(line)) + + def test_parse_logline_returns_event_for_journalctl_logs(self): + """parse_ci_logline returns an event parse from journalctl format.""" + line = ("Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT]" + " util.py[DEBUG]: Cloud-init v. 0.7.8 running 'init-local' at" + " Thu, 03 Nov 2016 06:51:06 +0000. 
Up 1.0 seconds.") + year = datetime.now().year + dt = datetime.strptime( + 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y') + timestamp = float(dt.strftime('%s.%f')) + expected = { + 'description': 'starting search for local datasources', + 'event_type': 'start', + 'name': 'init-local', + 'origin': 'cloudinit', + 'timestamp': timestamp} + self.assertEqual(expected, parse_ci_logline(line)) + + @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date") + def test_parse_logline_returns_event_for_finish_events(self, + m_parse_from_date): + """parse_ci_logline returns a finish event for a parsed log line.""" + line = ('2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT]' + ' handlers.py[DEBUG]: finish: modules-final: SUCCESS: running' + ' modules for final') + expected = { + 'description': 'running modules for final', + 'event_type': 'finish', + 'name': 'modules-final', + 'origin': 'cloudinit', + 'result': 'SUCCESS', + 'timestamp': 1472594005.972} + m_parse_from_date.return_value = "1472594005.972" + self.assertEqual(expected, parse_ci_logline(line)) + m_parse_from_date.assert_has_calls( + [mock.call("2016-08-30 21:53:25.972325+00:00")]) + + def test_parse_logline_returns_event_for_amazon_linux_2_line(self): + line = ( + "Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start:" + " init-local/check-cache: attempting to read from cache [check]") + # Generate the expected value using `datetime`, so that TZ + # determination is consistent with the code under test. + timestamp_dt = datetime.strptime( + "Apr 30 19:39:11", "%b %d %H:%M:%S" + ).replace(year=datetime.now().year) + expected = { + 'description': 'attempting to read from cache [check]', + 'event_type': 'start', + 'name': 'init-local/check-cache', + 'origin': 'cloudinit', + 'timestamp': timestamp_dt.timestamp()} + self.assertEqual(expected, parse_ci_logline(line)) + + +SAMPLE_LOGS = dedent("""\ +Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]:\ + Cloud-init v. 0.7.8 running 'init-local' at Thu, 03 Nov 2016\ + 06:51:06 +0000. Up 1.0 seconds. 
+2016-08-30 21:53:25.972325+00:00 y1 [CLOUDINIT] handlers.py[DEBUG]: finish:\ + modules-final: SUCCESS: running modules for final +""") + + +class TestDumpEvents(CiTestCase): + maxDiff = None + + @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date") + def test_dump_events_with_rawdata(self, m_parse_from_date): + """Rawdata is split and parsed into a tuple of events and data""" + m_parse_from_date.return_value = "1472594005.972" + events, data = dump_events(rawdata=SAMPLE_LOGS) + expected_data = SAMPLE_LOGS.splitlines() + self.assertEqual( + [mock.call("2016-08-30 21:53:25.972325+00:00")], + m_parse_from_date.call_args_list) + self.assertEqual(expected_data, data) + year = datetime.now().year + dt1 = datetime.strptime( + 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y') + timestamp1 = float(dt1.strftime('%s.%f')) + expected_events = [{ + 'description': 'starting search for local datasources', + 'event_type': 'start', + 'name': 'init-local', + 'origin': 'cloudinit', + 'timestamp': timestamp1}, { + 'description': 'running modules for final', + 'event_type': 'finish', + 'name': 'modules-final', + 'origin': 'cloudinit', + 'result': 'SUCCESS', + 'timestamp': 1472594005.972}] + self.assertEqual(expected_events, events) + + @mock.patch("cloudinit.analyze.dump.parse_timestamp_from_date") + def test_dump_events_with_cisource(self, m_parse_from_date): + """Cisource file is read and parsed into a tuple of events and data.""" + tmpfile = self.tmp_path('logfile') + write_file(tmpfile, SAMPLE_LOGS) + m_parse_from_date.return_value = 1472594005.972 + + events, data = dump_events(cisource=open(tmpfile)) + year = datetime.now().year + dt1 = datetime.strptime( + 'Nov 03 06:51:06.074410 %d' % year, '%b %d %H:%M:%S.%f %Y') + timestamp1 = float(dt1.strftime('%s.%f')) + expected_events = [{ + 'description': 'starting search for local datasources', + 'event_type': 'start', + 'name': 'init-local', + 'origin': 'cloudinit', + 'timestamp': timestamp1}, { + 'description': 'running modules for final', + 'event_type': 'finish', + 'name': 'modules-final', + 'origin': 'cloudinit', + 'result': 'SUCCESS', + 'timestamp': 1472594005.972}] + self.assertEqual(expected_events, events) + self.assertEqual(SAMPLE_LOGS.splitlines(), [d.strip() for d in data]) + m_parse_from_date.assert_has_calls( + [mock.call("2016-08-30 21:53:25.972325+00:00")]) diff --git a/tests/unittests/cloudinit/__init__py b/tests/unittests/cloudinit/__init__py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/cmd/__init__.py b/tests/unittests/cmd/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/cmd/devel/__init__.py b/tests/unittests/cmd/devel/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/cmd/devel/test_logs.py b/tests/unittests/cmd/devel/test_logs.py new file mode 100644 index 00000000..18bdcdda --- /dev/null +++ b/tests/unittests/cmd/devel/test_logs.py @@ -0,0 +1,167 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
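+#
+# The tests below drive logs.collect_logs() through the wrap_and_call()
+# helper, which patches several attributes of a module for the duration
+# of a single call. A minimal sketch of the pattern as used here
+# (illustrative only):
+#
+#     wrap_and_call(
+#         'cloudinit.cmd.devel.logs',            # module whose attrs to patch
+#         {'subp': {'side_effect': fake_subp},   # attr -> mock.patch kwargs
+#          'CLOUDINIT_RUN_DIR': {'new': run_dir}},
+#         logs.collect_logs, output_tarfile, include_userdata=False)
+#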
+ +from datetime import datetime +import os +from io import StringIO + +from cloudinit.cmd.devel import logs +from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE +from tests.unittests.helpers import ( + FilesystemMockingTestCase, mock, wrap_and_call) +from cloudinit.subp import subp +from cloudinit.util import ensure_dir, load_file, write_file + + +@mock.patch('cloudinit.cmd.devel.logs.os.getuid') +class TestCollectLogs(FilesystemMockingTestCase): + + def setUp(self): + super(TestCollectLogs, self).setUp() + self.new_root = self.tmp_dir() + self.run_dir = self.tmp_path('run', self.new_root) + + def test_collect_logs_with_userdata_requires_root_user(self, m_getuid): + """collect-logs errors when non-root user collects userdata .""" + m_getuid.return_value = 100 # non-root + output_tarfile = self.tmp_path('logs.tgz') + with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: + self.assertEqual( + 1, logs.collect_logs(output_tarfile, include_userdata=True)) + self.assertEqual( + 'To include userdata, root user is required.' + ' Try sudo cloud-init collect-logs\n', + m_stderr.getvalue()) + + def test_collect_logs_creates_tarfile(self, m_getuid): + """collect-logs creates a tarfile with all related cloud-init info.""" + m_getuid.return_value = 100 + log1 = self.tmp_path('cloud-init.log', self.new_root) + write_file(log1, 'cloud-init-log') + log2 = self.tmp_path('cloud-init-output.log', self.new_root) + write_file(log2, 'cloud-init-output-log') + ensure_dir(self.run_dir) + write_file(self.tmp_path('results.json', self.run_dir), 'results') + write_file(self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir), + 'sensitive') + output_tarfile = self.tmp_path('logs.tgz') + + date = datetime.utcnow().date().strftime('%Y-%m-%d') + date_logdir = 'cloud-init-logs-{0}'.format(date) + + version_out = '/usr/bin/cloud-init 18.2fake\n' + expected_subp = { + ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'): + '0.7fake\n', + ('cloud-init', '--version'): version_out, + ('dmesg',): 'dmesg-out\n', + ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n', + ('tar', 'czvf', output_tarfile, date_logdir): '' + } + + def fake_subp(cmd): + cmd_tuple = tuple(cmd) + if cmd_tuple not in expected_subp: + raise AssertionError( + 'Unexpected command provided to subp: {0}'.format(cmd)) + if cmd == ['tar', 'czvf', output_tarfile, date_logdir]: + subp(cmd) # Pass through tar cmd so we can check output + return expected_subp[cmd_tuple], '' + + fake_stderr = mock.MagicMock() + + wrap_and_call( + 'cloudinit.cmd.devel.logs', + {'subp': {'side_effect': fake_subp}, + 'sys.stderr': {'new': fake_stderr}, + 'CLOUDINIT_LOGS': {'new': [log1, log2]}, + 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}}, + logs.collect_logs, output_tarfile, include_userdata=False) + # unpack the tarfile and check file contents + subp(['tar', 'zxvf', output_tarfile, '-C', self.new_root]) + out_logdir = self.tmp_path(date_logdir, self.new_root) + self.assertFalse( + os.path.exists( + os.path.join(out_logdir, 'run', 'cloud-init', + INSTANCE_JSON_SENSITIVE_FILE)), + 'Unexpected file found: %s' % INSTANCE_JSON_SENSITIVE_FILE) + self.assertEqual( + '0.7fake\n', + load_file(os.path.join(out_logdir, 'dpkg-version'))) + self.assertEqual(version_out, + load_file(os.path.join(out_logdir, 'version'))) + self.assertEqual( + 'cloud-init-log', + load_file(os.path.join(out_logdir, 'cloud-init.log'))) + self.assertEqual( + 'cloud-init-output-log', + load_file(os.path.join(out_logdir, 'cloud-init-output.log'))) + self.assertEqual( + 
'dmesg-out\n', + load_file(os.path.join(out_logdir, 'dmesg.txt'))) + self.assertEqual( + 'journal-out\n', + load_file(os.path.join(out_logdir, 'journal.txt'))) + self.assertEqual( + 'results', + load_file( + os.path.join(out_logdir, 'run', 'cloud-init', 'results.json'))) + fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile) + + def test_collect_logs_includes_optional_userdata(self, m_getuid): + """collect-logs include userdata when --include-userdata is set.""" + m_getuid.return_value = 0 + log1 = self.tmp_path('cloud-init.log', self.new_root) + write_file(log1, 'cloud-init-log') + log2 = self.tmp_path('cloud-init-output.log', self.new_root) + write_file(log2, 'cloud-init-output-log') + userdata = self.tmp_path('user-data.txt', self.new_root) + write_file(userdata, 'user-data') + ensure_dir(self.run_dir) + write_file(self.tmp_path('results.json', self.run_dir), 'results') + write_file(self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir), + 'sensitive') + output_tarfile = self.tmp_path('logs.tgz') + + date = datetime.utcnow().date().strftime('%Y-%m-%d') + date_logdir = 'cloud-init-logs-{0}'.format(date) + + version_out = '/usr/bin/cloud-init 18.2fake\n' + expected_subp = { + ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'): + '0.7fake', + ('cloud-init', '--version'): version_out, + ('dmesg',): 'dmesg-out\n', + ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n', + ('tar', 'czvf', output_tarfile, date_logdir): '' + } + + def fake_subp(cmd): + cmd_tuple = tuple(cmd) + if cmd_tuple not in expected_subp: + raise AssertionError( + 'Unexpected command provided to subp: {0}'.format(cmd)) + if cmd == ['tar', 'czvf', output_tarfile, date_logdir]: + subp(cmd) # Pass through tar cmd so we can check output + return expected_subp[cmd_tuple], '' + + fake_stderr = mock.MagicMock() + + wrap_and_call( + 'cloudinit.cmd.devel.logs', + {'subp': {'side_effect': fake_subp}, + 'sys.stderr': {'new': fake_stderr}, + 'CLOUDINIT_LOGS': {'new': [log1, log2]}, + 'CLOUDINIT_RUN_DIR': {'new': self.run_dir}, + 'USER_DATA_FILE': {'new': userdata}}, + logs.collect_logs, output_tarfile, include_userdata=True) + # unpack the tarfile and check file contents + subp(['tar', 'zxvf', output_tarfile, '-C', self.new_root]) + out_logdir = self.tmp_path(date_logdir, self.new_root) + self.assertEqual( + 'user-data', + load_file(os.path.join(out_logdir, 'user-data.txt'))) + self.assertEqual( + 'sensitive', + load_file(os.path.join(out_logdir, 'run', 'cloud-init', + INSTANCE_JSON_SENSITIVE_FILE))) + fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile) diff --git a/tests/unittests/cmd/devel/test_render.py b/tests/unittests/cmd/devel/test_render.py new file mode 100644 index 00000000..c7ddca3d --- /dev/null +++ b/tests/unittests/cmd/devel/test_render.py @@ -0,0 +1,144 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
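+#
+# Two behaviors matter for the jinja tests below: a first line of
+# '##template: jinja' marks user-data as a template, and hyphenated
+# instance-data keys are exposed to jinja under underscore-delimited
+# aliases. A minimal sketch (illustrative only):
+#
+#     user_data = '##template: jinja\nrendering: {{ my_var }}'
+#     instance_data = '{"my-var": "jinja worked"}'
+#     # rendering user_data against this instance-data yields
+#     # 'rendering: jinja worked'
+#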
+ +import os +from io import StringIO + +from collections import namedtuple +from cloudinit.cmd.devel import render +from cloudinit.helpers import Paths +from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE +from tests.unittests.helpers import CiTestCase, mock, skipUnlessJinja +from cloudinit.util import ensure_dir, write_file + + +class TestRender(CiTestCase): + + with_logs = True + + args = namedtuple('renderargs', 'user_data instance_data debug') + + def setUp(self): + super(TestRender, self).setUp() + self.tmp = self.tmp_dir() + + def test_handle_args_error_on_missing_user_data(self): + """When user_data file path does not exist, log an error.""" + absent_file = self.tmp_path('user-data', dir=self.tmp) + instance_data = self.tmp_path('instance-data', dir=self.tmp) + write_file(instance_data, '{}') + args = self.args( + user_data=absent_file, instance_data=instance_data, debug=False) + with mock.patch('sys.stderr', new_callable=StringIO): + self.assertEqual(1, render.handle_args('anyname', args)) + self.assertIn( + 'Missing user-data file: %s' % absent_file, + self.logs.getvalue()) + + def test_handle_args_error_on_missing_instance_data(self): + """When instance_data file path does not exist, log an error.""" + user_data = self.tmp_path('user-data', dir=self.tmp) + absent_file = self.tmp_path('instance-data', dir=self.tmp) + args = self.args( + user_data=user_data, instance_data=absent_file, debug=False) + with mock.patch('sys.stderr', new_callable=StringIO): + self.assertEqual(1, render.handle_args('anyname', args)) + self.assertIn( + 'Missing instance-data.json file: %s' % absent_file, + self.logs.getvalue()) + + def test_handle_args_defaults_instance_data(self): + """When no instance_data argument, default to configured run_dir.""" + user_data = self.tmp_path('user-data', dir=self.tmp) + run_dir = self.tmp_path('run_dir', dir=self.tmp) + ensure_dir(run_dir) + paths = Paths({'run_dir': run_dir}) + self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths') + self.m_paths.return_value = paths + args = self.args( + user_data=user_data, instance_data=None, debug=False) + with mock.patch('sys.stderr', new_callable=StringIO): + self.assertEqual(1, render.handle_args('anyname', args)) + json_file = os.path.join(run_dir, INSTANCE_JSON_FILE) + self.assertIn( + 'Missing instance-data.json file: %s' % json_file, + self.logs.getvalue()) + + def test_handle_args_root_fallback_from_sensitive_instance_data(self): + """When root user defaults to sensitive.json.""" + user_data = self.tmp_path('user-data', dir=self.tmp) + run_dir = self.tmp_path('run_dir', dir=self.tmp) + ensure_dir(run_dir) + paths = Paths({'run_dir': run_dir}) + self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths') + self.m_paths.return_value = paths + args = self.args( + user_data=user_data, instance_data=None, debug=False) + with mock.patch('sys.stderr', new_callable=StringIO): + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 0 + self.assertEqual(1, render.handle_args('anyname', args)) + json_file = os.path.join(run_dir, INSTANCE_JSON_FILE) + json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE) + self.assertIn( + 'WARNING: Missing root-readable %s. 
Using redacted %s' % ( + json_sensitive, json_file), self.logs.getvalue()) + self.assertIn( + 'ERROR: Missing instance-data.json file: %s' % json_file, + self.logs.getvalue()) + + def test_handle_args_root_uses_sensitive_instance_data(self): + """When root user, and no instance-data arg, use sensitive.json.""" + user_data = self.tmp_path('user-data', dir=self.tmp) + write_file(user_data, '##template: jinja\nrendering: {{ my_var }}') + run_dir = self.tmp_path('run_dir', dir=self.tmp) + ensure_dir(run_dir) + json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE) + write_file(json_sensitive, '{"my-var": "jinja worked"}') + paths = Paths({'run_dir': run_dir}) + self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths') + self.m_paths.return_value = paths + args = self.args( + user_data=user_data, instance_data=None, debug=False) + with mock.patch('sys.stderr', new_callable=StringIO): + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 0 + self.assertEqual(0, render.handle_args('anyname', args)) + self.assertIn('rendering: jinja worked', m_stdout.getvalue()) + + @skipUnlessJinja() + def test_handle_args_renders_instance_data_vars_in_template(self): + """If user_data file is a jinja template render instance-data vars.""" + user_data = self.tmp_path('user-data', dir=self.tmp) + write_file(user_data, '##template: jinja\nrendering: {{ my_var }}') + instance_data = self.tmp_path('instance-data', dir=self.tmp) + write_file(instance_data, '{"my-var": "jinja worked"}') + args = self.args( + user_data=user_data, instance_data=instance_data, debug=True) + with mock.patch('sys.stderr', new_callable=StringIO) as m_console_err: + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + self.assertEqual(0, render.handle_args('anyname', args)) + self.assertIn( + 'DEBUG: Converted jinja variables\n{', self.logs.getvalue()) + self.assertIn( + 'DEBUG: Converted jinja variables\n{', m_console_err.getvalue()) + self.assertEqual('rendering: jinja worked', m_stdout.getvalue()) + + @skipUnlessJinja() + def test_handle_args_warns_and_gives_up_on_invalid_jinja_operation(self): + """If user_data file has invalid jinja operations log warnings.""" + user_data = self.tmp_path('user-data', dir=self.tmp) + write_file(user_data, '##template: jinja\nrendering: {{ my-var }}') + instance_data = self.tmp_path('instance-data', dir=self.tmp) + write_file(instance_data, '{"my-var": "jinja worked"}') + args = self.args( + user_data=user_data, instance_data=instance_data, debug=True) + with mock.patch('sys.stderr', new_callable=StringIO): + self.assertEqual(1, render.handle_args('anyname', args)) + self.assertIn( + 'WARNING: Ignoring jinja template for %s: Undefined jinja' + ' variable: "my-var". Jinja tried subtraction. Perhaps you meant' + ' "my_var"?' % user_data, + self.logs.getvalue()) + +# vi: ts=4 expandtab diff --git a/tests/unittests/cmd/test_clean.py b/tests/unittests/cmd/test_clean.py new file mode 100644 index 00000000..81fc930e --- /dev/null +++ b/tests/unittests/cmd/test_clean.py @@ -0,0 +1,178 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
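+#
+# clean.remove_artifacts() obtains its paths from an Init object, so the
+# tests below substitute a small FakeInit stub through wrap_and_call()
+# rather than constructing a real Init. A minimal sketch of the pattern
+# (illustrative only):
+#
+#     retcode = wrap_and_call(
+#         'cloudinit.cmd.clean',
+#         {'Init': {'side_effect': FakeInit}},
+#         clean.remove_artifacts, remove_logs=True)
+#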
+ +from cloudinit.cmd import clean +from cloudinit.util import ensure_dir, sym_link, write_file +from tests.unittests.helpers import CiTestCase, wrap_and_call, mock +from collections import namedtuple +import os +from io import StringIO + +mypaths = namedtuple('MyPaths', 'cloud_dir') + + +class TestClean(CiTestCase): + + def setUp(self): + super(TestClean, self).setUp() + self.new_root = self.tmp_dir() + self.artifact_dir = self.tmp_path('artifacts', self.new_root) + self.log1 = self.tmp_path('cloud-init.log', self.new_root) + self.log2 = self.tmp_path('cloud-init-output.log', self.new_root) + + class FakeInit(object): + cfg = {'def_log_file': self.log1, + 'output': {'all': '|tee -a {0}'.format(self.log2)}} + # Ensure cloud_dir has a trailing slash, to match real behaviour + paths = mypaths(cloud_dir='{}/'.format(self.artifact_dir)) + + def __init__(self, ds_deps): + pass + + def read_cfg(self): + pass + + self.init_class = FakeInit + + def test_remove_artifacts_removes_logs(self): + """remove_artifacts removes logs when remove_logs is True.""" + write_file(self.log1, 'cloud-init-log') + write_file(self.log2, 'cloud-init-output-log') + + self.assertFalse( + os.path.exists(self.artifact_dir), 'Unexpected artifacts dir') + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=True) + self.assertFalse(os.path.exists(self.log1), 'Unexpected file') + self.assertFalse(os.path.exists(self.log2), 'Unexpected file') + self.assertEqual(0, retcode) + + def test_remove_artifacts_preserves_logs(self): + """remove_artifacts leaves logs when remove_logs is False.""" + write_file(self.log1, 'cloud-init-log') + write_file(self.log2, 'cloud-init-output-log') + + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=False) + self.assertTrue(os.path.exists(self.log1), 'Missing expected file') + self.assertTrue(os.path.exists(self.log2), 'Missing expected file') + self.assertEqual(0, retcode) + + def test_remove_artifacts_removes_unlinks_symlinks(self): + """remove_artifacts cleans artifacts dir unlinking any symlinks.""" + dir1 = os.path.join(self.artifact_dir, 'dir1') + ensure_dir(dir1) + symlink = os.path.join(self.artifact_dir, 'mylink') + sym_link(dir1, symlink) + + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=False) + self.assertEqual(0, retcode) + for path in (dir1, symlink): + self.assertFalse( + os.path.exists(path), + 'Unexpected {0} dir'.format(path)) + + def test_remove_artifacts_removes_artifacts_skipping_seed(self): + """remove_artifacts cleans artifacts dir with exception of seed dir.""" + dirs = [ + self.artifact_dir, + os.path.join(self.artifact_dir, 'seed'), + os.path.join(self.artifact_dir, 'dir1'), + os.path.join(self.artifact_dir, 'dir2')] + for _dir in dirs: + ensure_dir(_dir) + + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=False) + self.assertEqual(0, retcode) + for expected_dir in dirs[:2]: + self.assertTrue( + os.path.exists(expected_dir), + 'Missing {0} dir'.format(expected_dir)) + for deleted_dir in dirs[2:]: + self.assertFalse( + os.path.exists(deleted_dir), + 'Unexpected {0} dir'.format(deleted_dir)) + + def test_remove_artifacts_removes_artifacts_removes_seed(self): + """remove_artifacts removes seed dir when remove_seed is True.""" + dirs = [ + 
self.artifact_dir, + os.path.join(self.artifact_dir, 'seed'), + os.path.join(self.artifact_dir, 'dir1'), + os.path.join(self.artifact_dir, 'dir2')] + for _dir in dirs: + ensure_dir(_dir) + + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=False, remove_seed=True) + self.assertEqual(0, retcode) + self.assertTrue( + os.path.exists(self.artifact_dir), 'Missing artifact dir') + for deleted_dir in dirs[1:]: + self.assertFalse( + os.path.exists(deleted_dir), + 'Unexpected {0} dir'.format(deleted_dir)) + + def test_remove_artifacts_returns_one_on_errors(self): + """remove_artifacts returns non-zero on failure and prints an error.""" + ensure_dir(self.artifact_dir) + ensure_dir(os.path.join(self.artifact_dir, 'dir1')) + + with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'del_dir': {'side_effect': OSError('oops')}, + 'Init': {'side_effect': self.init_class}}, + clean.remove_artifacts, remove_logs=False) + self.assertEqual(1, retcode) + self.assertEqual( + 'ERROR: Could not remove %s/dir1: oops\n' % self.artifact_dir, + m_stderr.getvalue()) + + def test_handle_clean_args_reboots(self): + """handle_clean_args_reboots when reboot arg is provided.""" + + called_cmds = [] + + def fake_subp(cmd, capture): + called_cmds.append((cmd, capture)) + return '', '' + + myargs = namedtuple('MyArgs', 'remove_logs remove_seed reboot') + cmdargs = myargs(remove_logs=False, remove_seed=False, reboot=True) + retcode = wrap_and_call( + 'cloudinit.cmd.clean', + {'subp': {'side_effect': fake_subp}, + 'Init': {'side_effect': self.init_class}}, + clean.handle_clean_args, name='does not matter', args=cmdargs) + self.assertEqual(0, retcode) + self.assertEqual( + [(['shutdown', '-r', 'now'], False)], called_cmds) + + def test_status_main(self): + '''clean.main can be run as a standalone script.''' + write_file(self.log1, 'cloud-init-log') + with self.assertRaises(SystemExit) as context_manager: + wrap_and_call( + 'cloudinit.cmd.clean', + {'Init': {'side_effect': self.init_class}, + 'sys.argv': {'new': ['clean', '--logs']}}, + clean.main) + + self.assertEqual(0, context_manager.exception.code) + self.assertFalse( + os.path.exists(self.log1), 'Unexpected log {0}'.format(self.log1)) + + +# vi: ts=4 expandtab syntax=python diff --git a/tests/unittests/cmd/test_cloud_id.py b/tests/unittests/cmd/test_cloud_id.py new file mode 100644 index 00000000..12fc80e8 --- /dev/null +++ b/tests/unittests/cmd/test_cloud_id.py @@ -0,0 +1,127 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
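+#
+# cloud_id.main() is exercised end to end in the tests below: sys.argv is
+# patched to the desired command line, stdout/stderr are captured, and
+# the exit status is read off the SystemExit raised by main(). A minimal
+# sketch of the pattern (illustrative only):
+#
+#     with mock.patch('sys.argv', ['cloud-id']):
+#         with self.assertRaises(SystemExit) as ctx:
+#             cloud_id.main()
+#     self.assertEqual(0, ctx.exception.code)
+#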
+ +"""Tests for cloud-id command line utility.""" + +from cloudinit import util +from collections import namedtuple +from io import StringIO + +from cloudinit.cmd import cloud_id + +from tests.unittests.helpers import CiTestCase, mock + + +class TestCloudId(CiTestCase): + + args = namedtuple('cloudidargs', ('instance_data json long')) + + def setUp(self): + super(TestCloudId, self).setUp() + self.tmp = self.tmp_dir() + self.instance_data = self.tmp_path('instance-data.json', dir=self.tmp) + + def test_cloud_id_arg_parser_defaults(self): + """Validate the argument defaults when not provided by the end-user.""" + cmd = ['cloud-id'] + with mock.patch('sys.argv', cmd): + args = cloud_id.get_parser().parse_args() + self.assertEqual( + '/run/cloud-init/instance-data.json', + args.instance_data) + self.assertEqual(False, args.long) + self.assertEqual(False, args.json) + + def test_cloud_id_arg_parse_overrides(self): + """Override argument defaults by specifying values for each param.""" + util.write_file(self.instance_data, '{}') + cmd = ['cloud-id', '--instance-data', self.instance_data, '--long', + '--json'] + with mock.patch('sys.argv', cmd): + args = cloud_id.get_parser().parse_args() + self.assertEqual(self.instance_data, args.instance_data) + self.assertEqual(True, args.long) + self.assertEqual(True, args.json) + + def test_cloud_id_missing_instance_data_json(self): + """Exit error when the provided instance-data.json does not exist.""" + cmd = ['cloud-id', '--instance-data', self.instance_data] + with mock.patch('sys.argv', cmd): + with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: + with self.assertRaises(SystemExit) as context_manager: + cloud_id.main() + self.assertEqual(1, context_manager.exception.code) + self.assertIn( + "ERROR: File not found '%s'" % self.instance_data, + m_stderr.getvalue()) + + def test_cloud_id_non_json_instance_data(self): + """Exit error when the provided instance-data.json is not json.""" + cmd = ['cloud-id', '--instance-data', self.instance_data] + util.write_file(self.instance_data, '{') + with mock.patch('sys.argv', cmd): + with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: + with self.assertRaises(SystemExit) as context_manager: + cloud_id.main() + self.assertEqual(1, context_manager.exception.code) + self.assertIn( + "ERROR: File '%s' is not valid json." 
% self.instance_data, + m_stderr.getvalue()) + + def test_cloud_id_from_cloud_name_in_instance_data(self): + """Report canonical cloud-id from cloud_name in instance-data.""" + util.write_file( + self.instance_data, + '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}') + cmd = ['cloud-id', '--instance-data', self.instance_data] + with mock.patch('sys.argv', cmd): + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with self.assertRaises(SystemExit) as context_manager: + cloud_id.main() + self.assertEqual(0, context_manager.exception.code) + self.assertEqual("mycloud\n", m_stdout.getvalue()) + + def test_cloud_id_long_name_from_instance_data(self): + """Report long cloud-id format from cloud_name and region.""" + util.write_file( + self.instance_data, + '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}') + cmd = ['cloud-id', '--instance-data', self.instance_data, '--long'] + with mock.patch('sys.argv', cmd): + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with self.assertRaises(SystemExit) as context_manager: + cloud_id.main() + self.assertEqual(0, context_manager.exception.code) + self.assertEqual("mycloud\tsomereg\n", m_stdout.getvalue()) + + def test_cloud_id_lookup_from_instance_data_region(self): + """Report discovered canonical cloud_id when region lookup matches.""" + util.write_file( + self.instance_data, + '{"v1": {"cloud_name": "aws", "region": "cn-north-1",' + ' "platform": "ec2"}}') + cmd = ['cloud-id', '--instance-data', self.instance_data, '--long'] + with mock.patch('sys.argv', cmd): + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with self.assertRaises(SystemExit) as context_manager: + cloud_id.main() + self.assertEqual(0, context_manager.exception.code) + self.assertEqual("aws-china\tcn-north-1\n", m_stdout.getvalue()) + + def test_cloud_id_lookup_json_instance_data_adds_cloud_id_to_json(self): + """Report v1 instance-data content with cloud_id when --json set.""" + util.write_file( + self.instance_data, + '{"v1": {"cloud_name": "unknown", "region": "dfw",' + ' "platform": "openstack", "public_ssh_keys": []}}') + expected = util.json_dumps({ + 'cloud_id': 'openstack', 'cloud_name': 'unknown', + 'platform': 'openstack', 'public_ssh_keys': [], 'region': 'dfw'}) + cmd = ['cloud-id', '--instance-data', self.instance_data, '--json'] + with mock.patch('sys.argv', cmd): + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with self.assertRaises(SystemExit) as context_manager: + cloud_id.main() + self.assertEqual(0, context_manager.exception.code) + self.assertEqual(expected + '\n', m_stdout.getvalue()) + +# vi: ts=4 expandtab diff --git a/tests/unittests/cmd/test_main.py b/tests/unittests/cmd/test_main.py new file mode 100644 index 00000000..e1ce682b --- /dev/null +++ b/tests/unittests/cmd/test_main.py @@ -0,0 +1,188 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
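+#
+# main.main_init() expects an argparse-style namespace; the tests below
+# fake one with a namedtuple so every field is set explicitly. A minimal
+# sketch (illustrative only):
+#
+#     myargs = namedtuple(
+#         'MyArgs', 'debug files force local reporter subcommand')
+#     cmdargs = myargs(debug=False, files=None, force=False, local=False,
+#                      reporter=None, subcommand='init')
+#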
+ +from collections import namedtuple +import copy +import os +from io import StringIO +from unittest import mock + +import pytest + +from cloudinit.cmd import main +from cloudinit import safeyaml +from cloudinit.util import ( + ensure_dir, load_file, write_file) +from tests.unittests.helpers import ( + FilesystemMockingTestCase, wrap_and_call) + +mypaths = namedtuple('MyPaths', 'run_dir') +myargs = namedtuple('MyArgs', 'debug files force local reporter subcommand') + + +class TestMain(FilesystemMockingTestCase): + with_logs = True + allowed_subp = False + + def setUp(self): + super(TestMain, self).setUp() + self.new_root = self.tmp_dir() + self.cloud_dir = self.tmp_path('var/lib/cloud/', dir=self.new_root) + os.makedirs(self.cloud_dir) + self.replicateTestRoot('simple_ubuntu', self.new_root) + self.cfg = { + 'datasource_list': ['None'], + 'runcmd': ['ls /etc'], # test ALL_DISTROS + 'system_info': {'paths': {'cloud_dir': self.cloud_dir, + 'run_dir': self.new_root}}, + 'write_files': [ + { + 'path': '/etc/blah.ini', + 'content': 'blah', + 'permissions': 0o755, + }, + ], + 'cloud_init_modules': ['write-files', 'runcmd'], + } + cloud_cfg = safeyaml.dumps(self.cfg) + ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) + self.cloud_cfg_file = os.path.join( + self.new_root, 'etc', 'cloud', 'cloud.cfg') + write_file(self.cloud_cfg_file, cloud_cfg) + self.patchOS(self.new_root) + self.patchUtils(self.new_root) + self.stderr = StringIO() + self.patchStdoutAndStderr(stderr=self.stderr) + + def test_main_init_run_net_stops_on_file_no_net(self): + """When no-net file is present, main_init does not process modules.""" + stop_file = os.path.join(self.cloud_dir, 'data', 'no-net') # stop file + write_file(stop_file, '') + cmdargs = myargs( + debug=False, files=None, force=False, local=False, reporter=None, + subcommand='init') + (_item1, item2) = wrap_and_call( + 'cloudinit.cmd.main', + {'util.close_stdin': True, + 'netinfo.debug_info': 'my net debug info', + 'util.fixup_output': ('outfmt', 'errfmt')}, + main.main_init, 'init', cmdargs) + # We should not run write_files module + self.assertFalse( + os.path.exists(os.path.join(self.new_root, 'etc/blah.ini')), + 'Unexpected run of write_files module produced blah.ini') + self.assertEqual([], item2) + # Instancify is called + instance_id_path = 'var/lib/cloud/data/instance-id' + self.assertFalse( + os.path.exists(os.path.join(self.new_root, instance_id_path)), + 'Unexpected call to datasource.instancify produced instance-id') + expected_logs = [ + "Exiting. 
stop file ['{stop_file}'] existed\n".format( + stop_file=stop_file), + 'my net debug info' # netinfo.debug_info + ] + for log in expected_logs: + self.assertIn(log, self.stderr.getvalue()) + + def test_main_init_run_net_runs_modules(self): + """Modules like write_files are run in 'net' mode.""" + cmdargs = myargs( + debug=False, files=None, force=False, local=False, reporter=None, + subcommand='init') + (_item1, item2) = wrap_and_call( + 'cloudinit.cmd.main', + {'util.close_stdin': True, + 'netinfo.debug_info': 'my net debug info', + 'util.fixup_output': ('outfmt', 'errfmt')}, + main.main_init, 'init', cmdargs) + self.assertEqual([], item2) + # Instancify is called + instance_id_path = 'var/lib/cloud/data/instance-id' + self.assertEqual( + 'iid-datasource-none\n', + os.path.join(load_file( + os.path.join(self.new_root, instance_id_path)))) + # modules are run (including write_files) + self.assertEqual( + 'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini'))) + expected_logs = [ + 'network config is disabled by fallback', # apply_network_config + 'my net debug info', # netinfo.debug_info + 'no previous run detected' + ] + for log in expected_logs: + self.assertIn(log, self.stderr.getvalue()) + + def test_main_init_run_net_calls_set_hostname_when_metadata_present(self): + """When local-hostname metadata is present, call cc_set_hostname.""" + self.cfg['datasource'] = { + 'None': {'metadata': {'local-hostname': 'md-hostname'}}} + cloud_cfg = safeyaml.dumps(self.cfg) + write_file(self.cloud_cfg_file, cloud_cfg) + cmdargs = myargs( + debug=False, files=None, force=False, local=False, reporter=None, + subcommand='init') + + def set_hostname(name, cfg, cloud, log, args): + self.assertEqual('set-hostname', name) + updated_cfg = copy.deepcopy(self.cfg) + updated_cfg.update( + {'def_log_file': '/var/log/cloud-init.log', + 'log_cfgs': [], + 'syslog_fix_perms': [ + 'syslog:adm', 'root:adm', 'root:wheel', 'root:root' + ], + 'vendor_data': {'enabled': True, 'prefix': []}, + 'vendor_data2': {'enabled': True, 'prefix': []}}) + updated_cfg.pop('system_info') + + self.assertEqual(updated_cfg, cfg) + self.assertEqual(main.LOG, log) + self.assertIsNone(args) + + (_item1, item2) = wrap_and_call( + 'cloudinit.cmd.main', + {'util.close_stdin': True, + 'netinfo.debug_info': 'my net debug info', + 'cc_set_hostname.handle': {'side_effect': set_hostname}, + 'util.fixup_output': ('outfmt', 'errfmt')}, + main.main_init, 'init', cmdargs) + self.assertEqual([], item2) + # Instancify is called + instance_id_path = 'var/lib/cloud/data/instance-id' + self.assertEqual( + 'iid-datasource-none\n', + os.path.join(load_file( + os.path.join(self.new_root, instance_id_path)))) + # modules are run (including write_files) + self.assertEqual( + 'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini'))) + expected_logs = [ + 'network config is disabled by fallback', # apply_network_config + 'my net debug info', # netinfo.debug_info + 'no previous run detected' + ] + for log in expected_logs: + self.assertIn(log, self.stderr.getvalue()) + + +class TestShouldBringUpInterfaces: + @pytest.mark.parametrize('cfg_disable,args_local,expected', [ + (True, True, False), + (True, False, False), + (False, True, False), + (False, False, True), + ]) + def test_should_bring_up_interfaces( + self, cfg_disable, args_local, expected + ): + init = mock.Mock() + init.cfg = {'disable_network_activation': cfg_disable} + + args = mock.Mock() + args.local = args_local + + result = main._should_bring_up_interfaces(init, args) + assert result == 
expected + +# vi: ts=4 expandtab diff --git a/tests/unittests/cmd/test_query.py b/tests/unittests/cmd/test_query.py new file mode 100644 index 00000000..b3f1d98d --- /dev/null +++ b/tests/unittests/cmd/test_query.py @@ -0,0 +1,392 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import errno +import gzip +from io import BytesIO +import json +from textwrap import dedent + +import pytest + +from collections import namedtuple +from cloudinit.cmd import query +from cloudinit.helpers import Paths +from cloudinit.sources import ( + REDACT_SENSITIVE_VALUE, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE) +from tests.unittests.helpers import mock + +from cloudinit.util import b64e, write_file + + +def _gzip_data(data): + with BytesIO() as iobuf: + with gzip.GzipFile(mode="wb", fileobj=iobuf) as gzfp: + gzfp.write(data) + return iobuf.getvalue() + + +@mock.patch("cloudinit.cmd.query.addLogHandlerCLI", lambda *args: "") +class TestQuery: + + args = namedtuple( + 'queryargs', + ('debug dump_all format instance_data list_keys user_data vendor_data' + ' varname')) + + def _setup_paths(self, tmpdir, ud_val=None, vd_val=None): + """Write userdata and vendordata into a tmpdir. + + Return: + 4-tuple : (paths, run_dir_path, userdata_path, vendordata_path) + """ + if ud_val: + user_data = tmpdir.join('user-data') + write_file(user_data.strpath, ud_val) + else: + user_data = None + if vd_val: + vendor_data = tmpdir.join('vendor-data') + write_file(vendor_data.strpath, vd_val) + else: + vendor_data = None + run_dir = tmpdir.join('run_dir') + run_dir.ensure_dir() + return ( + Paths({'run_dir': run_dir.strpath}), + run_dir, + user_data, + vendor_data + ) + + def test_handle_args_error_on_missing_param(self, caplog, capsys): + """Error when missing required parameters and print usage.""" + args = self.args( + debug=False, dump_all=False, format=None, instance_data=None, + list_keys=False, user_data=None, vendor_data=None, varname=None) + with mock.patch( + "cloudinit.cmd.query.addLogHandlerCLI", return_value="" + ) as m_cli_log: + assert 1 == query.handle_args('anyname', args) + expected_error = ( + 'Expected one of the options: --all, --format, --list-keys' + ' or varname\n') + assert expected_error in caplog.text + out, _err = capsys.readouterr() + assert 'usage: query' in out + assert 1 == m_cli_log.call_count + + @pytest.mark.parametrize( + "inst_data,varname,expected_error", ( + ( + '{"v1": {"key-2": "value-2"}}', + 'v1.absent_leaf', + "instance-data 'v1' has no 'absent_leaf'\n" + ), + ( + '{"v1": {"key-2": "value-2"}}', + 'absent_key', + "Undefined instance-data key 'absent_key'\n" + ), + ) + ) + def test_handle_args_error_on_invalid_vaname_paths( + self, inst_data, varname, expected_error, caplog, tmpdir + ): + """Error when varname is not a valid instance-data variable path.""" + instance_data = tmpdir.join('instance-data') + instance_data.write(inst_data) + args = self.args( + debug=False, dump_all=False, format=None, + instance_data=instance_data.strpath, + list_keys=False, user_data=None, vendor_data=None, varname=varname + ) + paths, _, _, _ = self._setup_paths(tmpdir) + with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths: + m_paths.return_value = paths + with mock.patch( + "cloudinit.cmd.query.addLogHandlerCLI", return_value="" + ): + assert 1 == query.handle_args('anyname', args) + assert expected_error in caplog.text + + def test_handle_args_error_on_missing_instance_data(self, caplog, tmpdir): + """When instance_data file path does not exist, log an 
error.""" + absent_fn = tmpdir.join('absent') + args = self.args( + debug=False, dump_all=True, format=None, + instance_data=absent_fn.strpath, + list_keys=False, user_data='ud', vendor_data='vd', varname=None) + assert 1 == query.handle_args('anyname', args) + + msg = 'Missing instance-data file: %s' % absent_fn + assert msg in caplog.text + + def test_handle_args_error_when_no_read_permission_instance_data( + self, caplog, tmpdir + ): + """When instance_data file is unreadable, log an error.""" + noread_fn = tmpdir.join('unreadable') + noread_fn.write('thou shall not pass') + args = self.args( + debug=False, dump_all=True, format=None, + instance_data=noread_fn.strpath, + list_keys=False, user_data='ud', vendor_data='vd', varname=None) + with mock.patch('cloudinit.cmd.query.util.load_file') as m_load: + m_load.side_effect = OSError(errno.EACCES, 'Not allowed') + assert 1 == query.handle_args('anyname', args) + msg = "No read permission on '%s'. Try sudo" % noread_fn + assert msg in caplog.text + + def test_handle_args_defaults_instance_data(self, caplog, tmpdir): + """When no instance_data argument, default to configured run_dir.""" + args = self.args( + debug=False, dump_all=True, format=None, instance_data=None, + list_keys=False, user_data=None, vendor_data=None, varname=None) + paths, run_dir, _, _ = self._setup_paths(tmpdir) + with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths: + m_paths.return_value = paths + assert 1 == query.handle_args('anyname', args) + json_file = run_dir.join(INSTANCE_JSON_FILE) + msg = 'Missing instance-data file: %s' % json_file.strpath + assert msg in caplog.text + + def test_handle_args_root_fallsback_to_instance_data(self, caplog, tmpdir): + """When no instance_data argument, root falls back to redacted json.""" + args = self.args( + debug=False, dump_all=True, format=None, instance_data=None, + list_keys=False, user_data=None, vendor_data=None, varname=None) + paths, run_dir, _, _ = self._setup_paths(tmpdir) + with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths: + m_paths.return_value = paths + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 0 + assert 1 == query.handle_args('anyname', args) + json_file = run_dir.join(INSTANCE_JSON_FILE) + sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE) + msg = ( + 'Missing root-readable %s. Using redacted %s instead.' 
% + ( + sensitive_file.strpath, json_file.strpath + ) + ) + assert msg in caplog.text + + @pytest.mark.parametrize( + 'ud_src,ud_expected,vd_src,vd_expected', + ( + ('hi mom', 'hi mom', 'hi pops', 'hi pops'), + ('ud'.encode('utf-8'), 'ud', 'vd'.encode('utf-8'), 'vd'), + (_gzip_data(b'ud'), 'ud', _gzip_data(b'vd'), 'vd'), + (_gzip_data('ud'.encode('utf-8')), 'ud', _gzip_data(b'vd'), 'vd'), + ) + ) + def test_handle_args_root_processes_user_data( + self, ud_src, ud_expected, vd_src, vd_expected, capsys, tmpdir + ): + """Support reading multiple user-data file content types""" + paths, run_dir, user_data, vendor_data = self._setup_paths( + tmpdir, ud_val=ud_src, vd_val=vd_src + ) + sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE) + sensitive_file.write('{"my-var": "it worked"}') + args = self.args( + debug=False, dump_all=True, format=None, instance_data=None, + list_keys=False, user_data=user_data.strpath, + vendor_data=vendor_data.strpath, varname=None) + with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths: + m_paths.return_value = paths + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 0 + assert 0 == query.handle_args('anyname', args) + out, _err = capsys.readouterr() + cmd_output = json.loads(out) + assert "it worked" == cmd_output['my-var'] + if ud_expected == "ci-b64:": + ud_expected = "ci-b64:{}".format(b64e(ud_src)) + if vd_expected == "ci-b64:": + vd_expected = "ci-b64:{}".format(b64e(vd_src)) + assert ud_expected == cmd_output['userdata'] + assert vd_expected == cmd_output['vendordata'] + + def test_handle_args_root_uses_instance_sensitive_data( + self, capsys, tmpdir + ): + """When no instance_data argument, root uses sensitive json.""" + paths, run_dir, user_data, vendor_data = self._setup_paths( + tmpdir, ud_val='ud', vd_val='vd' + ) + sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE) + sensitive_file.write('{"my-var": "it worked"}') + args = self.args( + debug=False, dump_all=True, format=None, instance_data=None, + list_keys=False, user_data=user_data.strpath, + vendor_data=vendor_data.strpath, varname=None) + with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths: + m_paths.return_value = paths + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 0 + assert 0 == query.handle_args('anyname', args) + expected = ( + '{\n "my-var": "it worked",\n ' + '"userdata": "ud",\n "vendordata": "vd"\n}\n' + ) + out, _err = capsys.readouterr() + assert expected == out + + def test_handle_args_dumps_all_instance_data(self, capsys, tmpdir): + """When --all is specified query will dump all instance data vars.""" + instance_data = tmpdir.join('instance-data') + instance_data.write('{"my-var": "it worked"}') + args = self.args( + debug=False, dump_all=True, format=None, + instance_data=instance_data.strpath, list_keys=False, + user_data='ud', vendor_data='vd', varname=None) + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 100 + assert 0 == query.handle_args('anyname', args) + expected = ( + '{\n "my-var": "it worked",\n "userdata": "<%s> file:ud",\n' + ' "vendordata": "<%s> file:vd"\n}\n' % ( + REDACT_SENSITIVE_VALUE, REDACT_SENSITIVE_VALUE + ) + ) + out, _err = capsys.readouterr() + assert expected == out + + def test_handle_args_returns_top_level_varname(self, capsys, tmpdir): + """When the argument varname is passed, report its value.""" + instance_data = tmpdir.join('instance-data') + instance_data.write('{"my-var": "it worked"}') + args = self.args( + debug=False, dump_all=True, 
format=None, + instance_data=instance_data.strpath, list_keys=False, + user_data='ud', vendor_data='vd', varname='my_var') + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 100 + assert 0 == query.handle_args('anyname', args) + out, _err = capsys.readouterr() + assert 'it worked\n' == out + + @pytest.mark.parametrize( + 'inst_data,varname,expected', + ( + ( + '{"v1": {"key-2": "value-2"}, "my-var": "it worked"}', + 'v1.key_2', + 'value-2\n' + ), + # Assert no jinja underscore-delimited aliases are reported on CLI + ( + '{"v1": {"something-hyphenated": {"no.underscores":"x",' + ' "no-alias": "y"}}, "my-var": "it worked"}', + 'v1.something_hyphenated', + '{\n "no-alias": "y",\n "no.underscores": "x"\n}\n' + ), + ) + ) + def test_handle_args_returns_nested_varname( + self, inst_data, varname, expected, capsys, tmpdir + ): + """If user_data file is a jinja template render instance-data vars.""" + instance_data = tmpdir.join('instance-data') + instance_data.write(inst_data) + args = self.args( + debug=False, dump_all=False, format=None, + instance_data=instance_data.strpath, user_data='ud', + vendor_data='vd', list_keys=False, varname=varname) + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 100 + assert 0 == query.handle_args('anyname', args) + out, _err = capsys.readouterr() + assert expected == out + + def test_handle_args_returns_standardized_vars_to_top_level_aliases( + self, capsys, tmpdir + ): + """Any standardized vars under v# are promoted as top-level aliases.""" + instance_data = tmpdir.join('instance-data') + instance_data.write( + '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},' + ' "top": "gun"}') + expected = dedent("""\ + { + "top": "gun", + "userdata": " file:ud", + "v1": { + "v1_1": "val1.1" + }, + "v1_1": "val1.1", + "v2": { + "v2_2": "val2.2" + }, + "v2_2": "val2.2", + "vendordata": " file:vd" + } + """) + args = self.args( + debug=False, dump_all=True, format=None, + instance_data=instance_data.strpath, user_data='ud', + vendor_data='vd', list_keys=False, varname=None) + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 100 + assert 0 == query.handle_args('anyname', args) + out, _err = capsys.readouterr() + assert expected == out + + def test_handle_args_list_keys_sorts_top_level_keys_when_no_varname( + self, capsys, tmpdir + ): + """Sort all top-level keys when only --list-keys provided.""" + instance_data = tmpdir.join('instance-data') + instance_data.write( + '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},' + ' "top": "gun"}') + expected = 'top\nuserdata\nv1\nv1_1\nv2\nv2_2\nvendordata\n' + args = self.args( + debug=False, dump_all=False, format=None, + instance_data=instance_data.strpath, list_keys=True, + user_data='ud', vendor_data='vd', varname=None) + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 100 + assert 0 == query.handle_args('anyname', args) + out, _err = capsys.readouterr() + assert expected == out + + def test_handle_args_list_keys_sorts_nested_keys_when_varname( + self, capsys, tmpdir + ): + """Sort all nested keys of varname object when --list-keys provided.""" + instance_data = tmpdir.join('instance-data') + instance_data.write( + '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2":' + + ' {"v2_2": "val2.2"}, "top": "gun"}') + expected = 'v1_1\nv1_2\n' + args = self.args( + debug=False, dump_all=False, format=None, + instance_data=instance_data.strpath, list_keys=True, + user_data='ud', vendor_data='vd', varname='v1') + with mock.patch('os.getuid') as m_getuid: + 
+            m_getuid.return_value = 100
+            assert 0 == query.handle_args('anyname', args)
+        out, _err = capsys.readouterr()
+        assert expected == out
+
+    def test_handle_args_list_keys_errors_when_varname_is_not_a_dict(
+        self, caplog, tmpdir
+    ):
+        """Raise an error when --list-keys and varname specify a non-dict."""
+        instance_data = tmpdir.join('instance-data')
+        instance_data.write(
+            '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2": ' +
+            '{"v2_2": "val2.2"}, "top": "gun"}')
+        expected_error = "--list-keys provided but 'top' is not a dict"
+        args = self.args(
+            debug=False, dump_all=False, format=None,
+            instance_data=instance_data.strpath, list_keys=True,
+            user_data='ud', vendor_data='vd', varname='top')
+        with mock.patch('os.getuid') as m_getuid:
+            m_getuid.return_value = 100
+            assert 1 == query.handle_args('anyname', args)
+        assert expected_error in caplog.text
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/cmd/test_status.py b/tests/unittests/cmd/test_status.py
new file mode 100644
index 00000000..49eae043
--- /dev/null
+++ b/tests/unittests/cmd/test_status.py
@@ -0,0 +1,391 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from collections import namedtuple
+import os
+from io import StringIO
+from textwrap import dedent
+
+from cloudinit.atomic_helper import write_json
+from cloudinit.cmd import status
+from cloudinit.util import ensure_file
+from tests.unittests.helpers import CiTestCase, wrap_and_call, mock
+
+mypaths = namedtuple('MyPaths', 'run_dir')
+myargs = namedtuple('MyArgs', 'long wait')
+
+
+class TestStatus(CiTestCase):
+
+    def setUp(self):
+        super(TestStatus, self).setUp()
+        self.new_root = self.tmp_dir()
+        self.status_file = self.tmp_path('status.json', self.new_root)
+        self.disable_file = self.tmp_path('cloudinit-disable', self.new_root)
+        self.paths = mypaths(run_dir=self.new_root)
+
+        class FakeInit(object):
+            paths = self.paths
+
+            def __init__(self, ds_deps):
+                pass
+
+            def read_cfg(self):
+                pass
+
+        self.init_class = FakeInit
+
+    def test__is_cloudinit_disabled_false_on_sysvinit(self):
+        '''When not in an environment using systemd, return False.'''
+        ensure_file(self.disable_file)  # Create the ignored disable file
+        (is_disabled, reason) = wrap_and_call(
+            'cloudinit.cmd.status',
+            {'uses_systemd': False,
+             'get_cmdline': "root=/dev/my-root not-important"},
+            status._is_cloudinit_disabled, self.disable_file, self.paths)
+        self.assertFalse(
+            is_disabled, 'expected enabled cloud-init on sysvinit')
+        self.assertEqual('Cloud-init enabled on sysvinit', reason)
+
+    def test__is_cloudinit_disabled_true_on_disable_file(self):
+        '''When using systemd and disable_file is present return disabled.'''
+        ensure_file(self.disable_file)  # Create observed disable file
+        (is_disabled, reason) = wrap_and_call(
+            'cloudinit.cmd.status',
+            {'uses_systemd': True,
+             'get_cmdline': "root=/dev/my-root not-important"},
+            status._is_cloudinit_disabled, self.disable_file, self.paths)
+        self.assertTrue(is_disabled, 'expected disabled cloud-init')
+        self.assertEqual(
+            'Cloud-init disabled by {0}'.format(self.disable_file), reason)
+
+    def test__is_cloudinit_disabled_false_on_kernel_cmdline_enable(self):
+        '''Not disabled when using systemd and enabled via commandline.'''
+        ensure_file(self.disable_file)  # Create ignored disable file
+        (is_disabled, reason) = wrap_and_call(
+            'cloudinit.cmd.status',
+            {'uses_systemd': True,
+             'get_cmdline': 'something cloud-init=enabled else'},
+            status._is_cloudinit_disabled, self.disable_file, self.paths)
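
These TestStatus cases drive everything through the wrap_and_call helper
imported above from tests/unittests/helpers.py: it patches each name in the
given dict on the target module, invokes the callable, then restores the
originals. A rough, self-contained sketch of the idea, assuming only plain
unittest.mock (the name wrap_and_call_sketch and the value handling are
illustrative, not the real helper's API):

    from contextlib import ExitStack
    from unittest import mock

    def wrap_and_call_sketch(module_path, patches, func, *args):
        # Patch every dotted name in `patches` for the duration of the call.
        with ExitStack() as stack:
            for name, new_value in patches.items():
                stack.enter_context(
                    mock.patch('%s.%s' % (module_path, name), new_value))
            return func(*args)

As the surrounding tests show, the real helper also accepts mock.patch
keyword specs such as {'side_effect': ...}, which is how FakeInit is
injected for 'Init' below.
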
+        self.assertFalse(is_disabled, 'expected enabled cloud-init')
+        self.assertEqual(
+            'Cloud-init enabled by kernel command line cloud-init=enabled',
+            reason)
+
+    def test__is_cloudinit_disabled_true_on_kernel_cmdline(self):
+        '''Disabled when kernel command line contains cloud-init=disabled.'''
+        (is_disabled, reason) = wrap_and_call(
+            'cloudinit.cmd.status',
+            {'uses_systemd': True,
+             'get_cmdline': 'something cloud-init=disabled else'},
+            status._is_cloudinit_disabled, self.disable_file, self.paths)
+        self.assertTrue(is_disabled, 'expected disabled cloud-init')
+        self.assertEqual(
+            'Cloud-init disabled by kernel parameter cloud-init=disabled',
+            reason)
+
+    def test__is_cloudinit_disabled_true_when_generator_disables(self):
+        '''When cloud-init-generator doesn't write enabled file return True.'''
+        enabled_file = os.path.join(self.paths.run_dir, 'enabled')
+        self.assertFalse(os.path.exists(enabled_file))
+        (is_disabled, reason) = wrap_and_call(
+            'cloudinit.cmd.status',
+            {'uses_systemd': True,
+             'get_cmdline': 'something'},
+            status._is_cloudinit_disabled, self.disable_file, self.paths)
+        self.assertTrue(is_disabled, 'expected disabled cloud-init')
+        self.assertEqual('Cloud-init disabled by cloud-init-generator', reason)
+
+    def test__is_cloudinit_disabled_false_when_enabled_in_systemd(self):
+        '''Report enabled when systemd generator creates the enabled file.'''
+        enabled_file = os.path.join(self.paths.run_dir, 'enabled')
+        ensure_file(enabled_file)
+        (is_disabled, reason) = wrap_and_call(
+            'cloudinit.cmd.status',
+            {'uses_systemd': True,
+             'get_cmdline': 'something ignored'},
+            status._is_cloudinit_disabled, self.disable_file, self.paths)
+        self.assertFalse(is_disabled, 'expected enabled cloud-init')
+        self.assertEqual(
+            'Cloud-init enabled by systemd cloud-init-generator', reason)
+
+    def test_status_returns_not_run(self):
+        '''When status.json does not exist yet, return 'not run'.'''
+        self.assertFalse(
+            os.path.exists(self.status_file), 'Unexpected status.json found')
+        cmdargs = myargs(long=False, wait=False)
+        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+            retcode = wrap_and_call(
+                'cloudinit.cmd.status',
+                {'_is_cloudinit_disabled': (False, ''),
+                 'Init': {'side_effect': self.init_class}},
+                status.handle_status_args, 'ignored', cmdargs)
+        self.assertEqual(0, retcode)
+        self.assertEqual('status: not run\n', m_stdout.getvalue())
+
+    def test_status_returns_disabled_long_on_presence_of_disable_file(self):
+        '''When cloud-init is disabled, return the disabled reason.'''
+
+        checked_files = []
+
+        def fakeexists(filepath):
+            checked_files.append(filepath)
+            status_file = os.path.join(self.paths.run_dir, 'status.json')
+            return bool(not filepath == status_file)
+
+        cmdargs = myargs(long=True, wait=False)
+        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+            retcode = wrap_and_call(
+                'cloudinit.cmd.status',
+                {'os.path.exists': {'side_effect': fakeexists},
+                 '_is_cloudinit_disabled': (True, 'disabled for some reason'),
+                 'Init': {'side_effect': self.init_class}},
+                status.handle_status_args, 'ignored', cmdargs)
+        self.assertEqual(0, retcode)
+        self.assertEqual(
+            [os.path.join(self.paths.run_dir, 'status.json')],
+            checked_files)
+        expected = dedent('''\
+            status: disabled
+            detail:
+            disabled for some reason
+        ''')
+        self.assertEqual(expected, m_stdout.getvalue())
+
+    def test_status_returns_running_on_no_results_json(self):
+        '''Report running when status.json exists but result.json does not.'''
+        result_file = self.tmp_path('result.json', self.new_root)
+        write_json(self.status_file, {})
+        self.assertFalse(
+            os.path.exists(result_file), 'Unexpected result.json found')
+        cmdargs = myargs(long=False, wait=False)
+        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+            retcode = wrap_and_call(
+                'cloudinit.cmd.status',
+                {'_is_cloudinit_disabled': (False, ''),
+                 'Init': {'side_effect': self.init_class}},
+                status.handle_status_args, 'ignored', cmdargs)
+        self.assertEqual(0, retcode)
+        self.assertEqual('status: running\n', m_stdout.getvalue())
+
+    def test_status_returns_running(self):
+        '''Report running when status exists with an unfinished stage.'''
+        ensure_file(self.tmp_path('result.json', self.new_root))
+        write_json(self.status_file,
+                   {'v1': {'init': {'start': 1, 'finished': None}}})
+        cmdargs = myargs(long=False, wait=False)
+        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+            retcode = wrap_and_call(
+                'cloudinit.cmd.status',
+                {'_is_cloudinit_disabled': (False, ''),
+                 'Init': {'side_effect': self.init_class}},
+                status.handle_status_args, 'ignored', cmdargs)
+        self.assertEqual(0, retcode)
+        self.assertEqual('status: running\n', m_stdout.getvalue())
+
+    def test_status_returns_done(self):
+        '''Report done when result.json exists and no stage is unfinished.'''
+        ensure_file(self.tmp_path('result.json', self.new_root))
+        write_json(
+            self.status_file,
+            {'v1': {'stage': None,  # No current stage running
+                    'datasource': (
+                        'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]'
+                        '[dsmode=net]'),
+                    'blah': {'finished': 123.456},
+                    'init': {'errors': [], 'start': 124.567,
+                             'finished': 125.678},
+                    'init-local': {'start': 123.45, 'finished': 123.46}}})
+        cmdargs = myargs(long=False, wait=False)
+        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+            retcode = wrap_and_call(
+                'cloudinit.cmd.status',
+                {'_is_cloudinit_disabled': (False, ''),
+                 'Init': {'side_effect': self.init_class}},
+                status.handle_status_args, 'ignored', cmdargs)
+        self.assertEqual(0, retcode)
+        self.assertEqual('status: done\n', m_stdout.getvalue())
+
+    def test_status_returns_done_long(self):
+        '''Long format of done status includes datasource info.'''
+        ensure_file(self.tmp_path('result.json', self.new_root))
+        write_json(
+            self.status_file,
+            {'v1': {'stage': None,
+                    'datasource': (
+                        'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]'
+                        '[dsmode=net]'),
+                    'init': {'start': 124.567, 'finished': 125.678},
+                    'init-local': {'start': 123.45, 'finished': 123.46}}})
+        cmdargs = myargs(long=True, wait=False)
+        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+            retcode = wrap_and_call(
+                'cloudinit.cmd.status',
+                {'_is_cloudinit_disabled': (False, ''),
+                 'Init': {'side_effect': self.init_class}},
+                status.handle_status_args, 'ignored', cmdargs)
+        self.assertEqual(0, retcode)
+        expected = dedent('''\
+            status: done
+            time: Thu, 01 Jan 1970 00:02:05 +0000
+            detail:
+            DataSourceNoCloud [seed=/var/.../seed/nocloud-net][dsmode=net]
+        ''')
+        self.assertEqual(expected, m_stdout.getvalue())
+
+    def test_status_on_errors(self):
+        '''Reports error when any stage has errors.'''
+        write_json(
+            self.status_file,
+            {'v1': {'stage': None,
+                    'blah': {'errors': [], 'finished': 123.456},
+                    'init': {'errors': ['error1'], 'start': 124.567,
+                             'finished': 125.678},
+                    'init-local': {'start': 123.45, 'finished': 123.46}}})
+        cmdargs = myargs(long=False, wait=False)
+        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+            retcode = wrap_and_call(
+                'cloudinit.cmd.status',
+                {'_is_cloudinit_disabled': (False, ''),
+                 'Init': {'side_effect': self.init_class}},
+                status.handle_status_args, 'ignored', cmdargs)
+        self.assertEqual(1, retcode)
+        self.assertEqual('status: error\n', m_stdout.getvalue())
+
+    def test_status_on_errors_long(self):
+        '''Long format of error status includes all error messages.'''
+        write_json(
+            self.status_file,
+            {'v1': {'stage': None,
+                    'datasource': (
+                        'DataSourceNoCloud [seed=/var/.../seed/nocloud-net]'
+                        '[dsmode=net]'),
+                    'init': {'errors': ['error1'], 'start': 124.567,
+                             'finished': 125.678},
+                    'init-local': {'errors': ['error2', 'error3'],
+                                   'start': 123.45, 'finished': 123.46}}})
+        cmdargs = myargs(long=True, wait=False)
+        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+            retcode = wrap_and_call(
+                'cloudinit.cmd.status',
+                {'_is_cloudinit_disabled': (False, ''),
+                 'Init': {'side_effect': self.init_class}},
+                status.handle_status_args, 'ignored', cmdargs)
+        self.assertEqual(1, retcode)
+        expected = dedent('''\
+            status: error
+            time: Thu, 01 Jan 1970 00:02:05 +0000
+            detail:
+            error1
+            error2
+            error3
+        ''')
+        self.assertEqual(expected, m_stdout.getvalue())
+
+    def test_status_returns_running_long_format(self):
+        '''Long format reports the stage in which we are running.'''
+        write_json(
+            self.status_file,
+            {'v1': {'stage': 'init',
+                    'init': {'start': 124.456, 'finished': None},
+                    'init-local': {'start': 123.45, 'finished': 123.46}}})
+        cmdargs = myargs(long=True, wait=False)
+        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+            retcode = wrap_and_call(
+                'cloudinit.cmd.status',
+                {'_is_cloudinit_disabled': (False, ''),
+                 'Init': {'side_effect': self.init_class}},
+                status.handle_status_args, 'ignored', cmdargs)
+        self.assertEqual(0, retcode)
+        expected = dedent('''\
+            status: running
+            time: Thu, 01 Jan 1970 00:02:04 +0000
+            detail:
+            Running in stage: init
+        ''')
+        self.assertEqual(expected, m_stdout.getvalue())
+
+    def test_status_wait_blocks_until_done(self):
+        '''Specifying wait will poll every 1/4 second until done state.'''
+        running_json = {
+            'v1': {'stage': 'init',
+                   'init': {'start': 124.456, 'finished': None},
+                   'init-local': {'start': 123.45, 'finished': 123.46}}}
+        done_json = {
+            'v1': {'stage': None,
+                   'init': {'start': 124.456, 'finished': 125.678},
+                   'init-local': {'start': 123.45, 'finished': 123.46}}}
+
+        self.sleep_calls = 0
+
+        def fake_sleep(interval):
+            self.assertEqual(0.25, interval)
+            self.sleep_calls += 1
+            if self.sleep_calls == 2:
+                write_json(self.status_file, running_json)
+            elif self.sleep_calls == 3:
+                write_json(self.status_file, done_json)
+                result_file = self.tmp_path('result.json', self.new_root)
+                ensure_file(result_file)
+
+        cmdargs = myargs(long=False, wait=True)
+        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+            retcode = wrap_and_call(
+                'cloudinit.cmd.status',
+                {'sleep': {'side_effect': fake_sleep},
+                 '_is_cloudinit_disabled': (False, ''),
+                 'Init': {'side_effect': self.init_class}},
+                status.handle_status_args, 'ignored', cmdargs)
+        self.assertEqual(0, retcode)
+        self.assertEqual(4, self.sleep_calls)
+        self.assertEqual('....\nstatus: done\n', m_stdout.getvalue())
+
+    def test_status_wait_blocks_until_error(self):
+        '''Specifying wait will poll every 1/4 second until error state.'''
+        running_json = {
+            'v1': {'stage': 'init',
+                   'init': {'start': 124.456, 'finished': None},
+                   'init-local': {'start': 123.45, 'finished': 123.46}}}
+        error_json = {
+            'v1': {'stage': None,
+                   'init': {'errors': ['error1'], 'start': 124.456,
+                            'finished': 125.678},
+                   'init-local': {'start': 123.45, 'finished': 123.46}}}
+
+        self.sleep_calls = 0
+
+        def fake_sleep(interval):
+            self.assertEqual(0.25, interval)
+            self.sleep_calls += 1
+            if self.sleep_calls == 2:
+                write_json(self.status_file, running_json)
+            elif self.sleep_calls == 3:
+                write_json(self.status_file, error_json)
+
+        cmdargs = myargs(long=False, wait=True)
+        with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+            retcode = wrap_and_call(
+                'cloudinit.cmd.status',
+                {'sleep': {'side_effect': fake_sleep},
+                 '_is_cloudinit_disabled': (False, ''),
+                 'Init': {'side_effect': self.init_class}},
+                status.handle_status_args, 'ignored', cmdargs)
+        self.assertEqual(1, retcode)
+        self.assertEqual(4, self.sleep_calls)
+        self.assertEqual('....\nstatus: error\n', m_stdout.getvalue())
+
+    def test_status_main(self):
+        '''status.main can be run as a standalone script.'''
+        write_json(self.status_file,
+                   {'v1': {'init': {'start': 1, 'finished': None}}})
+        with self.assertRaises(SystemExit) as context_manager:
+            with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+                wrap_and_call(
+                    'cloudinit.cmd.status',
+                    {'sys.argv': {'new': ['status']},
+                     '_is_cloudinit_disabled': (False, ''),
+                     'Init': {'side_effect': self.init_class}},
+                    status.main)
+        self.assertEqual(0, context_manager.exception.code)
+        self.assertEqual('status: running\n', m_stdout.getvalue())
+
+# vi: ts=4 expandtab syntax=python
diff --git a/tests/unittests/config/__init__.py b/tests/unittests/config/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/unittests/config/test_apt_conf_v1.py b/tests/unittests/config/test_apt_conf_v1.py
new file mode 100644
index 00000000..98d99945
--- /dev/null
+++ b/tests/unittests/config/test_apt_conf_v1.py
@@ -0,0 +1,129 @@
+# This file is part of cloud-init. See LICENSE file for license information.
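
The proxy tests in this new file assert against rendered apt configuration
of the form acquire::<type>::proxy "<value>"; matched case-insensitively.
A minimal, runnable illustration of the shape being matched; the sample
content is an assumption inferred from the regex used below, not a file
from this changeset:

    import re

    sample = 'Acquire::http::Proxy "myproxy";\n'
    pattern = r"acquire::%s::proxy\s+[\"']%s[\"'];\n" % ("http", "myproxy")
    assert re.search(pattern, sample, flags=re.IGNORECASE)
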
+
+from cloudinit.config import cc_apt_configure
+from cloudinit import util
+
+from tests.unittests.helpers import TestCase
+
+import copy
+import os
+import re
+import shutil
+import tempfile
+
+
+class TestAptProxyConfig(TestCase):
+    def setUp(self):
+        super(TestAptProxyConfig, self).setUp()
+        self.tmp = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, self.tmp)
+        self.pfile = os.path.join(self.tmp, "proxy.cfg")
+        self.cfile = os.path.join(self.tmp, "config.cfg")
+
+    def _search_apt_config(self, contents, ptype, value):
+        return re.search(
+            r"acquire::%s::proxy\s+[\"']%s[\"'];\n" % (ptype, value),
+            contents, flags=re.IGNORECASE)
+
+    def test_apt_proxy_written(self):
+        cfg = {'proxy': 'myproxy'}
+        cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
+
+        self.assertTrue(os.path.isfile(self.pfile))
+        self.assertFalse(os.path.isfile(self.cfile))
+
+        contents = util.load_file(self.pfile)
+        self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
+
+    def test_apt_http_proxy_written(self):
+        cfg = {'http_proxy': 'myproxy'}
+        cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
+
+        self.assertTrue(os.path.isfile(self.pfile))
+        self.assertFalse(os.path.isfile(self.cfile))
+
+        contents = util.load_file(self.pfile)
+        self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
+
+    def test_apt_all_proxy_written(self):
+        cfg = {'http_proxy': 'myproxy_http_proxy',
+               'https_proxy': 'myproxy_https_proxy',
+               'ftp_proxy': 'myproxy_ftp_proxy'}
+
+        values = {'http': cfg['http_proxy'],
+                  'https': cfg['https_proxy'],
+                  'ftp': cfg['ftp_proxy'],
+                  }
+
+        cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
+
+        self.assertTrue(os.path.isfile(self.pfile))
+        self.assertFalse(os.path.isfile(self.cfile))
+
+        contents = util.load_file(self.pfile)
+
+        for ptype, pval in values.items():
+            self.assertTrue(self._search_apt_config(contents, ptype, pval))
+
+    def test_proxy_deleted(self):
+        util.write_file(self.cfile, "content doesnt matter")
+        cc_apt_configure.apply_apt_config({}, self.pfile, self.cfile)
+        self.assertFalse(os.path.isfile(self.pfile))
+        self.assertFalse(os.path.isfile(self.cfile))
+
+    def test_proxy_replaced(self):
+        util.write_file(self.cfile, "content doesnt matter")
+        cc_apt_configure.apply_apt_config({'proxy': "foo"},
+                                          self.pfile, self.cfile)
+        self.assertTrue(os.path.isfile(self.pfile))
+        contents = util.load_file(self.pfile)
+        self.assertTrue(self._search_apt_config(contents, "http", "foo"))
+
+    def test_config_written(self):
+        payload = 'this is my apt config'
+        cfg = {'conf': payload}
+
+        cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
+
+        self.assertTrue(os.path.isfile(self.cfile))
+        self.assertFalse(os.path.isfile(self.pfile))
+
+        self.assertEqual(util.load_file(self.cfile), payload)
+
+    def test_config_replaced(self):
+        util.write_file(self.pfile, "content doesnt matter")
+        cc_apt_configure.apply_apt_config({'conf': "foo"},
+                                          self.pfile, self.cfile)
+        self.assertTrue(os.path.isfile(self.cfile))
+        self.assertEqual(util.load_file(self.cfile), "foo")
+
+    def test_config_deleted(self):
+        # if no 'conf' is provided, delete any previously written file
+        util.write_file(self.pfile, "content doesnt matter")
+        cc_apt_configure.apply_apt_config({}, self.pfile, self.cfile)
+        self.assertFalse(os.path.isfile(self.pfile))
+        self.assertFalse(os.path.isfile(self.cfile))
+
+
+class TestConversion(TestCase):
+    def test_convert_with_apt_mirror_as_empty_string(self):
+        # an empty apt_mirror is the same as no apt_mirror
+        empty_m_found = cc_apt_configure.convert_to_v3_apt_format(
+            {'apt_mirror': ''})
+        default_found = cc_apt_configure.convert_to_v3_apt_format({})
+        self.assertEqual(default_found, empty_m_found)
+
+    def test_convert_with_apt_mirror(self):
+        mirror = 'http://my.mirror/ubuntu'
+        f = cc_apt_configure.convert_to_v3_apt_format({'apt_mirror': mirror})
+        self.assertIn(mirror, set(m['uri'] for m in f['apt']['primary']))
+
+    def test_no_old_content(self):
+        mirror = 'http://my.mirror/ubuntu'
+        mydata = {'apt': {'primary': {'arches': ['default'], 'uri': mirror}}}
+        expected = copy.deepcopy(mydata)
+        self.assertEqual(expected,
+                         cc_apt_configure.convert_to_v3_apt_format(mydata))
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_apt_configure_sources_list_v1.py b/tests/unittests/config/test_apt_configure_sources_list_v1.py
new file mode 100644
index 00000000..4aeaea24
--- /dev/null
+++ b/tests/unittests/config/test_apt_configure_sources_list_v1.py
@@ -0,0 +1,181 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+""" test_handler_apt_configure_sources_list
+Test templating of sources list
+"""
+import logging
+import os
+import shutil
+import tempfile
+from unittest import mock
+
+from cloudinit import templater
+from cloudinit import subp
+from cloudinit import util
+
+from cloudinit.config import cc_apt_configure
+
+from cloudinit.distros.debian import Distro
+
+from tests.unittests import helpers as t_help
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+YAML_TEXT_CUSTOM_SL = """
+apt_mirror: http://archive.ubuntu.com/ubuntu/
+apt_custom_sources_list: |
+    ## template:jinja
+    ## Note, this file is written by cloud-init on first boot of an instance
+    ## modifications made here will not survive a re-bundle.
+    ## if you wish to make changes you can:
+    ## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg
+    ##     or do the same in user-data
+    ## b.) add sources in /etc/apt/sources.list.d
+    ## c.) make changes to template file /etc/cloud/templates/sources.list.tmpl
+
+    # See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
+    # newer versions of the distribution.
+    deb {{mirror}} {{codename}} main restricted
+    deb-src {{mirror}} {{codename}} main restricted
+    # FIND_SOMETHING_SPECIAL
+"""
+
+EXPECTED_CONVERTED_CONTENT = (
+    """## Note, this file is written by cloud-init on first boot of an instance
+## modifications made here will not survive a re-bundle.
+## if you wish to make changes you can:
+## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg
+##     or do the same in user-data
+## b.) add sources in /etc/apt/sources.list.d
+## c.) make changes to template file /etc/cloud/templates/sources.list.tmpl
+
+# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
+# newer versions of the distribution.
+deb http://archive.ubuntu.com/ubuntu/ fakerelease main restricted
+deb-src http://archive.ubuntu.com/ubuntu/ fakerelease main restricted
+# FIND_SOMETHING_SPECIAL
+""")
+
+
+class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
+    """TestAptSourceConfigSourceList
+    Main Class to test sources list rendering
+    """
+    def setUp(self):
+        super(TestAptSourceConfigSourceList, self).setUp()
+        self.subp = subp.subp
+        self.new_root = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, self.new_root)
+
+        rpatcher = mock.patch("cloudinit.util.lsb_release")
+        get_rel = rpatcher.start()
+        get_rel.return_value = {'codename': "fakerelease"}
+        self.addCleanup(rpatcher.stop)
+        apatcher = mock.patch("cloudinit.util.get_dpkg_architecture")
+        get_arch = apatcher.start()
+        get_arch.return_value = 'amd64'
+        self.addCleanup(apatcher.stop)
+
+    def apt_source_list(self, distro, mirror, mirrorcheck=None):
+        """apt_source_list
+        Test rendering of a source.list from template for a given distro
+        """
+        if mirrorcheck is None:
+            mirrorcheck = mirror
+
+        if isinstance(mirror, list):
+            cfg = {'apt_mirror_search': mirror}
+        else:
+            cfg = {'apt_mirror': mirror}
+
+        mycloud = get_cloud(distro)
+
+        with mock.patch.object(util, 'write_file') as mockwf:
+            with mock.patch.object(util, 'load_file',
+                                   return_value="faketmpl") as mocklf:
+                with mock.patch.object(os.path, 'isfile',
+                                       return_value=True) as mockisfile:
+                    with mock.patch.object(
+                            templater, 'render_string',
+                            return_value='fake') as mockrnd:
+                        with mock.patch.object(util, 'rename'):
+                            cc_apt_configure.handle("test", cfg, mycloud,
+                                                    LOG, None)
+
+        mockisfile.assert_any_call(
+            ('/etc/cloud/templates/sources.list.%s.tmpl' % distro))
+        mocklf.assert_any_call(
+            ('/etc/cloud/templates/sources.list.%s.tmpl' % distro))
+        mockrnd.assert_called_once_with('faketmpl',
+                                        {'RELEASE': 'fakerelease',
+                                         'PRIMARY': mirrorcheck,
+                                         'MIRROR': mirrorcheck,
+                                         'SECURITY': mirrorcheck,
+                                         'codename': 'fakerelease',
+                                         'primary': mirrorcheck,
+                                         'mirror': mirrorcheck,
+                                         'security': mirrorcheck})
+        mockwf.assert_called_once_with('/etc/apt/sources.list', 'fake',
+                                       mode=0o644)
+
+    def test_apt_v1_source_list_debian(self):
+        """Test rendering of a source.list from template for debian"""
+        self.apt_source_list('debian', 'http://httpredir.debian.org/debian')
+
+    def test_apt_v1_source_list_ubuntu(self):
+        """Test rendering of a source.list from template for ubuntu"""
+        self.apt_source_list('ubuntu', 'http://archive.ubuntu.com/ubuntu/')
+
+    @staticmethod
+    def myresolve(name):
+        """Fake util.is_resolvable for mirrorfail tests"""
+        if name == "does.not.exist":
+            print("Faking FAIL for '%s'" % name)
+            return False
+        else:
+            print("Faking SUCCESS for '%s'" % name)
+            return True
+
+    def test_apt_v1_srcl_debian_mirrorfail(self):
+        """Test rendering of a source.list from template for debian"""
+        with mock.patch.object(util, 'is_resolvable',
+                               side_effect=self.myresolve) as mockresolve:
+            self.apt_source_list('debian',
+                                 ['http://does.not.exist',
+                                  'http://httpredir.debian.org/debian'],
+                                 'http://httpredir.debian.org/debian')
+            mockresolve.assert_any_call("does.not.exist")
+            mockresolve.assert_any_call("httpredir.debian.org")
+
+    def test_apt_v1_srcl_ubuntu_mirrorfail(self):
+        """Test rendering of a source.list from template for ubuntu"""
+        with mock.patch.object(util, 'is_resolvable',
+                               side_effect=self.myresolve) as mockresolve:
+            self.apt_source_list('ubuntu',
+                                 ['http://does.not.exist',
+                                  'http://archive.ubuntu.com/ubuntu/'],
+                                 'http://archive.ubuntu.com/ubuntu/')
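
These mirrorfail tests work by handing apt_mirror_search a list and letting
resolution failures skip unreachable mirrors: the first candidate whose
hostname resolves is the one used. A condensed sketch of that selection
logic under a simple resolver predicate like the fake above
(search_for_mirror_sketch is illustrative, not cloud-init's exact helper):

    def search_for_mirror_sketch(candidates, is_resolvable):
        # Return the first candidate URL whose host resolves.
        for url in candidates:
            host = url.split('//', 1)[-1].split('/', 1)[0]
            if is_resolvable(host):
                return url
        return None

    chosen = search_for_mirror_sketch(
        ['http://does.not.exist', 'http://httpredir.debian.org/debian'],
        lambda host: host != 'does.not.exist')
    assert chosen == 'http://httpredir.debian.org/debian'
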
+            mockresolve.assert_any_call("does.not.exist")
+            mockresolve.assert_any_call("archive.ubuntu.com")
+
+    def test_apt_v1_srcl_custom(self):
+        """Test rendering from a custom source.list template"""
+        cfg = util.load_yaml(YAML_TEXT_CUSTOM_SL)
+        mycloud = get_cloud()
+
+        # the second mock restores the original subp
+        with mock.patch.object(util, 'write_file') as mockwrite:
+            with mock.patch.object(subp, 'subp', self.subp):
+                with mock.patch.object(Distro, 'get_primary_arch',
+                                       return_value='amd64'):
+                    cc_apt_configure.handle("notimportant", cfg, mycloud,
+                                            LOG, None)
+
+        mockwrite.assert_called_once_with(
+            '/etc/apt/sources.list',
+            EXPECTED_CONVERTED_CONTENT,
+            mode=420)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_apt_configure_sources_list_v3.py b/tests/unittests/config/test_apt_configure_sources_list_v3.py
new file mode 100644
index 00000000..a8087bd1
--- /dev/null
+++ b/tests/unittests/config/test_apt_configure_sources_list_v3.py
@@ -0,0 +1,226 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+""" test_apt_custom_sources_list
+Test templating of custom sources list
+"""
+from contextlib import ExitStack
+import logging
+import os
+import shutil
+import tempfile
+from unittest import mock
+from unittest.mock import call
+
+from cloudinit import subp
+from cloudinit import util
+from cloudinit.config import cc_apt_configure
+from cloudinit.distros.debian import Distro
+from tests.unittests import helpers as t_help
+
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+TARGET = "/"
+
+# Input and expected output for the custom template
+YAML_TEXT_CUSTOM_SL = """
+apt:
+  primary:
+    - arches: [default]
+      uri: http://test.ubuntu.com/ubuntu/
+  security:
+    - arches: [default]
+      uri: http://testsec.ubuntu.com/ubuntu/
+  sources_list: |
+
+      # Note, this file is written by cloud-init at install time. It should not
+      # end up on the installed system itself.
+      # See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
+      # newer versions of the distribution.
+      deb $MIRROR $RELEASE main restricted
+      deb-src $MIRROR $RELEASE main restricted
+      deb $PRIMARY $RELEASE universe restricted
+      deb $SECURITY $RELEASE-security multiverse
+      # FIND_SOMETHING_SPECIAL
+"""
+
+EXPECTED_CONVERTED_CONTENT = """
+# Note, this file is written by cloud-init at install time. It should not
+# end up on the installed system itself.
+# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
+# newer versions of the distribution.
+deb http://test.ubuntu.com/ubuntu/ fakerel main restricted
+deb-src http://test.ubuntu.com/ubuntu/ fakerel main restricted
+deb http://test.ubuntu.com/ubuntu/ fakerel universe restricted
+deb http://testsec.ubuntu.com/ubuntu/ fakerel-security multiverse
+# FIND_SOMETHING_SPECIAL
+"""
+
+# mocked to be independent of the unittest system
+MOCKED_APT_SRC_LIST = """
+deb http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
+deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted
+"""
+
+EXPECTED_BASE_CONTENT = ("""
+deb http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
+deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted
+""")
+
+EXPECTED_MIRROR_CONTENT = ("""
+deb http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
+deb http://test.ubuntu.com/ubuntu/ notouched-security main restricted
+""")
+
+EXPECTED_PRIMSEC_CONTENT = ("""
+deb http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
+deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
+deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted
+""")
+
+
+class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
+    """TestAptSourceConfigSourceList - Class to test sources list rendering"""
+    def setUp(self):
+        super(TestAptSourceConfigSourceList, self).setUp()
+        self.subp = subp.subp
+        self.new_root = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, self.new_root)
+
+        rpatcher = mock.patch("cloudinit.util.lsb_release")
+        get_rel = rpatcher.start()
+        get_rel.return_value = {'codename': "fakerel"}
+        self.addCleanup(rpatcher.stop)
+        apatcher = mock.patch("cloudinit.util.get_dpkg_architecture")
+        get_arch = apatcher.start()
+        get_arch.return_value = 'amd64'
+        self.addCleanup(apatcher.stop)
+
+    def _apt_source_list(self, distro, cfg, cfg_on_empty=False):
+        """_apt_source_list - Test rendering from template (generic)"""
+        # entry at top level now, wrap in 'apt' key
+        cfg = {'apt': cfg}
+        mycloud = get_cloud(distro)
+
+        with ExitStack() as stack:
+            mock_writefile = stack.enter_context(mock.patch.object(
+                util, 'write_file'))
+            mock_loadfile = stack.enter_context(mock.patch.object(
+                util, 'load_file', return_value=MOCKED_APT_SRC_LIST))
+            mock_isfile = stack.enter_context(mock.patch.object(
+                os.path, 'isfile', return_value=True))
+            stack.enter_context(mock.patch.object(
+                util, 'del_file'))
+            cfg_func = ('cloudinit.config.cc_apt_configure.'
+                        '_should_configure_on_empty_apt')
+            mock_shouldcfg = stack.enter_context(mock.patch(
+                cfg_func, return_value=(cfg_on_empty, 'test')
+            ))
+            cc_apt_configure.handle("test", cfg, mycloud, LOG, None)
+
+        return mock_writefile, mock_loadfile, mock_isfile, mock_shouldcfg
+
+    def test_apt_v3_source_list_debian(self):
+        """test_apt_v3_source_list_debian - without custom sources or params"""
+        cfg = {}
+        distro = 'debian'
+        expected = EXPECTED_BASE_CONTENT
+
+        mock_writefile, mock_load_file, mock_isfile, mock_shouldcfg = (
+            self._apt_source_list(distro, cfg, cfg_on_empty=True))
+
+        template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro
+        mock_writefile.assert_called_once_with('/etc/apt/sources.list',
+                                               expected, mode=0o644)
+        mock_load_file.assert_called_with(template)
+        mock_isfile.assert_any_call(template)
+        self.assertEqual(1, mock_shouldcfg.call_count)
+
+    def test_apt_v3_source_list_ubuntu(self):
+        """test_apt_v3_source_list_ubuntu - without custom sources or params"""
+        cfg = {}
+        distro = 'ubuntu'
+        expected = EXPECTED_BASE_CONTENT
+
+        mock_writefile, mock_load_file, mock_isfile, mock_shouldcfg = (
+            self._apt_source_list(distro, cfg, cfg_on_empty=True))
+
+        template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro
+        mock_writefile.assert_called_once_with('/etc/apt/sources.list',
+                                               expected, mode=0o644)
+        mock_load_file.assert_called_with(template)
+        mock_isfile.assert_any_call(template)
+        self.assertEqual(1, mock_shouldcfg.call_count)
+
+    def test_apt_v3_source_list_ubuntu_snappy(self):
+        """test_apt_v3_source_list_ubuntu_snappy - without custom sources or
+           params"""
+        cfg = {'apt': {}}
+        mycloud = get_cloud()
+
+        with mock.patch.object(util, 'write_file') as mock_writefile:
+            with mock.patch.object(util, 'system_is_snappy',
+                                   return_value=True) as mock_issnappy:
+                cc_apt_configure.handle("test", cfg, mycloud, LOG, None)
+
+        self.assertEqual(0, mock_writefile.call_count)
+        self.assertEqual(1, mock_issnappy.call_count)
+
+    def test_apt_v3_source_list_centos(self):
+        """test_apt_v3_source_list_centos - without custom sources or params"""
+        cfg = {}
+        distro = 'rhel'
+
+        mock_writefile, _, _, _ = self._apt_source_list(distro, cfg)
+
+        self.assertEqual(0, mock_writefile.call_count)
+
+    def test_apt_v3_source_list_psm(self):
+        """test_apt_v3_source_list_psm - Test specifying prim+sec mirrors"""
+        pm = 'http://test.ubuntu.com/ubuntu/'
+        sm = 'http://testsec.ubuntu.com/ubuntu/'
+        cfg = {'preserve_sources_list': False,
+               'primary': [{'arches': ["default"],
+                            'uri': pm}],
+               'security': [{'arches': ["default"],
+                             'uri': sm}]}
+        distro = 'ubuntu'
+        expected = EXPECTED_PRIMSEC_CONTENT
+
+        mock_writefile, mock_load_file, mock_isfile, _ = (
+            self._apt_source_list(distro, cfg, cfg_on_empty=True))
+
+        template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro
+        mock_writefile.assert_called_once_with('/etc/apt/sources.list',
+                                               expected, mode=0o644)
+        mock_load_file.assert_called_with(template)
+        mock_isfile.assert_any_call(template)
+
+    def test_apt_v3_srcl_custom(self):
+        """test_apt_v3_srcl_custom - Test rendering a custom source template"""
+        cfg = util.load_yaml(YAML_TEXT_CUSTOM_SL)
+        mycloud = get_cloud()
+
+        # the second mock restores the original subp
+        with mock.patch.object(util, 'write_file') as mockwrite:
+            with mock.patch.object(subp, 'subp', self.subp):
+                with mock.patch.object(Distro, 'get_primary_arch',
+                                       return_value='amd64'):
+                    cc_apt_configure.handle("notimportant", cfg, mycloud,
+                                            LOG, None)
+
+        calls = [call('/etc/apt/sources.list',
+                      EXPECTED_CONVERTED_CONTENT,
+                      mode=0o644)]
+        mockwrite.assert_has_calls(calls)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_apt_key.py b/tests/unittests/config/test_apt_key.py
new file mode 100644
index 00000000..00e5a38d
--- /dev/null
+++ b/tests/unittests/config/test_apt_key.py
@@ -0,0 +1,137 @@
+import os
+from unittest import mock
+
+from cloudinit.config import cc_apt_configure
+from cloudinit import subp
+from cloudinit import util
+
+TEST_KEY_HUMAN = '''
+/etc/apt/cloud-init.gpg.d/my_key.gpg
+--------------------------------------------
+pub   rsa4096 2021-10-22 [SC]
+      3A3E F34D FDED B3B7 F3FD  F603 F83F 7712 9A5E BD85
+uid           [ unknown] Brett Holman <bholman.devel@gmail.com>
+sub   rsa4096 2021-10-22 [A]
+sub   rsa4096 2021-10-22 [E]
+'''
+
+TEST_KEY_MACHINE = '''
+tru::1:1635129362:0:3:1:5
+pub:-:4096:1:F83F77129A5EBD85:1634912922:::-:::scESCA::::::23::0:
+fpr:::::::::3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85:
+uid:-::::1634912922::64F1F1D6FA96316752D635D7C6406C52C40713C7::Brett Holman \
+<bholman.devel@gmail.com>::::::::::0:
+sub:-:4096:1:544B39C9A9141F04:1634912922::::::a::::::23:
+fpr:::::::::8BD901490D6EC986D03D6F0D544B39C9A9141F04:
+sub:-:4096:1:F45D9443F0A87092:1634912922::::::e::::::23:
+fpr:::::::::8CCCB332317324F030A45B19F45D9443F0A87092:
+'''
+
+TEST_KEY_FINGERPRINT_HUMAN = \
+    '3A3E F34D FDED B3B7 F3FD  F603 F83F 7712 9A5E BD85'
+
+TEST_KEY_FINGERPRINT_MACHINE = \
+    '3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85'
+
+
+class TestAptKey:
+    """TestAptKey
+    Class to test apt-key commands
+    """
+    @mock.patch.object(subp, 'subp', return_value=('fakekey', ''))
+    @mock.patch.object(util, 'write_file')
+    def _apt_key_add_success_helper(self, directory, *args, hardened=False):
+        file = cc_apt_configure.apt_key(
+            'add',
+            output_file='my-key',
+            data='fakekey',
+            hardened=hardened)
+        assert file == directory + '/my-key.gpg'
+
+    def test_apt_key_add_success(self):
+        """Verify the correct directory path gets returned for unhardened case
+        """
+        self._apt_key_add_success_helper('/etc/apt/trusted.gpg.d')
+
+    def test_apt_key_add_success_hardened(self):
+        """Verify the correct directory path gets returned for hardened case
+        """
+        self._apt_key_add_success_helper(
+            '/etc/apt/cloud-init.gpg.d',
+            hardened=True)
+
+    def test_apt_key_add_fail_no_file_name(self):
+        """Verify that null filename gets handled correctly
+        """
+        file = cc_apt_configure.apt_key(
+            'add',
+            output_file=None,
+            data='')
+        assert '/dev/null' == file
+
+    def _apt_key_fail_helper(self):
+        file = cc_apt_configure.apt_key(
+            'add',
+            output_file='my-key',
+            data='fakekey')
+        assert file == '/dev/null'
+
+    @mock.patch.object(subp, 'subp', side_effect=subp.ProcessExecutionError)
+    def test_apt_key_add_fail_no_file_name_subproc(self, *args):
+        """Verify that bad key value gets handled correctly
+        """
+        self._apt_key_fail_helper()
+
+    @mock.patch.object(
+        subp, 'subp', side_effect=UnicodeDecodeError('test', b'', 1, 1, ''))
+    def test_apt_key_add_fail_no_file_name_unicode(self, *args):
+        """Verify that bad key encoding gets handled correctly
+        """
+        self._apt_key_fail_helper()
+
+    def _apt_key_list_success_helper(self, finger, key, human_output=True):
+        @mock.patch.object(os, 'listdir', return_value=('/fake/dir/key.gpg',))
+        @mock.patch.object(subp, 'subp', return_value=(key, ''))
+        def mocked_list(*a):
+
+            keys = cc_apt_configure.apt_key('list', human_output)
+            assert finger in keys
+        mocked_list()
+
+    def test_apt_key_list_success_human(self):
+        """Verify expected key output, human
+        """
+        self._apt_key_list_success_helper(
+            TEST_KEY_FINGERPRINT_HUMAN,
+            TEST_KEY_HUMAN)
+
+    def test_apt_key_list_success_machine(self):
+        """Verify expected key output, machine
+        """
+        self._apt_key_list_success_helper(
+            TEST_KEY_FINGERPRINT_MACHINE,
+            TEST_KEY_MACHINE, human_output=False)
+
+    @mock.patch.object(os, 'listdir', return_value=())
+    @mock.patch.object(subp, 'subp', return_value=('', ''))
+    def test_apt_key_list_fail_no_keys(self, *args):
+        """Ensure falsy output for no keys
+        """
+        keys = cc_apt_configure.apt_key('list')
+        assert not keys
+
+    @mock.patch.object(os, 'listdir', return_value=('file_not_gpg_key.txt'))
+    @mock.patch.object(subp, 'subp', return_value=('', ''))
+    def test_apt_key_list_fail_no_keys_file(self, *args):
+        """Ensure non-gpg file is not returned.
+
+        apt-key used file extensions for this, so we do too
+        """
+        assert not cc_apt_configure.apt_key('list')
+
+    @mock.patch.object(subp, 'subp', side_effect=subp.ProcessExecutionError)
+    @mock.patch.object(os, 'listdir', return_value=('bad_gpg_key.gpg'))
+    def test_apt_key_list_fail_bad_key_file(self, *args):
+        """Ensure bad gpg key doesn't throw exception.
+        """
+        assert not cc_apt_configure.apt_key('list')
diff --git a/tests/unittests/config/test_apt_source_v1.py b/tests/unittests/config/test_apt_source_v1.py
new file mode 100644
index 00000000..684c2495
--- /dev/null
+++ b/tests/unittests/config/test_apt_source_v1.py
@@ -0,0 +1,651 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+""" test_handler_apt_source_v1
+Testing various config variations of the apt_source config
+This calls all things with v1 format to stress the conversion code on top of
+the actually tested code.
+"""
+import os
+import re
+import shutil
+import tempfile
+import pathlib
+from unittest import mock
+from unittest.mock import call
+
+from cloudinit.config import cc_apt_configure
+from cloudinit import gpg
+from cloudinit import subp
+from cloudinit import util
+
+from tests.unittests.helpers import TestCase
+
+EXPECTEDKEY = """-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mI0ESuZLUgEEAKkqq3idtFP7g9hzOu1a8+v8ImawQN4TrvlygfScMU1TIS1eC7UQ
+NUA8Qqgr9iUaGnejb0VciqftLrU9D6WYHSKz+EITefgdyJ6SoQxjoJdsCpJ7o9Jy
+8PQnpRttiFm4qHu6BVnKnBNxw/z3ST9YMqW5kbMQpfxbGe+obRox59NpABEBAAG0
+HUxhdW5jaHBhZCBQUEEgZm9yIFNjb3R0IE1vc2VyiLYEEwECACAFAkrmS1ICGwMG
+CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAGILvPA2g/d3aEA/9tVjc10HOZwV29
+OatVuTeERjjrIbxflO586GLA8cp0C9RQCwgod/R+cKYdQcHjbqVcP0HqxveLg0RZ
+FJpWLmWKamwkABErwQLGlM/Hwhjfade8VvEQutH5/0JgKHmzRsoqfR+LMO6OS+Sm
+S0ORP6HXET3+jC8BMG4tBWCTK/XEZw==
+=ACB2
+-----END PGP PUBLIC KEY BLOCK-----"""
+
+ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
+
+
+class FakeDistro(object):
+    """Fake Distro helper object"""
+    def update_package_sources(self):
+        """Fake update_package_sources helper method"""
+        return
+
+
+class FakeDatasource:
+    """Fake Datasource helper object"""
+    def __init__(self):
+        self.region = 'region'
+
+
+class FakeCloud(object):
+    """Fake Cloud helper object"""
+    def __init__(self):
+        self.distro = FakeDistro()
+        self.datasource = FakeDatasource()
+
+
+class TestAptSourceConfig(TestCase):
+    """TestAptSourceConfig
+    Main Class to test apt_source configs
+    """
+    release = "fantastic"
+
+    def setUp(self):
+        super(TestAptSourceConfig, self).setUp()
+        self.tmp = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, self.tmp)
+        self.aptlistfile = os.path.join(self.tmp, "single-deb.list")
+        self.aptlistfile2 = os.path.join(self.tmp, "single-deb2.list")
+        self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list")
+        self.join = os.path.join
+        self.matcher = re.compile(ADD_APT_REPO_MATCH).search
+        # mock fallback filename into writable tmp dir
+        self.fallbackfn = os.path.join(self.tmp, "etc/apt/sources.list.d/",
+                                       "cloud_config_sources.list")
+
+        self.fakecloud = FakeCloud()
+
+        rpatcher = mock.patch("cloudinit.util.lsb_release")
+        get_rel = rpatcher.start()
+        get_rel.return_value = {'codename': self.release}
+        self.addCleanup(rpatcher.stop)
+        apatcher = mock.patch("cloudinit.util.get_dpkg_architecture")
+        get_arch = apatcher.start()
+        get_arch.return_value = 'amd64'
+        self.addCleanup(apatcher.stop)
+
+    def _get_default_params(self):
+        """get_default_params
+        Get the most basic default mirror and release info to be used in tests
+        """
+        params = {}
+        params['RELEASE'] = self.release
+        params['MIRROR'] = "http://archive.ubuntu.com/ubuntu"
+        return params
+
+    def wrapv1conf(self, cfg):
+        params = self._get_default_params()
+        # old v1 list format under old keys, but callable to main handler
+        # disable source.list rendering and set mirror to avoid other code
+        return {'apt_preserve_sources_list': True,
+                'apt_mirror': params['MIRROR'],
+                'apt_sources': cfg}
+
+    def myjoin(self, *args, **kwargs):
+        """myjoin - redir into writable tmpdir"""
+        if (args[0] == "/etc/apt/sources.list.d/" and
+                args[1] == "cloud_config_sources.list" and
+                len(args) == 2):
+            return self.join(self.tmp, args[0].lstrip("/"), args[1])
+        else:
+            return self.join(*args, **kwargs)
+
+    def apt_src_basic(self, filename, cfg):
+        """apt_src_basic
+        Test Fix deb source string, has to overwrite mirror conf in params
+        """
+        cfg = self.wrapv1conf(cfg)
+
+        cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+
+        self.assertTrue(os.path.isfile(filename))
+
+        contents = util.load_file(filename)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb", "http://archive.ubuntu.com/ubuntu",
+                                   "karmic-backports",
+                                   "main universe multiverse restricted"),
+                                  contents, flags=re.IGNORECASE))
+
+    def test_apt_src_basic(self):
+        """Test deb source string, overwrite mirror and filename"""
+        cfg = {'source': ('deb http://archive.ubuntu.com/ubuntu'
+                          ' karmic-backports'
+                          ' main universe multiverse restricted'),
+               'filename': self.aptlistfile}
+        self.apt_src_basic(self.aptlistfile, [cfg])
+
+    def test_apt_src_basic_dict(self):
+        """Test deb source string, overwrite mirror and filename (dict)"""
+        cfg = {self.aptlistfile: {'source':
+                                  ('deb http://archive.ubuntu.com/ubuntu'
+                                   ' karmic-backports'
+                                   ' main universe multiverse restricted')}}
+        self.apt_src_basic(self.aptlistfile, cfg)
+
+    def apt_src_basic_tri(self, cfg):
+        """apt_src_basic_tri
+        Test Fix three deb source string, has to overwrite mirror conf in
+        params. Test with filenames provided in config.
+        generic part to check three files with different content
+        """
+        self.apt_src_basic(self.aptlistfile, cfg)
+
+        # extra verify on two extra files of this test
+        contents = util.load_file(self.aptlistfile2)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb", "http://archive.ubuntu.com/ubuntu",
+                                   "precise-backports",
+                                   "main universe multiverse restricted"),
+                                  contents, flags=re.IGNORECASE))
+        contents = util.load_file(self.aptlistfile3)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb", "http://archive.ubuntu.com/ubuntu",
+                                   "lucid-backports",
+                                   "main universe multiverse restricted"),
+                                  contents, flags=re.IGNORECASE))
+
+    def test_apt_src_basic_tri(self):
+        """Test Fix three deb source string with filenames"""
+        cfg1 = {'source': ('deb http://archive.ubuntu.com/ubuntu'
+                           ' karmic-backports'
+                           ' main universe multiverse restricted'),
+                'filename': self.aptlistfile}
+        cfg2 = {'source': ('deb http://archive.ubuntu.com/ubuntu'
+                           ' precise-backports'
+                           ' main universe multiverse restricted'),
+                'filename': self.aptlistfile2}
+        cfg3 = {'source': ('deb http://archive.ubuntu.com/ubuntu'
+                           ' lucid-backports'
+                           ' main universe multiverse restricted'),
+                'filename': self.aptlistfile3}
+        self.apt_src_basic_tri([cfg1, cfg2, cfg3])
+
+    def test_apt_src_basic_dict_tri(self):
+        """Test Fix three deb source string with filenames (dict)"""
+        cfg = {self.aptlistfile: {'source':
+                                  ('deb http://archive.ubuntu.com/ubuntu'
+                                   ' karmic-backports'
+                                   ' main universe multiverse restricted')},
+               self.aptlistfile2: {'source':
+                                   ('deb http://archive.ubuntu.com/ubuntu'
+                                    ' precise-backports'
+                                    ' main universe multiverse restricted')},
+               self.aptlistfile3: {'source':
+                                   ('deb http://archive.ubuntu.com/ubuntu'
+                                    ' lucid-backports'
+                                    ' main universe multiverse restricted')}}
+        self.apt_src_basic_tri(cfg)
+
+    def test_apt_src_basic_nofn(self):
+        """Test Fix three deb source string without filenames (dict)"""
+        cfg = {'source': ('deb http://archive.ubuntu.com/ubuntu'
+                          ' karmic-backports'
+                          ' main universe multiverse restricted')}
+        with mock.patch.object(os.path, 'join', side_effect=self.myjoin):
+            self.apt_src_basic(self.fallbackfn, [cfg])
+
+    def apt_src_replacement(self, filename, cfg):
+        """apt_src_replace
+        Test Autoreplacement of MIRROR and RELEASE in source specs
+        """
+        cfg = self.wrapv1conf(cfg)
+        params = self._get_default_params()
+        cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+
+        self.assertTrue(os.path.isfile(filename))
+
+        contents = util.load_file(filename)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb", params['MIRROR'], params['RELEASE'],
+                                   "multiverse"),
+                                  contents, flags=re.IGNORECASE))
+
+    def test_apt_src_replace(self):
+        """Test Autoreplacement of MIRROR and RELEASE in source specs"""
+        cfg = {'source': 'deb $MIRROR $RELEASE multiverse',
+               'filename': self.aptlistfile}
+        self.apt_src_replacement(self.aptlistfile, [cfg])
+
+    def apt_src_replace_tri(self, cfg):
+        """apt_src_replace_tri
+        Test three autoreplacements of MIRROR and RELEASE in source specs with
+        generic part
+        """
+        self.apt_src_replacement(self.aptlistfile, cfg)
+
+        # extra verify on two extra files of this test
+        params = self._get_default_params()
+        contents = util.load_file(self.aptlistfile2)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb", params['MIRROR'], params['RELEASE'],
+                                   "main"),
+                                  contents, flags=re.IGNORECASE))
+        contents = util.load_file(self.aptlistfile3)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb", params['MIRROR'], params['RELEASE'],
+                                   "universe"),
+                                  contents, flags=re.IGNORECASE))
+
+    def test_apt_src_replace_tri(self):
+        """Test triple Autoreplacement of MIRROR and RELEASE in source specs"""
+        cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse',
+                'filename': self.aptlistfile}
+        cfg2 = {'source': 'deb $MIRROR $RELEASE main',
+                'filename': self.aptlistfile2}
+        cfg3 = {'source': 'deb $MIRROR $RELEASE universe',
+                'filename': self.aptlistfile3}
+        self.apt_src_replace_tri([cfg1, cfg2, cfg3])
+
+    def test_apt_src_replace_dict_tri(self):
+        """Test triple Autoreplacement in source specs (dict)"""
+        cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'},
+               'notused': {'source': 'deb $MIRROR $RELEASE main',
+                           'filename': self.aptlistfile2},
+               self.aptlistfile3: {'source': 'deb $MIRROR $RELEASE universe'}}
+        self.apt_src_replace_tri(cfg)
+
+    def test_apt_src_replace_nofn(self):
+        """Test Autoreplacement of MIRROR and RELEASE in source specs nofile"""
+        cfg = {'source': 'deb $MIRROR $RELEASE multiverse'}
+        with mock.patch.object(os.path, 'join', side_effect=self.myjoin):
+            self.apt_src_replacement(self.fallbackfn, [cfg])
+
+    def apt_src_keyid(self, filename, cfg, keynum):
+        """apt_src_keyid
+        Test specification of a source + keyid
+        """
+        cfg = self.wrapv1conf(cfg)
+
+        with mock.patch.object(cc_apt_configure, 'add_apt_key') as mockobj:
+            cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+
+        # check if it added the right number of keys
+        calls = []
+        sources = cfg['apt']['sources']
+        for src in sources:
+            print(sources[src])
+            calls.append(call(sources[src], None))
+
+        mockobj.assert_has_calls(calls, any_order=True)
+
+        self.assertTrue(os.path.isfile(filename))
+
+        contents = util.load_file(filename)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb",
+                                   ('http://ppa.launchpad.net/smoser/'
+                                    'cloud-init-test/ubuntu'),
+                                   "xenial", "main"),
+                                  contents, flags=re.IGNORECASE))
+
+    def test_apt_src_keyid(self):
+        """Test specification of a source + keyid with filename being set"""
+        cfg = {'source': ('deb '
+                          'http://ppa.launchpad.net/'
+                          'smoser/cloud-init-test/ubuntu'
+                          ' xenial main'),
+               'keyid': "03683F77",
+               'filename': self.aptlistfile}
+        self.apt_src_keyid(self.aptlistfile, [cfg], 1)
+
+    def test_apt_src_keyid_tri(self):
+        """Test 3x specification of a source + keyid with filename being set"""
+        cfg1 = {'source': ('deb '
+                           'http://ppa.launchpad.net/'
+                           'smoser/cloud-init-test/ubuntu'
+                           ' xenial main'),
+                'keyid': "03683F77",
+                'filename': self.aptlistfile}
+        cfg2 = {'source': ('deb '
+                           'http://ppa.launchpad.net/'
+                           'smoser/cloud-init-test/ubuntu'
+                           ' xenial universe'),
+                'keyid': "03683F77",
+                'filename': self.aptlistfile2}
+        cfg3 = {'source': ('deb '
+                           'http://ppa.launchpad.net/'
+                           'smoser/cloud-init-test/ubuntu'
+                           ' xenial multiverse'),
+                'keyid': "03683F77",
+                'filename': self.aptlistfile3}
+
+        self.apt_src_keyid(self.aptlistfile, [cfg1, cfg2, cfg3], 3)
+        contents = util.load_file(self.aptlistfile2)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb",
+                                   ('http://ppa.launchpad.net/smoser/'
+                                    'cloud-init-test/ubuntu'),
+                                   "xenial", "universe"),
+                                  contents, flags=re.IGNORECASE))
+        contents = util.load_file(self.aptlistfile3)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb",
+                                   ('http://ppa.launchpad.net/smoser/'
+                                    'cloud-init-test/ubuntu'),
+                                   "xenial", "multiverse"),
+                                  contents, flags=re.IGNORECASE))
+
+    def test_apt_src_keyid_nofn(self):
+        """Test specification of a source + keyid without filename being set"""
+        cfg = {'source': ('deb '
+                          'http://ppa.launchpad.net/'
+                          'smoser/cloud-init-test/ubuntu'
+                          ' xenial main'),
+               'keyid': "03683F77"}
+        with mock.patch.object(os.path, 'join', side_effect=self.myjoin):
+            self.apt_src_keyid(self.fallbackfn, [cfg], 1)
+
+    def apt_src_key(self, filename, cfg):
+        """apt_src_key
+        Test specification of a source + key
+        """
+        cfg = self.wrapv1conf([cfg])
+
+        with mock.patch.object(cc_apt_configure, 'add_apt_key') as mockobj:
+            cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+
+        # check if it added the right amount of keys
+        sources = cfg['apt']['sources']
+        calls = []
+        for src in sources:
+            print(sources[src])
+            calls.append(call(sources[src], None))
+
+        mockobj.assert_has_calls(calls, any_order=True)
+
+        self.assertTrue(os.path.isfile(filename))
+
+        contents = util.load_file(filename)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb",
+                                   ('http://ppa.launchpad.net/smoser/'
+                                    'cloud-init-test/ubuntu'),
+                                   "xenial", "main"),
+                                  contents, flags=re.IGNORECASE))
+
+    def test_apt_src_key(self):
+        """Test specification of a source + key with filename being set"""
+        cfg = {'source': ('deb '
+                          'http://ppa.launchpad.net/'
+                          'smoser/cloud-init-test/ubuntu'
+                          ' xenial main'),
+               'key': "fakekey 4321",
+               'filename': self.aptlistfile}
+        self.apt_src_key(self.aptlistfile, cfg)
+
+    def test_apt_src_key_nofn(self):
+        """Test specification of a source + key without filename being set"""
+        cfg = {'source': ('deb '
+                          'http://ppa.launchpad.net/'
+                          'smoser/cloud-init-test/ubuntu'
+                          ' xenial main'),
+               'key': "fakekey 4321"}
+        with mock.patch.object(os.path, 'join', side_effect=self.myjoin):
+            self.apt_src_key(self.fallbackfn, cfg)
+
+    def test_apt_src_keyonly(self):
+        """Test specifying key without source"""
+        cfg = {'key': "fakekey 4242",
+               'filename': self.aptlistfile}
+        cfg = self.wrapv1conf([cfg])
+        with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj:
+            cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+
+        calls = (call(
+            'add',
+            output_file=pathlib.Path(self.aptlistfile).stem,
+            data='fakekey 4242',
+            hardened=False),)
+        mockobj.assert_has_calls(calls, any_order=True)
+
+        # filename should be ignored on key only
+        self.assertFalse(os.path.isfile(self.aptlistfile))
+
+    def test_apt_src_keyidonly(self):
+        """Test specification of a keyid without source"""
+        cfg = {'keyid': "03683F77",
+               'filename': self.aptlistfile}
+        cfg = self.wrapv1conf([cfg])
+
+        with mock.patch.object(subp, 'subp',
+                               return_value=('fakekey 1212', '')):
+            with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj:
+                cc_apt_configure.handle(
+                    "test",
+                    cfg,
+                    self.fakecloud,
+                    None,
+                    None)
+
+        calls = (call(
+            'add',
+            output_file=pathlib.Path(self.aptlistfile).stem,
+            data='fakekey 1212',
+            hardened=False),)
+        mockobj.assert_has_calls(calls, any_order=True)
+
+        # filename should be ignored on key only
+        self.assertFalse(os.path.isfile(self.aptlistfile))
+
+    def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None):
+        """apt_src_keyid_real
+        Test specification of a keyid without source including
+        up to addition of the key (add_apt_key_raw mocked to keep the
+        environment as is)
+        """
+        key = cfg['keyid']
+        keyserver = cfg.get('keyserver', 'keyserver.ubuntu.com')
+        cfg = self.wrapv1conf([cfg])
+
+        with mock.patch.object(cc_apt_configure, 'add_apt_key_raw') as mockkey:
+            with mock.patch.object(gpg, 'getkeybyid',
+                                   return_value=expectedkey) as mockgetkey:
+                cc_apt_configure.handle("test", cfg, self.fakecloud,
+                                        None, None)
+        if is_hardened is not None:
+            mockkey.assert_called_with(
+                expectedkey,
+                self.aptlistfile,
+                hardened=is_hardened)
+        else:
+            mockkey.assert_called_with(expectedkey, self.aptlistfile)
+        mockgetkey.assert_called_with(key, keyserver)
+
+        # filename should be ignored on key only
+        self.assertFalse(os.path.isfile(self.aptlistfile))
+
+    def test_apt_src_keyid_real(self):
+        """test_apt_src_keyid_real - Test keyid including key add"""
+        keyid = "03683F77"
+        cfg = {'keyid': keyid,
+               'filename': self.aptlistfile}
+
+        self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False)
+
+    def test_apt_src_longkeyid_real(self):
+        """test_apt_src_longkeyid_real - Test long keyid including key add"""
+        keyid = "B59D 5F15 97A5 04B7 E230  6DCA 0620 BBCF 0368 3F77"
+        cfg = {'keyid': keyid,
+               'filename': self.aptlistfile}
+
+        self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False)
+
+    def test_apt_src_longkeyid_ks_real(self):
+        """test_apt_src_longkeyid_ks_real - Test long keyid from other ks"""
+        keyid = "B59D 5F15 97A5 04B7 E230  6DCA 0620 BBCF 0368 3F77"
+        cfg = {'keyid': keyid,
+               'keyserver': 'keys.gnupg.net',
+               'filename': self.aptlistfile}
+
+        self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False)
+
+    def test_apt_src_ppa(self):
+        """Test adding a ppa"""
+        cfg = {'source': 'ppa:smoser/cloud-init-test',
+               'filename': self.aptlistfile}
+        cfg = self.wrapv1conf([cfg])
+
+        with mock.patch.object(subp, 'subp') as mockobj:
+            cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+            mockobj.assert_called_once_with(['add-apt-repository',
+                                             'ppa:smoser/cloud-init-test'],
+                                            target=None)
+
+        # adding ppa should ignore filename (uses add-apt-repository)
+        self.assertFalse(os.path.isfile(self.aptlistfile))
+
+    def test_apt_src_ppa_tri(self):
+        """Test adding three ppa's"""
+        cfg1 = {'source': 'ppa:smoser/cloud-init-test',
+                'filename': self.aptlistfile}
+        cfg2 = {'source': 'ppa:smoser/cloud-init-test2',
+                'filename': self.aptlistfile2}
+        cfg3 = {'source': 'ppa:smoser/cloud-init-test3',
+                'filename': self.aptlistfile3}
+        cfg = self.wrapv1conf([cfg1, cfg2, cfg3])
+
+        with mock.patch.object(subp, 'subp') as mockobj:
+            cc_apt_configure.handle("test", cfg, self.fakecloud,
+                                    None, None)
+        calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test'],
+                      target=None),
+                 call(['add-apt-repository', 'ppa:smoser/cloud-init-test2'],
+                      target=None),
+                 call(['add-apt-repository', 'ppa:smoser/cloud-init-test3'],
+                      target=None)]
+        mockobj.assert_has_calls(calls, any_order=True)
+
+        # adding ppa should ignore all filenames (uses add-apt-repository)
+        self.assertFalse(os.path.isfile(self.aptlistfile))
+        self.assertFalse(os.path.isfile(self.aptlistfile2))
+        self.assertFalse(os.path.isfile(self.aptlistfile3))
+
+    def test_convert_to_new_format(self):
+        """Test the conversion of old to new format"""
+        cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse',
+                'filename': self.aptlistfile}
+        cfg2 = {'source': 'deb $MIRROR $RELEASE main',
+                'filename': self.aptlistfile2}
+        cfg3 = {'source': 'deb $MIRROR $RELEASE universe',
+                'filename': self.aptlistfile3}
+        cfg = {'apt_sources': [cfg1, cfg2, cfg3]}
+        checkcfg = {self.aptlistfile: {'filename': self.aptlistfile,
+                                       'source': 'deb $MIRROR $RELEASE '
+                                                 'multiverse'},
+                    self.aptlistfile2: {'filename': self.aptlistfile2,
+                                        'source': 'deb $MIRROR $RELEASE main'},
+                    self.aptlistfile3: {'filename': self.aptlistfile3,
+                                        'source': 'deb $MIRROR $RELEASE '
+                                                  'universe'}}
+
+        newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg)
+        self.assertEqual(newcfg['apt']['sources'], checkcfg)
+
+        # convert again, should stay the same
+        newcfg2 = cc_apt_configure.convert_to_v3_apt_format(newcfg)
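
The conversion assertions here and in the collision tests below pin down
two properties of convert_to_v3_apt_format: legacy apt_* keys are hoisted
under a nested 'apt' dict, and converting an already-converted config is a
no-op. A toy converter, purely to illustrate the idempotency contract being
asserted (this sketch is not cloud-init's implementation, which also
validates old/new collisions):

    def to_v3_sketch(cfg):
        # Hoist legacy 'apt_*' keys under a nested 'apt' dict.
        out = {k: v for k, v in cfg.items() if not k.startswith('apt_')}
        apt = dict(out.get('apt', {}))
        for key, value in cfg.items():
            if key.startswith('apt_'):
                apt[key[len('apt_'):]] = value
        if apt:
            out['apt'] = apt
        return out

    once = to_v3_sketch({'apt_proxy': 'http://192.168.122.1:8000/'})
    assert once == {'apt': {'proxy': 'http://192.168.122.1:8000/'}}
    assert to_v3_sketch(once) == once
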
+    def test_convert_to_new_format(self):
+        """Test the conversion of old to new format"""
+        cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse',
+                'filename': self.aptlistfile}
+        cfg2 = {'source': 'deb $MIRROR $RELEASE main',
+                'filename': self.aptlistfile2}
+        cfg3 = {'source': 'deb $MIRROR $RELEASE universe',
+                'filename': self.aptlistfile3}
+        cfg = {'apt_sources': [cfg1, cfg2, cfg3]}
+        checkcfg = {self.aptlistfile: {'filename': self.aptlistfile,
+                                       'source': 'deb $MIRROR $RELEASE '
+                                                 'multiverse'},
+                    self.aptlistfile2: {'filename': self.aptlistfile2,
+                                        'source': 'deb $MIRROR $RELEASE main'},
+                    self.aptlistfile3: {'filename': self.aptlistfile3,
+                                        'source': 'deb $MIRROR $RELEASE '
+                                                  'universe'}}
+
+        newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg)
+        self.assertEqual(newcfg['apt']['sources'], checkcfg)
+
+        # convert again, should stay the same
+        newcfg2 = cc_apt_configure.convert_to_v3_apt_format(newcfg)
+        self.assertEqual(newcfg2['apt']['sources'], checkcfg)
+
+        # should work without raising an exception
+        cc_apt_configure.convert_to_v3_apt_format({})
+
+        with self.assertRaises(ValueError):
+            cc_apt_configure.convert_to_v3_apt_format({'apt_sources': 5})
+
+    def test_convert_to_new_format_collision(self):
+        """Test the conversion of old to new format with collisions
+           That matches e.g. the MAAS case specifying old and new config"""
+        cfg_1_and_3 = {'apt': {'proxy': 'http://192.168.122.1:8000/'},
+                       'apt_proxy': 'http://192.168.122.1:8000/'}
+        cfg_3_only = {'apt': {'proxy': 'http://192.168.122.1:8000/'}}
+        cfgconflict = {'apt': {'proxy': 'http://192.168.122.1:8000/'},
+                       'apt_proxy': 'ftp://192.168.122.1:8000/'}
+
+        # collision (equal)
+        newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3)
+        self.assertEqual(newcfg, cfg_3_only)
+        # collision (equal, so ok to remove)
+        newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_3_only)
+        self.assertEqual(newcfg, cfg_3_only)
+        # collision (unequal)
+        match = "Old and New.*unequal.*apt_proxy"
+        with self.assertRaisesRegex(ValueError, match):
+            cc_apt_configure.convert_to_v3_apt_format(cfgconflict)
+
+    def test_convert_to_new_format_dict_collision(self):
+        cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse',
+                'filename': self.aptlistfile}
+        cfg2 = {'source': 'deb $MIRROR $RELEASE main',
+                'filename': self.aptlistfile2}
+        cfg3 = {'source': 'deb $MIRROR $RELEASE universe',
+                'filename': self.aptlistfile3}
+        fullv3 = {self.aptlistfile: {'filename': self.aptlistfile,
+                                     'source': 'deb $MIRROR $RELEASE '
+                                               'multiverse'},
+                  self.aptlistfile2: {'filename': self.aptlistfile2,
+                                      'source': 'deb $MIRROR $RELEASE main'},
+                  self.aptlistfile3: {'filename': self.aptlistfile3,
+                                      'source': 'deb $MIRROR $RELEASE '
+                                                'universe'}}
+        cfg_3_only = {'apt': {'sources': fullv3}}
+        cfg_1_and_3 = {'apt_sources': [cfg1, cfg2, cfg3]}
+        cfg_1_and_3.update(cfg_3_only)
+
+        # collision (equal, so ok to remove)
+        newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3)
+        self.assertEqual(newcfg, cfg_3_only)
+        # no old spec (same result)
+        newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_3_only)
+        self.assertEqual(newcfg, cfg_3_only)
+
+        diff = {self.aptlistfile: {'filename': self.aptlistfile,
+                                   'source': 'deb $MIRROR $RELEASE '
+                                             'DIFFERENTVERSE'},
+                self.aptlistfile2: {'filename': self.aptlistfile2,
+                                    'source': 'deb $MIRROR $RELEASE main'},
+                self.aptlistfile3: {'filename': self.aptlistfile3,
+                                    'source': 'deb $MIRROR $RELEASE '
+                                              'universe'}}
+        cfg_3_only = {'apt': {'sources': diff}}
+        cfg_1_and_3_different = {'apt_sources': [cfg1, cfg2, cfg3]}
+        cfg_1_and_3_different.update(cfg_3_only)
+
+        # collision (unequal by dict having a different entry)
+        with self.assertRaises(ValueError):
+            cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3_different)
+
+        missing = {self.aptlistfile: {'filename': self.aptlistfile,
+                                      'source': 'deb $MIRROR $RELEASE '
+                                                'multiverse'}}
+        cfg_3_only = {'apt': {'sources': missing}}
+        cfg_1_and_3_missing = {'apt_sources': [cfg1, cfg2, cfg3]}
+        cfg_1_and_3_missing.update(cfg_3_only)
+        # collision (unequal by dict missing an entry)
+        with self.assertRaises(ValueError):
+            cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3_missing)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_apt_source_v3.py b/tests/unittests/config/test_apt_source_v3.py
new file mode 100644
index 00000000..0b78037e
--- /dev/null
+++ b/tests/unittests/config/test_apt_source_v3.py
@@ -0,0 +1,1170 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""test_handler_apt_source_v3
+Testing various config variations of the apt_source custom config
+This exercises the various options in the new v3 format, including its new
+features
+"""
+import glob
+import os
+import re
+import shutil
+import socket
+import tempfile
+import pathlib
+
+from unittest import TestCase, mock
+from unittest.mock import call
+
+from cloudinit import gpg
+from cloudinit import subp
+from cloudinit import util
+from cloudinit.config import cc_apt_configure
+from tests.unittests import helpers as t_help
+
+from tests.unittests.util import get_cloud
+
+EXPECTEDKEY = """-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mI0ESuZLUgEEAKkqq3idtFP7g9hzOu1a8+v8ImawQN4TrvlygfScMU1TIS1eC7UQ
+NUA8Qqgr9iUaGnejb0VciqftLrU9D6WYHSKz+EITefgdyJ6SoQxjoJdsCpJ7o9Jy
+8PQnpRttiFm4qHu6BVnKnBNxw/z3ST9YMqW5kbMQpfxbGe+obRox59NpABEBAAG0
+HUxhdW5jaHBhZCBQUEEgZm9yIFNjb3R0IE1vc2VyiLYEEwECACAFAkrmS1ICGwMG
+CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAGILvPA2g/d3aEA/9tVjc10HOZwV29
+OatVuTeERjjrIbxflO586GLA8cp0C9RQCwgod/R+cKYdQcHjbqVcP0HqxveLg0RZ
+FJpWLmWKamwkABErwQLGlM/Hwhjfade8VvEQutH5/0JgKHmzRsoqfR+LMO6OS+Sm
+S0ORP6HXET3+jC8BMG4tBWCTK/XEZw==
+=ACB2
+-----END PGP PUBLIC KEY BLOCK-----"""
+
+ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
+
+TARGET = None
+
+MOCK_LSB_RELEASE_DATA = {
+    'id': 'Ubuntu', 'description': 'Ubuntu 18.04.1 LTS',
+    'release': '18.04', 'codename': 'bionic'}
+
+
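+# The v3 'apt' configuration exercised below keys each source by the list
+# filename it should land in; a minimal sketch (values illustrative only):
+#   {'/etc/apt/sources.list.d/my.list': {
+#       'source': 'deb $MIRROR $RELEASE main',
+#       'keyid': '03683F77'}}       # and/or 'key' / 'keyserver'
+
+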
+class FakeDatasource:
+    """Fake Datasource helper object"""
+    def __init__(self):
+        self.region = 'region'
+
+
+class FakeCloud:
+    """Fake Cloud helper object"""
+    def __init__(self):
+        self.datasource = FakeDatasource()
+
+
+class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
+    """TestAptSourceConfig
+    Main Class to test apt configs
+    """
+    def setUp(self):
+        super(TestAptSourceConfig, self).setUp()
+        self.tmp = tempfile.mkdtemp()
+        self.new_root = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, self.tmp)
+        self.addCleanup(shutil.rmtree, self.new_root)
+        self.aptlistfile = os.path.join(self.tmp, "single-deb.list")
+        self.aptlistfile2 = os.path.join(self.tmp, "single-deb2.list")
+        self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list")
+        self.join = os.path.join
+        self.matcher = re.compile(ADD_APT_REPO_MATCH).search
+        self.add_patch(
+            'cloudinit.config.cc_apt_configure.util.lsb_release',
+            'm_lsb_release', return_value=MOCK_LSB_RELEASE_DATA.copy())
+
+    @staticmethod
+    def _add_apt_sources(*args, **kwargs):
+        with mock.patch.object(cc_apt_configure, 'update_packages'):
+            cc_apt_configure.add_apt_sources(*args, **kwargs)
+
+    @staticmethod
+    def _get_default_params():
+        """_get_default_params
+        Get the most basic default mirror and release info to be used in tests
+        """
+        params = {}
+        params['RELEASE'] = MOCK_LSB_RELEASE_DATA['release']
+        arch = 'amd64'
+        params['MIRROR'] = cc_apt_configure.\
+            get_default_mirrors(arch)["PRIMARY"]
+        return params
+
+    def _myjoin(self, *args, **kwargs):
+        """_myjoin - redirect into writable tmpdir"""
+        if (args[0] == "/etc/apt/sources.list.d/" and
+                args[1] == "cloud_config_sources.list" and
+                len(args) == 2):
+            return self.join(self.tmp, args[0].lstrip("/"), args[1])
+        else:
+            return self.join(*args, **kwargs)
+
+    def _apt_src_basic(self, filename, cfg):
+        """_apt_src_basic
+        Test fixed deb source strings; has to overwrite mirror conf in params
+        """
+        params = self._get_default_params()
+
+        self._add_apt_sources(cfg, TARGET, template_params=params,
+                              aa_repo_match=self.matcher)
+
+        self.assertTrue(os.path.isfile(filename))
+
+        contents = util.load_file(filename)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb", "http://test.ubuntu.com/ubuntu",
+                                   "karmic-backports",
+                                   "main universe multiverse restricted"),
+                        contents, flags=re.IGNORECASE))
+
+    def test_apt_v3_src_basic(self):
+        """test_apt_v3_src_basic - Test fix deb source string"""
+        cfg = {self.aptlistfile: {'source':
+                                  ('deb http://test.ubuntu.com/ubuntu'
+                                   ' karmic-backports'
+                                   ' main universe multiverse restricted')}}
+        self._apt_src_basic(self.aptlistfile, cfg)
+
+    def test_apt_v3_src_basic_tri(self):
+        """test_apt_v3_src_basic_tri - Test multiple fix deb source strings"""
+        cfg = {self.aptlistfile: {'source':
+                                  ('deb http://test.ubuntu.com/ubuntu'
+                                   ' karmic-backports'
+                                   ' main universe multiverse restricted')},
+               self.aptlistfile2: {'source':
+                                   ('deb http://test.ubuntu.com/ubuntu'
+                                    ' precise-backports'
+                                    ' main universe multiverse restricted')},
+               self.aptlistfile3: {'source':
+                                   ('deb http://test.ubuntu.com/ubuntu'
+                                    ' lucid-backports'
+                                    ' main universe multiverse restricted')}}
+        self._apt_src_basic(self.aptlistfile, cfg)
+
+        # extra verify on two extra files of this test
+        contents = util.load_file(self.aptlistfile2)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb", "http://test.ubuntu.com/ubuntu",
+                                   "precise-backports",
+                                   "main universe multiverse restricted"),
+                        contents, flags=re.IGNORECASE))
+        contents = util.load_file(self.aptlistfile3)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb", "http://test.ubuntu.com/ubuntu",
+                                   "lucid-backports",
+                                   "main universe multiverse restricted"),
+                        contents, flags=re.IGNORECASE))
+
+    def _apt_src_replacement(self, filename, cfg):
+        """_apt_src_replacement
+        Test Autoreplacement of MIRROR and RELEASE in source specs
+        """
+        params = self._get_default_params()
+        self._add_apt_sources(cfg, TARGET, template_params=params,
+                              aa_repo_match=self.matcher)
+
+        self.assertTrue(os.path.isfile(filename))
+
+        contents = util.load_file(filename)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb", params['MIRROR'], params['RELEASE'],
+                                   "multiverse"),
+                        contents, flags=re.IGNORECASE))
+
+    def test_apt_v3_src_replace(self):
+        """test_apt_v3_src_replace - Test replacement of MIRROR & RELEASE"""
+        cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'}}
+        self._apt_src_replacement(self.aptlistfile, cfg)
+
+    def test_apt_v3_src_replace_fn(self):
+        """test_apt_v3_src_replace_fn - Test filename overwritten in dict"""
+        cfg = {'ignored': {'source': 'deb $MIRROR $RELEASE multiverse',
+                           'filename': self.aptlistfile}}
+        # second file should overwrite the dict key
+        self._apt_src_replacement(self.aptlistfile, cfg)
+
+    def _apt_src_replace_tri(self, cfg):
+        """_apt_src_replace_tri
+        Test three autoreplacements of MIRROR and RELEASE in source specs with
+        generic part
+        """
+        self._apt_src_replacement(self.aptlistfile, cfg)
+
+        # extra verify on two extra files of this test
+        params = self._get_default_params()
+        contents = util.load_file(self.aptlistfile2)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb", params['MIRROR'], params['RELEASE'],
+                                   "main"),
+                        contents, flags=re.IGNORECASE))
+        contents = util.load_file(self.aptlistfile3)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb", params['MIRROR'], params['RELEASE'],
+                                   "universe"),
+                        contents, flags=re.IGNORECASE))
+
+    def test_apt_v3_src_replace_tri(self):
+        """test_apt_v3_src_replace_tri - Test multiple replace/overwrites"""
+        cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'},
+               'notused': {'source': 'deb $MIRROR $RELEASE main',
+                           'filename': self.aptlistfile2},
+               self.aptlistfile3: {'source': 'deb $MIRROR $RELEASE universe'}}
+        self._apt_src_replace_tri(cfg)
+
+    def _apt_src_keyid(self, filename, cfg, keynum, is_hardened=None):
+        """_apt_src_keyid
+        Test specification of a source + keyid
+        """
+        params = self._get_default_params()
+
+        with mock.patch.object(cc_apt_configure, 'add_apt_key') as mockobj:
+            self._add_apt_sources(cfg, TARGET, template_params=params,
+                                  aa_repo_match=self.matcher)
+
+        # check if it added the right number of keys
+        calls = []
+        for key in cfg:
+            if is_hardened is not None:
+                calls.append(call(cfg[key], hardened=is_hardened))
+            else:
+                calls.append(call(cfg[key], TARGET))
+
+        mockobj.assert_has_calls(calls, any_order=True)
+
+        self.assertTrue(os.path.isfile(filename))
+
+        contents = util.load_file(filename)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb",
+                                   ('http://ppa.launchpad.net/smoser/'
+                                    'cloud-init-test/ubuntu'),
+                                   "xenial", "main"),
+                        contents, flags=re.IGNORECASE))
+
+    def test_apt_v3_src_keyid(self):
+        """test_apt_v3_src_keyid - Test source + keyid with filename"""
+        cfg = {self.aptlistfile: {'source': ('deb '
+                                             'http://ppa.launchpad.net/'
+                                             'smoser/cloud-init-test/ubuntu'
+                                             ' xenial main'),
+                                  'filename': self.aptlistfile,
+                                  'keyid': "03683F77"}}
+        self._apt_src_keyid(self.aptlistfile, cfg, 1)
+
+    def test_apt_v3_src_keyid_tri(self):
+        """test_apt_v3_src_keyid_tri - Test multiple src+key+filename writes"""
+        cfg = {self.aptlistfile: {'source': ('deb '
+                                             'http://ppa.launchpad.net/'
+                                             'smoser/cloud-init-test/ubuntu'
+                                             ' xenial main'),
+                                  'keyid': "03683F77"},
+               'ignored': {'source': ('deb '
+                                      'http://ppa.launchpad.net/'
+                                      'smoser/cloud-init-test/ubuntu'
+                                      ' xenial universe'),
+                           'keyid': "03683F77",
+                           'filename': self.aptlistfile2},
+               self.aptlistfile3: {'source': ('deb '
+                                              'http://ppa.launchpad.net/'
+                                              'smoser/cloud-init-test/ubuntu'
+                                              ' xenial multiverse'),
+                                   'filename': self.aptlistfile3,
+                                   'keyid': "03683F77"}}
+
+        self._apt_src_keyid(self.aptlistfile, cfg, 3)
+        contents = util.load_file(self.aptlistfile2)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb",
+                                   ('http://ppa.launchpad.net/smoser/'
+                                    'cloud-init-test/ubuntu'),
+                                   "xenial", "universe"),
+                        contents, flags=re.IGNORECASE))
+        contents = util.load_file(self.aptlistfile3)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb",
+                                   ('http://ppa.launchpad.net/smoser/'
+                                    'cloud-init-test/ubuntu'),
+                                   "xenial", "multiverse"),
+                        contents, flags=re.IGNORECASE))
+
+    def test_apt_v3_src_key(self):
+        """test_apt_v3_src_key - Test source + key"""
+        params = self._get_default_params()
+        cfg = {self.aptlistfile: {'source': ('deb '
+                                             'http://ppa.launchpad.net/'
+                                             'smoser/cloud-init-test/ubuntu'
+                                             ' xenial main'),
+                                  'filename': self.aptlistfile,
+                                  'key': "fakekey 4321"}}
+
+        with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj:
+            self._add_apt_sources(cfg, TARGET, template_params=params,
+                                  aa_repo_match=self.matcher)
+
+        calls = (call(
+            'add',
+            output_file=pathlib.Path(self.aptlistfile).stem,
+            data='fakekey 4321',
+            hardened=False),)
+        mockobj.assert_has_calls(calls, any_order=True)
+        self.assertTrue(os.path.isfile(self.aptlistfile))
+
+        contents = util.load_file(self.aptlistfile)
+        self.assertTrue(re.search(r"%s %s %s %s\n" %
+                                  ("deb",
+                                   ('http://ppa.launchpad.net/smoser/'
+                                    'cloud-init-test/ubuntu'),
+                                   "xenial", "main"),
+                        contents, flags=re.IGNORECASE))
+
+    def test_apt_v3_src_keyonly(self):
+        """test_apt_v3_src_keyonly - Test key without source"""
+        params = self._get_default_params()
+        cfg = {self.aptlistfile: {'key': "fakekey 4242"}}
+
+        with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj:
+            self._add_apt_sources(cfg, TARGET, template_params=params,
+                                  aa_repo_match=self.matcher)
+
+        calls = (call(
+            'add',
+            output_file=pathlib.Path(self.aptlistfile).stem,
+            data='fakekey 4242',
+            hardened=False),)
+        mockobj.assert_has_calls(calls, any_order=True)
+
+        # filename should be ignored on key only
+        self.assertFalse(os.path.isfile(self.aptlistfile))
+
+    def test_apt_v3_src_keyidonly(self):
+        """test_apt_v3_src_keyidonly - Test keyid without source"""
+        params = self._get_default_params()
+        cfg = {self.aptlistfile: {'keyid': "03683F77"}}
+        with mock.patch.object(subp, 'subp',
+                               return_value=('fakekey 1212', '')):
+            with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj:
+                self._add_apt_sources(cfg, TARGET, template_params=params,
+                                      aa_repo_match=self.matcher)
+
+        calls = (call(
+            'add',
+            output_file=pathlib.Path(self.aptlistfile).stem,
+            data='fakekey 1212',
+            hardened=False),)
+        mockobj.assert_has_calls(calls, any_order=True)
+
+        # filename should be ignored on key only
+        self.assertFalse(os.path.isfile(self.aptlistfile))
+
+    def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None):
+        """apt_src_keyid_real
+        Test specification of a keyid without source including
+        up to addition of the key (add_apt_key_raw mocked to keep the
+        environment as is)
+        """
+        params = self._get_default_params()
+
+        with mock.patch.object(cc_apt_configure, 'add_apt_key_raw') as mockkey:
+            with mock.patch.object(gpg, 'getkeybyid',
+                                   return_value=expectedkey) as mockgetkey:
+                self._add_apt_sources(cfg, TARGET, template_params=params,
+                                      aa_repo_match=self.matcher)
+
+        keycfg = cfg[self.aptlistfile]
+        mockgetkey.assert_called_with(keycfg['keyid'],
+                                      keycfg.get('keyserver',
+                                                 'keyserver.ubuntu.com'))
+        if is_hardened is not None:
+            mockkey.assert_called_with(
+                expectedkey,
+                keycfg['keyfile'],
+                hardened=is_hardened)
+
+        # filename should be ignored on key only
+        self.assertFalse(os.path.isfile(self.aptlistfile))
+
+    def test_apt_v3_src_keyid_real(self):
+        """test_apt_v3_src_keyid_real - Test keyid including key add"""
+        keyid = "03683F77"
+        cfg = {self.aptlistfile: {'keyid': keyid,
+                                  'keyfile': self.aptlistfile}}
+
+        self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False)
+
+    def test_apt_v3_src_longkeyid_real(self):
+        """test_apt_v3_src_longkeyid_real Test long keyid including key add"""
+        keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
+        cfg = {self.aptlistfile: {'keyid': keyid,
+                                  'keyfile': self.aptlistfile}}
+
+        self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False)
+
+    def test_apt_v3_src_longkeyid_ks_real(self):
+        """test_apt_v3_src_longkeyid_ks_real Test long keyid from other ks"""
+        keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77"
+        cfg = {self.aptlistfile: {'keyid': keyid,
+                                  'keyfile': self.aptlistfile,
+                                  'keyserver': 'keys.gnupg.net'}}
+
+        self.apt_src_keyid_real(cfg, EXPECTEDKEY)
+
+    def test_apt_v3_src_keyid_keyserver(self):
+        """test_apt_v3_src_keyid_keyserver - Test custom keyserver"""
+        keyid = "03683F77"
+        params = self._get_default_params()
+        cfg = {self.aptlistfile: {'keyid': keyid,
+                                  'keyfile': self.aptlistfile,
+                                  'keyserver': 'test.random.com'}}
+
+        # in some test environments only *.ubuntu.com is reachable
+        # so mock the call and check if the config got there
+        with mock.patch.object(gpg, 'getkeybyid',
+                               return_value="fakekey") as mockgetkey:
+            with mock.patch.object(cc_apt_configure,
+                                   'add_apt_key_raw') as mockadd:
+                self._add_apt_sources(cfg, TARGET, template_params=params,
+                                      aa_repo_match=self.matcher)
+
+        mockgetkey.assert_called_with('03683F77', 'test.random.com')
+        mockadd.assert_called_with('fakekey', self.aptlistfile, hardened=False)
+
+        # filename should be ignored on key only
+        self.assertFalse(os.path.isfile(self.aptlistfile))
+
+    def test_apt_v3_src_ppa(self):
+        """test_apt_v3_src_ppa - Test specification of a ppa"""
+        params = self._get_default_params()
+        cfg = {self.aptlistfile: {'source': 'ppa:smoser/cloud-init-test'}}
+
+        with mock.patch("cloudinit.subp.subp") as mockobj:
+            self._add_apt_sources(cfg, TARGET, template_params=params,
+                                  aa_repo_match=self.matcher)
+        mockobj.assert_any_call(['add-apt-repository',
+                                 'ppa:smoser/cloud-init-test'], target=TARGET)
+
+        # adding ppa should ignore filename (uses add-apt-repository)
+        self.assertFalse(os.path.isfile(self.aptlistfile))
+
+    def test_apt_v3_src_ppa_tri(self):
+        """test_apt_v3_src_ppa_tri - Test specification of multiple ppa's"""
+        params = self._get_default_params()
+        cfg = {self.aptlistfile: {'source': 'ppa:smoser/cloud-init-test'},
+               self.aptlistfile2: {'source': 'ppa:smoser/cloud-init-test2'},
+               self.aptlistfile3: {'source': 'ppa:smoser/cloud-init-test3'}}
+
+        with mock.patch("cloudinit.subp.subp") as mockobj:
+            self._add_apt_sources(cfg, TARGET, template_params=params,
+                                  aa_repo_match=self.matcher)
+        calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test'],
+                      target=TARGET),
+                 call(['add-apt-repository', 'ppa:smoser/cloud-init-test2'],
+                      target=TARGET),
+                 call(['add-apt-repository', 'ppa:smoser/cloud-init-test3'],
+                      target=TARGET)]
+        mockobj.assert_has_calls(calls, any_order=True)
+
+        # adding ppa should ignore all filenames (uses add-apt-repository)
+        self.assertFalse(os.path.isfile(self.aptlistfile))
+        self.assertFalse(os.path.isfile(self.aptlistfile2))
+        self.assertFalse(os.path.isfile(self.aptlistfile3))
+
+    @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture")
+    def test_apt_v3_list_rename(self, m_get_dpkg_architecture):
+        """test_apt_v3_list_rename - Test find mirror and apt list renaming"""
+        pre = "/var/lib/apt/lists"
+        # filenames are archive dependent
+
+        arch = 's390x'
+        m_get_dpkg_architecture.return_value = arch
+        component = "ubuntu-ports"
+        archive = "ports.ubuntu.com"
+
+        cfg = {'primary': [{'arches': ["default"],
+                            'uri':
+                            'http://test.ubuntu.com/%s/' % component}],
+               'security': [{'arches': ["default"],
+                             'uri':
+                             'http://testsec.ubuntu.com/%s/' % component}]}
+        post = ("%s_dists_%s-updates_InRelease" %
+                (component, MOCK_LSB_RELEASE_DATA['codename']))
+        fromfn = ("%s/%s_%s" % (pre, archive, post))
+        tofn = ("%s/test.ubuntu.com_%s" % (pre, post))
+
+        mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch)
+
+        self.assertEqual(mirrors['MIRROR'],
+                         "http://test.ubuntu.com/%s/" % component)
+        self.assertEqual(mirrors['PRIMARY'],
+                         "http://test.ubuntu.com/%s/" % component)
+        self.assertEqual(mirrors['SECURITY'],
+                         "http://testsec.ubuntu.com/%s/" % component)
+
+        with mock.patch.object(os, 'rename') as mockren:
+            with mock.patch.object(glob, 'glob',
+                                   return_value=[fromfn]):
+                cc_apt_configure.rename_apt_lists(mirrors, TARGET, arch)
+
+        mockren.assert_any_call(fromfn, tofn)
+
+    @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture")
+    def test_apt_v3_list_rename_non_slash(self, m_get_dpkg_architecture):
+        target = os.path.join(self.tmp, "rename_non_slash")
+        apt_lists_d = os.path.join(target, "./" + cc_apt_configure.APT_LISTS)
+
+        arch = 'amd64'
+        m_get_dpkg_architecture.return_value = arch
+
+        mirror_path = "some/random/path/"
+        primary = "http://test.ubuntu.com/" + mirror_path
+        security = "http://test-security.ubuntu.com/" + mirror_path
+        mirrors = {'PRIMARY': primary, 'SECURITY': security}
+
+        # these match default archive prefixes
+        opri_pre = "archive.ubuntu.com_ubuntu_dists_xenial"
+        osec_pre = "security.ubuntu.com_ubuntu_dists_xenial"
+        # this one won't match and should not be renamed
+        other_pre = "dl.google.com_linux_chrome_deb_dists_stable"
+        # these are our new expected prefixes
+        npri_pre = "test.ubuntu.com_some_random_path_dists_xenial"
+        nsec_pre = "test-security.ubuntu.com_some_random_path_dists_xenial"
+
+        files = [
+            # orig prefix, new prefix, suffix
+            (opri_pre, npri_pre, "_main_binary-amd64_Packages"),
+            (opri_pre, npri_pre, "_main_binary-amd64_InRelease"),
+            (opri_pre, npri_pre, "-updates_main_binary-amd64_Packages"),
+            (opri_pre, npri_pre, "-updates_main_binary-amd64_InRelease"),
+            (other_pre, other_pre, "_main_binary-amd64_Packages"),
+            (other_pre, other_pre, "_Release"),
+            (other_pre, other_pre, "_Release.gpg"),
+            (osec_pre, nsec_pre, "_InRelease"),
+            (osec_pre, nsec_pre, "_main_binary-amd64_Packages"),
+            (osec_pre, nsec_pre, "_universe_binary-amd64_Packages"),
+        ]
+
+        expected = sorted([npre + suff for opre, npre, suff in files])
+        # create files
+        for (opre, _npre, suff) in files:
+            fpath = os.path.join(apt_lists_d, opre + suff)
+            util.write_file(fpath, content=fpath)
+
+        cc_apt_configure.rename_apt_lists(mirrors, target, arch)
+        found = sorted(os.listdir(apt_lists_d))
+        self.assertEqual(expected, found)
+
+    @staticmethod
+    def test_apt_v3_proxy():
+        """test_apt_v3_proxy - Test apt_*proxy configuration"""
+        cfg = {"proxy": "foobar1",
+               "http_proxy": "foobar2",
+               "ftp_proxy": "foobar3",
+               "https_proxy": "foobar4"}
+
+        with mock.patch.object(util, 'write_file') as mockobj:
+            cc_apt_configure.apply_apt_config(cfg, "proxyfn", "notused")
+
+        mockobj.assert_called_with('proxyfn',
+                                   ('Acquire::http::Proxy "foobar1";\n'
+                                    'Acquire::http::Proxy "foobar2";\n'
+                                    'Acquire::ftp::Proxy "foobar3";\n'
+                                    'Acquire::https::Proxy "foobar4";\n'))
+
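+    # find_apt_mirror_info() consumes 'primary'/'security' lists like the
+    # ones below (a sketch; the URIs are illustrative). Each entry names the
+    # arches it applies to, and may give a fixed 'uri', a 'search' list to
+    # probe, or 'search_dns' to try mirror DNS names:
+    #   {'primary': [{'arches': ['default'],
+    #                 'uri': 'http://archive.example.com/ubuntu/'}],
+    #    'security': [{'arches': ['default'],
+    #                  'uri': 'http://security.example.com/ubuntu/'}]}
+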
"uri": "nothis-security"}, + {'arches': [arch], "uri": smir}]} + + mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch) + + self.assertEqual(mirrors['PRIMARY'], pmir) + self.assertEqual(mirrors['MIRROR'], pmir) + self.assertEqual(mirrors['SECURITY'], smir) + + def test_apt_v3_mirror_arches_default(self): + """test_apt_v3_mirror_arches - Test falling back to default arch""" + pmir = "http://us.archive.ubuntu.com/ubuntu/" + smir = "http://security.ubuntu.com/ubuntu/" + cfg = {"primary": [{'arches': ["default"], + "uri": pmir}, + {'arches': ["thisarchdoesntexist"], + "uri": "notthis"}], + "security": [{'arches': ["thisarchdoesntexist"], + "uri": "nothat"}, + {'arches': ["default"], + "uri": smir}]} + + mirrors = cc_apt_configure.find_apt_mirror_info( + cfg, FakeCloud(), 'amd64') + + self.assertEqual(mirrors['MIRROR'], + pmir) + self.assertEqual(mirrors['PRIMARY'], + pmir) + self.assertEqual(mirrors['SECURITY'], + smir) + + @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture") + def test_apt_v3_get_def_mir_non_intel_no_arch( + self, m_get_dpkg_architecture + ): + arch = 'ppc64el' + m_get_dpkg_architecture.return_value = arch + expected = {'PRIMARY': 'http://ports.ubuntu.com/ubuntu-ports', + 'SECURITY': 'http://ports.ubuntu.com/ubuntu-ports'} + self.assertEqual(expected, cc_apt_configure.get_default_mirrors()) + + def test_apt_v3_get_default_mirrors_non_intel_with_arch(self): + found = cc_apt_configure.get_default_mirrors('ppc64el') + + expected = {'PRIMARY': 'http://ports.ubuntu.com/ubuntu-ports', + 'SECURITY': 'http://ports.ubuntu.com/ubuntu-ports'} + self.assertEqual(expected, found) + + def test_apt_v3_mirror_arches_sysdefault(self): + """test_apt_v3_mirror_arches - Test arches fallback to sys default""" + arch = 'amd64' + default_mirrors = cc_apt_configure.get_default_mirrors(arch) + pmir = default_mirrors["PRIMARY"] + smir = default_mirrors["SECURITY"] + mycloud = get_cloud() + cfg = {"primary": [{'arches': ["thisarchdoesntexist_64"], + "uri": "notthis"}, + {'arches': ["thisarchdoesntexist"], + "uri": "notthiseither"}], + "security": [{'arches': ["thisarchdoesntexist"], + "uri": "nothat"}, + {'arches': ["thisarchdoesntexist_64"], + "uri": "nothateither"}]} + + mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch) + + self.assertEqual(mirrors['MIRROR'], pmir) + self.assertEqual(mirrors['PRIMARY'], pmir) + self.assertEqual(mirrors['SECURITY'], smir) + + def test_apt_v3_mirror_search(self): + """test_apt_v3_mirror_search - Test searching mirrors in a list + mock checks to avoid relying on network connectivity""" + pmir = "http://us.archive.ubuntu.com/ubuntu/" + smir = "http://security.ubuntu.com/ubuntu/" + cfg = {"primary": [{'arches': ["default"], + "search": ["pfailme", pmir]}], + "security": [{'arches': ["default"], + "search": ["sfailme", smir]}]} + + with mock.patch.object(cc_apt_configure.util, 'search_for_mirror', + side_effect=[pmir, smir]) as mocksearch: + mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), + 'amd64') + + calls = [call(["pfailme", pmir]), + call(["sfailme", smir])] + mocksearch.assert_has_calls(calls) + + self.assertEqual(mirrors['MIRROR'], + pmir) + self.assertEqual(mirrors['PRIMARY'], + pmir) + self.assertEqual(mirrors['SECURITY'], + smir) + + def test_apt_v3_mirror_search_many2(self): + """test_apt_v3_mirror_search_many3 - Test both mirrors specs at once""" + pmir = "http://us.archive.ubuntu.com/ubuntu/" + smir = "http://security.ubuntu.com/ubuntu/" + cfg = {"primary": [{'arches': ["default"], + 
"uri": pmir, + "search": ["pfailme", "foo"]}], + "security": [{'arches': ["default"], + "uri": smir, + "search": ["sfailme", "bar"]}]} + + arch = 'amd64' + + # should be called only once per type, despite two mirror configs + mycloud = None + with mock.patch.object(cc_apt_configure, 'get_mirror', + return_value="http://mocked/foo") as mockgm: + mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch) + calls = [call(cfg, 'primary', arch, mycloud), + call(cfg, 'security', arch, mycloud)] + mockgm.assert_has_calls(calls) + + # should not be called, since primary is specified + with mock.patch.object(cc_apt_configure.util, + 'search_for_mirror') as mockse: + mirrors = cc_apt_configure.find_apt_mirror_info( + cfg, FakeCloud(), arch) + mockse.assert_not_called() + + self.assertEqual(mirrors['MIRROR'], + pmir) + self.assertEqual(mirrors['PRIMARY'], + pmir) + self.assertEqual(mirrors['SECURITY'], + smir) + + def test_apt_v3_url_resolvable(self): + """test_apt_v3_url_resolvable - Test resolving urls""" + + with mock.patch.object(util, 'is_resolvable') as mockresolve: + util.is_resolvable_url("http://1.2.3.4/ubuntu") + mockresolve.assert_called_with("1.2.3.4") + + with mock.patch.object(util, 'is_resolvable') as mockresolve: + util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu") + mockresolve.assert_called_with("us.archive.ubuntu.com") + + # former tests can leave this set (or not if the test is ran directly) + # do a hard reset to ensure a stable result + util._DNS_REDIRECT_IP = None + bad = [(None, None, None, "badname", ["10.3.2.1"])] + good = [(None, None, None, "goodname", ["10.2.3.4"])] + with mock.patch.object(socket, 'getaddrinfo', + side_effect=[bad, bad, bad, good, + good]) as mocksock: + ret = util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu") + ret2 = util.is_resolvable_url("http://1.2.3.4/ubuntu") + mocksock.assert_any_call('does-not-exist.example.com.', None, + 0, 0, 1, 2) + mocksock.assert_any_call('example.invalid.', None, 0, 0, 1, 2) + mocksock.assert_any_call('us.archive.ubuntu.com', None) + mocksock.assert_any_call('1.2.3.4', None) + + self.assertTrue(ret) + self.assertTrue(ret2) + + # side effect need only bad ret after initial call + with mock.patch.object(socket, 'getaddrinfo', + side_effect=[bad]) as mocksock: + ret3 = util.is_resolvable_url("http://failme.com/ubuntu") + calls = [call('failme.com', None)] + mocksock.assert_has_calls(calls) + self.assertFalse(ret3) + + def test_apt_v3_disable_suites(self): + """test_disable_suites - disable_suites with many configurations""" + release = "xenial" + orig = """deb http://ubuntu.com//ubuntu xenial main +deb http://ubuntu.com//ubuntu xenial-updates main +deb http://ubuntu.com//ubuntu xenial-security main +deb-src http://ubuntu.com//ubuntu universe multiverse +deb http://ubuntu.com/ubuntu/ xenial-proposed main""" + + # disable nothing + disabled = [] + expect = """deb http://ubuntu.com//ubuntu xenial main +deb http://ubuntu.com//ubuntu xenial-updates main +deb http://ubuntu.com//ubuntu xenial-security main +deb-src http://ubuntu.com//ubuntu universe multiverse +deb http://ubuntu.com/ubuntu/ xenial-proposed main""" + result = cc_apt_configure.disable_suites(disabled, orig, release) + self.assertEqual(expect, result) + + # single disable release suite + disabled = ["$RELEASE"] + expect = """\ +# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu xenial main +deb http://ubuntu.com//ubuntu xenial-updates main +deb http://ubuntu.com//ubuntu xenial-security main +deb-src http://ubuntu.com//ubuntu 
+    def test_apt_v3_disable_suites(self):
+        """test_apt_v3_disable_suites - disable_suites with many configs"""
+        release = "xenial"
+        orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+
+        # disable nothing
+        disabled = []
+        expect = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        result = cc_apt_configure.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+        # single disable release suite
+        disabled = ["$RELEASE"]
+        expect = """\
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        result = cc_apt_configure.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+        # single disable other suite
+        disabled = ["$RELEASE-updates"]
+        expect = ("""deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu"""
+                  """ xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
+        result = cc_apt_configure.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+        # multi disable
+        disabled = ["$RELEASE-updates", "$RELEASE-security"]
+        expect = ("""deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+                  """xenial-updates main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+                  """xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
+        result = cc_apt_configure.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+        # multi line disable (same suite multiple times in input)
+        disabled = ["$RELEASE-updates", "$RELEASE-security"]
+        orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://UBUNTU.com//ubuntu xenial-updates main
+deb http://UBUNTU.COM//ubuntu xenial-updates main
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        expect = ("""deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+                  """xenial-updates main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+                  """xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+# suite disabled by cloud-init: deb http://UBUNTU.com//ubuntu """
+                  """xenial-updates main
+# suite disabled by cloud-init: deb http://UBUNTU.COM//ubuntu """
+                  """xenial-updates main
+deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
+        result = cc_apt_configure.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+        # comment in input
+        disabled = ["$RELEASE-updates", "$RELEASE-security"]
+        orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+#foo
+#deb http://UBUNTU.com//ubuntu xenial-updates main
+deb http://UBUNTU.COM//ubuntu xenial-updates main
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        expect = ("""deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+                  """xenial-updates main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+                  """xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+#foo
+#deb http://UBUNTU.com//ubuntu xenial-updates main
+# suite disabled by cloud-init: deb http://UBUNTU.COM//ubuntu """
+                  """xenial-updates main
+deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
+        result = cc_apt_configure.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+        # single disable custom suite
+        disabled = ["foobar"]
+        orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb http://ubuntu.com/ubuntu/ foobar main"""
+        expect = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+# suite disabled by cloud-init: deb http://ubuntu.com/ubuntu/ foobar main"""
+        result = cc_apt_configure.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+        # single disable non existing suite
+        disabled = ["foobar"]
+        orig = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb http://ubuntu.com/ubuntu/ notfoobar main"""
+        expect = """deb http://ubuntu.com//ubuntu xenial main
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb http://ubuntu.com/ubuntu/ notfoobar main"""
+        result = cc_apt_configure.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+        # single disable suite with option
+        disabled = ["$RELEASE-updates"]
+        orig = """deb http://ubuntu.com//ubuntu xenial main
+deb [a=b] http://ubu.com//ubu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        expect = ("""deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb [a=b] http://ubu.com//ubu """
+                  """xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
+        result = cc_apt_configure.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+        # single disable suite with more options and auto $RELEASE expansion
+        disabled = ["updates"]
+        orig = """deb http://ubuntu.com//ubuntu xenial main
+deb [a=b c=d] http://ubu.com//ubu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        expect = """deb http://ubuntu.com//ubuntu xenial main
+# suite disabled by cloud-init: deb [a=b c=d] \
+http://ubu.com//ubu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        result = cc_apt_configure.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+        # single disable suite while options at others
+        disabled = ["$RELEASE-security"]
+        orig = """deb http://ubuntu.com//ubuntu xenial main
+deb [arch=foo] http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
+        expect = ("""deb http://ubuntu.com//ubuntu xenial main
+deb [arch=foo] http://ubuntu.com//ubuntu xenial-updates main
+# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """
+                  """xenial-security main
+deb-src http://ubuntu.com//ubuntu universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
+        result = cc_apt_configure.disable_suites(disabled, orig, release)
+        self.assertEqual(expect, result)
+
+    def test_disable_suites_blank_lines(self):
+        """test_disable_suites_blank_lines - ensure blank lines allowed"""
+        lines = ["deb %(repo)s %(rel)s main universe",
+                 "",
+                 "deb %(repo)s %(rel)s-updates main universe",
+                 "   # random comment",
+                 "#comment here",
+                 ""]
+        rel = "trusty"
+        repo = 'http://example.com/mirrors/ubuntu'
+        orig = "\n".join(lines) % {'repo': repo, 'rel': rel}
+        self.assertEqual(
+            orig, cc_apt_configure.disable_suites(["proposed"], orig, rel))
+
+    @mock.patch("cloudinit.util.get_hostname", return_value='abc.localdomain')
+    def test_apt_v3_mirror_search_dns(self, m_get_hostname):
+        """test_apt_v3_mirror_search_dns - Test searching dns patterns"""
+        pmir = "phit"
+        smir = "shit"
+        arch = 'amd64'
+        mycloud = get_cloud('ubuntu')
+        cfg = {"primary": [{'arches': ["default"],
+                            "search_dns": True}],
+               "security": [{'arches': ["default"],
+                             "search_dns": True}]}
+
+        with mock.patch.object(cc_apt_configure, 'get_mirror',
+                               return_value="http://mocked/foo") as mockgm:
+            mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
+        calls = [call(cfg, 'primary', arch, mycloud),
+                 call(cfg, 'security', arch, mycloud)]
+        mockgm.assert_has_calls(calls)
+
+        with mock.patch.object(cc_apt_configure, 'search_for_mirror_dns',
+                               return_value="http://mocked/foo") as mocksdns:
+            mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
+        calls = [call(True, 'primary', cfg, mycloud),
+                 call(True, 'security', cfg, mycloud)]
+        mocksdns.assert_has_calls(calls)
+
+        # first return is for the non-dns call before
+        with mock.patch.object(cc_apt_configure.util, 'search_for_mirror',
+                               side_effect=[None, pmir, None, smir]) as mockse:
+            mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
+
+        calls = [call(None),
+                 call(['http://ubuntu-mirror.localdomain/ubuntu',
+                       'http://ubuntu-mirror/ubuntu']),
+                 call(None),
+                 call(['http://ubuntu-security-mirror.localdomain/ubuntu',
+                       'http://ubuntu-security-mirror/ubuntu'])]
+        mockse.assert_has_calls(calls)
+
+        self.assertEqual(mirrors['MIRROR'],
+                         pmir)
+        self.assertEqual(mirrors['PRIMARY'],
+                         pmir)
+        self.assertEqual(mirrors['SECURITY'],
+                         smir)
+
+    def test_apt_v3_add_mirror_keys(self):
+        """test_apt_v3_add_mirror_keys - Test adding key for mirrors"""
+        arch = 'amd64'
+        cfg = {
+            'primary': [
+                {'arches': [arch],
+                 'uri': 'http://test.ubuntu.com/',
+                 'filename': 'primary',
+                 'key': 'fakekey_primary'}],
+            'security': [
+                {'arches': [arch],
+                 'uri': 'http://testsec.ubuntu.com/',
+                 'filename': 'security',
+                 'key': 'fakekey_security'}]
+        }
+
+        with mock.patch.object(cc_apt_configure,
+                               'add_apt_key_raw') as mockadd:
+            cc_apt_configure.add_mirror_keys(cfg, TARGET)
+        calls = [
+            mock.call('fakekey_primary', 'primary', hardened=False),
+            mock.call('fakekey_security', 'security', hardened=False),
+        ]
+        mockadd.assert_has_calls(calls, any_order=True)
+
+
+ "debconf_set_selections") + @mock.patch("cloudinit.config.cc_apt_configure." + "util.get_installed_packages") + def test_set_sel_call_has_expected_input(self, m_get_inst, m_set_sel): + data = { + 'set1': 'pkga pkga/q1 mybool false', + 'set2': ('pkgb\tpkgb/b1\tstr\tthis is a string\n' + 'pkgc\tpkgc/ip\tstring\t10.0.0.1')} + lines = '\n'.join(data.values()).split('\n') + + m_get_inst.return_value = ["adduser", "apparmor"] + m_set_sel.return_value = None + + cc_apt_configure.apply_debconf_selections({'debconf_selections': data}) + self.assertTrue(m_get_inst.called) + self.assertEqual(m_set_sel.call_count, 1) + + # assumes called with *args value. + selections = m_set_sel.call_args_list[0][0][0].decode() + + missing = [ + line for line in lines if line not in selections.splitlines() + ] + self.assertEqual([], missing) + + @mock.patch("cloudinit.config.cc_apt_configure.dpkg_reconfigure") + @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections") + @mock.patch("cloudinit.config.cc_apt_configure." + "util.get_installed_packages") + def test_reconfigure_if_intersection(self, m_get_inst, m_set_sel, + m_dpkg_r): + data = { + 'set1': 'pkga pkga/q1 mybool false', + 'set2': ('pkgb\tpkgb/b1\tstr\tthis is a string\n' + 'pkgc\tpkgc/ip\tstring\t10.0.0.1'), + 'cloud-init': ('cloud-init cloud-init/datasources' + 'multiselect MAAS')} + + m_set_sel.return_value = None + m_get_inst.return_value = ["adduser", "apparmor", "pkgb", + "cloud-init", 'zdog'] + + cc_apt_configure.apply_debconf_selections({'debconf_selections': data}) + + # reconfigure should be called with the intersection + # of (packages in config, packages installed) + self.assertEqual(m_dpkg_r.call_count, 1) + # assumes called with *args (dpkg_reconfigure([a,b,c], target=)) + packages = m_dpkg_r.call_args_list[0][0][0] + self.assertEqual(set(['cloud-init', 'pkgb']), set(packages)) + + @mock.patch("cloudinit.config.cc_apt_configure.dpkg_reconfigure") + @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections") + @mock.patch("cloudinit.config.cc_apt_configure." + "util.get_installed_packages") + def test_reconfigure_if_no_intersection(self, m_get_inst, m_set_sel, + m_dpkg_r): + data = {'set1': 'pkga pkga/q1 mybool false'} + + m_get_inst.return_value = ["adduser", "apparmor", "pkgb", + "cloud-init", 'zdog'] + m_set_sel.return_value = None + + cc_apt_configure.apply_debconf_selections({'debconf_selections': data}) + + self.assertTrue(m_get_inst.called) + self.assertEqual(m_dpkg_r.call_count, 0) + + @mock.patch("cloudinit.config.cc_apt_configure.subp.subp") + def test_dpkg_reconfigure_does_reconfigure(self, m_subp): + target = "/foo-target" + + # due to the way the cleaners are called (via dictionary reference) + # mocking clean_cloud_init directly does not work. So we mock + # the CONFIG_CLEANERS dictionary and assert our cleaner is called. + ci_cleaner = mock.MagicMock() + with mock.patch.dict(("cloudinit.config.cc_apt_configure." 
+ "CONFIG_CLEANERS"), + values={'cloud-init': ci_cleaner}, clear=True): + cc_apt_configure.dpkg_reconfigure(['pkga', 'cloud-init'], + target=target) + # cloud-init is actually the only package we have a cleaner for + # so for now, its the only one that should reconfigured + self.assertTrue(m_subp.called) + ci_cleaner.assert_called_with(target) + self.assertEqual(m_subp.call_count, 1) + found = m_subp.call_args_list[0][0][0] + expected = ['dpkg-reconfigure', '--frontend=noninteractive', + 'cloud-init'] + self.assertEqual(expected, found) + + @mock.patch("cloudinit.config.cc_apt_configure.subp.subp") + def test_dpkg_reconfigure_not_done_on_no_data(self, m_subp): + cc_apt_configure.dpkg_reconfigure([]) + m_subp.assert_not_called() + + @mock.patch("cloudinit.config.cc_apt_configure.subp.subp") + def test_dpkg_reconfigure_not_done_if_no_cleaners(self, m_subp): + cc_apt_configure.dpkg_reconfigure(['pkgfoo', 'pkgbar']) + m_subp.assert_not_called() + +# +# vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_apk_configure.py b/tests/unittests/config/test_cc_apk_configure.py new file mode 100644 index 00000000..70139451 --- /dev/null +++ b/tests/unittests/config/test_cc_apk_configure.py @@ -0,0 +1,299 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +""" test_apk_configure +Test creation of repositories file +""" + +import logging +import os +import textwrap + +from cloudinit import (cloud, helpers, util) + +from cloudinit.config import cc_apk_configure +from tests.unittests.helpers import (FilesystemMockingTestCase, mock) + +REPO_FILE = "/etc/apk/repositories" +DEFAULT_MIRROR_URL = "https://alpine.global.ssl.fastly.net/alpine" +CC_APK = 'cloudinit.config.cc_apk_configure' + + +class TestNoConfig(FilesystemMockingTestCase): + def setUp(self): + super(TestNoConfig, self).setUp() + self.add_patch(CC_APK + '._write_repositories_file', 'm_write_repos') + self.name = "apk-configure" + self.cloud_init = None + self.log = logging.getLogger("TestNoConfig") + self.args = [] + + def test_no_config(self): + """ + Test that nothing is done if no apk-configure + configuration is provided. + """ + config = util.get_builtin_cfg() + + cc_apk_configure.handle(self.name, config, self.cloud_init, + self.log, self.args) + + self.assertEqual(0, self.m_write_repos.call_count) + + +class TestConfig(FilesystemMockingTestCase): + def setUp(self): + super(TestConfig, self).setUp() + self.new_root = self.tmp_dir() + self.new_root = self.reRoot(root=self.new_root) + for dirname in ['tmp', 'etc/apk']: + util.ensure_dir(os.path.join(self.new_root, dirname)) + self.paths = helpers.Paths({'templates_dir': self.new_root}) + self.name = "apk-configure" + self.cloud = cloud.Cloud(None, self.paths, None, None, None) + self.log = logging.getLogger("TestNoConfig") + self.args = [] + + @mock.patch(CC_APK + '._write_repositories_file') + def test_no_repo_settings(self, m_write_repos): + """ + Test that nothing is written if the 'alpine-repo' key + is not present. + """ + config = {"apk_repos": {}} + + cc_apk_configure.handle(self.name, config, self.cloud, self.log, + self.args) + + self.assertEqual(0, m_write_repos.call_count) + + @mock.patch(CC_APK + '._write_repositories_file') + def test_empty_repo_settings(self, m_write_repos): + """ + Test that nothing is written if 'alpine_repo' list is empty. 
+ """ + config = {"apk_repos": {"alpine_repo": []}} + + cc_apk_configure.handle(self.name, config, self.cloud, self.log, + self.args) + + self.assertEqual(0, m_write_repos.call_count) + + def test_only_main_repo(self): + """ + Test when only details of main repo is written to file. + """ + alpine_version = 'v3.12' + config = { + "apk_repos": { + "alpine_repo": { + "version": alpine_version + } + } + } + + cc_apk_configure.handle(self.name, config, self.cloud, self.log, + self.args) + + expected_content = textwrap.dedent("""\ + # + # Created by cloud-init + # + # This file is written on first boot of an instance + # + + {0}/{1}/main + + """.format(DEFAULT_MIRROR_URL, alpine_version)) + + self.assertEqual(expected_content, util.load_file(REPO_FILE)) + + def test_main_and_community_repos(self): + """ + Test when only details of main and community repos are + written to file. + """ + alpine_version = 'edge' + config = { + "apk_repos": { + "alpine_repo": { + "version": alpine_version, + "community_enabled": True + } + } + } + + cc_apk_configure.handle(self.name, config, self.cloud, self.log, + self.args) + + expected_content = textwrap.dedent("""\ + # + # Created by cloud-init + # + # This file is written on first boot of an instance + # + + {0}/{1}/main + {0}/{1}/community + + """.format(DEFAULT_MIRROR_URL, alpine_version)) + + self.assertEqual(expected_content, util.load_file(REPO_FILE)) + + def test_main_community_testing_repos(self): + """ + Test when details of main, community and testing repos + are written to file. + """ + alpine_version = 'v3.12' + config = { + "apk_repos": { + "alpine_repo": { + "version": alpine_version, + "community_enabled": True, + "testing_enabled": True + } + } + } + + cc_apk_configure.handle(self.name, config, self.cloud, self.log, + self.args) + + expected_content = textwrap.dedent("""\ + # + # Created by cloud-init + # + # This file is written on first boot of an instance + # + + {0}/{1}/main + {0}/{1}/community + # + # Testing - using with non-Edge installation may cause problems! + # + {0}/edge/testing + + """.format(DEFAULT_MIRROR_URL, alpine_version)) + + self.assertEqual(expected_content, util.load_file(REPO_FILE)) + + def test_edge_main_community_testing_repos(self): + """ + Test when details of main, community and testing repos + for Edge version of Alpine are written to file. + """ + alpine_version = 'edge' + config = { + "apk_repos": { + "alpine_repo": { + "version": alpine_version, + "community_enabled": True, + "testing_enabled": True + } + } + } + + cc_apk_configure.handle(self.name, config, self.cloud, self.log, + self.args) + + expected_content = textwrap.dedent("""\ + # + # Created by cloud-init + # + # This file is written on first boot of an instance + # + + {0}/{1}/main + {0}/{1}/community + {0}/{1}/testing + + """.format(DEFAULT_MIRROR_URL, alpine_version)) + + self.assertEqual(expected_content, util.load_file(REPO_FILE)) + + def test_main_community_testing_local_repos(self): + """ + Test when details of main, community, testing and + local repos are written to file. 
+ """ + alpine_version = 'v3.12' + local_repo_url = 'http://some.mirror/whereever' + config = { + "apk_repos": { + "alpine_repo": { + "version": alpine_version, + "community_enabled": True, + "testing_enabled": True + }, + "local_repo_base_url": local_repo_url + } + } + + cc_apk_configure.handle(self.name, config, self.cloud, self.log, + self.args) + + expected_content = textwrap.dedent("""\ + # + # Created by cloud-init + # + # This file is written on first boot of an instance + # + + {0}/{1}/main + {0}/{1}/community + # + # Testing - using with non-Edge installation may cause problems! + # + {0}/edge/testing + + # + # Local repo + # + {2}/{1} + + """.format(DEFAULT_MIRROR_URL, alpine_version, local_repo_url)) + + self.assertEqual(expected_content, util.load_file(REPO_FILE)) + + def test_edge_main_community_testing_local_repos(self): + """ + Test when details of main, community, testing and local repos + for Edge version of Alpine are written to file. + """ + alpine_version = 'edge' + local_repo_url = 'http://some.mirror/whereever' + config = { + "apk_repos": { + "alpine_repo": { + "version": alpine_version, + "community_enabled": True, + "testing_enabled": True + }, + "local_repo_base_url": local_repo_url + } + } + + cc_apk_configure.handle(self.name, config, self.cloud, self.log, + self.args) + + expected_content = textwrap.dedent("""\ + # + # Created by cloud-init + # + # This file is written on first boot of an instance + # + + {0}/{1}/main + {0}/{1}/community + {0}/edge/testing + + # + # Local repo + # + {2}/{1} + + """.format(DEFAULT_MIRROR_URL, alpine_version, local_repo_url)) + + self.assertEqual(expected_content, util.load_file(REPO_FILE)) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_apt_pipelining.py b/tests/unittests/config/test_cc_apt_pipelining.py new file mode 100644 index 00000000..d7589d35 --- /dev/null +++ b/tests/unittests/config/test_cc_apt_pipelining.py @@ -0,0 +1,28 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Tests cc_apt_pipelining handler""" + +import cloudinit.config.cc_apt_pipelining as cc_apt_pipelining + +from tests.unittests.helpers import CiTestCase, mock + + +class TestAptPipelining(CiTestCase): + + @mock.patch('cloudinit.config.cc_apt_pipelining.util.write_file') + def test_not_disabled_by_default(self, m_write_file): + """ensure that default behaviour is to not disable pipelining""" + cc_apt_pipelining.handle('foo', {}, None, mock.MagicMock(), None) + self.assertEqual(0, m_write_file.call_count) + + @mock.patch('cloudinit.config.cc_apt_pipelining.util.write_file') + def test_false_disables_pipelining(self, m_write_file): + """ensure that pipelining can be disabled with correct config""" + cc_apt_pipelining.handle( + 'foo', {'apt_pipelining': 'false'}, None, mock.MagicMock(), None) + self.assertEqual(1, m_write_file.call_count) + args, _ = m_write_file.call_args + self.assertEqual(cc_apt_pipelining.DEFAULT_FILE, args[0]) + self.assertIn('Pipeline-Depth "0"', args[1]) + +# vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_bootcmd.py b/tests/unittests/config/test_cc_bootcmd.py new file mode 100644 index 00000000..6f38f12a --- /dev/null +++ b/tests/unittests/config/test_cc_bootcmd.py @@ -0,0 +1,152 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+import logging
+import tempfile
+
+from cloudinit.config.cc_bootcmd import handle, schema
+from cloudinit import (subp, util)
+from tests.unittests.helpers import (
+    CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema)
+
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+
+class FakeExtendedTempFile(object):
+    def __init__(self, suffix):
+        self.suffix = suffix
+        self.handle = tempfile.NamedTemporaryFile(
+            prefix="ci-%s." % self.__class__.__name__, delete=False)
+
+    def __enter__(self):
+        return self.handle
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.handle.close()
+        util.del_file(self.handle.name)
+
+
+class TestBootcmd(CiTestCase):
+
+    with_logs = True
+
+    _etmpfile_path = ('cloudinit.config.cc_bootcmd.temp_utils.'
+                      'ExtendedTemporaryFile')
+
+    def setUp(self):
+        super(TestBootcmd, self).setUp()
+        self.subp = subp.subp
+        self.new_root = self.tmp_dir()
+
+    def test_handler_skip_if_no_bootcmd(self):
+        """When the provided config doesn't contain bootcmd, skip it."""
+        cfg = {}
+        mycloud = get_cloud()
+        handle('notimportant', cfg, mycloud, LOG, None)
+        self.assertIn(
+            "Skipping module named notimportant, no 'bootcmd' key",
+            self.logs.getvalue())
+
+    def test_handler_invalid_command_set(self):
+        """Commands which can't be converted to shell will raise errors."""
+        invalid_config = {'bootcmd': 1}
+        cc = get_cloud()
+        with self.assertRaises(TypeError) as context_manager:
+            handle('cc_bootcmd', invalid_config, cc, LOG, [])
+        self.assertIn('Failed to shellify bootcmd', self.logs.getvalue())
+        self.assertEqual(
+            "Input to shellify was type 'int'. Expected list or tuple.",
+            str(context_manager.exception))
+
+    @skipUnlessJsonSchema()
+    def test_handler_schema_validation_warns_non_array_type(self):
+        """Schema validation warns of non-array type for bootcmd key.
+
+        Schema validation is not strict, so bootcmd attempts to shellify the
+        invalid content.
+        """
+        invalid_config = {'bootcmd': 1}
+        cc = get_cloud()
+        with self.assertRaises(TypeError):
+            handle('cc_bootcmd', invalid_config, cc, LOG, [])
+        self.assertIn(
+            'Invalid config:\nbootcmd: 1 is not of type \'array\'',
+            self.logs.getvalue())
+        self.assertIn('Failed to shellify', self.logs.getvalue())
+
+    @skipUnlessJsonSchema()
+    def test_handler_schema_validation_warns_non_array_item_type(self):
+        """Schema validation warns of non-array or string bootcmd items.
+
+        Schema validation is not strict, so bootcmd attempts to shellify the
+        invalid content.
+        """
+        invalid_config = {
+            'bootcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]}
+        cc = get_cloud()
+        with self.assertRaises(TypeError) as context_manager:
+            handle('cc_bootcmd', invalid_config, cc, LOG, [])
+        expected_warnings = [
+            'bootcmd.1: 20 is not valid under any of the given schemas',
+            'bootcmd.3: {\'a\': \'n\'} is not valid under any of the given'
+            ' schema'
+        ]
+        logs = self.logs.getvalue()
+        for warning in expected_warnings:
+            self.assertIn(warning, logs)
+        self.assertIn('Failed to shellify', logs)
+        self.assertEqual(
+            ("Unable to shellify type 'int'. Expected list, string, tuple. "
+             "Got: 20"),
+            str(context_manager.exception))
+
+    def test_handler_creates_and_runs_bootcmd_script_with_instance_id(self):
+        """Valid schema runs a bootcmd script with INSTANCE_ID in the env."""
+        cc = get_cloud()
+        out_file = self.tmp_path('bootcmd.out', self.new_root)
+        my_id = "b6ea0f59-e27d-49c6-9f87-79f19765a425"
+        valid_config = {'bootcmd': [
+            'echo {0} $INSTANCE_ID > {1}'.format(my_id, out_file)]}
+
+        with mock.patch(self._etmpfile_path, FakeExtendedTempFile):
+            with self.allow_subp(['/bin/sh']):
+                handle('cc_bootcmd', valid_config, cc, LOG, [])
+        self.assertEqual(my_id + ' iid-datasource-none\n',
+                         util.load_file(out_file))
+
+    def test_handler_runs_bootcmd_script_with_error(self):
+        """When a valid script generates an error, that error is raised."""
+        cc = get_cloud()
+        valid_config = {'bootcmd': ['exit 1']}  # Script with error
+
+        with mock.patch(self._etmpfile_path, FakeExtendedTempFile):
+            with self.allow_subp(['/bin/sh']):
+                with self.assertRaises(subp.ProcessExecutionError) as ctxt:
+                    handle('does-not-matter', valid_config, cc, LOG, [])
+        self.assertIn(
+            'Unexpected error while running command.\n'
+            "Command: ['/bin/sh',",
+            str(ctxt.exception))
+        self.assertIn(
+            'Failed to run bootcmd module does-not-matter',
+            self.logs.getvalue())
+
+
+@skipUnlessJsonSchema()
+class TestSchema(CiTestCase, SchemaTestCaseMixin):
+    """Directly test schema rather than through handle."""
+
+    schema = schema
+
+    def test_duplicates_are_fine_array_array(self):
+        """Duplicated commands array/array entries are allowed."""
+        self.assertSchemaValid(
+            ["byebye", "byebye"], 'command entries can be duplicate')
+
+    def test_duplicates_are_fine_array_string(self):
+        """Duplicated commands array/string entries are allowed."""
+        self.assertSchemaValid(
+            ["echo bye", "echo bye"], "command entries can be duplicate.")
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_ca_certs.py b/tests/unittests/config/test_cc_ca_certs.py
new file mode 100644
index 00000000..91b005d0
--- /dev/null
+++ b/tests/unittests/config/test_cc_ca_certs.py
@@ -0,0 +1,361 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import shutil
+import tempfile
+import unittest
+from contextlib import ExitStack
+from unittest import mock
+
+from cloudinit import distros
+from cloudinit.config import cc_ca_certs
+from cloudinit import helpers
+from cloudinit import subp
+from cloudinit import util
+from tests.unittests.helpers import TestCase
+
+from tests.unittests.util import get_cloud
+
+
+ """ + config = util.get_builtin_cfg() + with ExitStack() as mocks: + util_mock = mocks.enter_context( + mock.patch.object(util, 'write_file')) + certs_mock = mocks.enter_context( + mock.patch.object(cc_ca_certs, 'update_ca_certs')) + + cc_ca_certs.handle(self.name, config, self.cloud_init, self.log, + self.args) + + self.assertEqual(util_mock.call_count, 0) + self.assertEqual(certs_mock.call_count, 0) + + +class TestConfig(TestCase): + def setUp(self): + super(TestConfig, self).setUp() + self.name = "ca-certs" + self.paths = None + self.log = logging.getLogger("TestNoConfig") + self.args = [] + + def _fetch_distro(self, kind): + cls = distros.fetch(kind) + paths = helpers.Paths({}) + return cls(kind, {}, paths) + + def _mock_init(self): + self.mocks = ExitStack() + self.addCleanup(self.mocks.close) + + # Mock out the functions that actually modify the system + self.mock_add = self.mocks.enter_context( + mock.patch.object(cc_ca_certs, 'add_ca_certs')) + self.mock_update = self.mocks.enter_context( + mock.patch.object(cc_ca_certs, 'update_ca_certs')) + self.mock_remove = self.mocks.enter_context( + mock.patch.object(cc_ca_certs, 'remove_default_ca_certs')) + + def test_no_trusted_list(self): + """ + Test that no certificates are written if the 'trusted' key is not + present. + """ + config = {"ca-certs": {}} + + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = get_cloud(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) + + self.assertEqual(self.mock_add.call_count, 0) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 0) + + def test_empty_trusted_list(self): + """Test that no certificate are written if 'trusted' list is empty.""" + config = {"ca-certs": {"trusted": []}} + + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = get_cloud(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) + + self.assertEqual(self.mock_add.call_count, 0) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 0) + + def test_single_trusted(self): + """Test that a single cert gets passed to add_ca_certs.""" + config = {"ca-certs": {"trusted": ["CERT1"]}} + + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = get_cloud(distro_name) + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) + + self.mock_add.assert_called_once_with(conf, ['CERT1']) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 0) + + def test_multiple_trusted(self): + """Test that multiple certs get passed to add_ca_certs.""" + config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}} + + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = get_cloud(distro_name) + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) + + self.mock_add.assert_called_once_with(conf, ['CERT1', 'CERT2']) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 0) + + def test_remove_default_ca_certs(self): + """Test remove_defaults works as expected.""" + config = {"ca-certs": {"remove-defaults": True}} + + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = get_cloud(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) + + self.assertEqual(self.mock_add.call_count, 0) + 
self.assertEqual(self.mock_update.call_count, 1)
+            self.assertEqual(self.mock_remove.call_count, 1)
+
+    def test_no_remove_defaults_if_false(self):
+        """Test remove_defaults is not called when config value is False."""
+        config = {"ca-certs": {"remove-defaults": False}}
+
+        for distro_name in cc_ca_certs.distros:
+            self._mock_init()
+            cloud = get_cloud(distro_name)
+            cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+
+            self.assertEqual(self.mock_add.call_count, 0)
+            self.assertEqual(self.mock_update.call_count, 1)
+            self.assertEqual(self.mock_remove.call_count, 0)
+
+    def test_correct_order_for_remove_then_add(self):
+        """Test that defaults are removed before trusted certs are added."""
+        config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}}
+
+        for distro_name in cc_ca_certs.distros:
+            self._mock_init()
+            cloud = get_cloud(distro_name)
+            conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+            cc_ca_certs.handle(self.name, config, cloud, self.log, self.args)
+
+            self.mock_add.assert_called_once_with(conf, ['CERT1'])
+            self.assertEqual(self.mock_update.call_count, 1)
+            self.assertEqual(self.mock_remove.call_count, 1)
+
+
+class TestAddCaCerts(TestCase):
+
+    def setUp(self):
+        super(TestAddCaCerts, self).setUp()
+        tmpdir = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, tmpdir)
+        self.paths = helpers.Paths({
+            'cloud_dir': tmpdir,
+        })
+        self.add_patch("cloudinit.config.cc_ca_certs.os.stat", "m_stat")
+
+    def _fetch_distro(self, kind):
+        cls = distros.fetch(kind)
+        paths = helpers.Paths({})
+        return cls(kind, {}, paths)
+
+    def test_no_certs_in_list(self):
+        """Test that no certificates are written if not provided."""
+        for distro_name in cc_ca_certs.distros:
+            conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+            with mock.patch.object(util, 'write_file') as mockobj:
+                cc_ca_certs.add_ca_certs(conf, [])
+            self.assertEqual(mockobj.call_count, 0)
+
+    def test_single_cert_trailing_cr(self):
+        """Test adding a single certificate to the trusted CAs
+        when existing ca-certificates has trailing newline"""
+        cert = "CERT1\nLINE2\nLINE3"
+
+        ca_certs_content = "line1\nline2\ncloud-init-ca-certs.crt\nline3\n"
+        expected = "line1\nline2\nline3\ncloud-init-ca-certs.crt\n"
+
+        self.m_stat.return_value.st_size = 1
+
+        for distro_name in cc_ca_certs.distros:
+            conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+
+            with ExitStack() as mocks:
+                mock_write = mocks.enter_context(
+                    mock.patch.object(util, 'write_file'))
+                mock_load = mocks.enter_context(
+                    mock.patch.object(util, 'load_file',
+                                      return_value=ca_certs_content))
+
+                cc_ca_certs.add_ca_certs(conf, [cert])
+
+                mock_write.assert_has_calls([
+                    mock.call(conf['ca_cert_full_path'],
+                              cert, mode=0o644)])
+                if conf['ca_cert_config'] is not None:
+                    mock_write.assert_has_calls([
+                        mock.call(conf['ca_cert_config'],
+                                  expected, omode="wb")])
+                mock_load.assert_called_once_with(conf['ca_cert_config'])
+
+    def test_single_cert_no_trailing_cr(self):
+        """Test adding a single certificate to the trusted CAs
+        when existing ca-certificates has no trailing newline"""
+        cert = "CERT1\nLINE2\nLINE3"
+
+        ca_certs_content = "line1\nline2\nline3"
+
+        self.m_stat.return_value.st_size = 1
+
+        for distro_name in cc_ca_certs.distros:
+            conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+
+            with ExitStack() as mocks:
+                mock_write = mocks.enter_context(
+                    mock.patch.object(util, 'write_file'))
+                mock_load = mocks.enter_context(
+                    mock.patch.object(util, 'load_file',
+                                      return_value=ca_certs_content))
+
+
cc_ca_certs.add_ca_certs(conf, [cert]) + + mock_write.assert_has_calls([ + mock.call(conf['ca_cert_full_path'], + cert, mode=0o644)]) + if conf['ca_cert_config'] is not None: + mock_write.assert_has_calls([ + mock.call(conf['ca_cert_config'], + "%s\n%s\n" % (ca_certs_content, + conf['ca_cert_filename']), + omode="wb")]) + + mock_load.assert_called_once_with(conf['ca_cert_config']) + + def test_single_cert_to_empty_existing_ca_file(self): + """Test adding a single certificate to the trusted CAs + when existing ca-certificates.conf is empty""" + cert = "CERT1\nLINE2\nLINE3" + + expected = "cloud-init-ca-certs.crt\n" + + self.m_stat.return_value.st_size = 0 + + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + with mock.patch.object(util, 'write_file', + autospec=True) as m_write: + + cc_ca_certs.add_ca_certs(conf, [cert]) + + m_write.assert_has_calls([ + mock.call(conf['ca_cert_full_path'], + cert, mode=0o644)]) + if conf['ca_cert_config'] is not None: + m_write.assert_has_calls([ + mock.call(conf['ca_cert_config'], + expected, omode="wb")]) + + def test_multiple_certs(self): + """Test adding multiple certificates to the trusted CAs.""" + certs = ["CERT1\nLINE2\nLINE3", "CERT2\nLINE2\nLINE3"] + expected_cert_file = "\n".join(certs) + ca_certs_content = "line1\nline2\nline3" + + self.m_stat.return_value.st_size = 1 + + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + + with ExitStack() as mocks: + mock_write = mocks.enter_context( + mock.patch.object(util, 'write_file')) + mock_load = mocks.enter_context( + mock.patch.object(util, 'load_file', + return_value=ca_certs_content)) + + cc_ca_certs.add_ca_certs(conf, certs) + + mock_write.assert_has_calls([ + mock.call(conf['ca_cert_full_path'], + expected_cert_file, mode=0o644)]) + if conf['ca_cert_config'] is not None: + mock_write.assert_has_calls([ + mock.call(conf['ca_cert_config'], + "%s\n%s\n" % (ca_certs_content, + conf['ca_cert_filename']), + omode='wb')]) + + mock_load.assert_called_once_with(conf['ca_cert_config']) + + +class TestUpdateCaCerts(unittest.TestCase): + def test_commands(self): + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + with mock.patch.object(subp, 'subp') as mockobj: + cc_ca_certs.update_ca_certs(conf) + mockobj.assert_called_once_with( + conf['ca_cert_update_cmd'], capture=False) + + +class TestRemoveDefaultCaCerts(TestCase): + + def setUp(self): + super(TestRemoveDefaultCaCerts, self).setUp() + tmpdir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, tmpdir) + self.paths = helpers.Paths({ + 'cloud_dir': tmpdir, + }) + + def test_commands(self): + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + + with ExitStack() as mocks: + mock_delete = mocks.enter_context( + mock.patch.object(util, 'delete_dir_contents')) + mock_write = mocks.enter_context( + mock.patch.object(util, 'write_file')) + mock_subp = mocks.enter_context( + mock.patch.object(subp, 'subp')) + + cc_ca_certs.remove_default_ca_certs(distro_name, conf) + + mock_delete.assert_has_calls([ + mock.call(conf['ca_cert_path']), + mock.call(conf['ca_cert_system_path'])]) + + if conf['ca_cert_config'] is not None: + mock_write.assert_called_once_with( + conf['ca_cert_config'], "", mode=0o644) + + if distro_name in ['debian', 'ubuntu']: + mock_subp.assert_called_once_with( + ('debconf-set-selections', '-'), + "ca-certificates \ +ca-certificates/trust_new_crts 
select no") + +# vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_chef.py b/tests/unittests/config/test_cc_chef.py new file mode 100644 index 00000000..060293c8 --- /dev/null +++ b/tests/unittests/config/test_cc_chef.py @@ -0,0 +1,271 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import httpretty +import json +import logging +import os + +from cloudinit.config import cc_chef +from cloudinit import util + +from tests.unittests.helpers import ( + HttprettyTestCase, FilesystemMockingTestCase, mock, skipIf) + +from tests.unittests.util import get_cloud + +LOG = logging.getLogger(__name__) + +CLIENT_TEMPL = os.path.sep.join(["templates", "chef_client.rb.tmpl"]) + +# This is adjusted to use http because using with https causes issue +# in some openssl/httpretty combinations. +# https://github.com/gabrielfalcao/HTTPretty/issues/242 +# We saw issue in opensuse 42.3 with +# httpretty=0.8.8-7.1 ndg-httpsclient=0.4.0-3.2 pyOpenSSL=16.0.0-4.1 +OMNIBUS_URL_HTTP = cc_chef.OMNIBUS_URL.replace("https:", "http:") + + +class TestInstallChefOmnibus(HttprettyTestCase): + + def setUp(self): + super(TestInstallChefOmnibus, self).setUp() + self.new_root = self.tmp_dir() + + @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP) + def test_install_chef_from_omnibus_runs_chef_url_content(self): + """install_chef_from_omnibus calls subp_blob_in_tempfile.""" + response = b'#!/bin/bash\necho "Hi Mom"' + httpretty.register_uri( + httpretty.GET, cc_chef.OMNIBUS_URL, body=response, status=200) + ret = (None, None) # stdout, stderr but capture=False + + with mock.patch("cloudinit.config.cc_chef.subp_blob_in_tempfile", + return_value=ret) as m_subp_blob: + cc_chef.install_chef_from_omnibus() + # admittedly whitebox, but assuming subp_blob_in_tempfile works + # this should be fine. 
+ self.assertEqual( + [mock.call(blob=response, args=[], basename='chef-omnibus-install', + capture=False)], + m_subp_blob.call_args_list) + + @mock.patch('cloudinit.config.cc_chef.url_helper.readurl') + @mock.patch('cloudinit.config.cc_chef.subp_blob_in_tempfile') + def test_install_chef_from_omnibus_retries_url(self, m_subp_blob, m_rdurl): + """install_chef_from_omnibus retries OMNIBUS_URL upon failure.""" + + class FakeURLResponse(object): + contents = '#!/bin/bash\necho "Hi Mom" > {0}/chef.out'.format( + self.new_root) + + m_rdurl.return_value = FakeURLResponse() + + cc_chef.install_chef_from_omnibus() + expected_kwargs = {'retries': cc_chef.OMNIBUS_URL_RETRIES, + 'url': cc_chef.OMNIBUS_URL} + self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[0][1]) + cc_chef.install_chef_from_omnibus(retries=10) + expected_kwargs = {'retries': 10, + 'url': cc_chef.OMNIBUS_URL} + self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[1][1]) + expected_subp_kwargs = { + 'args': ['-v', '2.0'], + 'basename': 'chef-omnibus-install', + 'blob': m_rdurl.return_value.contents, + 'capture': False + } + self.assertCountEqual( + expected_subp_kwargs, + m_subp_blob.call_args_list[0][1]) + + @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP) + @mock.patch('cloudinit.config.cc_chef.subp_blob_in_tempfile') + def test_install_chef_from_omnibus_has_omnibus_version(self, m_subp_blob): + """install_chef_from_omnibus provides version arg to OMNIBUS_URL.""" + chef_outfile = self.tmp_path('chef.out', self.new_root) + response = '#!/bin/bash\necho "Hi Mom" > {0}'.format(chef_outfile) + httpretty.register_uri( + httpretty.GET, cc_chef.OMNIBUS_URL, body=response) + cc_chef.install_chef_from_omnibus(omnibus_version='2.0') + + called_kwargs = m_subp_blob.call_args_list[0][1] + expected_kwargs = { + 'args': ['-v', '2.0'], + 'basename': 'chef-omnibus-install', + 'blob': response, + 'capture': False + } + self.assertCountEqual(expected_kwargs, called_kwargs) + + +class TestChef(FilesystemMockingTestCase): + + def setUp(self): + super(TestChef, self).setUp() + self.tmp = self.tmp_dir() + + def test_no_config(self): + self.patchUtils(self.tmp) + self.patchOS(self.tmp) + + cfg = {} + cc_chef.handle('chef', cfg, get_cloud(), LOG, []) + for d in cc_chef.CHEF_DIRS: + self.assertFalse(os.path.isdir(d)) + + @skipIf(not os.path.isfile(CLIENT_TEMPL), + CLIENT_TEMPL + " is not available") + def test_basic_config(self): + """ + test basic config looks sane + + # This should create a file of the format... + # Created by cloud-init v. 
0.7.6 on Sat, 11 Oct 2014 23:57:21 +0000 + chef_license "accept" + log_level :info + ssl_verify_mode :verify_none + log_location "/var/log/chef/client.log" + validation_client_name "bob" + validation_key "/etc/chef/validation.pem" + client_key "/etc/chef/client.pem" + chef_server_url "localhost" + environment "_default" + node_name "iid-datasource-none" + json_attribs "/etc/chef/firstboot.json" + file_cache_path "/var/cache/chef" + file_backup_path "/var/backups/chef" + pid_file "/var/run/chef/client.pid" + Chef::Log::Formatter.show_time = true + encrypted_data_bag_secret "/etc/chef/encrypted_data_bag_secret" + """ + tpl_file = util.load_file('templates/chef_client.rb.tmpl') + self.patchUtils(self.tmp) + self.patchOS(self.tmp) + + util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file) + cfg = { + 'chef': { + 'chef_license': "accept", + 'server_url': 'localhost', + 'validation_name': 'bob', + 'validation_key': "/etc/chef/vkey.pem", + 'validation_cert': "this is my cert", + 'encrypted_data_bag_secret': + '/etc/chef/encrypted_data_bag_secret' + }, + } + cc_chef.handle('chef', cfg, get_cloud(), LOG, []) + for d in cc_chef.CHEF_DIRS: + self.assertTrue(os.path.isdir(d)) + c = util.load_file(cc_chef.CHEF_RB_PATH) + + # the content of these keys is not expected to be rendered to tmpl + unrendered_keys = ('validation_cert',) + for k, v in cfg['chef'].items(): + if k in unrendered_keys: + continue + self.assertIn(v, c) + for k, v in cc_chef.CHEF_RB_TPL_DEFAULTS.items(): + if k in unrendered_keys: + continue + # the value from the cfg overrides that in the default + val = cfg['chef'].get(k, v) + if isinstance(val, str): + self.assertIn(val, c) + c = util.load_file(cc_chef.CHEF_FB_PATH) + self.assertEqual({}, json.loads(c)) + + def test_firstboot_json(self): + self.patchUtils(self.tmp) + self.patchOS(self.tmp) + + cfg = { + 'chef': { + 'server_url': 'localhost', + 'validation_name': 'bob', + 'run_list': ['a', 'b', 'c'], + 'initial_attributes': { + 'c': 'd', + } + }, + } + cc_chef.handle('chef', cfg, get_cloud(), LOG, []) + c = util.load_file(cc_chef.CHEF_FB_PATH) + self.assertEqual( + { + 'run_list': ['a', 'b', 'c'], + 'c': 'd', + }, json.loads(c)) + + @skipIf(not os.path.isfile(CLIENT_TEMPL), + CLIENT_TEMPL + " is not available") + def test_template_deletes(self): + tpl_file = util.load_file('templates/chef_client.rb.tmpl') + self.patchUtils(self.tmp) + self.patchOS(self.tmp) + + util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file) + cfg = { + 'chef': { + 'server_url': 'localhost', + 'validation_name': 'bob', + 'json_attribs': None, + 'show_time': None, + }, + } + cc_chef.handle('chef', cfg, get_cloud(), LOG, []) + c = util.load_file(cc_chef.CHEF_RB_PATH) + self.assertNotIn('json_attribs', c) + self.assertNotIn('Formatter.show_time', c) + + @skipIf(not os.path.isfile(CLIENT_TEMPL), + CLIENT_TEMPL + " is not available") + def test_validation_cert_and_validation_key(self): + # test validation_cert content is written to validation_key path + tpl_file = util.load_file('templates/chef_client.rb.tmpl') + self.patchUtils(self.tmp) + self.patchOS(self.tmp) + + util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file) + v_path = '/etc/chef/vkey.pem' + v_cert = 'this is my cert' + cfg = { + 'chef': { + 'server_url': 'localhost', + 'validation_name': 'bob', + 'validation_key': v_path, + 'validation_cert': v_cert + }, + } + cc_chef.handle('chef', cfg, get_cloud(), LOG, []) + content = util.load_file(cc_chef.CHEF_RB_PATH) + self.assertIn(v_path, content) + 
util.load_file(v_path) + self.assertEqual(v_cert, util.load_file(v_path)) + + def test_validation_cert_with_system(self): + # test validation_cert content is not written over system file + tpl_file = util.load_file('templates/chef_client.rb.tmpl') + self.patchUtils(self.tmp) + self.patchOS(self.tmp) + + v_path = '/etc/chef/vkey.pem' + v_cert = "system" + expected_cert = "this is the system file certificate" + cfg = { + 'chef': { + 'server_url': 'localhost', + 'validation_name': 'bob', + 'validation_key': v_path, + 'validation_cert': v_cert + }, + } + util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file) + util.write_file(v_path, expected_cert) + cc_chef.handle('chef', cfg, get_cloud(), LOG, []) + content = util.load_file(cc_chef.CHEF_RB_PATH) + self.assertIn(v_path, content) + util.load_file(v_path) + self.assertEqual(expected_cert, util.load_file(v_path)) + +# vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_debug.py b/tests/unittests/config/test_cc_debug.py new file mode 100644 index 00000000..174f772f --- /dev/null +++ b/tests/unittests/config/test_cc_debug.py @@ -0,0 +1,59 @@ +# Copyright (C) 2014 Yahoo! Inc. +# +# This file is part of cloud-init. See LICENSE file for license information. +import logging +import shutil +import tempfile + +from cloudinit import util +from cloudinit.config import cc_debug +from tests.unittests.helpers import (FilesystemMockingTestCase, mock) + +from tests.unittests.util import get_cloud + +LOG = logging.getLogger(__name__) + + +@mock.patch('cloudinit.distros.debian.read_system_locale') +class TestDebug(FilesystemMockingTestCase): + def setUp(self): + super(TestDebug, self).setUp() + self.new_root = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, self.new_root) + self.patchUtils(self.new_root) + + def test_debug_write(self, m_locale): + m_locale.return_value = 'en_US.UTF-8' + cfg = { + 'abc': '123', + 'c': '\u20a0', + 'debug': { + 'verbose': True, + # Does not actually write here due to mocking... + 'output': '/var/log/cloud-init-debug.log', + }, + } + cc = get_cloud() + cc_debug.handle('cc_debug', cfg, cc, LOG, []) + contents = util.load_file('/var/log/cloud-init-debug.log') + # Some basic sanity tests... + self.assertNotEqual(0, len(contents)) + for k in cfg.keys(): + self.assertIn(k, contents) + + def test_debug_no_write(self, m_locale): + m_locale.return_value = 'en_US.UTF-8' + cfg = { + 'abc': '123', + 'debug': { + 'verbose': False, + # Does not actually write here due to mocking... + 'output': '/var/log/cloud-init-debug.log', + }, + } + cc = get_cloud() + cc_debug.handle('cc_debug', cfg, cc, LOG, []) + self.assertRaises(IOError, + util.load_file, '/var/log/cloud-init-debug.log') + +# vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_disable_ec2_metadata.py b/tests/unittests/config/test_cc_disable_ec2_metadata.py new file mode 100644 index 00000000..7a794845 --- /dev/null +++ b/tests/unittests/config/test_cc_disable_ec2_metadata.py @@ -0,0 +1,48 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +"""Tests cc_disable_ec2_metadata handler""" + +import cloudinit.config.cc_disable_ec2_metadata as ec2_meta + +from tests.unittests.helpers import CiTestCase, mock + +import logging + +LOG = logging.getLogger(__name__) + +DISABLE_CFG = {'disable_ec2_metadata': 'true'} + + +class TestEC2MetadataRoute(CiTestCase): + + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which') + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp') + def test_disable_ifconfig(self, m_subp, m_which): + """Set the route if ifconfig command is available""" + m_which.side_effect = lambda x: x if x == 'ifconfig' else None + ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None) + m_subp.assert_called_with( + ['route', 'add', '-host', '169.254.169.254', 'reject'], + capture=False) + + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which') + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp') + def test_disable_ip(self, m_subp, m_which): + """Set the route if ip command is available""" + m_which.side_effect = lambda x: x if x == 'ip' else None + ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None) + m_subp.assert_called_with( + ['ip', 'route', 'add', 'prohibit', '169.254.169.254'], + capture=False) + + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which') + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp') + def test_disable_no_tool(self, m_subp, m_which): + """Log error when neither route nor ip commands are available""" + m_which.return_value = None # Find neither ifconfig nor ip + ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None) + self.assertEqual( + [mock.call('ip'), mock.call('ifconfig')], m_which.call_args_list) + m_subp.assert_not_called() + +# vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_disk_setup.py b/tests/unittests/config/test_cc_disk_setup.py new file mode 100644 index 00000000..fa565559 --- /dev/null +++ b/tests/unittests/config/test_cc_disk_setup.py @@ -0,0 +1,243 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +import random + +from cloudinit.config import cc_disk_setup +from tests.unittests.helpers import CiTestCase, ExitStack, mock, TestCase + + +class TestIsDiskUsed(TestCase): + + def setUp(self): + super(TestIsDiskUsed, self).setUp() + self.patches = ExitStack() + mod_name = 'cloudinit.config.cc_disk_setup' + self.enumerate_disk = self.patches.enter_context( + mock.patch('{0}.enumerate_disk'.format(mod_name))) + self.check_fs = self.patches.enter_context( + mock.patch('{0}.check_fs'.format(mod_name))) + + def tearDown(self): + super(TestIsDiskUsed, self).tearDown() + self.patches.close() + + def test_multiple_child_nodes_returns_true(self): + self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(2)) + self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock()) + self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock())) + + def test_valid_filesystem_returns_true(self): + self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1)) + self.check_fs.return_value = ( + mock.MagicMock(), 'ext4', mock.MagicMock()) + self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock())) + + def test_one_child_nodes_and_no_fs_returns_false(self): + self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1)) + self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock()) + self.assertFalse(cc_disk_setup.is_disk_used(mock.MagicMock())) + + +class TestGetMbrHddSize(TestCase): + + def setUp(self): + super(TestGetMbrHddSize, self).setUp() + self.patches = ExitStack() + self.subp = self.patches.enter_context( + mock.patch.object(cc_disk_setup.subp, 'subp')) + + def tearDown(self): + super(TestGetMbrHddSize, self).tearDown() + self.patches.close() + + def _configure_subp_mock(self, hdd_size_in_bytes, sector_size_in_bytes): + def _subp(cmd, *args, **kwargs): + self.assertEqual(3, len(cmd)) + if '--getsize64' in cmd: + return hdd_size_in_bytes, None + elif '--getss' in cmd: + return sector_size_in_bytes, None + raise Exception('Unexpected blockdev command called') + + self.subp.side_effect = _subp + + def _test_for_sector_size(self, sector_size): + size_in_bytes = random.randint(10000, 10000000) * 512 + size_in_sectors = size_in_bytes / sector_size + self._configure_subp_mock(size_in_bytes, sector_size) + self.assertEqual(size_in_sectors, + cc_disk_setup.get_hdd_size('/dev/sda1')) + + def test_size_for_512_byte_sectors(self): + self._test_for_sector_size(512) + + def test_size_for_1024_byte_sectors(self): + self._test_for_sector_size(1024) + + def test_size_for_2048_byte_sectors(self): + self._test_for_sector_size(2048) + + def test_size_for_4096_byte_sectors(self): + self._test_for_sector_size(4096) + + +class TestGetPartitionMbrLayout(TestCase): + + def test_single_partition_using_boolean(self): + self.assertEqual('0,', + cc_disk_setup.get_partition_mbr_layout(1000, True)) + + def test_single_partition_using_list(self): + disk_size = random.randint(1000000, 1000000000000) + self.assertEqual( + ',,83', + cc_disk_setup.get_partition_mbr_layout(disk_size, [100])) + + def test_half_and_half(self): + disk_size = random.randint(1000000, 1000000000000) + expected_partition_size = int(float(disk_size) / 2) + self.assertEqual( + ',{0},83\n,,83'.format(expected_partition_size), + cc_disk_setup.get_partition_mbr_layout(disk_size, [50, 50])) + + def test_thirds_with_different_partition_type(self): + disk_size = random.randint(1000000, 1000000000000) + expected_partition_size = int(float(disk_size) * 0.33) + self.assertEqual( + 
',{0},83\n,,82'.format(expected_partition_size),
+            cc_disk_setup.get_partition_mbr_layout(disk_size, [33, [66, 82]]))
+
+
+class TestUpdateFsSetupDevices(TestCase):
+    def test_regression_1634678(self):
+        # Cf. https://bugs.launchpad.net/cloud-init/+bug/1634678
+        fs_setup = {
+            'partition': 'auto',
+            'device': '/dev/xvdb1',
+            'overwrite': False,
+            'label': 'test',
+            'filesystem': 'ext4'
+        }
+
+        cc_disk_setup.update_fs_setup_devices([fs_setup],
+                                              lambda device: device)
+
+        self.assertEqual({
+            '_origname': '/dev/xvdb1',
+            'partition': 'auto',
+            'device': '/dev/xvdb1',
+            'overwrite': False,
+            'label': 'test',
+            'filesystem': 'ext4'
+        }, fs_setup)
+
+    def test_dotted_devname(self):
+        fs_setup = {
+            'partition': 'auto',
+            'device': 'ephemeral0.0',
+            'label': 'test2',
+            'filesystem': 'xfs'
+        }
+
+        cc_disk_setup.update_fs_setup_devices([fs_setup],
+                                              lambda device: device)
+
+        self.assertEqual({
+            '_origname': 'ephemeral0.0',
+            '_partition': 'auto',
+            'partition': '0',
+            'device': 'ephemeral0',
+            'label': 'test2',
+            'filesystem': 'xfs'
+        }, fs_setup)
+
+    def test_dotted_devname_populates_partition(self):
+        fs_setup = {
+            'device': 'ephemeral0.1',
+            'label': 'test2',
+            'filesystem': 'xfs'
+        }
+        cc_disk_setup.update_fs_setup_devices([fs_setup],
+                                              lambda device: device)
+        self.assertEqual({
+            '_origname': 'ephemeral0.1',
+            'device': 'ephemeral0',
+            'partition': '1',
+            'label': 'test2',
+            'filesystem': 'xfs'
+        }, fs_setup)
+
+
+@mock.patch('cloudinit.config.cc_disk_setup.assert_and_settle_device',
+            return_value=None)
+@mock.patch('cloudinit.config.cc_disk_setup.find_device_node',
+            return_value=('/dev/xdb1', False))
+@mock.patch('cloudinit.config.cc_disk_setup.device_type', return_value=None)
+@mock.patch('cloudinit.config.cc_disk_setup.subp.subp', return_value=('', ''))
+class TestMkfsCommandHandling(CiTestCase):
+
+    with_logs = True
+
+    def test_with_cmd(self, subp, *args):
+        """mkfs honors cmd and logs warnings when extra_opts or overwrite are
+        provided."""
+        cc_disk_setup.mkfs({
+            'cmd': 'mkfs -t %(filesystem)s -L %(label)s %(device)s',
+            'filesystem': 'ext4',
+            'device': '/dev/xdb1',
+            'label': 'with_cmd',
+            'extra_opts': ['should', 'generate', 'warning'],
+            'overwrite': 'should generate warning too'
+        })
+
+        self.assertIn(
+            'extra_opts ' +
+            'ignored because cmd was specified: mkfs -t ext4 -L with_cmd ' +
+            '/dev/xdb1',
+            self.logs.getvalue())
+        self.assertIn(
+            'overwrite ' +
+            'ignored because cmd was specified: mkfs -t ext4 -L with_cmd ' +
+            '/dev/xdb1',
+            self.logs.getvalue())
+
+        subp.assert_called_once_with(
+            'mkfs -t ext4 -L with_cmd /dev/xdb1', shell=True)
+
+    @mock.patch('cloudinit.config.cc_disk_setup.subp.which')
+    def test_overwrite_and_extra_opts_without_cmd(self, m_which, subp, *args):
+        """mkfs observes extra_opts and overwrite settings when cmd is not
+        present."""
+        m_which.side_effect = lambda p: {'mkfs.ext4': '/sbin/mkfs.ext4'}[p]
+        cc_disk_setup.mkfs({
+            'filesystem': 'ext4',
+            'device': '/dev/xdb1',
+            'label': 'without_cmd',
+            'extra_opts': ['are', 'added'],
+            'overwrite': True
+        })
+
+        subp.assert_called_once_with(
+            ['/sbin/mkfs.ext4', '/dev/xdb1',
+             '-L', 'without_cmd', '-F', 'are', 'added'],
+            shell=False)
+
+    @mock.patch('cloudinit.config.cc_disk_setup.subp.which')
+    def test_mkswap(self, m_which, subp, *args):
+        """mkswap is used when the filesystem is swap and mkfs.swap is not
+        found in PATH."""
+        m_which.side_effect = iter([None, '/sbin/mkswap'])
+        cc_disk_setup.mkfs({
+            'filesystem': 'swap',
+            'device': '/dev/xdb1',
+            'label': 'swap',
+            'overwrite': True,
+        })
+
+
self.assertEqual([mock.call('mkfs.swap'), mock.call('mkswap')], + m_which.call_args_list) + subp.assert_called_once_with( + ['/sbin/mkswap', '/dev/xdb1', '-L', 'swap', '-f'], shell=False) + +# +# vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_final_message.py b/tests/unittests/config/test_cc_final_message.py new file mode 100644 index 00000000..46ba99b2 --- /dev/null +++ b/tests/unittests/config/test_cc_final_message.py @@ -0,0 +1,46 @@ +# This file is part of cloud-init. See LICENSE file for license information. +import logging +from unittest import mock + +import pytest + +from cloudinit.config.cc_final_message import handle + + +class TestHandle: + # TODO: Expand these tests to cover full functionality; currently they only + # cover the logic around how the boot-finished file is written (and not its + # contents). + + @pytest.mark.parametrize( + "instance_dir_exists,file_is_written,expected_log_substring", + [ + (True, True, None), + (False, False, "Failed to write boot finished file "), + ], + ) + def test_boot_finished_written( + self, + instance_dir_exists, + file_is_written, + expected_log_substring, + caplog, + tmpdir, + ): + instance_dir = tmpdir.join("var/lib/cloud/instance") + if instance_dir_exists: + instance_dir.ensure_dir() + boot_finished = instance_dir.join("boot-finished") + + m_cloud = mock.Mock( + paths=mock.Mock(boot_finished=boot_finished.strpath) + ) + + handle(None, {}, m_cloud, logging.getLogger(), []) + + # We should not change the status of the instance directory + assert instance_dir_exists == instance_dir.exists() + assert file_is_written == boot_finished.exists() + + if expected_log_substring: + assert expected_log_substring in caplog.text diff --git a/tests/unittests/config/test_cc_growpart.py b/tests/unittests/config/test_cc_growpart.py new file mode 100644 index 00000000..b007f24f --- /dev/null +++ b/tests/unittests/config/test_cc_growpart.py @@ -0,0 +1,309 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import cloud +from cloudinit.config import cc_growpart +from cloudinit import subp +from cloudinit import temp_utils + +from tests.unittests.helpers import TestCase + +import errno +import logging +import os +import shutil +import re +import unittest +from contextlib import ExitStack +from unittest import mock +import stat + +# growpart: +# mode: auto # off, on, auto, 'growpart' +# devices: ['root'] + +HELP_GROWPART_RESIZE = """ +growpart disk partition + rewrite partition table so that partition takes up all the space it can + options: + -h | --help print Usage and exit + + -u | --update R update the the kernel partition table info after growing + this requires kernel support and 'partx --update' + R is one of: + - 'auto' : [default] update partition if possible + + Example: + - growpart /dev/sda 1 + Resize partition 1 on /dev/sda +""" + +HELP_GROWPART_NO_RESIZE = """ +growpart disk partition + rewrite partition table so that partition takes up all the space it can + options: + -h | --help print Usage and exit + + Example: + - growpart /dev/sda 1 + Resize partition 1 on /dev/sda +""" + +HELP_GPART = """ +usage: gpart add -t type [-a alignment] [-b start] geom + gpart backup geom + gpart bootcode [-b bootcode] [-p partcode -i index] [-f flags] geom + + gpart resize -i index [-a alignment] [-s size] [-f flags] geom + gpart restore [-lF] [-f flags] provider [...] 
+ gpart recover [-f flags] geom + gpart help + +""" + + +class Dir: + '''Stub object''' + def __init__(self, name): + self.name = name + self.st_mode = name + + def is_dir(self, *args, **kwargs): + return True + + def stat(self, *args, **kwargs): + return self + + +class Scanner: + '''Stub object''' + def __enter__(self): + return (Dir(''), Dir(''),) + + def __exit__(self, *args): + pass + + +class TestDisabled(unittest.TestCase): + def setUp(self): + super(TestDisabled, self).setUp() + self.name = "growpart" + self.cloud_init = None + self.log = logging.getLogger("TestDisabled") + self.args = [] + + self.handle = cc_growpart.handle + + def test_mode_off(self): + # Test that nothing is done if mode is off. + + # this really only verifies that resizer_factory isn't called + config = {'growpart': {'mode': 'off'}} + + with mock.patch.object(cc_growpart, 'resizer_factory') as mockobj: + self.handle(self.name, config, self.cloud_init, self.log, + self.args) + self.assertEqual(mockobj.call_count, 0) + + +class TestConfig(TestCase): + def setUp(self): + super(TestConfig, self).setUp() + self.name = "growpart" + self.paths = None + self.cloud = cloud.Cloud(None, self.paths, None, None, None) + self.log = logging.getLogger("TestConfig") + self.args = [] + + self.cloud_init = None + self.handle = cc_growpart.handle + self.tmppath = '/tmp/cloudinit-test-file' + self.tmpdir = os.scandir('/tmp') + self.tmpfile = open(self.tmppath, 'w') + + def tearDown(self): + self.tmpfile.close() + os.remove(self.tmppath) + + @mock.patch.dict("os.environ", clear=True) + def test_no_resizers_auto_is_fine(self): + with mock.patch.object( + subp, 'subp', + return_value=(HELP_GROWPART_NO_RESIZE, "")) as mockobj: + + config = {'growpart': {'mode': 'auto'}} + self.handle(self.name, config, self.cloud_init, self.log, + self.args) + + mockobj.assert_has_calls([ + mock.call(['growpart', '--help'], env={'LANG': 'C'}), + mock.call(['gpart', 'help'], env={'LANG': 'C'}, rcs=[0, 1])]) + + @mock.patch.dict("os.environ", clear=True) + def test_no_resizers_mode_growpart_is_exception(self): + with mock.patch.object( + subp, 'subp', + return_value=(HELP_GROWPART_NO_RESIZE, "")) as mockobj: + config = {'growpart': {'mode': "growpart"}} + self.assertRaises( + ValueError, self.handle, self.name, config, + self.cloud_init, self.log, self.args) + + mockobj.assert_called_once_with( + ['growpart', '--help'], env={'LANG': 'C'}) + + @mock.patch.dict("os.environ", clear=True) + def test_mode_auto_prefers_growpart(self): + with mock.patch.object( + subp, 'subp', + return_value=(HELP_GROWPART_RESIZE, "")) as mockobj: + ret = cc_growpart.resizer_factory(mode="auto") + self.assertIsInstance(ret, cc_growpart.ResizeGrowPart) + + mockobj.assert_called_once_with( + ['growpart', '--help'], env={'LANG': 'C'}) + + @mock.patch.dict("os.environ", {'LANG': 'cs_CZ.UTF-8'}, clear=True) + @mock.patch.object(temp_utils, 'mkdtemp', return_value='/tmp/much-random') + @mock.patch.object(stat, 'S_ISDIR', return_value=False) + @mock.patch.object(os.path, 'samestat', return_value=True) + @mock.patch.object(os.path, "join", return_value='/tmp') + @mock.patch.object(os, 'scandir', return_value=Scanner()) + @mock.patch.object(os, 'mkdir') + @mock.patch.object(os, 'unlink') + @mock.patch.object(os, 'rmdir') + @mock.patch.object(os, 'open', return_value=1) + @mock.patch.object(os, 'close') + @mock.patch.object(shutil, 'rmtree') + @mock.patch.object(os, 'lseek', return_value=1024) + @mock.patch.object(os, 'lstat', return_value='interesting metadata') + def 
test_force_lang_check_tempfile(self, *args, **kwargs): + with mock.patch.object( + subp, + 'subp', + return_value=(HELP_GROWPART_RESIZE, "")) as mockobj: + + ret = cc_growpart.resizer_factory(mode="auto") + self.assertIsInstance(ret, cc_growpart.ResizeGrowPart) + diskdev = '/dev/sdb' + partnum = 1 + partdev = '/dev/sdb' + ret.resize(diskdev, partnum, partdev) + mockobj.assert_has_calls([ + mock.call( + ["growpart", '--dry-run', diskdev, partnum], + env={'LANG': 'C', 'TMPDIR': '/tmp'}), + mock.call( + ["growpart", diskdev, partnum], + env={'LANG': 'C', 'TMPDIR': '/tmp'}), + ]) + + @mock.patch.dict("os.environ", {'LANG': 'cs_CZ.UTF-8'}, clear=True) + def test_mode_auto_falls_back_to_gpart(self): + with mock.patch.object( + subp, 'subp', + return_value=("", HELP_GPART)) as mockobj: + ret = cc_growpart.resizer_factory(mode="auto") + self.assertIsInstance(ret, cc_growpart.ResizeGpart) + + mockobj.assert_has_calls([ + mock.call(['growpart', '--help'], env={'LANG': 'C'}), + mock.call(['gpart', 'help'], env={'LANG': 'C'}, rcs=[0, 1])]) + + def test_handle_with_no_growpart_entry(self): + # if no 'growpart' entry in config, then mode=auto should be used + + myresizer = object() + retval = (("/", cc_growpart.RESIZE.CHANGED, "my-message",),) + + with ExitStack() as mocks: + factory = mocks.enter_context( + mock.patch.object(cc_growpart, 'resizer_factory', + return_value=myresizer)) + rsdevs = mocks.enter_context( + mock.patch.object(cc_growpart, 'resize_devices', + return_value=retval)) + mocks.enter_context( + mock.patch.object(cc_growpart, 'RESIZERS', + (('mysizer', object),) + )) + + self.handle(self.name, {}, self.cloud_init, self.log, self.args) + + factory.assert_called_once_with('auto') + rsdevs.assert_called_once_with(myresizer, ['/']) + + +class TestResize(unittest.TestCase): + def setUp(self): + super(TestResize, self).setUp() + self.name = "growpart" + self.log = logging.getLogger("TestResize") + + def test_simple_devices(self): + # test simple device list + # this patches out devent2dev, os.stat, and device_part_info + # so in the end, doesn't test a lot + devs = ["/dev/XXda1", "/dev/YYda2"] + devstat_ret = Bunch(st_mode=25008, st_ino=6078, st_dev=5, + st_nlink=1, st_uid=0, st_gid=6, st_size=0, + st_atime=0, st_mtime=0, st_ctime=0) + enoent = ["/dev/NOENT"] + real_stat = os.stat + resize_calls = [] + + class myresizer(object): + def resize(self, diskdev, partnum, partdev): + resize_calls.append((diskdev, partnum, partdev)) + if partdev == "/dev/YYda2": + return (1024, 2048) + return (1024, 1024) # old size, new size + + def mystat(path): + if path in devs: + return devstat_ret + if path in enoent: + e = OSError("%s: does not exist" % path) + e.errno = errno.ENOENT + raise e + return real_stat(path) + + try: + opinfo = cc_growpart.device_part_info + cc_growpart.device_part_info = simple_device_part_info + os.stat = mystat + + resized = cc_growpart.resize_devices(myresizer(), devs + enoent) + + def find(name, res): + for f in res: + if f[0] == name: + return f + return None + + self.assertEqual(cc_growpart.RESIZE.NOCHANGE, + find("/dev/XXda1", resized)[1]) + self.assertEqual(cc_growpart.RESIZE.CHANGED, + find("/dev/YYda2", resized)[1]) + self.assertEqual(cc_growpart.RESIZE.SKIPPED, + find(enoent[0], resized)[1]) + # self.assertEqual(resize_calls, + # [("/dev/XXda", "1", "/dev/XXda1"), + # ("/dev/YYda", "2", "/dev/YYda2")]) + finally: + cc_growpart.device_part_info = opinfo + os.stat = real_stat + + +def simple_device_part_info(devpath): + # simple stupid return (/dev/vda, 1) for /dev/vda + 
ret = re.search("([^0-9]*)([0-9]*)$", devpath) + x = (ret.group(1), ret.group(2)) + return x + + +class Bunch(object): + def __init__(self, **kwds): + self.__dict__.update(kwds) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_grub_dpkg.py b/tests/unittests/config/test_cc_grub_dpkg.py new file mode 100644 index 00000000..99c05bb5 --- /dev/null +++ b/tests/unittests/config/test_cc_grub_dpkg.py @@ -0,0 +1,176 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import pytest + +from unittest import mock +from logging import Logger +from cloudinit.subp import ProcessExecutionError +from cloudinit.config.cc_grub_dpkg import fetch_idevs, handle + + +class TestFetchIdevs: + """Tests cc_grub_dpkg.fetch_idevs()""" + + # Note: udevadm info returns devices in a large single line string + @pytest.mark.parametrize( + "grub_output,path_exists,expected_log_call,udevadm_output" + ",expected_idevs", + [ + # Inside a container, grub not installed + ( + ProcessExecutionError(reason=FileNotFoundError()), + False, + mock.call("'grub-probe' not found in $PATH"), + '', + '', + ), + # Inside a container, grub installed + ( + ProcessExecutionError(stderr="failed to get canonical path"), + False, + mock.call("grub-probe 'failed to get canonical path'"), + '', + '', + ), + # KVM Instance + ( + ['/dev/vda'], + True, + None, + ( + '/dev/disk/by-path/pci-0000:00:00.0 ', + '/dev/disk/by-path/virtio-pci-0000:00:00.0 ' + ), + '/dev/vda', + ), + # Xen Instance + ( + ['/dev/xvda'], + True, + None, + '', + '/dev/xvda', + ), + # NVMe Hardware Instance + ( + ['/dev/nvme1n1'], + True, + None, + ( + '/dev/disk/by-id/nvme-Company_hash000 ', + '/dev/disk/by-id/nvme-nvme.000-000-000-000-000 ', + '/dev/disk/by-path/pci-0000:00:00.0-nvme-0 ' + ), + '/dev/disk/by-id/nvme-Company_hash000', + ), + # SCSI Hardware Instance + ( + ['/dev/sda'], + True, + None, + ( + '/dev/disk/by-id/company-user-1 ', + '/dev/disk/by-id/scsi-0Company_user-1 ', + '/dev/disk/by-path/pci-0000:00:00.0-scsi-0:0:0:0 ' + ), + '/dev/disk/by-id/company-user-1', + ), + ], + ) + @mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc") + @mock.patch("cloudinit.config.cc_grub_dpkg.os.path.exists") + @mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp") + def test_fetch_idevs(self, m_subp, m_exists, m_logexc, grub_output, + path_exists, expected_log_call, udevadm_output, + expected_idevs): + """Tests outputs from grub-probe and udevadm info against grub-dpkg""" + m_subp.side_effect = [ + grub_output, + ["".join(udevadm_output)] + ] + m_exists.return_value = path_exists + log = mock.Mock(spec=Logger) + idevs = fetch_idevs(log) + assert expected_idevs == idevs + if expected_log_call is not None: + assert expected_log_call in log.debug.call_args_list + + +class TestHandle: + """Tests cc_grub_dpkg.handle()""" + + @pytest.mark.parametrize( + "cfg_idevs,cfg_idevs_empty,fetch_idevs_output,expected_log_output", + [ + ( + # No configuration + None, + None, + '/dev/disk/by-id/nvme-Company_hash000', + ( + "Setting grub debconf-set-selections with ", + "'/dev/disk/by-id/nvme-Company_hash000','false'" + ), + ), + ( + # idevs set, idevs_empty unset + '/dev/sda', + None, + '/dev/sda', + ( + "Setting grub debconf-set-selections with ", + "'/dev/sda','false'" + ), + ), + ( + # idevs unset, idevs_empty set + None, + 'true', + '/dev/xvda', + ( + "Setting grub debconf-set-selections with ", + "'/dev/xvda','true'" + ), + ), + ( + # idevs set, idevs_empty set + '/dev/vda', + 'false', + '/dev/disk/by-id/company-user-1', + ( + "Setting grub 
debconf-set-selections with ", + "'/dev/vda','false'" + ), + ), + ( + # idevs set, idevs_empty set + # Respect what the user defines, even if its logically wrong + '/dev/nvme0n1', + 'true', + '', + ( + "Setting grub debconf-set-selections with ", + "'/dev/nvme0n1','true'" + ), + ) + ], + ) + @mock.patch("cloudinit.config.cc_grub_dpkg.fetch_idevs") + @mock.patch("cloudinit.config.cc_grub_dpkg.util.get_cfg_option_str") + @mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc") + @mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp") + def test_handle(self, m_subp, m_logexc, m_get_cfg_str, m_fetch_idevs, + cfg_idevs, cfg_idevs_empty, fetch_idevs_output, + expected_log_output): + """Test setting of correct debconf database entries""" + m_get_cfg_str.side_effect = [ + cfg_idevs, + cfg_idevs_empty + ] + m_fetch_idevs.return_value = fetch_idevs_output + log = mock.Mock(spec=Logger) + handle(mock.Mock(), mock.Mock(), mock.Mock(), log, mock.Mock()) + log.debug.assert_called_with("".join(expected_log_output)) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_install_hotplug.py b/tests/unittests/config/test_cc_install_hotplug.py new file mode 100644 index 00000000..5d6b1e77 --- /dev/null +++ b/tests/unittests/config/test_cc_install_hotplug.py @@ -0,0 +1,113 @@ +# This file is part of cloud-init. See LICENSE file for license information. +from collections import namedtuple +from unittest import mock + +import pytest + +from cloudinit.config.cc_install_hotplug import ( + handle, + HOTPLUG_UDEV_PATH, + HOTPLUG_UDEV_RULES_TEMPLATE, +) +from cloudinit.event import EventScope, EventType + + +@pytest.yield_fixture() +def mocks(): + m_update_enabled = mock.patch('cloudinit.stages.update_event_enabled') + m_write = mock.patch('cloudinit.util.write_file', autospec=True) + m_del = mock.patch('cloudinit.util.del_file', autospec=True) + m_subp = mock.patch('cloudinit.subp.subp') + m_which = mock.patch('cloudinit.subp.which', return_value=None) + m_path_exists = mock.patch('os.path.exists', return_value=False) + + yield namedtuple( + 'Mocks', + 'm_update_enabled m_write m_del m_subp m_which m_path_exists' + )( + m_update_enabled.start(), m_write.start(), m_del.start(), + m_subp.start(), m_which.start(), m_path_exists.start() + ) + + m_update_enabled.stop() + m_write.stop() + m_del.stop() + m_subp.stop() + m_which.stop() + m_path_exists.stop() + + +class TestInstallHotplug: + @pytest.mark.parametrize('libexec_exists', [True, False]) + def test_rules_installed_when_supported_and_enabled( + self, mocks, libexec_exists + ): + mocks.m_which.return_value = 'udevadm' + mocks.m_update_enabled.return_value = True + m_cloud = mock.MagicMock() + m_cloud.datasource.get_supported_events.return_value = { + EventScope.NETWORK: {EventType.HOTPLUG} + } + + if libexec_exists: + libexecdir = "/usr/libexec/cloud-init" + else: + libexecdir = "/usr/lib/cloud-init" + with mock.patch('os.path.exists', return_value=libexec_exists): + handle(None, {}, m_cloud, mock.Mock(), None) + mocks.m_write.assert_called_once_with( + filename=HOTPLUG_UDEV_PATH, + content=HOTPLUG_UDEV_RULES_TEMPLATE.format( + libexecdir=libexecdir), + ) + assert mocks.m_subp.call_args_list == [mock.call([ + 'udevadm', 'control', '--reload-rules', + ])] + assert mocks.m_del.call_args_list == [] + + def test_rules_not_installed_when_unsupported(self, mocks): + mocks.m_update_enabled.return_value = True + m_cloud = mock.MagicMock() + m_cloud.datasource.get_supported_events.return_value = {} + + handle(None, {}, m_cloud, mock.Mock(), None) + assert 
mocks.m_write.call_args_list == [] + assert mocks.m_del.call_args_list == [] + assert mocks.m_subp.call_args_list == [] + + def test_rules_not_installed_when_disabled(self, mocks): + mocks.m_update_enabled.return_value = False + m_cloud = mock.MagicMock() + m_cloud.datasource.get_supported_events.return_value = { + EventScope.NETWORK: {EventType.HOTPLUG} + } + + handle(None, {}, m_cloud, mock.Mock(), None) + assert mocks.m_write.call_args_list == [] + assert mocks.m_del.call_args_list == [] + assert mocks.m_subp.call_args_list == [] + + def test_rules_uninstalled_when_disabled(self, mocks): + mocks.m_path_exists.return_value = True + mocks.m_update_enabled.return_value = False + m_cloud = mock.MagicMock() + m_cloud.datasource.get_supported_events.return_value = {} + + handle(None, {}, m_cloud, mock.Mock(), None) + mocks.m_del.assert_called_with(HOTPLUG_UDEV_PATH) + assert mocks.m_subp.call_args_list == [mock.call([ + 'udevadm', 'control', '--reload-rules', + ])] + assert mocks.m_write.call_args_list == [] + + def test_rules_not_installed_when_no_udevadm(self, mocks): + mocks.m_update_enabled.return_value = True + m_cloud = mock.MagicMock() + m_cloud.datasource.get_supported_events.return_value = { + EventScope.NETWORK: {EventType.HOTPLUG} + } + + handle(None, {}, m_cloud, mock.Mock(), None) + assert mocks.m_del.call_args_list == [] + assert mocks.m_write.call_args_list == [] + assert mocks.m_subp.call_args_list == [] diff --git a/tests/unittests/config/test_cc_keys_to_console.py b/tests/unittests/config/test_cc_keys_to_console.py new file mode 100644 index 00000000..4083fc54 --- /dev/null +++ b/tests/unittests/config/test_cc_keys_to_console.py @@ -0,0 +1,34 @@ +"""Tests for cc_keys_to_console.""" +from unittest import mock + +import pytest + +from cloudinit.config import cc_keys_to_console + + +class TestHandle: + """Tests for cloudinit.config.cc_keys_to_console.handle. + + TODO: These tests only cover the emit_keys_to_console config option, they + should be expanded to cover the full functionality. + """ + + @mock.patch("cloudinit.config.cc_keys_to_console.util.multi_log") + @mock.patch("cloudinit.config.cc_keys_to_console.os.path.exists") + @mock.patch("cloudinit.config.cc_keys_to_console.subp.subp") + @pytest.mark.parametrize("cfg,subp_called", [ + ({}, True), # Default to emitting keys + ({"ssh": {}}, True), # Default even if we have the parent key + ({"ssh": {"emit_keys_to_console": True}}, True), # Explicitly enabled + ({"ssh": {"emit_keys_to_console": False}}, False), # Disabled + ]) + def test_emit_keys_to_console_config( + self, m_subp, m_path_exists, _m_multi_log, cfg, subp_called + ): + # Ensure we always find the helper + m_path_exists.return_value = True + m_subp.return_value = ("", "") + + cc_keys_to_console.handle("name", cfg, mock.Mock(), mock.Mock(), ()) + + assert subp_called == (m_subp.call_count == 1) diff --git a/tests/unittests/config/test_cc_landscape.py b/tests/unittests/config/test_cc_landscape.py new file mode 100644 index 00000000..07b3f899 --- /dev/null +++ b/tests/unittests/config/test_cc_landscape.py @@ -0,0 +1,126 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+import logging
+from configobj import ConfigObj
+
+from cloudinit.config import cc_landscape
+from cloudinit import util
+from tests.unittests.helpers import (FilesystemMockingTestCase, mock,
+                                     wrap_and_call)
+
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+
+class TestLandscape(FilesystemMockingTestCase):
+
+    with_logs = True
+
+    def setUp(self):
+        super(TestLandscape, self).setUp()
+        self.new_root = self.tmp_dir()
+        self.conf = self.tmp_path('client.conf', self.new_root)
+        self.default_file = self.tmp_path('default_landscape', self.new_root)
+        self.patchUtils(self.new_root)
+        self.add_patch(
+            'cloudinit.distros.ubuntu.Distro.install_packages',
+            'm_install_packages'
+        )
+
+    def test_handler_skips_empty_landscape_cloudconfig(self):
+        """Empty landscape cloud-config section does no work."""
+        mycloud = get_cloud('ubuntu')
+        mycloud.distro = mock.MagicMock()
+        cfg = {'landscape': {}}
+        cc_landscape.handle('notimportant', cfg, mycloud, LOG, None)
+        self.assertFalse(mycloud.distro.install_packages.called)
+
+    def test_handler_error_on_invalid_landscape_type(self):
+        """Raise an error when landscape configuration option is invalid."""
+        mycloud = get_cloud('ubuntu')
+        cfg = {'landscape': 'wrongtype'}
+        with self.assertRaises(RuntimeError) as context_manager:
+            cc_landscape.handle('notimportant', cfg, mycloud, LOG, None)
+        self.assertIn(
+            "'landscape' key existed in config, but not a dict",
+            str(context_manager.exception))
+
+    @mock.patch('cloudinit.config.cc_landscape.subp')
+    def test_handler_restarts_landscape_client(self, m_subp):
+        """handler restarts landscape-client after install."""
+        mycloud = get_cloud('ubuntu')
+        cfg = {'landscape': {'client': {}}}
+        wrap_and_call(
+            'cloudinit.config.cc_landscape',
+            {'LSC_CLIENT_CFG_FILE': {'new': self.conf}},
+            cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None)
+        self.assertEqual(
+            [mock.call(['service', 'landscape-client', 'restart'])],
+            m_subp.subp.call_args_list)
+
+    def test_handler_installs_client_and_creates_config_file(self):
+        """Write landscape client.conf and install landscape-client."""
+        mycloud = get_cloud('ubuntu')
+        cfg = {'landscape': {'client': {}}}
+        expected = {'client': {
+            'log_level': 'info',
+            'url': 'https://landscape.canonical.com/message-system',
+            'ping_url': 'http://landscape.canonical.com/ping',
+            'data_path': '/var/lib/landscape/client'}}
+        mycloud.distro = mock.MagicMock()
+        wrap_and_call(
+            'cloudinit.config.cc_landscape',
+            {'LSC_CLIENT_CFG_FILE': {'new': self.conf},
+             'LS_DEFAULT_FILE': {'new': self.default_file}},
+            cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None)
+        self.assertEqual(
+            [mock.call('landscape-client')],
+            mycloud.distro.install_packages.call_args)
+        self.assertEqual(expected, dict(ConfigObj(self.conf)))
+        self.assertIn(
+            'Wrote landscape config file to {0}'.format(self.conf),
+            self.logs.getvalue())
+        default_content = util.load_file(self.default_file)
+        self.assertEqual('RUN=1\n', default_content)
+
+    def test_handler_writes_merged_client_config_file_with_defaults(self):
+        """Merge and write options from LSC_CLIENT_CFG_FILE with defaults."""
+        # Write existing sparse client.conf file
+        util.write_file(self.conf, '[client]\ncomputer_title = My PC\n')
+        mycloud = get_cloud('ubuntu')
+        cfg = {'landscape': {'client': {}}}
+        expected = {'client': {
+            'log_level': 'info',
+            'url': 'https://landscape.canonical.com/message-system',
+            'ping_url': 'http://landscape.canonical.com/ping',
+            'data_path': '/var/lib/landscape/client',
+
'computer_title': 'My PC'}} + wrap_and_call( + 'cloudinit.config.cc_landscape', + {'LSC_CLIENT_CFG_FILE': {'new': self.conf}}, + cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None) + self.assertEqual(expected, dict(ConfigObj(self.conf))) + self.assertIn( + 'Wrote landscape config file to {0}'.format(self.conf), + self.logs.getvalue()) + + def test_handler_writes_merged_provided_cloudconfig_with_defaults(self): + """Merge and write options from cloud-config options with defaults.""" + # Write empty sparse client.conf file + util.write_file(self.conf, '') + mycloud = get_cloud('ubuntu') + cfg = {'landscape': {'client': {'computer_title': 'My PC'}}} + expected = {'client': { + 'log_level': 'info', + 'url': 'https://landscape.canonical.com/message-system', + 'ping_url': 'http://landscape.canonical.com/ping', + 'data_path': '/var/lib/landscape/client', + 'computer_title': 'My PC'}} + wrap_and_call( + 'cloudinit.config.cc_landscape', + {'LSC_CLIENT_CFG_FILE': {'new': self.conf}}, + cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None) + self.assertEqual(expected, dict(ConfigObj(self.conf))) + self.assertIn( + 'Wrote landscape config file to {0}'.format(self.conf), + self.logs.getvalue()) diff --git a/tests/unittests/config/test_cc_locale.py b/tests/unittests/config/test_cc_locale.py new file mode 100644 index 00000000..6cd95a29 --- /dev/null +++ b/tests/unittests/config/test_cc_locale.py @@ -0,0 +1,116 @@ +# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. +# +# Author: Juerg Haefliger +# +# This file is part of cloud-init. See LICENSE file for license information. +import logging +import os +import shutil +import tempfile +from io import BytesIO +from configobj import ConfigObj +from unittest import mock + +from cloudinit import util +from cloudinit.config import cc_locale +from tests.unittests import helpers as t_help + +from tests.unittests.util import get_cloud + + +LOG = logging.getLogger(__name__) + + +class TestLocale(t_help.FilesystemMockingTestCase): + + def setUp(self): + super(TestLocale, self).setUp() + self.new_root = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, self.new_root) + self.patchUtils(self.new_root) + + def test_set_locale_arch(self): + locale = 'en_GB.UTF-8' + locale_configfile = '/etc/invalid-locale-path' + cfg = { + 'locale': locale, + 'locale_configfile': locale_configfile, + } + cc = get_cloud('arch') + + with mock.patch('cloudinit.distros.arch.subp.subp') as m_subp: + with mock.patch('cloudinit.distros.arch.LOG.warning') as m_LOG: + cc_locale.handle('cc_locale', cfg, cc, LOG, []) + m_LOG.assert_called_with('Invalid locale_configfile %s, ' + 'only supported value is ' + '/etc/locale.conf', + locale_configfile) + + contents = util.load_file(cc.distro.locale_gen_fn) + self.assertIn('%s UTF-8' % locale, contents) + m_subp.assert_called_with(['localectl', + 'set-locale', + locale], capture=False) + + def test_set_locale_sles(self): + + cfg = { + 'locale': 'My.Locale', + } + cc = get_cloud('sles') + cc_locale.handle('cc_locale', cfg, cc, LOG, []) + if cc.distro.uses_systemd(): + locale_conf = cc.distro.systemd_locale_conf_fn + else: + locale_conf = cc.distro.locale_conf_fn + contents = util.load_file(locale_conf, decode=False) + n_cfg = ConfigObj(BytesIO(contents)) + if cc.distro.uses_systemd(): + self.assertEqual({'LANG': cfg['locale']}, dict(n_cfg)) + else: + self.assertEqual({'RC_LANG': cfg['locale']}, dict(n_cfg)) + + def test_set_locale_sles_default(self): + cfg = {} + cc = get_cloud('sles') + cc_locale.handle('cc_locale', cfg, cc, 
LOG, [])
+
+        if cc.distro.uses_systemd():
+            locale_conf = cc.distro.systemd_locale_conf_fn
+            keyname = 'LANG'
+        else:
+            locale_conf = cc.distro.locale_conf_fn
+            keyname = 'RC_LANG'
+
+        contents = util.load_file(locale_conf, decode=False)
+        n_cfg = ConfigObj(BytesIO(contents))
+        self.assertEqual({keyname: 'en_US.UTF-8'}, dict(n_cfg))
+
+    def test_locale_update_config_if_different_than_default(self):
+        """Test cc_locale updates the config if it differs from the default"""
+        locale_conf = os.path.join(self.new_root, "etc/default/locale")
+        util.write_file(locale_conf, 'LANG="en_US.UTF-8"\n')
+        cfg = {'locale': 'C.UTF-8'}
+        cc = get_cloud('ubuntu')
+        with mock.patch('cloudinit.distros.debian.subp.subp') as m_subp:
+            with mock.patch('cloudinit.distros.debian.LOCALE_CONF_FN',
+                            locale_conf):
+                cc_locale.handle('cc_locale', cfg, cc, LOG, [])
+                m_subp.assert_called_with(['update-locale',
+                                           '--locale-file=%s' % locale_conf,
+                                           'LANG=C.UTF-8'], capture=False)
+
+    def test_locale_rhel_defaults_en_us_utf8(self):
+        """Test cc_locale gets en_US.UTF-8 from distro get_locale fallback"""
+        cfg = {}
+        cc = get_cloud('rhel')
+        update_sysconfig = 'cloudinit.distros.rhel_util.update_sysconfig_file'
+        with mock.patch.object(cc.distro, 'uses_systemd') as m_use_sd:
+            m_use_sd.return_value = True
+            with mock.patch(update_sysconfig) as m_update_syscfg:
+                cc_locale.handle('cc_locale', cfg, cc, LOG, [])
+                m_update_syscfg.assert_called_with('/etc/locale.conf',
+                                                   {'LANG': 'en_US.UTF-8'})
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_lxd.py b/tests/unittests/config/test_cc_lxd.py
new file mode 100644
index 00000000..887987c0
--- /dev/null
+++ b/tests/unittests/config/test_cc_lxd.py
@@ -0,0 +1,222 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+from unittest import mock
+
+from cloudinit.config import cc_lxd
+from tests.unittests import helpers as t_help
+
+from tests.unittests.util import get_cloud
+
+
+class TestLxd(t_help.CiTestCase):
+
+    with_logs = True
+
+    lxd_cfg = {
+        'lxd': {
+            'init': {
+                'network_address': '0.0.0.0',
+                'storage_backend': 'zfs',
+                'storage_pool': 'poolname',
+            }
+        }
+    }
+
+    @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
+    @mock.patch("cloudinit.config.cc_lxd.subp")
+    def test_lxd_init(self, mock_subp, m_maybe_clean):
+        cc = get_cloud()
+        mock_subp.which.return_value = True
+        m_maybe_clean.return_value = None
+        cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
+        self.assertTrue(mock_subp.which.called)
+        # no bridge config, so maybe_cleanup should not be called.
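+        # the assertion below also checks call order: 'lxd waitready'
+        # must run before 'lxd init', whose flags come from lxd_cfg['init'].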
+ self.assertFalse(m_maybe_clean.called) + self.assertEqual( + [mock.call(['lxd', 'waitready', '--timeout=300']), + mock.call( + ['lxd', 'init', '--auto', '--network-address=0.0.0.0', + '--storage-backend=zfs', '--storage-pool=poolname'])], + mock_subp.subp.call_args_list) + + @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") + @mock.patch("cloudinit.config.cc_lxd.subp") + def test_lxd_install(self, mock_subp, m_maybe_clean): + cc = get_cloud() + cc.distro = mock.MagicMock() + mock_subp.which.return_value = None + cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, []) + self.assertNotIn('WARN', self.logs.getvalue()) + self.assertTrue(cc.distro.install_packages.called) + cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, []) + self.assertFalse(m_maybe_clean.called) + install_pkg = cc.distro.install_packages.call_args_list[0][0][0] + self.assertEqual(sorted(install_pkg), ['lxd', 'zfsutils-linux']) + + @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") + @mock.patch("cloudinit.config.cc_lxd.subp") + def test_no_init_does_nothing(self, mock_subp, m_maybe_clean): + cc = get_cloud() + cc.distro = mock.MagicMock() + cc_lxd.handle('cc_lxd', {'lxd': {}}, cc, self.logger, []) + self.assertFalse(cc.distro.install_packages.called) + self.assertFalse(mock_subp.subp.called) + self.assertFalse(m_maybe_clean.called) + + @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") + @mock.patch("cloudinit.config.cc_lxd.subp") + def test_no_lxd_does_nothing(self, mock_subp, m_maybe_clean): + cc = get_cloud() + cc.distro = mock.MagicMock() + cc_lxd.handle('cc_lxd', {'package_update': True}, cc, self.logger, []) + self.assertFalse(cc.distro.install_packages.called) + self.assertFalse(mock_subp.subp.called) + self.assertFalse(m_maybe_clean.called) + + def test_lxd_debconf_new_full(self): + data = {"mode": "new", + "name": "testbr0", + "ipv4_address": "10.0.8.1", + "ipv4_netmask": "24", + "ipv4_dhcp_first": "10.0.8.2", + "ipv4_dhcp_last": "10.0.8.254", + "ipv4_dhcp_leases": "250", + "ipv4_nat": "true", + "ipv6_address": "fd98:9e0:3744::1", + "ipv6_netmask": "64", + "ipv6_nat": "true", + "domain": "lxd"} + self.assertEqual( + cc_lxd.bridge_to_debconf(data), + {"lxd/setup-bridge": "true", + "lxd/bridge-name": "testbr0", + "lxd/bridge-ipv4": "true", + "lxd/bridge-ipv4-address": "10.0.8.1", + "lxd/bridge-ipv4-netmask": "24", + "lxd/bridge-ipv4-dhcp-first": "10.0.8.2", + "lxd/bridge-ipv4-dhcp-last": "10.0.8.254", + "lxd/bridge-ipv4-dhcp-leases": "250", + "lxd/bridge-ipv4-nat": "true", + "lxd/bridge-ipv6": "true", + "lxd/bridge-ipv6-address": "fd98:9e0:3744::1", + "lxd/bridge-ipv6-netmask": "64", + "lxd/bridge-ipv6-nat": "true", + "lxd/bridge-domain": "lxd"}) + + def test_lxd_debconf_new_partial(self): + data = {"mode": "new", + "ipv6_address": "fd98:9e0:3744::1", + "ipv6_netmask": "64", + "ipv6_nat": "true"} + self.assertEqual( + cc_lxd.bridge_to_debconf(data), + {"lxd/setup-bridge": "true", + "lxd/bridge-ipv6": "true", + "lxd/bridge-ipv6-address": "fd98:9e0:3744::1", + "lxd/bridge-ipv6-netmask": "64", + "lxd/bridge-ipv6-nat": "true"}) + + def test_lxd_debconf_existing(self): + data = {"mode": "existing", + "name": "testbr0"} + self.assertEqual( + cc_lxd.bridge_to_debconf(data), + {"lxd/setup-bridge": "false", + "lxd/use-existing-bridge": "true", + "lxd/bridge-name": "testbr0"}) + + def test_lxd_debconf_none(self): + data = {"mode": "none"} + self.assertEqual( + cc_lxd.bridge_to_debconf(data), + {"lxd/setup-bridge": "false", + "lxd/bridge-name": ""}) + + def test_lxd_cmd_new_full(self): 
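+        """bridge_to_cmd maps a full 'new' bridge config to two lxc
+        command argument lists: a network create and a profile attach."""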
+        data = {"mode": "new",
+                "name": "testbr0",
+                "ipv4_address": "10.0.8.1",
+                "ipv4_netmask": "24",
+                "ipv4_dhcp_first": "10.0.8.2",
+                "ipv4_dhcp_last": "10.0.8.254",
+                "ipv4_dhcp_leases": "250",
+                "ipv4_nat": "true",
+                "ipv6_address": "fd98:9e0:3744::1",
+                "ipv6_netmask": "64",
+                "ipv6_nat": "true",
+                "domain": "lxd"}
+        self.assertEqual(
+            cc_lxd.bridge_to_cmd(data),
+            (["network", "create", "testbr0",
+              "ipv4.address=10.0.8.1/24", "ipv4.nat=true",
+              "ipv4.dhcp.ranges=10.0.8.2-10.0.8.254",
+              "ipv6.address=fd98:9e0:3744::1/64",
+              "ipv6.nat=true", "dns.domain=lxd"],
+             ["network", "attach-profile",
+              "testbr0", "default", "eth0"]))
+
+    def test_lxd_cmd_new_partial(self):
+        data = {"mode": "new",
+                "ipv6_address": "fd98:9e0:3744::1",
+                "ipv6_netmask": "64",
+                "ipv6_nat": "true"}
+        self.assertEqual(
+            cc_lxd.bridge_to_cmd(data),
+            (["network", "create", "lxdbr0", "ipv4.address=none",
+              "ipv6.address=fd98:9e0:3744::1/64", "ipv6.nat=true"],
+             ["network", "attach-profile",
+              "lxdbr0", "default", "eth0"]))
+
+    def test_lxd_cmd_existing(self):
+        data = {"mode": "existing",
+                "name": "testbr0"}
+        self.assertEqual(
+            cc_lxd.bridge_to_cmd(data),
+            (None, ["network", "attach-profile",
+                    "testbr0", "default", "eth0"]))
+
+    def test_lxd_cmd_none(self):
+        data = {"mode": "none"}
+        self.assertEqual(
+            cc_lxd.bridge_to_cmd(data),
+            (None, None))
+
+
+class TestLxdMaybeCleanupDefault(t_help.CiTestCase):
+    """Test the implementation of maybe_cleanup_default."""
+
+    defnet = cc_lxd._DEFAULT_NETWORK_NAME
+
+    @mock.patch("cloudinit.config.cc_lxd._lxc")
+    def test_network_other_than_default_not_deleted(self, m_lxc):
+        """deletion or removal should only occur if bridge is default."""
+        cc_lxd.maybe_cleanup_default(
+            net_name="lxdbr1", did_init=True, create=True, attach=True)
+        m_lxc.assert_not_called()
+
+    @mock.patch("cloudinit.config.cc_lxd._lxc")
+    def test_did_init_false_does_not_delete(self, m_lxc):
+        """deletion or removal should only occur if did_init is True."""
+        cc_lxd.maybe_cleanup_default(
+            net_name=self.defnet, did_init=False, create=True, attach=True)
+        m_lxc.assert_not_called()
+
+    @mock.patch("cloudinit.config.cc_lxd._lxc")
+    def test_network_deleted_if_create_true(self, m_lxc):
+        """deletion of network should occur if create is True."""
+        cc_lxd.maybe_cleanup_default(
+            net_name=self.defnet, did_init=True, create=True, attach=False)
+        m_lxc.assert_called_with(["network", "delete", self.defnet])

+    @mock.patch("cloudinit.config.cc_lxd._lxc")
+    def test_device_removed_if_attach_true(self, m_lxc):
+        """removal of the nic from the profile should occur if attach is True."""
+        nic_name = "my_nic"
+        profile = "my_profile"
+        cc_lxd.maybe_cleanup_default(
+            net_name=self.defnet, did_init=True, create=False, attach=True,
+            profile=profile, nic_name=nic_name)
+        m_lxc.assert_called_once_with(
+            ["profile", "device", "remove", profile, nic_name])
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_mcollective.py b/tests/unittests/config/test_cc_mcollective.py
new file mode 100644
index 00000000..fff777b6
--- /dev/null
+++ b/tests/unittests/config/test_cc_mcollective.py
@@ -0,0 +1,146 @@
+# This file is part of cloud-init. See LICENSE file for license information.
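The TestConfig cases below all round-trip the written server.cfg through configobj and compare plain dicts; every value comes back as a string. A minimal sketch of that comparison pattern, using only configobj itself (the keys here are illustrative):

from io import BytesIO

from configobj import ConfigObj

raw = b"loglevel = debug\nconnector = rabbitmq\n"
parsed = ConfigObj(BytesIO(raw))

# values parse back as plain strings, so dicts compare naturally
assert dict(parsed) == {"loglevel": "debug", "connector": "rabbitmq"}
assert parsed["loglevel"] == "debug"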
+import configobj
+import logging
+import os
+import shutil
+import tempfile
+from io import BytesIO
+
+from cloudinit import (util)
+from cloudinit.config import cc_mcollective
+from tests.unittests import helpers as t_help
+
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+
+STOCK_CONFIG = """\
+main_collective = mcollective
+collectives = mcollective
+libdir = /usr/share/mcollective/plugins
+logfile = /var/log/mcollective.log
+loglevel = info
+daemonize = 1
+
+# Plugins
+securityprovider = psk
+plugin.psk = unset
+
+connector = activemq
+plugin.activemq.pool.size = 1
+plugin.activemq.pool.1.host = stomp1
+plugin.activemq.pool.1.port = 61613
+plugin.activemq.pool.1.user = mcollective
+plugin.activemq.pool.1.password = marionette
+
+# Facts
+factsource = yaml
+plugin.yaml = /etc/mcollective/facts.yaml
+"""
+
+
+class TestConfig(t_help.FilesystemMockingTestCase):
+    def setUp(self):
+        super(TestConfig, self).setUp()
+        self.tmp = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, self.tmp)
+        # "./": make os.path.join behave correctly with abs path as second arg
+        self.server_cfg = os.path.join(
+            self.tmp, "./" + cc_mcollective.SERVER_CFG)
+        self.pubcert_file = os.path.join(
+            self.tmp, "./" + cc_mcollective.PUBCERT_FILE)
+        self.pricert_file = os.path.join(
+            self.tmp, "./" + cc_mcollective.PRICERT_FILE)
+
+    def test_basic_config(self):
+        cfg = {
+            'mcollective': {
+                'conf': {
+                    'loglevel': 'debug',
+                    'connector': 'rabbitmq',
+                    'logfile': '/var/log/mcollective.log',
+                    'ttl': '4294957',
+                    'collectives': 'mcollective',
+                    'main_collective': 'mcollective',
+                    'securityprovider': 'psk',
+                    'daemonize': '1',
+                    'factsource': 'yaml',
+                    'direct_addressing': '1',
+                    'plugin.psk': 'unset',
+                    'libdir': '/usr/share/mcollective/plugins',
+                    'identity': '1',
+                },
+            },
+        }
+        expected = cfg['mcollective']['conf']
+
+        self.patchUtils(self.tmp)
+        cc_mcollective.configure(cfg['mcollective']['conf'])
+        contents = util.load_file(cc_mcollective.SERVER_CFG, decode=False)
+        contents = configobj.ConfigObj(BytesIO(contents))
+        self.assertEqual(expected, dict(contents))
+
+    def test_existing_config_is_saved(self):
+        cfg = {'loglevel': 'warn'}
+        util.write_file(self.server_cfg, STOCK_CONFIG)
+        cc_mcollective.configure(config=cfg, server_cfg=self.server_cfg)
+        self.assertTrue(os.path.exists(self.server_cfg))
+        self.assertTrue(os.path.exists(self.server_cfg + ".old"))
+        self.assertEqual(util.load_file(self.server_cfg + ".old"),
+                         STOCK_CONFIG)
+
+    def test_existing_updated(self):
+        cfg = {'loglevel': 'warn'}
+        util.write_file(self.server_cfg, STOCK_CONFIG)
+        cc_mcollective.configure(config=cfg, server_cfg=self.server_cfg)
+        cfgobj = configobj.ConfigObj(self.server_cfg)
+        self.assertEqual(cfg['loglevel'], cfgobj['loglevel'])
+
+    def test_certificates_written(self):
+        # check public-cert and private-cert keys in config get written
+        cfg = {'loglevel': 'debug',
+               'public-cert': "this is my public-certificate",
+               'private-cert': "secret private certificate"}
+
+        cc_mcollective.configure(
+            config=cfg, server_cfg=self.server_cfg,
+            pricert_file=self.pricert_file, pubcert_file=self.pubcert_file)
+
+        found = configobj.ConfigObj(self.server_cfg)
+
+        # make sure these didn't get written in
+        self.assertFalse('public-cert' in found)
+        self.assertFalse('private-cert' in found)
+
+        # these need updating to the specified paths
+        self.assertEqual(found['plugin.ssl_server_public'], self.pubcert_file)
+        self.assertEqual(found['plugin.ssl_server_private'], self.pricert_file)
+
+        # and the security
provider should be ssl + self.assertEqual(found['securityprovider'], 'ssl') + + self.assertEqual( + util.load_file(self.pricert_file), cfg['private-cert']) + self.assertEqual( + util.load_file(self.pubcert_file), cfg['public-cert']) + + +class TestHandler(t_help.TestCase): + @t_help.mock.patch("cloudinit.config.cc_mcollective.subp") + @t_help.mock.patch("cloudinit.config.cc_mcollective.util") + def test_mcollective_install(self, mock_util, mock_subp): + cc = get_cloud() + cc.distro = t_help.mock.MagicMock() + mock_util.load_file.return_value = b"" + mycfg = {'mcollective': {'conf': {'loglevel': 'debug'}}} + cc_mcollective.handle('cc_mcollective', mycfg, cc, LOG, []) + self.assertTrue(cc.distro.install_packages.called) + install_pkg = cc.distro.install_packages.call_args_list[0][0][0] + self.assertEqual(install_pkg, ('mcollective',)) + + self.assertTrue(mock_subp.subp.called) + self.assertEqual(mock_subp.subp.call_args_list[0][0][0], + ['service', 'mcollective', 'restart']) + +# vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_mounts.py b/tests/unittests/config/test_cc_mounts.py new file mode 100644 index 00000000..fc65f108 --- /dev/null +++ b/tests/unittests/config/test_cc_mounts.py @@ -0,0 +1,461 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import pytest +import os.path +from unittest import mock + +from tests.unittests import helpers as test_helpers +from cloudinit.config import cc_mounts +from cloudinit.config.cc_mounts import create_swapfile +from cloudinit.subp import ProcessExecutionError + +M_PATH = 'cloudinit.config.cc_mounts.' + + +class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase): + + def setUp(self): + super(TestSanitizeDevname, self).setUp() + self.new_root = self.tmp_dir() + self.patchOS(self.new_root) + + def _touch(self, path): + path = os.path.join(self.new_root, path.lstrip('/')) + basedir = os.path.dirname(path) + if not os.path.exists(basedir): + os.makedirs(basedir) + open(path, 'a').close() + + def _makedirs(self, directory): + directory = os.path.join(self.new_root, directory.lstrip('/')) + if not os.path.exists(directory): + os.makedirs(directory) + + def mock_existence_of_disk(self, disk_path): + self._touch(disk_path) + self._makedirs(os.path.join('/sys/block', disk_path.split('/')[-1])) + + def mock_existence_of_partition(self, disk_path, partition_number): + self.mock_existence_of_disk(disk_path) + self._touch(disk_path + str(partition_number)) + disk_name = disk_path.split('/')[-1] + self._makedirs(os.path.join('/sys/block', + disk_name, + disk_name + str(partition_number))) + + def test_existent_full_disk_path_is_returned(self): + disk_path = '/dev/sda' + self.mock_existence_of_disk(disk_path) + self.assertEqual(disk_path, + cc_mounts.sanitize_devname(disk_path, + lambda x: None, + mock.Mock())) + + def test_existent_disk_name_returns_full_path(self): + disk_name = 'sda' + disk_path = '/dev/' + disk_name + self.mock_existence_of_disk(disk_path) + self.assertEqual(disk_path, + cc_mounts.sanitize_devname(disk_name, + lambda x: None, + mock.Mock())) + + def test_existent_meta_disk_is_returned(self): + actual_disk_path = '/dev/sda' + self.mock_existence_of_disk(actual_disk_path) + self.assertEqual( + actual_disk_path, + cc_mounts.sanitize_devname('ephemeral0', + lambda x: actual_disk_path, + mock.Mock())) + + def test_existent_meta_partition_is_returned(self): + disk_name, partition_part = '/dev/sda', '1' + actual_partition_path = disk_name + partition_part + 
self.mock_existence_of_partition(disk_name, partition_part) + self.assertEqual( + actual_partition_path, + cc_mounts.sanitize_devname('ephemeral0.1', + lambda x: disk_name, + mock.Mock())) + + def test_existent_meta_partition_with_p_is_returned(self): + disk_name, partition_part = '/dev/sda', 'p1' + actual_partition_path = disk_name + partition_part + self.mock_existence_of_partition(disk_name, partition_part) + self.assertEqual( + actual_partition_path, + cc_mounts.sanitize_devname('ephemeral0.1', + lambda x: disk_name, + mock.Mock())) + + def test_first_partition_returned_if_existent_disk_is_partitioned(self): + disk_name, partition_part = '/dev/sda', '1' + actual_partition_path = disk_name + partition_part + self.mock_existence_of_partition(disk_name, partition_part) + self.assertEqual( + actual_partition_path, + cc_mounts.sanitize_devname('ephemeral0', + lambda x: disk_name, + mock.Mock())) + + def test_nth_partition_returned_if_requested(self): + disk_name, partition_part = '/dev/sda', '3' + actual_partition_path = disk_name + partition_part + self.mock_existence_of_partition(disk_name, partition_part) + self.assertEqual( + actual_partition_path, + cc_mounts.sanitize_devname('ephemeral0.3', + lambda x: disk_name, + mock.Mock())) + + def test_transformer_returning_none_returns_none(self): + self.assertIsNone( + cc_mounts.sanitize_devname( + 'ephemeral0', lambda x: None, mock.Mock())) + + def test_missing_device_returns_none(self): + self.assertIsNone( + cc_mounts.sanitize_devname('/dev/sda', None, mock.Mock())) + + def test_missing_sys_returns_none(self): + disk_path = '/dev/sda' + self._makedirs(disk_path) + self.assertIsNone( + cc_mounts.sanitize_devname(disk_path, None, mock.Mock())) + + def test_existent_disk_but_missing_partition_returns_none(self): + disk_path = '/dev/sda' + self.mock_existence_of_disk(disk_path) + self.assertIsNone( + cc_mounts.sanitize_devname( + 'ephemeral0.1', lambda x: disk_path, mock.Mock())) + + def test_network_device_returns_network_device(self): + disk_path = 'netdevice:/path' + self.assertEqual( + disk_path, + cc_mounts.sanitize_devname(disk_path, None, mock.Mock())) + + def test_device_aliases_remapping(self): + disk_path = '/dev/sda' + self.mock_existence_of_disk(disk_path) + self.assertEqual(disk_path, + cc_mounts.sanitize_devname('mydata', + lambda x: None, + mock.Mock(), + {'mydata': disk_path})) + + +class TestSwapFileCreation(test_helpers.FilesystemMockingTestCase): + + def setUp(self): + super(TestSwapFileCreation, self).setUp() + self.new_root = self.tmp_dir() + self.patchOS(self.new_root) + + self.fstab_path = os.path.join(self.new_root, 'etc/fstab') + self.swap_path = os.path.join(self.new_root, 'swap.img') + self._makedirs('/etc') + + self.add_patch('cloudinit.config.cc_mounts.FSTAB_PATH', + 'mock_fstab_path', + self.fstab_path, + autospec=False) + + self.add_patch('cloudinit.config.cc_mounts.subp.subp', + 'm_subp_subp') + + self.add_patch('cloudinit.config.cc_mounts.util.mounts', + 'mock_util_mounts', + return_value={ + '/dev/sda1': {'fstype': 'ext4', + 'mountpoint': '/', + 'opts': 'rw,relatime,discard' + }}) + + self.mock_cloud = mock.Mock() + self.mock_log = mock.Mock() + self.mock_cloud.device_name_to_device = self.device_name_to_device + + self.cc = { + 'swap': { + 'filename': self.swap_path, + 'size': '512', + 'maxsize': '512'}} + + def _makedirs(self, directory): + directory = os.path.join(self.new_root, directory.lstrip('/')) + if not os.path.exists(directory): + os.makedirs(directory) + + def device_name_to_device(self, path): + if 
path == 'swap': + return self.swap_path + else: + dev = None + + return dev + + @mock.patch('cloudinit.util.get_mount_info') + @mock.patch('cloudinit.util.kernel_version') + def test_swap_creation_method_fallocate_on_xfs(self, m_kernel_version, + m_get_mount_info): + m_kernel_version.return_value = (4, 20) + m_get_mount_info.return_value = ["", "xfs"] + + cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, []) + self.m_subp_subp.assert_has_calls([ + mock.call(['fallocate', '-l', '0M', self.swap_path], capture=True), + mock.call(['mkswap', self.swap_path]), + mock.call(['swapon', '-a'])]) + + @mock.patch('cloudinit.util.get_mount_info') + @mock.patch('cloudinit.util.kernel_version') + def test_swap_creation_method_xfs(self, m_kernel_version, + m_get_mount_info): + m_kernel_version.return_value = (3, 18) + m_get_mount_info.return_value = ["", "xfs"] + + cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, []) + self.m_subp_subp.assert_has_calls([ + mock.call(['dd', 'if=/dev/zero', + 'of=' + self.swap_path, + 'bs=1M', 'count=0'], capture=True), + mock.call(['mkswap', self.swap_path]), + mock.call(['swapon', '-a'])]) + + @mock.patch('cloudinit.util.get_mount_info') + @mock.patch('cloudinit.util.kernel_version') + def test_swap_creation_method_btrfs(self, m_kernel_version, + m_get_mount_info): + m_kernel_version.return_value = (4, 20) + m_get_mount_info.return_value = ["", "btrfs"] + + cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, []) + self.m_subp_subp.assert_has_calls([ + mock.call(['dd', 'if=/dev/zero', + 'of=' + self.swap_path, + 'bs=1M', 'count=0'], capture=True), + mock.call(['mkswap', self.swap_path]), + mock.call(['swapon', '-a'])]) + + @mock.patch('cloudinit.util.get_mount_info') + @mock.patch('cloudinit.util.kernel_version') + def test_swap_creation_method_ext4(self, m_kernel_version, + m_get_mount_info): + m_kernel_version.return_value = (5, 14) + m_get_mount_info.return_value = ["", "ext4"] + + cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, []) + self.m_subp_subp.assert_has_calls([ + mock.call(['fallocate', '-l', '0M', self.swap_path], capture=True), + mock.call(['mkswap', self.swap_path]), + mock.call(['swapon', '-a'])]) + + +class TestFstabHandling(test_helpers.FilesystemMockingTestCase): + + swap_path = '/dev/sdb1' + + def setUp(self): + super(TestFstabHandling, self).setUp() + self.new_root = self.tmp_dir() + self.patchOS(self.new_root) + + self.fstab_path = os.path.join(self.new_root, 'etc/fstab') + self._makedirs('/etc') + + self.add_patch('cloudinit.config.cc_mounts.FSTAB_PATH', + 'mock_fstab_path', + self.fstab_path, + autospec=False) + + self.add_patch('cloudinit.config.cc_mounts._is_block_device', + 'mock_is_block_device', + return_value=True) + + self.add_patch('cloudinit.config.cc_mounts.subp.subp', + 'm_subp_subp') + + self.add_patch('cloudinit.config.cc_mounts.util.mounts', + 'mock_util_mounts', + return_value={ + '/dev/sda1': {'fstype': 'ext4', + 'mountpoint': '/', + 'opts': 'rw,relatime,discard' + }}) + + self.mock_cloud = mock.Mock() + self.mock_log = mock.Mock() + self.mock_cloud.device_name_to_device = self.device_name_to_device + + def _makedirs(self, directory): + directory = os.path.join(self.new_root, directory.lstrip('/')) + if not os.path.exists(directory): + os.makedirs(directory) + + def device_name_to_device(self, path): + if path == 'swap': + return self.swap_path + else: + dev = None + + return dev + + def test_no_fstab(self): + """ Handle images which do not include an fstab. 
""" + self.assertFalse(os.path.exists(cc_mounts.FSTAB_PATH)) + fstab_expected_content = ( + '%s\tnone\tswap\tsw,comment=cloudconfig\t' + '0\t0\n' % (self.swap_path,) + ) + cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, []) + with open(cc_mounts.FSTAB_PATH, 'r') as fd: + fstab_new_content = fd.read() + self.assertEqual(fstab_expected_content, fstab_new_content) + + def test_swap_integrity(self): + '''Ensure that the swap file is correctly created and can + swapon successfully. Fixing the corner case of: + kernel: swapon: swapfile has holes''' + + fstab = '/swap.img swap swap defaults 0 0\n' + + with open(cc_mounts.FSTAB_PATH, 'w') as fd: + fd.write(fstab) + cc = {'swap': ['filename: /swap.img', 'size: 512', 'maxsize: 512']} + cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, []) + + def test_fstab_no_swap_device(self): + '''Ensure that cloud-init adds a discovered swap partition + to /etc/fstab.''' + + fstab_original_content = '' + fstab_expected_content = ( + '%s\tnone\tswap\tsw,comment=cloudconfig\t' + '0\t0\n' % (self.swap_path,) + ) + + with open(cc_mounts.FSTAB_PATH, 'w') as fd: + fd.write(fstab_original_content) + + cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, []) + + with open(cc_mounts.FSTAB_PATH, 'r') as fd: + fstab_new_content = fd.read() + self.assertEqual(fstab_expected_content, fstab_new_content) + + def test_fstab_same_swap_device_already_configured(self): + '''Ensure that cloud-init will not add a swap device if the same + device already exists in /etc/fstab.''' + + fstab_original_content = '%s swap swap defaults 0 0\n' % ( + self.swap_path,) + fstab_expected_content = fstab_original_content + + with open(cc_mounts.FSTAB_PATH, 'w') as fd: + fd.write(fstab_original_content) + + cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, []) + + with open(cc_mounts.FSTAB_PATH, 'r') as fd: + fstab_new_content = fd.read() + self.assertEqual(fstab_expected_content, fstab_new_content) + + def test_fstab_alternate_swap_device_already_configured(self): + '''Ensure that cloud-init will add a discovered swap device to + /etc/fstab even when there exists a swap definition on another + device.''' + + fstab_original_content = '/dev/sdc1 swap swap defaults 0 0\n' + fstab_expected_content = ( + fstab_original_content + + '%s\tnone\tswap\tsw,comment=cloudconfig\t' + '0\t0\n' % (self.swap_path,) + ) + + with open(cc_mounts.FSTAB_PATH, 'w') as fd: + fd.write(fstab_original_content) + + cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, []) + + with open(cc_mounts.FSTAB_PATH, 'r') as fd: + fstab_new_content = fd.read() + self.assertEqual(fstab_expected_content, fstab_new_content) + + def test_no_change_fstab_sets_needs_mount_all(self): + '''verify unchanged fstab entries are mounted if not call mount -a''' + fstab_original_content = ( + 'LABEL=cloudimg-rootfs / ext4 defaults 0 0\n' + 'LABEL=UEFI /boot/efi vfat defaults 0 0\n' + '/dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2\n' + ) + fstab_expected_content = fstab_original_content + cc = { + 'mounts': [ + ['/dev/vdb', '/mnt', 'auto', 'defaults,noexec'] + ] + } + with open(cc_mounts.FSTAB_PATH, 'w') as fd: + fd.write(fstab_original_content) + with open(cc_mounts.FSTAB_PATH, 'r') as fd: + fstab_new_content = fd.read() + self.assertEqual(fstab_expected_content, fstab_new_content) + cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, []) + self.m_subp_subp.assert_has_calls([ + mock.call(['mount', '-a']), + mock.call(['systemctl', 'daemon-reload'])]) + + +class TestCreateSwapfile: + + 
@pytest.mark.parametrize('fstype', ('xfs', 'btrfs', 'ext4', 'other')) + @mock.patch(M_PATH + 'util.get_mount_info') + @mock.patch(M_PATH + 'subp.subp') + def test_happy_path(self, m_subp, m_get_mount_info, fstype, tmpdir): + swap_file = tmpdir.join("swap-file") + fname = str(swap_file) + + # Some of the calls to subp.subp should create the swap file; this + # roughly approximates that + m_subp.side_effect = lambda *args, **kwargs: swap_file.write('') + + m_get_mount_info.return_value = (mock.ANY, fstype) + + create_swapfile(fname, '') + assert mock.call(['mkswap', fname]) in m_subp.call_args_list + + @mock.patch(M_PATH + "util.get_mount_info") + @mock.patch(M_PATH + "subp.subp") + def test_fallback_from_fallocate_to_dd( + self, m_subp, m_get_mount_info, caplog, tmpdir + ): + swap_file = tmpdir.join("swap-file") + fname = str(swap_file) + + def subp_side_effect(cmd, *args, **kwargs): + # Mock fallocate failing, to initiate fallback + if cmd[0] == "fallocate": + raise ProcessExecutionError() + + m_subp.side_effect = subp_side_effect + # Use ext4 so both fallocate and dd are valid swap creation methods + m_get_mount_info.return_value = (mock.ANY, "ext4") + + create_swapfile(fname, "") + + cmds = [args[0][0] for args, _kwargs in m_subp.call_args_list] + assert "fallocate" in cmds, "fallocate was not called" + assert "dd" in cmds, "fallocate failure did not fallback to dd" + + assert cmds.index("dd") > cmds.index( + "fallocate" + ), "dd ran before fallocate" + + assert mock.call(["mkswap", fname]) in m_subp.call_args_list + + msg = "fallocate swap creation failed, will attempt with dd" + assert msg in caplog.text + +# vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_ntp.py b/tests/unittests/config/test_cc_ntp.py new file mode 100644 index 00000000..3426533a --- /dev/null +++ b/tests/unittests/config/test_cc_ntp.py @@ -0,0 +1,765 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
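A note on the template fixtures below: expected strings such as "pools ['10.0.0.1', '10.0.0.2']\n" fall out of jinja substituting a Python list via its repr(). A minimal sketch with plain jinja2 (cloud-init's own template loader, which strips the '## template: jinja' header line, is left out of this sketch):

from jinja2 import Template

tmpl = Template("servers {{servers}}\npools {{pools}}\n")
rendered = tmpl.render(servers=[], pools=["10.0.0.1", "10.0.0.2"])

# lists are substituted via repr(), which is what the assertions expect
assert rendered == "servers []\npools ['10.0.0.1', '10.0.0.2']\n"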
+import copy +import os +import shutil +from functools import partial +from os.path import dirname + +from cloudinit import (helpers, util) +from cloudinit.config import cc_ntp +from tests.unittests.helpers import ( + CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema) + +from tests.unittests.util import get_cloud + + +NTP_TEMPLATE = """\ +## template: jinja +servers {{servers}} +pools {{pools}} +""" + +TIMESYNCD_TEMPLATE = """\ +## template:jinja +[Time] +{% if servers or pools -%} +NTP={% for host in servers|list + pools|list %}{{ host }} {% endfor -%} +{% endif -%} +""" + + +class TestNtp(FilesystemMockingTestCase): + + with_logs = True + + def setUp(self): + super(TestNtp, self).setUp() + self.new_root = self.tmp_dir() + self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy') + self.m_snappy.return_value = False + self.new_root = self.reRoot() + self._get_cloud = partial( + get_cloud, + paths=helpers.Paths({'templates_dir': self.new_root}) + ) + + def _get_template_path(self, template_name, distro, basepath=None): + # ntp.conf.{distro} -> ntp.conf.debian.tmpl + template_fn = '{0}.tmpl'.format( + template_name.replace('{distro}', distro)) + if not basepath: + basepath = self.new_root + path = os.path.join(basepath, template_fn) + return path + + def _generate_template(self, template=None): + if not template: + template = NTP_TEMPLATE + confpath = os.path.join(self.new_root, 'client.conf') + template_fn = os.path.join(self.new_root, 'client.conf.tmpl') + util.write_file(template_fn, content=template) + return (confpath, template_fn) + + def _mock_ntp_client_config(self, client=None, distro=None): + if not client: + client = 'ntp' + if not distro: + distro = 'ubuntu' + dcfg = cc_ntp.distro_ntp_client_configs(distro) + if client == 'systemd-timesyncd': + template = TIMESYNCD_TEMPLATE + else: + template = NTP_TEMPLATE + (confpath, _template_fn) = self._generate_template(template=template) + ntpconfig = copy.deepcopy(dcfg[client]) + ntpconfig['confpath'] = confpath + ntpconfig['template_name'] = os.path.basename(confpath) + return ntpconfig + + @mock.patch("cloudinit.config.cc_ntp.subp") + def test_ntp_install(self, mock_subp): + """ntp_install_client runs install_func when check_exe is absent.""" + mock_subp.which.return_value = None # check_exe not found. + install_func = mock.MagicMock() + cc_ntp.install_ntp_client(install_func, + packages=['ntpx'], check_exe='ntpdx') + mock_subp.which.assert_called_with('ntpdx') + install_func.assert_called_once_with(['ntpx']) + + @mock.patch("cloudinit.config.cc_ntp.subp") + def test_ntp_install_not_needed(self, mock_subp): + """ntp_install_client doesn't install when check_exe is found.""" + client = 'chrony' + mock_subp.which.return_value = [client] # check_exe found. 
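+        # which() normally returns a path string; any truthy value is
+        # enough here to signal the client is already installed.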
+        install_func = mock.MagicMock()
+        cc_ntp.install_ntp_client(install_func, packages=[client],
+                                  check_exe=client)
+        install_func.assert_not_called()
+
+    @mock.patch("cloudinit.config.cc_ntp.subp")
+    def test_ntp_install_no_op_with_empty_pkg_list(self, mock_subp):
+        """ntp_install_client runs install_func with an empty package list"""
+        mock_subp.which.return_value = None  # check_exe not found
+        install_func = mock.MagicMock()
+        cc_ntp.install_ntp_client(install_func, packages=[],
+                                  check_exe='timesyncd')
+        install_func.assert_called_once_with([])
+
+    def test_ntp_rename_ntp_conf(self):
+        """When NTP_CONF exists, rename_ntp moves it."""
+        ntpconf = self.tmp_path("ntp.conf", self.new_root)
+        util.write_file(ntpconf, "")
+        cc_ntp.rename_ntp_conf(confpath=ntpconf)
+        self.assertFalse(os.path.exists(ntpconf))
+        self.assertTrue(os.path.exists("{0}.dist".format(ntpconf)))
+
+    def test_ntp_rename_ntp_conf_skip_missing(self):
+        """When NTP_CONF doesn't exist rename_ntp doesn't create a file."""
+        ntpconf = self.tmp_path("ntp.conf", self.new_root)
+        self.assertFalse(os.path.exists(ntpconf))
+        cc_ntp.rename_ntp_conf(confpath=ntpconf)
+        self.assertFalse(os.path.exists("{0}.dist".format(ntpconf)))
+        self.assertFalse(os.path.exists(ntpconf))
+
+    def test_write_ntp_config_template_uses_ntp_conf_distro_no_servers(self):
+        """write_ntp_config_template reads from $client.conf.distro.tmpl"""
+        servers = []
+        pools = ['10.0.0.1', '10.0.0.2']
+        (confpath, template_fn) = self._generate_template()
+        mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
+        with mock.patch(mock_path, self.new_root):
+            cc_ntp.write_ntp_config_template('ubuntu',
+                                             servers=servers, pools=pools,
+                                             path=confpath,
+                                             template_fn=template_fn,
+                                             template=None)
+        self.assertEqual(
+            "servers []\npools ['10.0.0.1', '10.0.0.2']\n",
+            util.load_file(confpath))
+
+    def test_write_ntp_config_template_defaults_pools_w_empty_lists(self):
+        """write_ntp_config_template defaults pools and servers upon empty
+        config.
+
+        When both pools and servers are empty, the default NR_POOL_SERVERS
+        pool names get configured.
+        """
+        distro = 'ubuntu'
+        pools = cc_ntp.generate_server_names(distro)
+        servers = []
+        (confpath, template_fn) = self._generate_template()
+        mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
+        with mock.patch(mock_path, self.new_root):
+            cc_ntp.write_ntp_config_template(distro,
+                                             servers=servers, pools=pools,
+                                             path=confpath,
+                                             template_fn=template_fn,
+                                             template=None)
+        self.assertEqual(
+            "servers []\npools {0}\n".format(pools),
+            util.load_file(confpath))
+
+    def test_defaults_pools_empty_lists_sles(self):
+        """write_ntp_config_template defaults opensuse pools upon empty config.
+
+        When both pools and servers are empty, the default NR_POOL_SERVERS
+        pool names get configured.
+        """
+        distro = 'sles'
+        default_pools = cc_ntp.generate_server_names(distro)
+        (confpath, template_fn) = self._generate_template()
+
+        cc_ntp.write_ntp_config_template(distro,
+                                         servers=[], pools=[],
+                                         path=confpath,
+                                         template_fn=template_fn,
+                                         template=None)
+        for pool in default_pools:
+            self.assertIn('opensuse', pool)
+        self.assertEqual(
+            "servers []\npools {0}\n".format(default_pools),
+            util.load_file(confpath))
+        self.assertIn(
+            "Adding distro default ntp pool servers: {0}".format(
+                ",".join(default_pools)),
+            self.logs.getvalue())
+
+    def test_timesyncd_template(self):
+        """Test the timesyncd template is correct"""
+        pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org']
+        servers = ['192.168.23.3', '192.168.23.4']
+        (confpath, template_fn) = self._generate_template(
+            template=TIMESYNCD_TEMPLATE)
+        cc_ntp.write_ntp_config_template('ubuntu',
+                                         servers=servers, pools=pools,
+                                         path=confpath,
+                                         template_fn=template_fn,
+                                         template=None)
+        self.assertEqual(
+            "[Time]\nNTP=%s %s \n" % (" ".join(servers), " ".join(pools)),
+            util.load_file(confpath))
+
+    def test_distro_ntp_client_configs(self):
+        """Test we have updated ntp client configs on different distros"""
+        delta = copy.deepcopy(cc_ntp.DISTRO_CLIENT_CONFIG)
+        base = copy.deepcopy(cc_ntp.NTP_CLIENT_CONFIG)
+        # confirm no-delta distros match the base config
+        for distro in cc_ntp.distros:
+            if distro not in delta:
+                result = cc_ntp.distro_ntp_client_configs(distro)
+                self.assertEqual(base, result)
+        # for distros with delta, ensure the merged config values match
+        # what is set in the delta
+        for distro in delta.keys():
+            result = cc_ntp.distro_ntp_client_configs(distro)
+            for client in delta[distro].keys():
+                for key in delta[distro][client].keys():
+                    self.assertEqual(delta[distro][client][key],
+                                     result[client][key])
+
+    def _get_expected_pools(self, pools, distro, client):
+        if client in ['ntp', 'chrony']:
+            if client == 'ntp' and distro == 'alpine':
+                # NTP for Alpine Linux is Busybox's ntp which does not
+                # support 'pool' lines in its configuration file.
+                expected_pools = []
+            else:
+                expected_pools = [
+                    'pool {0} iburst'.format(pool) for pool in pools]
+        elif client == 'systemd-timesyncd':
+            expected_pools = " ".join(pools)
+
+        return expected_pools
+
+    def _get_expected_servers(self, servers, distro, client):
+        if client in ['ntp', 'chrony']:
+            if client == 'ntp' and distro == 'alpine':
+                # NTP for Alpine Linux is Busybox's ntp which only supports
+                # 'server' lines without iburst option.
+ expected_servers = [ + 'server {0}'.format(srv) for srv in servers] + else: + expected_servers = [ + 'server {0} iburst'.format(srv) for srv in servers] + elif client == 'systemd-timesyncd': + expected_servers = " ".join(servers) + + return expected_servers + + def test_ntp_handler_real_distro_ntp_templates(self): + """Test ntp handler renders the shipped distro ntp client templates.""" + pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org'] + servers = ['192.168.23.3', '192.168.23.4'] + for client in ['ntp', 'systemd-timesyncd', 'chrony']: + for distro in cc_ntp.distros: + distro_cfg = cc_ntp.distro_ntp_client_configs(distro) + ntpclient = distro_cfg[client] + confpath = ( + os.path.join(self.new_root, ntpclient.get('confpath')[1:])) + template = ntpclient.get('template_name') + # find sourcetree template file + root_dir = ( + dirname(dirname(os.path.realpath(util.__file__))) + + '/templates') + source_fn = self._get_template_path(template, distro, + basepath=root_dir) + template_fn = self._get_template_path(template, distro) + # don't fail if cloud-init doesn't have a template for + # a distro,client pair + if not os.path.exists(source_fn): + continue + # Create a copy in our tmp_dir + shutil.copy(source_fn, template_fn) + cc_ntp.write_ntp_config_template(distro, servers=servers, + pools=pools, path=confpath, + template_fn=template_fn) + content = util.load_file(confpath) + if client in ['ntp', 'chrony']: + content_lines = content.splitlines() + expected_servers = self._get_expected_servers(servers, + distro, + client) + print('distro=%s client=%s' % (distro, client)) + for sline in expected_servers: + self.assertIn(sline, content_lines, + ('failed to render {0} conf' + ' for distro:{1}'.format(client, + distro))) + expected_pools = self._get_expected_pools(pools, distro, + client) + if expected_pools != []: + for pline in expected_pools: + self.assertIn(pline, content_lines, + ('failed to render {0} conf' + ' for distro:{1}'.format(client, + distro))) + elif client == 'systemd-timesyncd': + expected_servers = self._get_expected_servers(servers, + distro, + client) + expected_pools = self._get_expected_pools(pools, + distro, + client) + expected_content = ( + "# cloud-init generated file\n" + + "# See timesyncd.conf(5) for details.\n\n" + + "[Time]\nNTP=%s %s \n" % (expected_servers, + expected_pools)) + self.assertEqual(expected_content, content) + + def test_no_ntpcfg_does_nothing(self): + """When no ntp section is defined handler logs a warning and noops.""" + cc_ntp.handle('cc_ntp', {}, None, None, []) + self.assertEqual( + 'DEBUG: Skipping module named cc_ntp, ' + 'not present or disabled by cfg\n', + self.logs.getvalue()) + + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_schema_validation_allows_empty_ntp_config(self, + m_select): + """Ntp schema validation allows for an empty ntp: configuration.""" + valid_empty_configs = [{'ntp': {}}, {'ntp': None}] + for valid_empty_config in valid_empty_configs: + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro) + confpath = ntpconfig['confpath'] + m_select.return_value = ntpconfig + cc_ntp.handle('cc_ntp', valid_empty_config, mycloud, None, []) + if distro == 'alpine': + # _mock_ntp_client_config call above did not specify a + # client value and so it defaults to "ntp" which on + # Alpine Linux only supports servers and not pools. 
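+                    # generate_server_names() supplies the distro's default
+                    # pool host names asserted against below.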
+
+                    servers = cc_ntp.generate_server_names(mycloud.distro.name)
+                    self.assertEqual(
+                        "servers {0}\npools []\n".format(servers),
+                        util.load_file(confpath))
+                else:
+                    pools = cc_ntp.generate_server_names(mycloud.distro.name)
+                    self.assertEqual(
+                        "servers []\npools {0}\n".format(pools),
+                        util.load_file(confpath))
+                self.assertNotIn('Invalid config:', self.logs.getvalue())
+
+    @skipUnlessJsonSchema()
+    @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+    def test_ntp_handler_schema_validation_warns_non_string_item_type(self,
+                                                                      m_sel):
+        """Ntp schema validation warns of non-strings in pools or servers.
+
+        Schema validation is not strict, so ntp config is still rendered.
+        """
+        invalid_config = {'ntp': {'pools': [123], 'servers': ['valid', None]}}
+        for distro in cc_ntp.distros:
+            mycloud = self._get_cloud(distro)
+            ntpconfig = self._mock_ntp_client_config(distro=distro)
+            confpath = ntpconfig['confpath']
+            m_sel.return_value = ntpconfig
+            cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, [])
+            self.assertIn(
+                "Invalid config:\nntp.pools.0: 123 is not of type 'string'\n"
+                "ntp.servers.1: None is not of type 'string'",
+                self.logs.getvalue())
+            self.assertEqual("servers ['valid', None]\npools [123]\n",
+                             util.load_file(confpath))
+
+    @skipUnlessJsonSchema()
+    @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+    def test_ntp_handler_schema_validation_warns_of_non_array_type(self,
+                                                                   m_select):
+        """Ntp schema validation warns of non-array pools or servers types.
+
+        Schema validation is not strict, so ntp config is still rendered.
+        """
+        invalid_config = {'ntp': {'pools': 123, 'servers': 'non-array'}}
+
+        for distro in cc_ntp.distros:
+            mycloud = self._get_cloud(distro)
+            ntpconfig = self._mock_ntp_client_config(distro=distro)
+            confpath = ntpconfig['confpath']
+            m_select.return_value = ntpconfig
+            cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, [])
+            self.assertIn(
+                "Invalid config:\nntp.pools: 123 is not of type 'array'\n"
+                "ntp.servers: 'non-array' is not of type 'array'",
+                self.logs.getvalue())
+            self.assertEqual("servers non-array\npools 123\n",
+                             util.load_file(confpath))
+
+    @skipUnlessJsonSchema()
+    @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+    def test_ntp_handler_schema_validation_warns_invalid_key_present(self,
+                                                                     m_select):
+        """Ntp schema validation warns of invalid keys present in ntp config.
+
+        Schema validation is not strict, so ntp config is still rendered.
+        """
+        invalid_config = {
+            'ntp': {'invalidkey': 1, 'pools': ['0.mycompany.pool.ntp.org']}}
+        for distro in cc_ntp.distros:
+            if distro != 'alpine':
+                mycloud = self._get_cloud(distro)
+                ntpconfig = self._mock_ntp_client_config(distro=distro)
+                confpath = ntpconfig['confpath']
+                m_select.return_value = ntpconfig
+                cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, [])
+                self.assertIn(
+                    "Invalid config:\nntp: Additional properties are not "
+                    "allowed ('invalidkey' was unexpected)",
+                    self.logs.getvalue())
+                self.assertEqual(
+                    "servers []\npools ['0.mycompany.pool.ntp.org']\n",
+                    util.load_file(confpath))
+
+    @skipUnlessJsonSchema()
+    @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+    def test_ntp_handler_schema_validation_warns_of_duplicates(self, m_select):
+        """Ntp schema validation warns of duplicates in servers or pools.
+
+        Schema validation is not strict, so ntp config is still rendered.
+ """ + invalid_config = { + 'ntp': {'pools': ['0.mypool.org', '0.mypool.org'], + 'servers': ['10.0.0.1', '10.0.0.1']}} + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro) + confpath = ntpconfig['confpath'] + m_select.return_value = ntpconfig + cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) + self.assertIn( + "Invalid config:\nntp.pools: ['0.mypool.org', '0.mypool.org']" + " has non-unique elements\nntp.servers: " + "['10.0.0.1', '10.0.0.1'] has non-unique elements", + self.logs.getvalue()) + self.assertEqual( + "servers ['10.0.0.1', '10.0.0.1']\n" + "pools ['0.mypool.org', '0.mypool.org']\n", + util.load_file(confpath)) + + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_timesyncd(self, m_select): + """Test ntp handler configures timesyncd""" + servers = ['192.168.2.1', '192.168.2.2'] + pools = ['0.mypool.org'] + cfg = {'ntp': {'servers': servers, 'pools': pools}} + client = 'systemd-timesyncd' + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro, + client=client) + confpath = ntpconfig['confpath'] + m_select.return_value = ntpconfig + cc_ntp.handle('cc_ntp', cfg, mycloud, None, []) + self.assertEqual( + "[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n", + util.load_file(confpath)) + + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_enabled_false(self, m_select): + """Test ntp handler does not run if enabled: false """ + cfg = {'ntp': {'enabled': False}} + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + cc_ntp.handle('notimportant', cfg, mycloud, None, None) + self.assertEqual(0, m_select.call_count) + + @mock.patch("cloudinit.distros.subp") + @mock.patch("cloudinit.config.cc_ntp.subp") + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + @mock.patch("cloudinit.distros.Distro.uses_systemd") + def test_ntp_the_whole_package(self, m_sysd, m_select, m_subp, m_dsubp): + """Test enabled config renders template, and restarts service """ + cfg = {'ntp': {'enabled': True}} + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro) + confpath = ntpconfig['confpath'] + service_name = ntpconfig['service_name'] + m_select.return_value = ntpconfig + + hosts = cc_ntp.generate_server_names(mycloud.distro.name) + uses_systemd = True + expected_service_call = ['systemctl', 'reload-or-restart', + service_name] + expected_content = "servers []\npools {0}\n".format(hosts) + + if distro == 'alpine': + uses_systemd = False + expected_service_call = ['rc-service', service_name, 'restart'] + # _mock_ntp_client_config call above did not specify a client + # value and so it defaults to "ntp" which on Alpine Linux only + # supports servers and not pools. 
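+                # hence the rendered file lists the defaults as servers,
+                # not pools (see expected_content just below).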
+                expected_content = "servers {0}\npools []\n".format(hosts)
+
+            m_sysd.return_value = uses_systemd
+            with mock.patch('cloudinit.config.cc_ntp.util') as m_util:
+                # allow use of util.mergemanydict
+                m_util.mergemanydict.side_effect = util.mergemanydict
+                # default client is present
+                m_subp.which.return_value = True
+                # use the config 'enabled' value
+                m_util.is_false.return_value = util.is_false(
+                    cfg['ntp']['enabled'])
+                cc_ntp.handle('notimportant', cfg, mycloud, None, None)
+                m_dsubp.subp.assert_called_with(
+                    expected_service_call, capture=True)
+
+            self.assertEqual(expected_content, util.load_file(confpath))
+
+    @mock.patch('cloudinit.util.system_info')
+    def test_opensuse_picks_chrony(self, m_sysinfo):
+        """Test opensuse picks chrony or ntp on certain distro versions"""
+        # < 15.0 => ntp
+        m_sysinfo.return_value = {
+            'dist': ('openSUSE', '13.2', 'Harlequin')
+        }
+        mycloud = self._get_cloud('opensuse')
+        expected_client = mycloud.distro.preferred_ntp_clients[0]
+        self.assertEqual('ntp', expected_client)
+
+        # >= 15.0 and not openSUSE => chrony
+        m_sysinfo.return_value = {
+            'dist': ('SLES', '15.0', 'SUSE Linux Enterprise Server 15')
+        }
+        mycloud = self._get_cloud('sles')
+        expected_client = mycloud.distro.preferred_ntp_clients[0]
+        self.assertEqual('chrony', expected_client)
+
+        # >= 15.0 and openSUSE and ver != 42 => chrony
+        m_sysinfo.return_value = {
+            'dist': ('openSUSE Tumbleweed', '20180326', 'timbleweed')
+        }
+        mycloud = self._get_cloud('opensuse')
+        expected_client = mycloud.distro.preferred_ntp_clients[0]
+        self.assertEqual('chrony', expected_client)
+
+    @mock.patch('cloudinit.util.system_info')
+    def test_ubuntu_xenial_picks_ntp(self, m_sysinfo):
+        """Test Ubuntu picks ntp on xenial release"""
+
+        m_sysinfo.return_value = {'dist': ('Ubuntu', '16.04', 'xenial')}
+        mycloud = self._get_cloud('ubuntu')
+        expected_client = mycloud.distro.preferred_ntp_clients[0]
+        self.assertEqual('ntp', expected_client)
+
+    @mock.patch('cloudinit.config.cc_ntp.subp.which')
+    def test_snappy_system_picks_timesyncd(self, m_which):
+        """Test snappy systems prefer installed clients"""
+
+        # we are on ubuntu-core here
+        self.m_snappy.return_value = True
+
+        # ubuntu core systems will have timesyncd installed
+        m_which.side_effect = iter([None, '/lib/systemd/systemd-timesyncd',
+                                    None, None, None])
+        distro = 'ubuntu'
+        mycloud = self._get_cloud(distro)
+        distro_configs = cc_ntp.distro_ntp_client_configs(distro)
+        expected_client = 'systemd-timesyncd'
+        expected_cfg = distro_configs[expected_client]
+        expected_calls = []
+        # we only get to timesyncd
+        for client in mycloud.distro.preferred_ntp_clients[0:2]:
+            cfg = distro_configs[client]
+            expected_calls.append(mock.call(cfg['check_exe']))
+        result = cc_ntp.select_ntp_client(None, mycloud.distro)
+        m_which.assert_has_calls(expected_calls)
+        self.assertEqual(sorted(expected_cfg), sorted(cfg))
+        self.assertEqual(sorted(expected_cfg), sorted(result))
+
+    @mock.patch('cloudinit.config.cc_ntp.subp.which')
+    def test_ntp_distro_searches_all_preferred_clients(self, m_which):
+        """Test select_ntp_client searches all distro preferred clients"""
+        # nothing is installed
+        m_which.return_value = None
+        for distro in cc_ntp.distros:
+            mycloud = self._get_cloud(distro)
+            distro_configs = cc_ntp.distro_ntp_client_configs(distro)
+            expected_client = mycloud.distro.preferred_ntp_clients[0]
+            expected_cfg = distro_configs[expected_client]
+            expected_calls = []
+            for client in mycloud.distro.preferred_ntp_clients:
+                cfg = distro_configs[client]
+
expected_calls.append(mock.call(cfg['check_exe'])) + cc_ntp.select_ntp_client({}, mycloud.distro) + m_which.assert_has_calls(expected_calls) + self.assertEqual(sorted(expected_cfg), sorted(cfg)) + + @mock.patch('cloudinit.config.cc_ntp.subp.which') + def test_user_cfg_ntp_client_auto_uses_distro_clients(self, m_which): + """Test user_cfg.ntp_client='auto' defaults to distro search""" + # nothing is installed + m_which.return_value = None + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + distro_configs = cc_ntp.distro_ntp_client_configs(distro) + expected_client = mycloud.distro.preferred_ntp_clients[0] + expected_cfg = distro_configs[expected_client] + expected_calls = [] + for client in mycloud.distro.preferred_ntp_clients: + cfg = distro_configs[client] + expected_calls.append(mock.call(cfg['check_exe'])) + cc_ntp.select_ntp_client('auto', mycloud.distro) + m_which.assert_has_calls(expected_calls) + self.assertEqual(sorted(expected_cfg), sorted(cfg)) + + @mock.patch('cloudinit.config.cc_ntp.write_ntp_config_template') + @mock.patch('cloudinit.cloud.Cloud.get_template_filename') + @mock.patch('cloudinit.config.cc_ntp.subp.which') + def test_ntp_custom_client_overrides_installed_clients(self, m_which, + m_tmpfn, m_write): + """Test user client is installed despite other clients present """ + client = 'ntpdate' + cfg = {'ntp': {'ntp_client': client}} + for distro in cc_ntp.distros: + # client is not installed + m_which.side_effect = iter([None]) + mycloud = self._get_cloud(distro) + with mock.patch.object(mycloud.distro, + 'install_packages') as m_install: + cc_ntp.handle('notimportant', cfg, mycloud, None, None) + m_install.assert_called_with([client]) + m_which.assert_called_with(client) + + @mock.patch('cloudinit.config.cc_ntp.subp.which') + def test_ntp_system_config_overrides_distro_builtin_clients(self, m_which): + """Test distro system_config overrides builtin preferred ntp clients""" + system_client = 'chrony' + sys_cfg = {'ntp_client': system_client} + # no clients installed + m_which.return_value = None + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro, sys_cfg=sys_cfg) + distro_configs = cc_ntp.distro_ntp_client_configs(distro) + expected_cfg = distro_configs[system_client] + result = cc_ntp.select_ntp_client(None, mycloud.distro) + self.assertEqual(sorted(expected_cfg), sorted(result)) + m_which.assert_has_calls([]) + + @mock.patch('cloudinit.config.cc_ntp.subp.which') + def test_ntp_user_config_overrides_system_cfg(self, m_which): + """Test user-data overrides system_config ntp_client""" + system_client = 'chrony' + sys_cfg = {'ntp_client': system_client} + user_client = 'systemd-timesyncd' + # no clients installed + m_which.return_value = None + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro, sys_cfg=sys_cfg) + distro_configs = cc_ntp.distro_ntp_client_configs(distro) + expected_cfg = distro_configs[user_client] + result = cc_ntp.select_ntp_client(user_client, mycloud.distro) + self.assertEqual(sorted(expected_cfg), sorted(result)) + m_which.assert_has_calls([]) + + @mock.patch('cloudinit.config.cc_ntp.install_ntp_client') + def test_ntp_user_provided_config_with_template(self, m_install): + custom = r'\n#MyCustomTemplate' + user_template = NTP_TEMPLATE + custom + confpath = os.path.join(self.new_root, 'etc/myntp/myntp.conf') + cfg = { + 'ntp': { + 'pools': ['mypool.org'], + 'ntp_client': 'myntpd', + 'config': { + 'check_exe': 'myntpd', + 'confpath': confpath, + 'packages': ['myntp'], + 'service_name': 'myntp', + 
+                    'template': user_template,
+                }
+            }
+        }
+        for distro in cc_ntp.distros:
+            mycloud = self._get_cloud(distro)
+            mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
+            with mock.patch(mock_path, self.new_root):
+                cc_ntp.handle('notimportant', cfg, mycloud, None, None)
+            self.assertEqual(
+                "servers []\npools ['mypool.org']\n%s" % custom,
+                util.load_file(confpath))
+
+    @mock.patch('cloudinit.config.cc_ntp.supplemental_schema_validation')
+    @mock.patch('cloudinit.config.cc_ntp.install_ntp_client')
+    @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
+    def test_ntp_user_provided_config_template_only(self, m_select, m_install,
+                                                    m_schema):
+        """Test custom template for default client"""
+        custom = r'\n#MyCustomTemplate'
+        user_template = NTP_TEMPLATE + custom
+        client = 'chrony'
+        cfg = {
+            'pools': ['mypool.org'],
+            'ntp_client': client,
+            'config': {
+                'template': user_template,
+            }
+        }
+        expected_merged_cfg = {
+            'check_exe': 'chronyd',
+            'confpath': '{tmpdir}/client.conf'.format(tmpdir=self.new_root),
+            'template_name': 'client.conf', 'template': user_template,
+            'service_name': 'chrony', 'packages': ['chrony']}
+        for distro in cc_ntp.distros:
+            mycloud = self._get_cloud(distro)
+            ntpconfig = self._mock_ntp_client_config(client=client,
+                                                     distro=distro)
+            confpath = ntpconfig['confpath']
+            m_select.return_value = ntpconfig
+            mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
+            with mock.patch(mock_path, self.new_root):
+                cc_ntp.handle('notimportant',
+                              {'ntp': cfg}, mycloud, None, None)
+            self.assertEqual(
+                "servers []\npools ['mypool.org']\n%s" % custom,
+                util.load_file(confpath))
+            m_schema.assert_called_with(expected_merged_cfg)
+
+
+class TestSupplementalSchemaValidation(CiTestCase):
+
+    def test_error_on_missing_keys(self):
+        """ValueError raised reporting any missing required ntp:config keys"""
+        cfg = {}
+        match = (r'Invalid ntp configuration:\\nMissing required ntp:config'
+                 ' keys: check_exe, confpath, packages, service_name')
+        with self.assertRaisesRegex(ValueError, match):
+            cc_ntp.supplemental_schema_validation(cfg)
+
+    def test_error_requiring_either_template_or_template_name(self):
+        """ValueError raised if both template and template_name are None."""
+        cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '',
+               'template': None, 'template_name': None, 'packages': []}
+        match = (r'Invalid ntp configuration:\\nEither ntp:config:template'
+                 ' or ntp:config:template_name values are required')
+        with self.assertRaisesRegex(ValueError, match):
+            cc_ntp.supplemental_schema_validation(cfg)
+
+    def test_error_on_non_list_values(self):
+        """ValueError raised when packages is not of type list."""
+        cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '',
+               'template': 'asdf', 'template_name': None, 'packages': 'NOPE'}
+        match = (r'Invalid ntp configuration:\\nExpected a list of required'
+                 ' package names for ntp:config:packages. Found \\(NOPE\\)')
+        with self.assertRaisesRegex(ValueError, match):
+            cc_ntp.supplemental_schema_validation(cfg)
+
+    def test_error_on_non_string_values(self):
+        """ValueError raised for any values expected as string type."""
+        cfg = {'confpath': 1, 'check_exe': 2, 'service_name': 3,
+               'template': 4, 'template_name': 5, 'packages': []}
+        errors = [
+            'Expected a config file path ntp:config:confpath. Found (1)',
+            'Expected a string type for ntp:config:check_exe. Found (2)',
+            'Expected a string type for ntp:config:service_name. Found (3)',
+            'Expected a string type for ntp:config:template. Found (4)',
+            'Expected a string type for ntp:config:template_name. Found (5)']
+        with self.assertRaises(ValueError) as context_mgr:
+            cc_ntp.supplemental_schema_validation(cfg)
+        error_msg = str(context_mgr.exception)
+        for error in errors:
+            self.assertIn(error, error_msg)
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_power_state_change.py b/tests/unittests/config/test_cc_power_state_change.py
new file mode 100644
index 00000000..e699f424
--- /dev/null
+++ b/tests/unittests/config/test_cc_power_state_change.py
@@ -0,0 +1,159 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import sys
+
+from cloudinit.config import cc_power_state_change as psc
+
+from cloudinit import distros
+from cloudinit import helpers
+
+from tests.unittests import helpers as t_help
+from tests.unittests.helpers import mock
+
+
+class TestLoadPowerState(t_help.TestCase):
+    def setUp(self):
+        super(TestLoadPowerState, self).setUp()
+        cls = distros.fetch('ubuntu')
+        paths = helpers.Paths({})
+        self.dist = cls('ubuntu', {}, paths)
+
+    def test_no_config(self):
+        # completely empty config should mean do nothing
+        (cmd, _timeout, _condition) = psc.load_power_state({}, self.dist)
+        self.assertIsNone(cmd)
+
+    def test_irrelevant_config(self):
+        # no power_state field in config should return None for cmd
+        (cmd, _timeout, _condition) = psc.load_power_state({'foo': 'bar'},
+                                                           self.dist)
+        self.assertIsNone(cmd)
+
+    def test_invalid_mode(self):
+
+        cfg = {'power_state': {'mode': 'gibberish'}}
+        self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
+
+        cfg = {'power_state': {'mode': ''}}
+        self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
+
+    def test_empty_mode(self):
+        cfg = {'power_state': {'message': 'goodbye'}}
+        self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
+
+    def test_valid_modes(self):
+        cfg = {'power_state': {}}
+        for mode in ('halt', 'poweroff', 'reboot'):
+            cfg['power_state']['mode'] = mode
+            check_lps_ret(psc.load_power_state(cfg, self.dist), mode=mode)
+
+    def test_invalid_delay(self):
+        cfg = {'power_state': {'mode': 'poweroff', 'delay': 'goodbye'}}
+        self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
+
+    def test_valid_delay(self):
+        cfg = {'power_state': {'mode': 'poweroff', 'delay': ''}}
+        for delay in ("now", "+1", "+30"):
+            cfg['power_state']['delay'] = delay
+            check_lps_ret(psc.load_power_state(cfg, self.dist))
+
+    def test_message_present(self):
+        cfg = {'power_state': {'mode': 'poweroff', 'message': 'GOODBYE'}}
+        ret = psc.load_power_state(cfg, self.dist)
+        check_lps_ret(psc.load_power_state(cfg, self.dist))
+        self.assertIn(cfg['power_state']['message'], ret[0])
+
+    def test_no_message(self):
+        # if message is not present, then no argument should be passed for it
+        cfg = {'power_state': {'mode': 'poweroff'}}
+        (cmd, _timeout, _condition) = psc.load_power_state(cfg, self.dist)
+        self.assertNotIn("", cmd)
+        check_lps_ret(psc.load_power_state(cfg, self.dist))
+        self.assertTrue(len(cmd) == 3)
+
+    def test_condition_null_raises(self):
+        cfg = {'power_state': {'mode': 'poweroff', 'condition': None}}
+        self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
+
+    def test_condition_default_is_true(self):
+        cfg = {'power_state': {'mode': 'poweroff'}}
+        _cmd, _timeout, cond = psc.load_power_state(cfg, self.dist)
+        self.assertEqual(cond, True)
+
+    def test_freebsd_poweroff_uses_lowercase_p(self):
+        cls = distros.fetch('freebsd')
+        paths = helpers.Paths({})
+        freebsd = cls('freebsd', {}, paths)
+        cfg = {'power_state': {'mode': 'poweroff'}}
+        ret = psc.load_power_state(cfg, freebsd)
+        self.assertIn('-p', ret[0])
+
+    def test_alpine_delay(self):
+        # alpine takes delay in seconds.
+        cls = distros.fetch('alpine')
+        paths = helpers.Paths({})
+        alpine = cls('alpine', {}, paths)
+        cfg = {'power_state': {'mode': 'poweroff', 'delay': ''}}
+        for delay, value in (('now', 0), ("+1", 60), ("+30", 1800)):
+            cfg['power_state']['delay'] = delay
+            ret = psc.load_power_state(cfg, alpine)
+            self.assertEqual('-d', ret[0][1])
+            self.assertEqual(str(value), ret[0][2])
+
+
+class TestCheckCondition(t_help.TestCase):
+    def cmd_with_exit(self, rc):
+        return([sys.executable, '-c', 'import sys; sys.exit(%s)' % rc])
+
+    def test_true_is_true(self):
+        self.assertEqual(psc.check_condition(True), True)
+
+    def test_false_is_false(self):
+        self.assertEqual(psc.check_condition(False), False)
+
+    def test_cmd_exit_zero_true(self):
+        self.assertEqual(psc.check_condition(self.cmd_with_exit(0)), True)
+
+    def test_cmd_exit_one_false(self):
+        self.assertEqual(psc.check_condition(self.cmd_with_exit(1)), False)
+
+    def test_cmd_exit_nonzero_warns(self):
+        mocklog = mock.Mock()
+        self.assertEqual(
+            psc.check_condition(self.cmd_with_exit(2), mocklog), False)
+        self.assertEqual(mocklog.warning.call_count, 1)
+
+
+def check_lps_ret(psc_return, mode=None):
+    if len(psc_return) != 3:
+        raise TypeError("length returned = %d" % len(psc_return))
+
+    errs = []
+    cmd = psc_return[0]
+    timeout = psc_return[1]
+    condition = psc_return[2]
+
+    if 'shutdown' not in psc_return[0][0]:
+        errs.append("string 'shutdown' not in cmd")
+
+    if condition is None:
+        errs.append("condition was not returned")
+
+    if mode is not None:
+        opt = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}[mode]
+        if opt not in psc_return[0]:
+            errs.append("opt '%s' not in cmd: %s" % (opt, cmd))
+
+    if len(cmd) != 3 and len(cmd) != 4:
+        errs.append("Invalid command length: %s" % len(cmd))
+
+    try:
+        float(timeout)
+    except Exception:
+        errs.append("timeout failed to convert to float")
+
+    if len(errs):
+        lines = ["Errors in result: %s" % str(psc_return)] + errs
+        raise Exception('\n'.join(lines))
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_puppet.py b/tests/unittests/config/test_cc_puppet.py
new file mode 100644
index 00000000..1f67dc4c
--- /dev/null
+++ b/tests/unittests/config/test_cc_puppet.py
@@ -0,0 +1,380 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import textwrap
+
+from cloudinit.config import cc_puppet
+from cloudinit import util
+from tests.unittests.helpers import CiTestCase, HttprettyTestCase, mock
+
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+
+@mock.patch('cloudinit.config.cc_puppet.subp.subp')
+@mock.patch('cloudinit.config.cc_puppet.os')
+class TestAutostartPuppet(CiTestCase):
+
+    def test_wb_autostart_puppet_updates_puppet_default(self, m_os, m_subp):
+        """Update /etc/default/puppet to autostart if it exists."""
+
+        def _fake_exists(path):
+            return path == '/etc/default/puppet'
+
+        m_os.path.exists.side_effect = _fake_exists
+        cc_puppet._autostart_puppet(LOG)
+        self.assertEqual(
+            [mock.call(['sed', '-i', '-e', 's/^START=.*/START=yes/',
+                        '/etc/default/puppet'], capture=False)],
+            m_subp.call_args_list)
+
+    def test_wb_autostart_puppet_enables_puppet_systemctl(self, m_os, m_subp):
+        """If systemctl is present, enable puppet via systemctl."""
+
+        def _fake_exists(path):
+            return path == '/bin/systemctl'
+
+        m_os.path.exists.side_effect = _fake_exists
+        cc_puppet._autostart_puppet(LOG)
+        expected_calls = [mock.call(
+            ['/bin/systemctl', 'enable', 'puppet.service'], capture=False)]
+        self.assertEqual(expected_calls, m_subp.call_args_list)
+
+    def test_wb_autostart_puppet_enables_puppet_chkconfig(self, m_os, m_subp):
+        """If chkconfig is present, enable puppet via chkconfig."""
+
+        def _fake_exists(path):
+            return path == '/sbin/chkconfig'
+
+        m_os.path.exists.side_effect = _fake_exists
+        cc_puppet._autostart_puppet(LOG)
+        expected_calls = [mock.call(
+            ['/sbin/chkconfig', 'puppet', 'on'], capture=False)]
+        self.assertEqual(expected_calls, m_subp.call_args_list)
+
+
+@mock.patch('cloudinit.config.cc_puppet._autostart_puppet')
+class TestPuppetHandle(CiTestCase):
+
+    with_logs = True
+
+    def setUp(self):
+        super(TestPuppetHandle, self).setUp()
+        self.new_root = self.tmp_dir()
+        self.conf = self.tmp_path('puppet.conf')
+        self.csr_attributes_path = self.tmp_path(
+            'csr_attributes.yaml')
+        self.cloud = get_cloud()
+
+    def test_skips_missing_puppet_key_in_cloudconfig(self, m_auto):
+        """Cloud-config containing no 'puppet' key is skipped."""
+
+        cfg = {}
+        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
+        self.assertIn(
+            "no 'puppet' configuration found", self.logs.getvalue())
+        self.assertEqual(0, m_auto.call_count)
+
+    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+    def test_puppet_config_starts_puppet_service(self, m_subp, m_auto):
+        """Cloud-config 'puppet' configuration starts puppet."""
+
+        cfg = {'puppet': {'install': False}}
+        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
+        self.assertEqual(1, m_auto.call_count)
+        self.assertIn(
+            [mock.call(['service', 'puppet', 'start'], capture=False)],
+            m_subp.call_args_list)
+
+    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+    def test_empty_puppet_config_installs_puppet(self, m_subp, m_auto):
+        """Cloud-config empty 'puppet' configuration installs latest puppet."""
+
+        self.cloud.distro = mock.MagicMock()
+        cfg = {'puppet': {}}
+        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
+        self.assertEqual(
+            [mock.call(('puppet', None))],
+            self.cloud.distro.install_packages.call_args_list)
+
+    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+    def test_puppet_config_installs_puppet_on_true(self, m_subp, _):
+        """Cloud-config with 'puppet' key installs when 'install' is True."""
+
+        self.cloud.distro = mock.MagicMock()
+        cfg = {'puppet': {'install': True}}
+        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
+        self.assertEqual(
+            [mock.call(('puppet', None))],
+            self.cloud.distro.install_packages.call_args_list)
+
+    @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True)
+    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+    def test_puppet_config_installs_puppet_aio(self, m_subp, m_aio, _):
+        """Cloud-config with 'puppet' key installs
+        when 'install_type' is 'aio'."""
+
+        self.cloud.distro = mock.MagicMock()
+        cfg = {'puppet': {'install': True, 'install_type': 'aio'}}
+        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
+        m_aio.assert_called_with(
+            cc_puppet.AIO_INSTALL_URL,
+            None, None, True)
+
+    @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True)
+    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+    def test_puppet_config_installs_puppet_aio_with_version(self,
+                                                            m_subp, m_aio, _):
+        """Cloud-config with 'puppet' key installs
+        when 'install_type' is 'aio' and 'version' is specified."""
+
+        self.cloud.distro = mock.MagicMock()
+        cfg = {'puppet': {'install': True,
+                          'version': '6.24.0', 'install_type': 'aio'}}
+        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
+        m_aio.assert_called_with(
+            cc_puppet.AIO_INSTALL_URL,
+            '6.24.0', None, True)
+
+    @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True)
+    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+    def test_puppet_config_installs_puppet_aio_with_collection(self,
+                                                               m_subp,
+                                                               m_aio, _):
+        """Cloud-config with 'puppet' key installs
+        when 'install_type' is 'aio' and 'collection' is specified."""
+
+        self.cloud.distro = mock.MagicMock()
+        cfg = {'puppet': {'install': True,
+                          'collection': 'puppet6', 'install_type': 'aio'}}
+        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
+        m_aio.assert_called_with(
+            cc_puppet.AIO_INSTALL_URL,
+            None, 'puppet6', True)
+
+    @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True)
+    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+    def test_puppet_config_installs_puppet_aio_with_custom_url(self,
+                                                               m_subp,
+                                                               m_aio, _):
+        """Cloud-config with 'puppet' key installs
+        when 'install_type' is 'aio' and 'aio_install_url' is specified."""
+
+        self.cloud.distro = mock.MagicMock()
+        cfg = {'puppet':
+               {'install': True,
+                'aio_install_url': 'http://test.url/path/to/script.sh',
+                'install_type': 'aio'}}
+        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
+        m_aio.assert_called_with(
+            'http://test.url/path/to/script.sh', None, None, True)
+
+    @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True)
+    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+    def test_puppet_config_installs_puppet_aio_without_cleanup(self,
+                                                               m_subp,
+                                                               m_aio, _):
+        """Cloud-config with 'puppet' key installs
+        when 'install_type' is 'aio' and no cleanup."""
+
+        self.cloud.distro = mock.MagicMock()
+        cfg = {'puppet': {'install': True,
+                          'cleanup': False, 'install_type': 'aio'}}
+        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
+        m_aio.assert_called_with(
+            cc_puppet.AIO_INSTALL_URL,
+            None, None, False)
+
+    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+    def test_puppet_config_installs_puppet_version(self, m_subp, _):
+        """Cloud-config 'puppet' configuration can specify a version."""
+
+        self.cloud.distro = mock.MagicMock()
+        cfg = {'puppet': {'version': '3.8'}}
+        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
+        self.assertEqual(
+            [mock.call(('puppet', '3.8'))],
+            self.cloud.distro.install_packages.call_args_list)
+
+    @mock.patch('cloudinit.config.cc_puppet.get_config_value')
+    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+    def test_puppet_config_updates_puppet_conf(self,
+                                               m_subp, m_default, m_auto):
+        """When 'conf' is provided update values in PUPPET_CONF_PATH."""
+
+        def _fake_get_config_value(puppet_bin, setting):
+            return self.conf
+
+        m_default.side_effect = _fake_get_config_value
+
+        cfg = {
+            'puppet': {
+                'conf': {'agent': {'server': 'puppetserver.example.org'}}}}
+        util.write_file(
+            self.conf, '[agent]\nserver = origpuppet\nother = 3')
+        self.cloud.distro = mock.MagicMock()
+        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
+        content = util.load_file(self.conf)
+        expected = '[agent]\nserver = puppetserver.example.org\nother = 3\n\n'
+        self.assertEqual(expected, content)
+
+    @mock.patch('cloudinit.config.cc_puppet.get_config_value')
+    @mock.patch('cloudinit.config.cc_puppet.subp.subp')
+    def test_puppet_writes_csr_attributes_file(self,
+                                               m_subp, m_default, m_auto):
+        """When csr_attributes is provided
+        creates file in PUPPET_CSR_ATTRIBUTES_PATH."""
+
+        def _fake_get_config_value(puppet_bin, setting):
+            return self.csr_attributes_path
+
+        m_default.side_effect = _fake_get_config_value
+
+        self.cloud.distro = mock.MagicMock()
+        cfg = {
+            'puppet': {
+                'csr_attributes': {
+                    'custom_attributes': {
+                        '1.2.840.113549.1.9.7':
+                            '342thbjkt82094y0uthhor289jnqthpc2290'
+                    },
+                    'extension_requests': {
+                        'pp_uuid': 'ED803750-E3C7-44F5-BB08-41A04433FE2E',
+                        'pp_image_name': 'my_ami_image',
+                        'pp_preshared_key':
+                            '342thbjkt82094y0uthhor289jnqthpc2290'
+                    }
+                }
+            }
+        }
+        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
+        content = util.load_file(self.csr_attributes_path)
+        expected = textwrap.dedent("""\
+            custom_attributes:
+              1.2.840.113549.1.9.7: 342thbjkt82094y0uthhor289jnqthpc2290
+            extension_requests:
+              pp_image_name: my_ami_image
+              pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290
+              pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E
+            """)
+        self.assertEqual(expected, content)
+
+    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+    def test_puppet_runs_puppet_if_requested(self, m_subp, m_auto):
+        """Run puppet with default args if 'exec' is set to True."""
+
+        cfg = {'puppet': {'exec': True}}
+        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
+        self.assertEqual(1, m_auto.call_count)
+        self.assertIn(
+            [mock.call(['puppet', 'agent', '--test'], capture=False)],
+            m_subp.call_args_list)
+
+    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+    def test_puppet_starts_puppetd(self, m_subp, m_auto):
+        """Start the puppet service when 'start_service' is not set."""
+
+        cfg = {'puppet': {}}
+        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
+        self.assertEqual(1, m_auto.call_count)
+        self.assertIn(
+            [mock.call(['service', 'puppet', 'start'], capture=False)],
+            m_subp.call_args_list)
+
+    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+    def test_puppet_skips_puppetd(self, m_subp, m_auto):
+        """Do not start the puppet service if 'start_service' is False."""
+
+        cfg = {'puppet': {'start_service': False}}
+        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
+        self.assertEqual(0, m_auto.call_count)
+        self.assertNotIn(
+            [mock.call(['service', 'puppet', 'start'], capture=False)],
+            m_subp.call_args_list)
+
+    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+    def test_puppet_runs_puppet_with_args_list_if_requested(self,
+                                                            m_subp, m_auto):
+        """Run puppet with 'exec_args' list if 'exec' is set to True."""
+
+        cfg = {'puppet': {'exec': True, 'exec_args': [
+            '--onetime', '--detailed-exitcodes']}}
+        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
+        self.assertEqual(1, m_auto.call_count)
+        self.assertIn(
+            [mock.call(
+                ['puppet', 'agent', '--onetime', '--detailed-exitcodes'],
+                capture=False)],
+            m_subp.call_args_list)
+
+    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
+    def test_puppet_runs_puppet_with_args_string_if_requested(self,
+                                                              m_subp, m_auto):
+        """Run puppet with 'exec_args' string if 'exec' is set to True."""
+
+        cfg = {'puppet': {'exec': True,
+                          'exec_args': '--onetime --detailed-exitcodes'}}
+        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
+        self.assertEqual(1, m_auto.call_count)
+        self.assertIn(
+            [mock.call(
+                ['puppet', 'agent', '--onetime', '--detailed-exitcodes'],
+                capture=False)],
+            m_subp.call_args_list)
+
+
+URL_MOCK = mock.Mock()
+URL_MOCK.contents = b'#!/bin/bash\necho "Hi Mom"'
+
+
+@mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=(None, None))
+@mock.patch(
+    'cloudinit.config.cc_puppet.url_helper.readurl',
+    return_value=URL_MOCK, autospec=True,
+)
+class TestInstallPuppetAio(HttprettyTestCase):
+    def test_install_with_default_arguments(self, m_readurl, m_subp):
+        """Install AIO with no arguments"""
+        cc_puppet.install_puppet_aio()
+
+        self.assertEqual(
+            [mock.call([mock.ANY, '--cleanup'], capture=False)],
+            m_subp.call_args_list)
+
+    def test_install_with_custom_url(self, m_readurl, m_subp):
+        """Install AIO from custom URL"""
+        cc_puppet.install_puppet_aio('http://custom.url/path/to/script.sh')
+        m_readurl.assert_called_with(
+            url='http://custom.url/path/to/script.sh',
+            retries=5)
+
+        self.assertEqual(
+            [mock.call([mock.ANY, '--cleanup'], capture=False)],
+            m_subp.call_args_list)
+
+    def test_install_with_version(self, m_readurl, m_subp):
+        """Install AIO with specific version"""
+        cc_puppet.install_puppet_aio(cc_puppet.AIO_INSTALL_URL, '7.6.0')
+
+        self.assertEqual(
+            [mock.call([mock.ANY, '-v', '7.6.0', '--cleanup'], capture=False)],
+            m_subp.call_args_list)
+
+    def test_install_with_collection(self, m_readurl, m_subp):
+        """Install AIO with specific collection"""
+        cc_puppet.install_puppet_aio(
+            cc_puppet.AIO_INSTALL_URL, None, 'puppet6-nightly')
+
+        self.assertEqual(
+            [mock.call([mock.ANY, '-c', 'puppet6-nightly', '--cleanup'],
+                       capture=False)],
+            m_subp.call_args_list)
+
+    def test_install_with_no_cleanup(self, m_readurl, m_subp):
+        """Install AIO with no cleanup"""
+        cc_puppet.install_puppet_aio(
+            cc_puppet.AIO_INSTALL_URL, None, None, False)
+
+        self.assertEqual(
+            [mock.call([mock.ANY], capture=False)],
+            m_subp.call_args_list)
diff --git a/tests/unittests/config/test_cc_refresh_rmc_and_interface.py b/tests/unittests/config/test_cc_refresh_rmc_and_interface.py
new file mode 100644
index 00000000..522de23d
--- /dev/null
+++ b/tests/unittests/config/test_cc_refresh_rmc_and_interface.py
@@ -0,0 +1,109 @@
+from cloudinit.config import cc_refresh_rmc_and_interface as ccrmci
+
+from cloudinit import util
+
+from tests.unittests import helpers as t_help
+from tests.unittests.helpers import mock
+
+from textwrap import dedent
+import logging
+
+LOG = logging.getLogger(__name__)
+MPATH = "cloudinit.config.cc_refresh_rmc_and_interface"
"cloudinit.config.cc_refresh_rmc_and_interface" +NET_INFO = { + 'lo': {'ipv4': [{'ip': '127.0.0.1', + 'bcast': '', 'mask': '255.0.0.0', + 'scope': 'host'}], + 'ipv6': [{'ip': '::1/128', + 'scope6': 'host'}], 'hwaddr': '', + 'up': 'True'}, + 'env2': {'ipv4': [{'ip': '8.0.0.19', + 'bcast': '8.0.0.255', 'mask': '255.255.255.0', + 'scope': 'global'}], + 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8220/64', + 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:20', + 'up': 'True'}, + 'env3': {'ipv4': [{'ip': '90.0.0.14', + 'bcast': '90.0.0.255', 'mask': '255.255.255.0', + 'scope': 'global'}], + 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8221/64', + 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:21', + 'up': 'True'}, + 'env4': {'ipv4': [{'ip': '9.114.23.7', + 'bcast': '9.114.23.255', 'mask': '255.255.255.0', + 'scope': 'global'}], + 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8222/64', + 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:22', + 'up': 'True'}, + 'env5': {'ipv4': [], + 'ipv6': [{'ip': 'fe80::9c26:c3ff:fea4:62c8/64', + 'scope6': 'link'}], 'hwaddr': '42:20:86:df:fa:4c', + 'up': 'True'}} + + +class TestRsctNodeFile(t_help.CiTestCase): + def test_disable_ipv6_interface(self): + """test parsing of iface files.""" + fname = self.tmp_path("iface-eth5") + util.write_file(fname, dedent("""\ + BOOTPROTO=static + DEVICE=eth5 + HWADDR=42:20:86:df:fa:4c + IPV6INIT=yes + IPADDR6=fe80::9c26:c3ff:fea4:62c8/64 + IPV6ADDR=fe80::9c26:c3ff:fea4:62c8/64 + NM_CONTROLLED=yes + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + """)) + + ccrmci.disable_ipv6(fname) + self.assertEqual(dedent("""\ + BOOTPROTO=static + DEVICE=eth5 + HWADDR=42:20:86:df:fa:4c + ONBOOT=yes + STARTMODE=auto + TYPE=Ethernet + USERCTL=no + NM_CONTROLLED=no + """), util.load_file(fname)) + + @mock.patch(MPATH + '.refresh_rmc') + @mock.patch(MPATH + '.restart_network_manager') + @mock.patch(MPATH + '.disable_ipv6') + @mock.patch(MPATH + '.refresh_ipv6') + @mock.patch(MPATH + '.netinfo.netdev_info') + @mock.patch(MPATH + '.subp.which') + def test_handle(self, m_refresh_rmc, + m_netdev_info, m_refresh_ipv6, m_disable_ipv6, + m_restart_nm, m_which): + """Basic test of handle.""" + m_netdev_info.return_value = NET_INFO + m_which.return_value = '/opt/rsct/bin/rmcctrl' + ccrmci.handle( + "refresh_rmc_and_interface", None, None, None, None) + self.assertEqual(1, m_netdev_info.call_count) + m_refresh_ipv6.assert_called_with('env5') + m_disable_ipv6.assert_called_with( + '/etc/sysconfig/network-scripts/ifcfg-env5') + self.assertEqual(1, m_restart_nm.call_count) + self.assertEqual(1, m_refresh_rmc.call_count) + + @mock.patch(MPATH + '.netinfo.netdev_info') + def test_find_ipv6(self, m_netdev_info): + """find_ipv6_ifaces parses netdev_info returning those with ipv6""" + m_netdev_info.return_value = NET_INFO + found = ccrmci.find_ipv6_ifaces() + self.assertEqual(['env5'], found) + + @mock.patch(MPATH + '.subp.subp') + def test_refresh_ipv6(self, m_subp): + """refresh_ipv6 should ip down and up the interface.""" + iface = "myeth0" + ccrmci.refresh_ipv6(iface) + m_subp.assert_has_calls([ + mock.call(['ip', 'link', 'set', iface, 'down']), + mock.call(['ip', 'link', 'set', iface, 'up'])]) diff --git a/tests/unittests/config/test_cc_resizefs.py b/tests/unittests/config/test_cc_resizefs.py new file mode 100644 index 00000000..1f9e24da --- /dev/null +++ b/tests/unittests/config/test_cc_resizefs.py @@ -0,0 +1,398 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+
+from cloudinit.config.cc_resizefs import (
+    can_skip_resize, handle, maybe_get_writable_device_path, _resize_btrfs,
+    _resize_zfs, _resize_xfs, _resize_ext, _resize_ufs)
+
+from collections import namedtuple
+import logging
+
+from cloudinit.subp import ProcessExecutionError
+from tests.unittests.helpers import (
+    CiTestCase, mock, skipUnlessJsonSchema, util, wrap_and_call)
+
+
+LOG = logging.getLogger(__name__)
+
+
+class TestResizefs(CiTestCase):
+    with_logs = True
+
+    def setUp(self):
+        super(TestResizefs, self).setUp()
+        self.name = "resizefs"
+
+    @mock.patch('cloudinit.subp.subp')
+    def test_skip_ufs_resize(self, m_subp):
+        fs_type = "ufs"
+        resize_what = "/"
+        devpth = "/dev/da0p2"
+        err = ("growfs: requested size 2.0GB is not larger than the "
+               "current filesystem size 2.0GB\n")
+        exception = ProcessExecutionError(stderr=err, exit_code=1)
+        m_subp.side_effect = exception
+        res = can_skip_resize(fs_type, resize_what, devpth)
+        self.assertTrue(res)
+
+    @mock.patch('cloudinit.subp.subp')
+    def test_cannot_skip_ufs_resize(self, m_subp):
+        fs_type = "ufs"
+        resize_what = "/"
+        devpth = "/dev/da0p2"
+        m_subp.return_value = (
+            ("stdout: super-block backups (for fsck_ffs -b #) at:\n\n"),
+            ("growfs: no room to allocate last cylinder group; "
+             "leaving 364KB unused\n")
+        )
+        res = can_skip_resize(fs_type, resize_what, devpth)
+        self.assertFalse(res)
+
+    @mock.patch('cloudinit.subp.subp')
+    def test_cannot_skip_ufs_growfs_exception(self, m_subp):
+        fs_type = "ufs"
+        resize_what = "/"
+        devpth = "/dev/da0p2"
+        err = "growfs: /dev/da0p2 is not clean - run fsck.\n"
+        exception = ProcessExecutionError(stderr=err, exit_code=1)
+        m_subp.side_effect = exception
+        with self.assertRaises(ProcessExecutionError):
+            can_skip_resize(fs_type, resize_what, devpth)
+
+    def test_can_skip_resize_ext(self):
+        self.assertFalse(can_skip_resize('ext', '/', '/dev/sda1'))
+
+    def test_handle_noops_on_disabled(self):
+        """The handle function logs when the configuration disables resize."""
+        cfg = {'resize_rootfs': False}
+        handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[])
+        self.assertIn(
+            'DEBUG: Skipping module named cc_resizefs, resizing disabled\n',
+            self.logs.getvalue())
+
+    @skipUnlessJsonSchema()
+    def test_handle_schema_validation_logs_invalid_resize_rootfs_value(self):
+        """The handle reports json schema violations as a warning.
+
+        Invalid values for resize_rootfs result in disabling the module.
+ """ + cfg = {'resize_rootfs': 'junk'} + handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[]) + logs = self.logs.getvalue() + self.assertIn( + "WARNING: Invalid config:\nresize_rootfs: 'junk' is not one of" + " [True, False, 'noblock']", + logs) + self.assertIn( + 'DEBUG: Skipping module named cc_resizefs, resizing disabled\n', + logs) + + @mock.patch('cloudinit.config.cc_resizefs.util.get_mount_info') + def test_handle_warns_on_unknown_mount_info(self, m_get_mount_info): + """handle warns when get_mount_info sees unknown filesystem for /.""" + m_get_mount_info.return_value = None + cfg = {'resize_rootfs': True} + handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[]) + logs = self.logs.getvalue() + self.assertNotIn("WARNING: Invalid config:\nresize_rootfs:", logs) + self.assertIn( + 'WARNING: Could not determine filesystem type of /\n', + logs) + self.assertEqual( + [mock.call('/', LOG)], + m_get_mount_info.call_args_list) + + def test_handle_warns_on_undiscoverable_root_path_in_commandline(self): + """handle noops when the root path is not found on the commandline.""" + cfg = {'resize_rootfs': True} + exists_mock_path = 'cloudinit.config.cc_resizefs.os.path.exists' + + def fake_mount_info(path, log): + self.assertEqual('/', path) + self.assertEqual(LOG, log) + return ('/dev/root', 'ext4', '/') + + with mock.patch(exists_mock_path) as m_exists: + m_exists.return_value = False + wrap_and_call( + 'cloudinit.config.cc_resizefs.util', + {'is_container': {'return_value': False}, + 'get_mount_info': {'side_effect': fake_mount_info}, + 'get_cmdline': {'return_value': 'BOOT_IMAGE=/vmlinuz.efi'}}, + handle, 'cc_resizefs', cfg, _cloud=None, log=LOG, + args=[]) + logs = self.logs.getvalue() + self.assertIn("WARNING: Unable to find device '/dev/root'", logs) + + def test_resize_zfs_cmd_return(self): + zpool = 'zroot' + devpth = 'gpt/system' + self.assertEqual(('zpool', 'online', '-e', zpool, devpth), + _resize_zfs(zpool, devpth)) + + def test_resize_xfs_cmd_return(self): + mount_point = '/mnt/test' + devpth = '/dev/sda1' + self.assertEqual(('xfs_growfs', mount_point), + _resize_xfs(mount_point, devpth)) + + def test_resize_ext_cmd_return(self): + mount_point = '/' + devpth = '/dev/sdb1' + self.assertEqual(('resize2fs', devpth), + _resize_ext(mount_point, devpth)) + + def test_resize_ufs_cmd_return(self): + mount_point = '/' + devpth = '/dev/sda2' + self.assertEqual(('growfs', '-y', mount_point), + _resize_ufs(mount_point, devpth)) + + @mock.patch('cloudinit.util.is_container', return_value=False) + @mock.patch('cloudinit.util.parse_mount') + @mock.patch('cloudinit.util.get_device_info_from_zpool') + @mock.patch('cloudinit.util.get_mount_info') + def test_handle_zfs_root(self, mount_info, zpool_info, parse_mount, + is_container): + devpth = 'vmzroot/ROOT/freebsd' + disk = 'gpt/system' + fs_type = 'zfs' + mount_point = '/' + + mount_info.return_value = (devpth, fs_type, mount_point) + zpool_info.return_value = disk + parse_mount.return_value = (devpth, fs_type, mount_point) + + cfg = {'resize_rootfs': True} + + with mock.patch('cloudinit.config.cc_resizefs.do_resize') as dresize: + handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[]) + ret = dresize.call_args[0][0] + + self.assertEqual(('zpool', 'online', '-e', 'vmzroot', disk), ret) + + @mock.patch('cloudinit.util.is_container', return_value=False) + @mock.patch('cloudinit.util.get_mount_info') + @mock.patch('cloudinit.util.get_device_info_from_zpool') + @mock.patch('cloudinit.util.parse_mount') + def test_handle_modern_zfsroot(self, 
+        devpth = 'zroot/ROOT/default'
+        disk = 'da0p3'
+        fs_type = 'zfs'
+        mount_point = '/'
+
+        mount_info.return_value = (devpth, fs_type, mount_point)
+        zpool_info.return_value = disk
+        parse_mount.return_value = (devpth, fs_type, mount_point)
+
+        cfg = {'resize_rootfs': True}
+
+        def fake_stat(devpath):
+            if devpath == disk:
+                raise OSError("not here")
+            FakeStat = namedtuple(
+                'FakeStat', ['st_mode', 'st_size', 'st_mtime'])  # minimal stat
+            return FakeStat(25008, 0, 1)  # fake char block device
+
+        with mock.patch('cloudinit.config.cc_resizefs.do_resize') as dresize:
+            with mock.patch('cloudinit.config.cc_resizefs.os.stat') as m_stat:
+                m_stat.side_effect = fake_stat
+                handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[])
+
+        self.assertEqual(('zpool', 'online', '-e', 'zroot', '/dev/' + disk),
+                         dresize.call_args[0][0])
+
+
+class TestRootDevFromCmdline(CiTestCase):
+
+    def test_rootdev_from_cmdline_with_no_root(self):
+        """Return None from rootdev_from_cmdline when root is not present."""
+        invalid_cases = [
+            'BOOT_IMAGE=/adsf asdfa werasef root adf', 'BOOT_IMAGE=/adsf', '']
+        for case in invalid_cases:
+            self.assertIsNone(util.rootdev_from_cmdline(case))
+
+    def test_rootdev_from_cmdline_with_root_startswith_dev(self):
+        """Return the cmdline root when the path starts with /dev."""
+        self.assertEqual(
+            '/dev/this', util.rootdev_from_cmdline('asdf root=/dev/this'))
+
+    def test_rootdev_from_cmdline_with_root_without_dev_prefix(self):
+        """Add /dev prefix to cmdline root when the path lacks the prefix."""
+        self.assertEqual(
+            '/dev/this', util.rootdev_from_cmdline('asdf root=this'))
+
+    def test_rootdev_from_cmdline_with_root_with_label(self):
+        """When cmdline root contains a LABEL, our root is disk/by-label."""
+        self.assertEqual(
+            '/dev/disk/by-label/unique',
+            util.rootdev_from_cmdline('asdf root=LABEL=unique'))
+
+    def test_rootdev_from_cmdline_with_root_with_uuid(self):
+        """When cmdline root contains a UUID, our root is disk/by-uuid."""
+        self.assertEqual(
+            '/dev/disk/by-uuid/adsfdsaf-adsf',
+            util.rootdev_from_cmdline('asdf root=UUID=adsfdsaf-adsf'))
+
+
+class TestMaybeGetDevicePathAsWritableBlock(CiTestCase):
+
+    with_logs = True
+
+    def test_maybe_get_writable_device_path_none_on_overlayroot(self):
+        """When devpath is overlayroot (on MAAS), is_dev_writable is False."""
+        info = 'does not matter'
+        devpath = wrap_and_call(
+            'cloudinit.config.cc_resizefs.util',
+            {'is_container': {'return_value': False}},
+            maybe_get_writable_device_path, 'overlayroot', info, LOG)
+        self.assertIsNone(devpath)
+        self.assertIn(
+            "Not attempting to resize devpath 'overlayroot'",
+            self.logs.getvalue())
+
+    def test_maybe_get_writable_device_path_warns_missing_cmdline_root(self):
+        """When root doesn't exist and isn't on the cmdline, log a warning."""
+        info = 'does not matter'
+
+        def fake_mount_info(path, log):
+            self.assertEqual('/', path)
+            self.assertEqual(LOG, log)
+            return ('/dev/root', 'ext4', '/')
+
+        exists_mock_path = 'cloudinit.config.cc_resizefs.os.path.exists'
+        with mock.patch(exists_mock_path) as m_exists:
+            m_exists.return_value = False
+            devpath = wrap_and_call(
+                'cloudinit.config.cc_resizefs.util',
+                {'is_container': {'return_value': False},
+                 'get_mount_info': {'side_effect': fake_mount_info},
+                 'get_cmdline': {'return_value': 'BOOT_IMAGE=/vmlinuz.efi'}},
+                maybe_get_writable_device_path, '/dev/root', info, LOG)
+        self.assertIsNone(devpath)
+        logs = self.logs.getvalue()
+        self.assertIn("WARNING: Unable to find device '/dev/root'", logs)
'/dev/root'", logs) + + def test_maybe_get_writable_device_path_does_not_exist(self): + """When devpath does not exist, a warning is logged.""" + info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none' + devpath = wrap_and_call( + 'cloudinit.config.cc_resizefs.util', + {'is_container': {'return_value': False}}, + maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG) + self.assertIsNone(devpath) + self.assertIn( + "WARNING: Device '/dev/I/dont/exist' did not exist." + ' cannot resize: %s' % info, + self.logs.getvalue()) + + def test_maybe_get_writable_device_path_does_not_exist_in_container(self): + """When devpath does not exist in a container, log a debug message.""" + info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none' + devpath = wrap_and_call( + 'cloudinit.config.cc_resizefs.util', + {'is_container': {'return_value': True}}, + maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG) + self.assertIsNone(devpath) + self.assertIn( + "DEBUG: Device '/dev/I/dont/exist' did not exist in container." + ' cannot resize: %s' % info, + self.logs.getvalue()) + + def test_maybe_get_writable_device_path_raises_oserror(self): + """When unexpected OSError is raises by os.stat it is reraised.""" + info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none' + with self.assertRaises(OSError) as context_manager: + wrap_and_call( + 'cloudinit.config.cc_resizefs', + {'util.is_container': {'return_value': True}, + 'os.stat': {'side_effect': OSError('Something unexpected')}}, + maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG) + self.assertEqual( + 'Something unexpected', str(context_manager.exception)) + + def test_maybe_get_writable_device_path_non_block(self): + """When device is not a block device, emit warning return False.""" + fake_devpath = self.tmp_path('dev/readwrite') + util.write_file(fake_devpath, '', mode=0o600) # read-write + info = 'dev=/dev/root mnt_point=/ path={0}'.format(fake_devpath) + + devpath = wrap_and_call( + 'cloudinit.config.cc_resizefs.util', + {'is_container': {'return_value': False}}, + maybe_get_writable_device_path, fake_devpath, info, LOG) + self.assertIsNone(devpath) + self.assertIn( + "WARNING: device '{0}' not a block device. cannot resize".format( + fake_devpath), + self.logs.getvalue()) + + def test_maybe_get_writable_device_path_non_block_on_container(self): + """When device is non-block device in container, emit debug log.""" + fake_devpath = self.tmp_path('dev/readwrite') + util.write_file(fake_devpath, '', mode=0o600) # read-write + info = 'dev=/dev/root mnt_point=/ path={0}'.format(fake_devpath) + + devpath = wrap_and_call( + 'cloudinit.config.cc_resizefs.util', + {'is_container': {'return_value': True}}, + maybe_get_writable_device_path, fake_devpath, info, LOG) + self.assertIsNone(devpath) + self.assertIn( + "DEBUG: device '{0}' not a block device in container." + ' cannot resize'.format(fake_devpath), + self.logs.getvalue()) + + def test_maybe_get_writable_device_path_returns_cmdline_root(self): + """When root device is UUID in kernel commandline, update devpath.""" + # XXX Long-term we want to use FilesystemMocking test to avoid + # touching os.stat. + FakeStat = namedtuple( + 'FakeStat', ['st_mode', 'st_size', 'st_mtime']) # minimal def. 
+        info = 'dev=/dev/root mnt_point=/ path=/does/not/matter'
+        devpath = wrap_and_call(
+            'cloudinit.config.cc_resizefs',
+            {'util.get_cmdline': {'return_value': 'asdf root=UUID=my-uuid'},
+             'util.is_container': False,
+             'os.path.exists': False,  # /dev/root doesn't exist
+             'os.stat': {
+                 'return_value': FakeStat(25008, 0, 1)}  # char block device
+             },
+            maybe_get_writable_device_path, '/dev/root', info, LOG)
+        self.assertEqual('/dev/disk/by-uuid/my-uuid', devpath)
+        self.assertIn(
+            "DEBUG: Converted /dev/root to '/dev/disk/by-uuid/my-uuid'"
+            " per kernel cmdline",
+            self.logs.getvalue())
+
+    @mock.patch('cloudinit.util.mount_is_read_write')
+    @mock.patch('cloudinit.config.cc_resizefs.os.path.isdir')
+    def test_resize_btrfs_mount_is_ro(self, m_is_dir, m_is_rw):
+        """Do not resize / directly if it is read-only. (LP: #1734787)."""
+        m_is_rw.return_value = False
+        m_is_dir.return_value = True
+        self.assertEqual(
+            ('btrfs', 'filesystem', 'resize', 'max', '//.snapshots'),
+            _resize_btrfs("/", "/dev/sda1"))
+
+    @mock.patch('cloudinit.util.mount_is_read_write')
+    @mock.patch('cloudinit.config.cc_resizefs.os.path.isdir')
+    def test_resize_btrfs_mount_is_rw(self, m_is_dir, m_is_rw):
+        """Resize / directly if it is read-write. (LP: #1734787)."""
+        m_is_rw.return_value = True
+        m_is_dir.return_value = True
+        self.assertEqual(
+            ('btrfs', 'filesystem', 'resize', 'max', '/'),
+            _resize_btrfs("/", "/dev/sda1"))
+
+    @mock.patch('cloudinit.util.is_container', return_value=True)
+    @mock.patch('cloudinit.util.is_FreeBSD')
+    def test_maybe_get_writable_device_path_zfs_freebsd(self, freebsd,
+                                                        m_is_container):
+        freebsd.return_value = True
+        info = 'dev=gpt/system mnt_point=/ path=/'
+        devpth = maybe_get_writable_device_path('gpt/system', info, LOG)
+        self.assertEqual('gpt/system', devpth)
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_resolv_conf.py b/tests/unittests/config/test_cc_resolv_conf.py
new file mode 100644
index 00000000..0aa90a23
--- /dev/null
+++ b/tests/unittests/config/test_cc_resolv_conf.py
@@ -0,0 +1,193 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import logging
+import os
+import shutil
+import tempfile
+import pytest
+from copy import deepcopy
+from unittest import mock
+
+from cloudinit import cloud
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import util
+
+from tests.unittests import helpers as t_help
+from tests.unittests.util import MockDistro
+from cloudinit.config import cc_resolv_conf
+from cloudinit.config.cc_resolv_conf import generate_resolv_conf
+
+LOG = logging.getLogger(__name__)
+EXPECTED_HEADER = """\
+# Your system has been configured with 'manage-resolv-conf' set to true.
+# As a result, cloud-init has written this file with configuration data
+# that it has been provided. Cloud-init, by default, will write this file
+# a single time (PER_ONCE).
+#\n\n""" + + +class TestResolvConf(t_help.FilesystemMockingTestCase): + with_logs = True + cfg = {'manage_resolv_conf': True, 'resolv_conf': {}} + + def setUp(self): + super(TestResolvConf, self).setUp() + self.tmp = tempfile.mkdtemp() + util.ensure_dir(os.path.join(self.tmp, 'data')) + self.addCleanup(shutil.rmtree, self.tmp) + + def _fetch_distro(self, kind, conf=None): + cls = distros.fetch(kind) + paths = helpers.Paths({'cloud_dir': self.tmp}) + conf = {} if conf is None else conf + return cls(kind, conf, paths) + + def call_resolv_conf_handler(self, distro_name, conf, cc=None): + if not cc: + ds = None + distro = self._fetch_distro(distro_name, conf) + paths = helpers.Paths({'cloud_dir': self.tmp}) + cc = cloud.Cloud(ds, paths, {}, distro, None) + cc_resolv_conf.handle('cc_resolv_conf', conf, cc, LOG, []) + + @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") + def test_resolv_conf_systemd_resolved(self, m_render_to_file): + self.call_resolv_conf_handler('photon', self.cfg) + + assert [ + mock.call(mock.ANY, '/etc/systemd/resolved.conf', mock.ANY) + ] == m_render_to_file.call_args_list + + @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") + def test_resolv_conf_no_param(self, m_render_to_file): + tmp = deepcopy(self.cfg) + self.logs.truncate(0) + tmp.pop('resolv_conf') + self.call_resolv_conf_handler('photon', tmp) + + self.assertIn('manage_resolv_conf True but no parameters provided', + self.logs.getvalue()) + assert [ + mock.call(mock.ANY, '/etc/systemd/resolved.conf', mock.ANY) + ] not in m_render_to_file.call_args_list + + @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") + def test_resolv_conf_manage_resolv_conf_false(self, m_render_to_file): + tmp = deepcopy(self.cfg) + self.logs.truncate(0) + tmp['manage_resolv_conf'] = False + self.call_resolv_conf_handler('photon', tmp) + self.assertIn("'manage_resolv_conf' present but set to False", + self.logs.getvalue()) + assert [ + mock.call(mock.ANY, '/etc/systemd/resolved.conf', mock.ANY) + ] not in m_render_to_file.call_args_list + + @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") + def test_resolv_conf_etc_resolv_conf(self, m_render_to_file): + self.call_resolv_conf_handler('rhel', self.cfg) + + assert [ + mock.call(mock.ANY, '/etc/resolv.conf', mock.ANY) + ] == m_render_to_file.call_args_list + + @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") + def test_resolv_conf_invalid_resolve_conf_fn(self, m_render_to_file): + ds = None + distro = self._fetch_distro('rhel', self.cfg) + paths = helpers.Paths({'cloud_dir': self.tmp}) + cc = cloud.Cloud(ds, paths, {}, distro, None) + cc.distro.resolve_conf_fn = 'bla' + + self.logs.truncate(0) + self.call_resolv_conf_handler('rhel', self.cfg, cc) + + self.assertIn('No template found, not rendering resolve configs', + self.logs.getvalue()) + + assert [ + mock.call(mock.ANY, '/etc/resolv.conf', mock.ANY) + ] not in m_render_to_file.call_args_list + + +class TestGenerateResolvConf: + + dist = MockDistro() + tmpl_fn = "templates/resolv.conf.tmpl" + + @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") + def test_dist_resolv_conf_fn(self, m_render_to_file): + self.dist.resolve_conf_fn = "/tmp/resolv-test.conf" + generate_resolv_conf(self.tmpl_fn, + mock.MagicMock(), + self.dist.resolve_conf_fn) + + assert [ + mock.call(mock.ANY, self.dist.resolve_conf_fn, mock.ANY) + ] == m_render_to_file.call_args_list + + 
@mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") + def test_target_fname_is_used_if_passed(self, m_render_to_file): + path = "/use/this/path" + generate_resolv_conf(self.tmpl_fn, mock.MagicMock(), path) + + assert [ + mock.call(mock.ANY, path, mock.ANY) + ] == m_render_to_file.call_args_list + + # Patch in templater so we can assert on the actual generated content + @mock.patch("cloudinit.templater.util.write_file") + # Parameterise with the value to be passed to generate_resolv_conf as the + # params parameter, and the expected line after the header as + # expected_extra_line. + @pytest.mark.parametrize( + "params,expected_extra_line", + [ + # No options + ({}, None), + # Just a true flag + ({"options": {"foo": True}}, "options foo"), + # Just a false flag + ({"options": {"foo": False}}, None), + # Just an option + ({"options": {"foo": "some_value"}}, "options foo:some_value"), + # A true flag and an option + ( + {"options": {"foo": "some_value", "bar": True}}, + "options bar foo:some_value", + ), + # Two options + ( + {"options": {"foo": "some_value", "bar": "other_value"}}, + "options bar:other_value foo:some_value", + ), + # Everything + ( + { + "options": { + "foo": "some_value", + "bar": "other_value", + "baz": False, + "spam": True, + } + }, + "options spam bar:other_value foo:some_value", + ), + ], + ) + def test_flags_and_options( + self, m_write_file, params, expected_extra_line + ): + target_fn = "/etc/resolv.conf" + generate_resolv_conf(self.tmpl_fn, params, target_fn) + + expected_content = EXPECTED_HEADER + if expected_extra_line is not None: + # If we have any extra lines, expect a trailing newline + expected_content += "\n".join([expected_extra_line, ""]) + assert [ + mock.call(mock.ANY, expected_content, mode=mock.ANY) + ] == m_write_file.call_args_list + +# vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_rh_subscription.py b/tests/unittests/config/test_cc_rh_subscription.py new file mode 100644 index 00000000..bd7ebc98 --- /dev/null +++ b/tests/unittests/config/test_cc_rh_subscription.py @@ -0,0 +1,234 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Tests for registering RHEL subscription via rh_subscription.""" + +import copy +import logging + +from cloudinit.config import cc_rh_subscription +from cloudinit import subp + +from tests.unittests.helpers import CiTestCase, mock + +SUBMGR = cc_rh_subscription.SubscriptionManager +SUB_MAN_CLI = 'cloudinit.config.cc_rh_subscription._sub_man_cli' + + +@mock.patch(SUB_MAN_CLI) +class GoodTests(CiTestCase): + with_logs = True + + def setUp(self): + super(GoodTests, self).setUp() + self.name = "cc_rh_subscription" + self.cloud_init = None + self.log = logging.getLogger("good_tests") + self.args = [] + self.handle = cc_rh_subscription.handle + + self.config = {'rh_subscription': + {'username': 'scooby@do.com', + 'password': 'scooby-snacks' + }} + self.config_full = {'rh_subscription': + {'username': 'scooby@do.com', + 'password': 'scooby-snacks', + 'auto-attach': True, + 'service-level': 'self-support', + 'add-pool': ['pool1', 'pool2', 'pool3'], + 'enable-repo': ['repo1', 'repo2', 'repo3'], + 'disable-repo': ['repo4', 'repo5'] + }} + + def test_already_registered(self, m_sman_cli): + ''' + Emulates a system that is already registered. 
+        a non-ProcessExecutionError from is_registered()
+        '''
+        self.handle(self.name, self.config, self.cloud_init,
+                    self.log, self.args)
+        self.assertEqual(m_sman_cli.call_count, 1)
+        self.assertIn('System is already registered', self.logs.getvalue())
+
+    def test_simple_registration(self, m_sman_cli):
+        '''
+        Simple registration with username and password
+        '''
+        reg = "The system has been registered with ID:" \
+              " 12345678-abde-abcde-1234-1234567890abc"
+        m_sman_cli.side_effect = [subp.ProcessExecutionError, (reg, 'bar')]
+        self.handle(self.name, self.config, self.cloud_init,
+                    self.log, self.args)
+        self.assertIn(mock.call(['identity']), m_sman_cli.call_args_list)
+        self.assertIn(mock.call(['register', '--username=scooby@do.com',
+                                 '--password=scooby-snacks'],
+                                logstring_val=True),
+                      m_sman_cli.call_args_list)
+        self.assertIn('rh_subscription plugin completed successfully',
+                      self.logs.getvalue())
+        self.assertEqual(m_sman_cli.call_count, 2)
+
+    @mock.patch.object(cc_rh_subscription.SubscriptionManager, "_getRepos")
+    def test_update_repos_disable_with_none(self, m_get_repos, m_sman_cli):
+        cfg = copy.deepcopy(self.config)
+        m_get_repos.return_value = ([], ['repo1'])
+        cfg['rh_subscription'].update(
+            {'enable-repo': ['repo1'], 'disable-repo': None})
+        mysm = cc_rh_subscription.SubscriptionManager(cfg)
+        self.assertEqual(True, mysm.update_repos())
+        m_get_repos.assert_called_with()
+        self.assertEqual(m_sman_cli.call_args_list,
+                         [mock.call(['repos', '--enable=repo1'])])
+
+    def test_full_registration(self, m_sman_cli):
+        '''
+        Registration with auto-attach, service-level, adding pools,
+        and enabling and disabling yum repos
+        '''
+        call_lists = []
+        call_lists.append(['attach', '--pool=pool1', '--pool=pool3'])
+        call_lists.append(['repos', '--disable=repo5', '--enable=repo2',
+                           '--enable=repo3'])
+        call_lists.append(['attach', '--auto', '--servicelevel=self-support'])
+        reg = "The system has been registered with ID:" \
+              " 12345678-abde-abcde-1234-1234567890abc"
+        m_sman_cli.side_effect = [
+            subp.ProcessExecutionError,
+            (reg, 'bar'),
+            ('Service level set to: self-support', ''),
+            ('pool1\npool3\n', ''), ('pool2\n', ''), ('', ''),
+            ('Repo ID: repo1\nRepo ID: repo5\n', ''),
+            ('Repo ID: repo2\nRepo ID: repo3\nRepo ID: repo4', ''),
+            ('', '')]
+        self.handle(self.name, self.config_full, self.cloud_init,
+                    self.log, self.args)
+        self.assertEqual(m_sman_cli.call_count, 9)
+        for call in call_lists:
+            self.assertIn(mock.call(call), m_sman_cli.call_args_list)
+        self.assertIn("rh_subscription plugin completed successfully",
+                      self.logs.getvalue())
+
+
+@mock.patch(SUB_MAN_CLI)
+class TestBadInput(CiTestCase):
+    with_logs = True
+    name = "cc_rh_subscription"
+    cloud_init = None
+    log = logging.getLogger("bad_tests")
+    args = []
+    SM = cc_rh_subscription.SubscriptionManager
+    reg = "The system has been registered with ID:" \
+          " 12345678-abde-abcde-1234-1234567890abc"
+
+    config_no_password = {'rh_subscription':
+                          {'username': 'scooby@do.com'
+                           }}
+
+    config_no_key = {'rh_subscription':
+                     {'activation-key': '1234abcde',
+                      }}
+
+    config_service = {'rh_subscription':
+                      {'username': 'scooby@do.com',
+                       'password': 'scooby-snacks',
+                       'service-level': 'self-support'
+                       }}
+
+    config_badpool = {'rh_subscription':
+                      {'username': 'scooby@do.com',
+                       'password': 'scooby-snacks',
+                       'add-pool': 'not_a_list'
+                       }}
+    config_badrepo = {'rh_subscription':
+                      {'username': 'scooby@do.com',
+                       'password': 'scooby-snacks',
+                       'enable-repo': 'not_a_list'
+                       }}
+    config_badkey = {'rh_subscription':
+                     {'activation-key': 'abcdef1234',
+                      'fookey': 'bar',
+                      'org': '123',
+                      }}
+
+    def setUp(self):
+        super(TestBadInput, self).setUp()
+        self.handle = cc_rh_subscription.handle
+
+    def assert_logged_warnings(self, warnings):
+        logs = self.logs.getvalue()
+        missing = [w for w in warnings if "WARNING: " + w not in logs]
+        self.assertEqual([], missing, "Missing expected warnings.")
+
+    def test_no_password(self, m_sman_cli):
+        '''Attempt to register without the password key/value.'''
+        m_sman_cli.side_effect = [subp.ProcessExecutionError,
+                                  (self.reg, 'bar')]
+        self.handle(self.name, self.config_no_password, self.cloud_init,
+                    self.log, self.args)
+        self.assertEqual(m_sman_cli.call_count, 0)
+
+    def test_no_org(self, m_sman_cli):
+        '''Attempt to register without the org key/value.'''
+        m_sman_cli.side_effect = [subp.ProcessExecutionError]
+        self.handle(self.name, self.config_no_key, self.cloud_init,
+                    self.log, self.args)
+        m_sman_cli.assert_called_with(['identity'])
+        self.assertEqual(m_sman_cli.call_count, 1)
+        self.assert_logged_warnings((
+            'Unable to register system due to incomplete information.',
+            'Use either activationkey and org *or* userid and password',
+            'Registration failed or did not run completely',
+            'rh_subscription plugin did not complete successfully'))
+
+    def test_service_level_without_auto(self, m_sman_cli):
+        '''Attempt to register using service-level without auto-attach key.'''
+        m_sman_cli.side_effect = [subp.ProcessExecutionError,
+                                  (self.reg, 'bar')]
+        self.handle(self.name, self.config_service, self.cloud_init,
+                    self.log, self.args)
+        self.assertEqual(m_sman_cli.call_count, 1)
+        self.assert_logged_warnings((
+            'The service-level key must be used in conjunction with ',
+            'rh_subscription plugin did not complete successfully'))
+
+    def test_pool_not_a_list(self, m_sman_cli):
+        '''
+        Register with pools that are not in the format of a list
+        '''
+        m_sman_cli.side_effect = [subp.ProcessExecutionError,
+                                  (self.reg, 'bar')]
+        self.handle(self.name, self.config_badpool, self.cloud_init,
+                    self.log, self.args)
+        self.assertEqual(m_sman_cli.call_count, 2)
+        self.assert_logged_warnings((
+            'Pools must in the format of a list',
+            'rh_subscription plugin did not complete successfully'))
+
+    def test_repo_not_a_list(self, m_sman_cli):
+        '''
+        Register with repos that are not in the format of a list
+        '''
+        m_sman_cli.side_effect = [subp.ProcessExecutionError,
+                                  (self.reg, 'bar')]
+        self.handle(self.name, self.config_badrepo, self.cloud_init,
+                    self.log, self.args)
+        self.assertEqual(m_sman_cli.call_count, 2)
+        self.assert_logged_warnings((
+            'Repo IDs must in the format of a list.',
+            'Unable to add or remove repos',
+            'rh_subscription plugin did not complete successfully'))
+
+    def test_bad_key_value(self, m_sman_cli):
+        '''
+        Attempt to register with a key that we don't know
+        '''
+        m_sman_cli.side_effect = [subp.ProcessExecutionError,
+                                  (self.reg, 'bar')]
+        self.handle(self.name, self.config_badkey, self.cloud_init,
+                    self.log, self.args)
+        self.assertEqual(m_sman_cli.call_count, 1)
+        self.assert_logged_warnings((
+            'fookey is not a valid key for rh_subscription. Valid keys are:',
+            'rh_subscription plugin did not complete successfully'))
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_rsyslog.py b/tests/unittests/config/test_cc_rsyslog.py
new file mode 100644
index 00000000..bc147dac
--- /dev/null
+++ b/tests/unittests/config/test_cc_rsyslog.py
@@ -0,0 +1,178 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+import shutil
+import tempfile
+
+from cloudinit.config.cc_rsyslog import (
+    apply_rsyslog_changes, DEF_DIR, DEF_FILENAME, DEF_RELOAD, load_config,
+    parse_remotes_line, remotes_to_rsyslog_cfg)
+from cloudinit import util
+
+from tests.unittests import helpers as t_help
+
+
+class TestLoadConfig(t_help.TestCase):
+    def setUp(self):
+        super(TestLoadConfig, self).setUp()
+        self.basecfg = {
+            'config_filename': DEF_FILENAME,
+            'config_dir': DEF_DIR,
+            'service_reload_command': DEF_RELOAD,
+            'configs': [],
+            'remotes': {},
+        }
+
+    def test_legacy_full(self):
+        found = load_config({
+            'rsyslog': ['*.* @192.168.1.1'],
+            'rsyslog_dir': "mydir",
+            'rsyslog_filename': "myfilename"})
+        self.basecfg.update({
+            'configs': ['*.* @192.168.1.1'],
+            'config_dir': "mydir",
+            'config_filename': 'myfilename',
+            'service_reload_command': 'auto'}
+        )
+
+        self.assertEqual(found, self.basecfg)
+
+    def test_legacy_defaults(self):
+        found = load_config({
+            'rsyslog': ['*.* @192.168.1.1']})
+        self.basecfg.update({
+            'configs': ['*.* @192.168.1.1']})
+        self.assertEqual(found, self.basecfg)
+
+    def test_new_defaults(self):
+        self.assertEqual(load_config({}), self.basecfg)
+
+    def test_new_configs(self):
+        cfgs = ['*.* myhost', '*.* my2host']
+        self.basecfg.update({'configs': cfgs})
+        self.assertEqual(
+            load_config({'rsyslog': {'configs': cfgs}}),
+            self.basecfg)
+
+
+class TestApplyChanges(t_help.TestCase):
+    def setUp(self):
+        self.tmp = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, self.tmp)
+
+    def test_simple(self):
+        cfgline = "*.* foohost"
+        changed = apply_rsyslog_changes(
+            configs=[cfgline], def_fname="foo.cfg", cfg_dir=self.tmp)
+
+        fname = os.path.join(self.tmp, "foo.cfg")
+        self.assertEqual([fname], changed)
+        self.assertEqual(
+            util.load_file(fname), cfgline + "\n")
+
+    def test_multiple_files(self):
+        configs = [
+            '*.* foohost',
+            {'content': 'abc', 'filename': 'my.cfg'},
+            {'content': 'filefoo-content',
+             'filename': os.path.join(self.tmp, 'mydir/mycfg')},
+        ]
+
+        changed = apply_rsyslog_changes(
+            configs=configs, def_fname="default.cfg", cfg_dir=self.tmp)
+
+        expected = [
+            (os.path.join(self.tmp, "default.cfg"),
+             "*.* foohost\n"),
+            (os.path.join(self.tmp, "my.cfg"), "abc\n"),
+            (os.path.join(self.tmp, "mydir/mycfg"), "filefoo-content\n"),
+        ]
+        self.assertEqual([f[0] for f in expected], changed)
+        actual = []
+        for fname, _content in expected:
+            util.load_file(fname)
+            actual.append((fname, util.load_file(fname),))
+        self.assertEqual(expected, actual)
+
+    def test_repeat_def(self):
+        configs = ['*.* foohost', "*.warn otherhost"]
+
+        changed = apply_rsyslog_changes(
+            configs=configs, def_fname="default.cfg", cfg_dir=self.tmp)
+
+        fname = os.path.join(self.tmp, "default.cfg")
+        self.assertEqual([fname], changed)
+
+        expected_content = '\n'.join([c for c in configs]) + '\n'
+        found_content = util.load_file(fname)
+        self.assertEqual(expected_content, found_content)
+
+    def test_multiline_content(self):
+        configs = ['line1', 'line2\nline3\n']
+
+        apply_rsyslog_changes(
+            configs=configs, def_fname="default.cfg", cfg_dir=self.tmp)
+
+        fname = os.path.join(self.tmp, "default.cfg")
+        expected_content = '\n'.join([c for c in configs])
+        found_content = util.load_file(fname)
+        self.assertEqual(expected_content, found_content)
+
+
+class TestParseRemotesLine(t_help.TestCase):
+    def test_valid_port(self):
+        r = parse_remotes_line("foo:9")
+        self.assertEqual(9, r.port)
+
+    def test_invalid_port(self):
+        with self.assertRaises(ValueError):
+            parse_remotes_line("*.* foo:abc")
+
+    def test_valid_ipv6(self):
test_valid_ipv6(self):
+        r = parse_remotes_line("*.* [::1]")
+        self.assertEqual("*.* @[::1]", str(r))
+
+    def test_valid_ipv6_with_port(self):
+        r = parse_remotes_line("*.* [::1]:100")
+        self.assertEqual(r.port, 100)
+        self.assertEqual(r.addr, "::1")
+        self.assertEqual("*.* @[::1]:100", str(r))
+
+    def test_invalid_multiple_colon(self):
+        with self.assertRaises(ValueError):
+            parse_remotes_line("*.* ::1:100")
+
+    def test_name_in_string(self):
+        r = parse_remotes_line("syslog.host", name="foobar")
+        self.assertEqual("*.* @syslog.host # foobar", str(r))
+
+
+class TestRemotesToSyslog(t_help.TestCase):
+    def test_simple(self):
+        # str rendered line must appear in remotes_to_rsyslog_cfg return
+        mycfg = "*.* myhost"
+        myline = str(parse_remotes_line(mycfg, name="myname"))
+        r = remotes_to_rsyslog_cfg({'myname': mycfg})
+        lines = r.splitlines()
+        self.assertEqual(1, len(lines))
+        self.assertTrue(myline in r.splitlines())
+
+    def test_header_footer(self):
+        header = "#foo head"
+        footer = "#foo foot"
+        r = remotes_to_rsyslog_cfg(
+            {'myname': "*.* myhost"}, header=header, footer=footer)
+        lines = r.splitlines()
+        self.assertEqual(header, lines[0])
+        self.assertEqual(footer, lines[-1])
+
+    def test_with_empty_or_null(self):
+        mycfg = "*.* myhost"
+        myline = str(parse_remotes_line(mycfg, name="myname"))
+        r = remotes_to_rsyslog_cfg(
+            {'myname': mycfg, 'removed': None, 'removed2': ""})
+        lines = r.splitlines()
+        self.assertEqual(1, len(lines))
+        self.assertTrue(myline in r.splitlines())
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_runcmd.py b/tests/unittests/config/test_cc_runcmd.py
new file mode 100644
index 00000000..01de6af0
--- /dev/null
+++ b/tests/unittests/config/test_cc_runcmd.py
@@ -0,0 +1,129 @@
+# This file is part of cloud-init. See LICENSE file for license information.
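+
+# Illustrative user-data accepted by this module (inferred from the
+# tests below, not from module documentation): runcmd is an array whose
+# items are strings or arrays of strings, e.g.
+#
+#   runcmd:
+#     - ['ls', '/']
+#     - echo "done"
+#
+# handle() shellifies the list into
+# /var/lib/cloud/instances/<instance-id>/scripts/runcmd (mode 0700);
+# a non-array value such as `runcmd: 1` logs a schema warning and then
+# fails to shellify with a TypeError.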
+import logging +import os +import stat +from unittest.mock import patch + +from cloudinit.config.cc_runcmd import handle, schema +from cloudinit import (helpers, subp, util) +from tests.unittests.helpers import ( + CiTestCase, FilesystemMockingTestCase, SchemaTestCaseMixin, + skipUnlessJsonSchema) + +from tests.unittests.util import get_cloud + +LOG = logging.getLogger(__name__) + + +class TestRuncmd(FilesystemMockingTestCase): + + with_logs = True + + def setUp(self): + super(TestRuncmd, self).setUp() + self.subp = subp.subp + self.new_root = self.tmp_dir() + self.patchUtils(self.new_root) + self.paths = helpers.Paths({'scripts': self.new_root}) + + def test_handler_skip_if_no_runcmd(self): + """When the provided config doesn't contain runcmd, skip it.""" + cfg = {} + mycloud = get_cloud(paths=self.paths) + handle('notimportant', cfg, mycloud, LOG, None) + self.assertIn( + "Skipping module named notimportant, no 'runcmd' key", + self.logs.getvalue()) + + @patch('cloudinit.util.shellify') + def test_runcmd_shellify_fails(self, cls): + """When shellify fails throw exception""" + cls.side_effect = TypeError("patched shellify") + valid_config = {'runcmd': ['echo 42']} + cc = get_cloud(paths=self.paths) + with self.assertRaises(TypeError) as cm: + with self.allow_subp(['/bin/sh']): + handle('cc_runcmd', valid_config, cc, LOG, None) + self.assertIn("Failed to shellify", str(cm.exception)) + + def test_handler_invalid_command_set(self): + """Commands which can't be converted to shell will raise errors.""" + invalid_config = {'runcmd': 1} + cc = get_cloud(paths=self.paths) + with self.assertRaises(TypeError) as cm: + handle('cc_runcmd', invalid_config, cc, LOG, []) + self.assertIn( + 'Failed to shellify 1 into file' + ' /var/lib/cloud/instances/iid-datasource-none/scripts/runcmd', + str(cm.exception)) + + @skipUnlessJsonSchema() + def test_handler_schema_validation_warns_non_array_type(self): + """Schema validation warns of non-array type for runcmd key. + + Schema validation is not strict, so runcmd attempts to shellify the + invalid content. + """ + invalid_config = {'runcmd': 1} + cc = get_cloud(paths=self.paths) + with self.assertRaises(TypeError) as cm: + handle('cc_runcmd', invalid_config, cc, LOG, []) + self.assertIn( + 'Invalid config:\nruncmd: 1 is not of type \'array\'', + self.logs.getvalue()) + self.assertIn('Failed to shellify', str(cm.exception)) + + @skipUnlessJsonSchema() + def test_handler_schema_validation_warns_non_array_item_type(self): + """Schema validation warns of non-array or string runcmd items. + + Schema validation is not strict, so runcmd attempts to shellify the + invalid content. 
+ """ + invalid_config = { + 'runcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]} + cc = get_cloud(paths=self.paths) + with self.assertRaises(TypeError) as cm: + handle('cc_runcmd', invalid_config, cc, LOG, []) + expected_warnings = [ + 'runcmd.1: 20 is not valid under any of the given schemas', + 'runcmd.3: {\'a\': \'n\'} is not valid under any of the given' + ' schema' + ] + logs = self.logs.getvalue() + for warning in expected_warnings: + self.assertIn(warning, logs) + self.assertIn('Failed to shellify', str(cm.exception)) + + def test_handler_write_valid_runcmd_schema_to_file(self): + """Valid runcmd schema is written to a runcmd shell script.""" + valid_config = {'runcmd': [['ls', '/']]} + cc = get_cloud(paths=self.paths) + handle('cc_runcmd', valid_config, cc, LOG, []) + runcmd_file = os.path.join( + self.new_root, + 'var/lib/cloud/instances/iid-datasource-none/scripts/runcmd') + self.assertEqual("#!/bin/sh\n'ls' '/'\n", util.load_file(runcmd_file)) + file_stat = os.stat(runcmd_file) + self.assertEqual(0o700, stat.S_IMODE(file_stat.st_mode)) + + +@skipUnlessJsonSchema() +class TestSchema(CiTestCase, SchemaTestCaseMixin): + """Directly test schema rather than through handle.""" + + schema = schema + + def test_duplicates_are_fine_array_array(self): + """Duplicated commands array/array entries are allowed.""" + self.assertSchemaValid( + [["echo", "bye"], ["echo", "bye"]], + "command entries can be duplicate.") + + def test_duplicates_are_fine_array_string(self): + """Duplicated commands array/string entries are allowed.""" + self.assertSchemaValid( + ["echo bye", "echo bye"], + "command entries can be duplicate.") + +# vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_seed_random.py b/tests/unittests/config/test_cc_seed_random.py new file mode 100644 index 00000000..cfd67dce --- /dev/null +++ b/tests/unittests/config/test_cc_seed_random.py @@ -0,0 +1,205 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. +# +# Author: Juerg Haefliger +# +# Based on test_handler_set_hostname.py +# +# This file is part of cloud-init. See LICENSE file for license information. 
+import gzip +import logging +import tempfile +from io import BytesIO + +from cloudinit import subp +from cloudinit import util +from cloudinit.config import cc_seed_random +from tests.unittests import helpers as t_help + +from tests.unittests.util import get_cloud + +LOG = logging.getLogger(__name__) + + +class TestRandomSeed(t_help.TestCase): + def setUp(self): + super(TestRandomSeed, self).setUp() + self._seed_file = tempfile.mktemp() + self.unapply = [] + + # by default 'which' has nothing in its path + self.apply_patches([(subp, 'which', self._which)]) + self.apply_patches([(subp, 'subp', self._subp)]) + self.subp_called = [] + self.whichdata = {} + + def tearDown(self): + apply_patches([i for i in reversed(self.unapply)]) + util.del_file(self._seed_file) + + def apply_patches(self, patches): + ret = apply_patches(patches) + self.unapply += ret + + def _which(self, program): + return self.whichdata.get(program) + + def _subp(self, *args, **kwargs): + # supports subp calling with cmd as args or kwargs + if 'args' not in kwargs: + kwargs['args'] = args[0] + self.subp_called.append(kwargs) + return + + def _compress(self, text): + contents = BytesIO() + gz_fh = gzip.GzipFile(mode='wb', fileobj=contents) + gz_fh.write(text) + gz_fh.close() + return contents.getvalue() + + def test_append_random(self): + cfg = { + 'random_seed': { + 'file': self._seed_file, + 'data': 'tiny-tim-was-here', + } + } + cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) + contents = util.load_file(self._seed_file) + self.assertEqual("tiny-tim-was-here", contents) + + def test_append_random_unknown_encoding(self): + data = self._compress(b"tiny-toe") + cfg = { + 'random_seed': { + 'file': self._seed_file, + 'data': data, + 'encoding': 'special_encoding', + } + } + self.assertRaises(IOError, cc_seed_random.handle, 'test', cfg, + get_cloud('ubuntu'), LOG, []) + + def test_append_random_gzip(self): + data = self._compress(b"tiny-toe") + cfg = { + 'random_seed': { + 'file': self._seed_file, + 'data': data, + 'encoding': 'gzip', + } + } + cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) + contents = util.load_file(self._seed_file) + self.assertEqual("tiny-toe", contents) + + def test_append_random_gz(self): + data = self._compress(b"big-toe") + cfg = { + 'random_seed': { + 'file': self._seed_file, + 'data': data, + 'encoding': 'gz', + } + } + cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) + contents = util.load_file(self._seed_file) + self.assertEqual("big-toe", contents) + + def test_append_random_base64(self): + data = util.b64e('bubbles') + cfg = { + 'random_seed': { + 'file': self._seed_file, + 'data': data, + 'encoding': 'base64', + } + } + cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) + contents = util.load_file(self._seed_file) + self.assertEqual("bubbles", contents) + + def test_append_random_b64(self): + data = util.b64e('kit-kat') + cfg = { + 'random_seed': { + 'file': self._seed_file, + 'data': data, + 'encoding': 'b64', + } + } + cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) + contents = util.load_file(self._seed_file) + self.assertEqual("kit-kat", contents) + + def test_append_random_metadata(self): + cfg = { + 'random_seed': { + 'file': self._seed_file, + 'data': 'tiny-tim-was-here', + } + } + c = get_cloud('ubuntu', metadata={'random_seed': '-so-was-josh'}) + cc_seed_random.handle('test', cfg, c, LOG, []) + contents = util.load_file(self._seed_file) + self.assertEqual('tiny-tim-was-here-so-was-josh', contents) + + def 
test_seed_command_provided_and_available(self):
+        c = get_cloud('ubuntu')
+        self.whichdata = {'pollinate': '/usr/bin/pollinate'}
+        cfg = {'random_seed': {'command': ['pollinate', '-q']}}
+        cc_seed_random.handle('test', cfg, c, LOG, [])
+
+        subp_args = [f['args'] for f in self.subp_called]
+        self.assertIn(['pollinate', '-q'], subp_args)
+
+    def test_seed_command_not_provided(self):
+        c = get_cloud('ubuntu')
+        self.whichdata = {}
+        cc_seed_random.handle('test', {}, c, LOG, [])
+
+        # subp should not have been called as which would say not available
+        self.assertFalse(self.subp_called)
+
+    def test_unavailable_seed_command_and_required_raises_error(self):
+        c = get_cloud('ubuntu')
+        self.whichdata = {}
+        cfg = {'random_seed': {'command': ['THIS_NO_COMMAND'],
+                               'command_required': True}}
+        self.assertRaises(ValueError, cc_seed_random.handle,
+                          'test', cfg, c, LOG, [])
+
+    def test_seed_command_and_required(self):
+        c = get_cloud('ubuntu')
+        self.whichdata = {'foo': 'foo'}
+        cfg = {'random_seed': {'command_required': True, 'command': ['foo']}}
+        cc_seed_random.handle('test', cfg, c, LOG, [])
+
+        self.assertIn(['foo'], [f['args'] for f in self.subp_called])
+
+    def test_file_in_environment_for_command(self):
+        c = get_cloud('ubuntu')
+        self.whichdata = {'foo': 'foo'}
+        cfg = {'random_seed': {'command_required': True, 'command': ['foo'],
+                               'file': self._seed_file}}
+        cc_seed_random.handle('test', cfg, c, LOG, [])
+
+        # this just insists that the first time subp was called,
+        # RANDOM_SEED_FILE was correctly set up in its environment
+        subp_env = [f['env'] for f in self.subp_called]
+        self.assertEqual(subp_env[0].get('RANDOM_SEED_FILE'), self._seed_file)
+
+
+def apply_patches(patches):
+    ret = []
+    for (ref, name, replace) in patches:
+        if replace is None:
+            continue
+        orig = getattr(ref, name)
+        setattr(ref, name, replace)
+        ret.append((ref, name, orig))
+    return ret
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_set_hostname.py b/tests/unittests/config/test_cc_set_hostname.py
new file mode 100644
index 00000000..b9a783a7
--- /dev/null
+++ b/tests/unittests/config/test_cc_set_hostname.py
@@ -0,0 +1,207 @@
+# This file is part of cloud-init. See LICENSE file for license information.
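+
+# Illustrative user-data handled by this module (shapes taken from the
+# tests below):
+#
+#   hostname: blah
+#   fqdn: blah.yahoo.com
+#   prefer_fqdn_over_hostname: true   # write the fqdn, not the short name
+#
+# The tests assert the per-distro targets: /etc/hostname on Debian,
+# HOSTNAME= in /etc/sysconfig/network on RHEL, and hostnamectl on
+# Photon.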
+ +from cloudinit.config import cc_set_hostname + +from cloudinit import cloud +from cloudinit import distros +from cloudinit import helpers +from cloudinit import util + +from tests.unittests import helpers as t_help + +from configobj import ConfigObj +import logging +import os +import shutil +import tempfile +from io import BytesIO +from unittest import mock + +LOG = logging.getLogger(__name__) + + +class TestHostname(t_help.FilesystemMockingTestCase): + + with_logs = True + + def setUp(self): + super(TestHostname, self).setUp() + self.tmp = tempfile.mkdtemp() + util.ensure_dir(os.path.join(self.tmp, 'data')) + self.addCleanup(shutil.rmtree, self.tmp) + + def _fetch_distro(self, kind, conf=None): + cls = distros.fetch(kind) + paths = helpers.Paths({'cloud_dir': self.tmp}) + conf = {} if conf is None else conf + return cls(kind, conf, paths) + + def test_debian_write_hostname_prefer_fqdn(self): + cfg = { + 'hostname': 'blah', + 'prefer_fqdn_over_hostname': True, + 'fqdn': 'blah.yahoo.com', + } + distro = self._fetch_distro('debian', cfg) + paths = helpers.Paths({'cloud_dir': self.tmp}) + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + cc_set_hostname.handle('cc_set_hostname', + cfg, cc, LOG, []) + contents = util.load_file("/etc/hostname") + self.assertEqual('blah.yahoo.com', contents.strip()) + + @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False) + def test_rhel_write_hostname_prefer_hostname(self, m_uses_systemd): + cfg = { + 'hostname': 'blah', + 'prefer_fqdn_over_hostname': False, + 'fqdn': 'blah.yahoo.com', + } + distro = self._fetch_distro('rhel', cfg) + paths = helpers.Paths({'cloud_dir': self.tmp}) + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + cc_set_hostname.handle('cc_set_hostname', + cfg, cc, LOG, []) + contents = util.load_file("/etc/sysconfig/network", decode=False) + n_cfg = ConfigObj(BytesIO(contents)) + self.assertEqual( + {'HOSTNAME': 'blah'}, + dict(n_cfg)) + + @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False) + def test_write_hostname_rhel(self, m_uses_systemd): + cfg = { + 'hostname': 'blah', + 'fqdn': 'blah.blah.blah.yahoo.com' + } + distro = self._fetch_distro('rhel') + paths = helpers.Paths({'cloud_dir': self.tmp}) + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + cc_set_hostname.handle('cc_set_hostname', + cfg, cc, LOG, []) + contents = util.load_file("/etc/sysconfig/network", decode=False) + n_cfg = ConfigObj(BytesIO(contents)) + self.assertEqual( + {'HOSTNAME': 'blah.blah.blah.yahoo.com'}, + dict(n_cfg)) + + def test_write_hostname_debian(self): + cfg = { + 'hostname': 'blah', + 'fqdn': 'blah.blah.blah.yahoo.com', + } + distro = self._fetch_distro('debian') + paths = helpers.Paths({'cloud_dir': self.tmp}) + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + cc_set_hostname.handle('cc_set_hostname', + cfg, cc, LOG, []) + contents = util.load_file("/etc/hostname") + self.assertEqual('blah', contents.strip()) + + @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False) + def test_write_hostname_sles(self, m_uses_systemd): + cfg = { + 'hostname': 'blah.blah.blah.suse.com', + } + distro = self._fetch_distro('sles') + paths = helpers.Paths({'cloud_dir': self.tmp}) + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + cc_set_hostname.handle('cc_set_hostname', cfg, cc, LOG, []) + contents = 
util.load_file(distro.hostname_conf_fn) + self.assertEqual('blah', contents.strip()) + + @mock.patch('cloudinit.distros.photon.subp.subp') + def test_photon_hostname(self, m_subp): + cfg1 = { + 'hostname': 'photon', + 'prefer_fqdn_over_hostname': True, + 'fqdn': 'test1.vmware.com', + } + cfg2 = { + 'hostname': 'photon', + 'prefer_fqdn_over_hostname': False, + 'fqdn': 'test2.vmware.com', + } + + ds = None + m_subp.return_value = (None, None) + distro = self._fetch_distro('photon', cfg1) + paths = helpers.Paths({'cloud_dir': self.tmp}) + cc = cloud.Cloud(ds, paths, {}, distro, None) + for c in [cfg1, cfg2]: + cc_set_hostname.handle('cc_set_hostname', c, cc, LOG, []) + print("\n", m_subp.call_args_list) + if c['prefer_fqdn_over_hostname']: + assert [ + mock.call(['hostnamectl', 'set-hostname', c['fqdn']], + capture=True) + ] in m_subp.call_args_list + assert [ + mock.call(['hostnamectl', 'set-hostname', c['hostname']], + capture=True) + ] not in m_subp.call_args_list + else: + assert [ + mock.call(['hostnamectl', 'set-hostname', c['hostname']], + capture=True) + ] in m_subp.call_args_list + assert [ + mock.call(['hostnamectl', 'set-hostname', c['fqdn']], + capture=True) + ] not in m_subp.call_args_list + + def test_multiple_calls_skips_unchanged_hostname(self): + """Only new hostname or fqdn values will generate a hostname call.""" + distro = self._fetch_distro('debian') + paths = helpers.Paths({'cloud_dir': self.tmp}) + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + cc_set_hostname.handle( + 'cc_set_hostname', {'hostname': 'hostname1.me.com'}, cc, LOG, []) + contents = util.load_file("/etc/hostname") + self.assertEqual('hostname1', contents.strip()) + cc_set_hostname.handle( + 'cc_set_hostname', {'hostname': 'hostname1.me.com'}, cc, LOG, []) + self.assertIn( + 'DEBUG: No hostname changes. Skipping set-hostname\n', + self.logs.getvalue()) + cc_set_hostname.handle( + 'cc_set_hostname', {'hostname': 'hostname2.me.com'}, cc, LOG, []) + contents = util.load_file("/etc/hostname") + self.assertEqual('hostname2', contents.strip()) + self.assertIn( + 'Non-persistently setting the system hostname to hostname2', + self.logs.getvalue()) + + def test_error_on_distro_set_hostname_errors(self): + """Raise SetHostnameError on exceptions from distro.set_hostname.""" + distro = self._fetch_distro('debian') + + def set_hostname_error(hostname, fqdn): + raise Exception("OOPS on: %s" % fqdn) + + distro.set_hostname = set_hostname_error + paths = helpers.Paths({'cloud_dir': self.tmp}) + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + with self.assertRaises(cc_set_hostname.SetHostnameError) as ctx_mgr: + cc_set_hostname.handle( + 'somename', {'hostname': 'hostname1.me.com'}, cc, LOG, []) + self.assertEqual( + 'Failed to set the hostname to hostname1.me.com (hostname1):' + ' OOPS on: hostname1.me.com', + str(ctx_mgr.exception)) + +# vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_set_passwords.py b/tests/unittests/config/test_cc_set_passwords.py new file mode 100644 index 00000000..9bcd0439 --- /dev/null +++ b/tests/unittests/config/test_cc_set_passwords.py @@ -0,0 +1,162 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from unittest import mock + +from cloudinit.config import cc_set_passwords as setpass +from tests.unittests.helpers import CiTestCase +from cloudinit import util + +MODPATH = "cloudinit.config.cc_set_passwords." 
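+
+# Illustrative user-data handled by this module (shapes taken from the
+# tests below):
+#
+#   ssh_pwauth: true               # false/unchanged are also accepted
+#   chpasswd:
+#     expire: false
+#     list:
+#       - 'ubuntu:$6$<salt>$<hash>'   # pre-hashed entry, applied as-is
+#       - 'root:RANDOM'               # generate a random password and
+#                                     # emit it to the console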
+
+
+class TestHandleSshPwauth(CiTestCase):
+    """Test cc_set_passwords handling of ssh_pwauth in handle_ssh_pwauth."""
+
+    with_logs = True
+
+    @mock.patch("cloudinit.distros.subp.subp")
+    def test_unknown_value_logs_warning(self, m_subp):
+        cloud = self.tmp_cloud(distro='ubuntu')
+        setpass.handle_ssh_pwauth("floo", cloud.distro)
+        self.assertIn("Unrecognized value: ssh_pwauth=floo",
+                      self.logs.getvalue())
+        m_subp.assert_not_called()
+
+    @mock.patch(MODPATH + "update_ssh_config", return_value=True)
+    @mock.patch("cloudinit.distros.subp.subp")
+    def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config):
+        """If systemctl is in the service cmd: systemctl restart ssh."""
+        cloud = self.tmp_cloud(distro='ubuntu')
+        cloud.distro.init_cmd = ['systemctl']
+        setpass.handle_ssh_pwauth(True, cloud.distro)
+        m_subp.assert_called_with(
+            ["systemctl", "restart", "ssh"], capture=True)
+
+    @mock.patch(MODPATH + "update_ssh_config", return_value=False)
+    @mock.patch("cloudinit.distros.subp.subp")
+    def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config):
+        """If config is not updated, then no system restart should be done."""
+        cloud = self.tmp_cloud(distro='ubuntu')
+        setpass.handle_ssh_pwauth(True, cloud.distro)
+        m_subp.assert_not_called()
+        self.assertIn("No need to restart SSH", self.logs.getvalue())
+
+    @mock.patch(MODPATH + "update_ssh_config", return_value=True)
+    @mock.patch("cloudinit.distros.subp.subp")
+    def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config):
+        """If 'unchanged', then no updates to config and no restart."""
+        cloud = self.tmp_cloud(distro='ubuntu')
+        setpass.handle_ssh_pwauth("unchanged", cloud.distro)
+        m_update_ssh_config.assert_not_called()
+        m_subp.assert_not_called()
+
+    @mock.patch("cloudinit.distros.subp.subp")
+    def test_valid_change_values(self, m_subp):
+        """If value is a valid change value, then update should be called."""
+        cloud = self.tmp_cloud(distro='ubuntu')
+        upname = MODPATH + "update_ssh_config"
+        optname = "PasswordAuthentication"
+        for value in util.FALSE_STRINGS + util.TRUE_STRINGS:
+            optval = "yes" if value in util.TRUE_STRINGS else "no"
+            with mock.patch(upname, return_value=False) as m_update:
+                setpass.handle_ssh_pwauth(value, cloud.distro)
+                m_update.assert_called_with({optname: optval})
+        m_subp.assert_not_called()
+
+
+class TestSetPasswordsHandle(CiTestCase):
+    """Test cc_set_passwords.handle"""
+
+    with_logs = True
+
+    def test_handle_on_empty_config(self, *args):
+        """handle logs that no password has changed when config is empty."""
+        cloud = self.tmp_cloud(distro='ubuntu')
+        setpass.handle(
+            'IGNORED', cfg={}, cloud=cloud, log=self.logger, args=[])
+        self.assertEqual(
+            "DEBUG: Leaving SSH config 'PasswordAuthentication' unchanged. "
+            'ssh_pwauth=None\n',
+            self.logs.getvalue())
+
+    def test_handle_on_chpasswd_list_parses_common_hashes(self):
+        """handle parses common password hashes."""
+        cloud = self.tmp_cloud(distro='ubuntu')
+        valid_hashed_pwds = [
+            'root:$2y$10$8BQjxjVByHA/Ee.O1bCXtO8S7Y5WojbXWqnqYpUW.BrPx/'
+            'Dlew1Va',
+            'ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoakMMC7dR52q'
+            'SDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx3oo1']
+        cfg = {'chpasswd': {'list': valid_hashed_pwds}}
+        with mock.patch.object(setpass, 'chpasswd') as chpasswd:
+            setpass.handle(
+                'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
+        self.assertIn(
+            'DEBUG: Handling input for chpasswd as list.',
+            self.logs.getvalue())
+        self.assertIn(
+            "DEBUG: Setting hashed password for ['root', 'ubuntu']",
+            self.logs.getvalue())
+        valid = '\n'.join(valid_hashed_pwds) + '\n'
+        called = chpasswd.call_args[0][1]
+        self.assertEqual(valid, called)
+
+    @mock.patch(MODPATH + "util.is_BSD")
+    @mock.patch(MODPATH + "subp.subp")
+    def test_bsd_calls_custom_pw_cmds_to_set_and_expire_passwords(
+            self, m_subp, m_is_bsd):
+        """BSD systems do not use chpasswd."""
+        m_is_bsd.return_value = True
+        cloud = self.tmp_cloud(distro='freebsd')
+        valid_pwds = ['ubuntu:passw0rd']
+        cfg = {'chpasswd': {'list': valid_pwds}}
+        setpass.handle(
+            'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
+        self.assertEqual([
+            mock.call(['pw', 'usermod', 'ubuntu', '-h', '0'], data='passw0rd',
+                      logstring="chpasswd for ubuntu"),
+            mock.call(['pw', 'usermod', 'ubuntu', '-p', '01-Jan-1970'])],
+            m_subp.call_args_list)
+
+    @mock.patch(MODPATH + "util.multi_log")
+    @mock.patch(MODPATH + "subp.subp")
+    def test_handle_on_chpasswd_list_creates_random_passwords(
+        self, m_subp, m_multi_log
+    ):
+        """handle sets random passwords when requested in the chpasswd list."""
+        cloud = self.tmp_cloud(distro='ubuntu')
+        valid_random_pwds = [
+            'root:R',
+            'ubuntu:RANDOM']
+        cfg = {'chpasswd': {'expire': 'false', 'list': valid_random_pwds}}
+        with mock.patch.object(setpass, 'chpasswd') as chpasswd:
+            setpass.handle(
+                'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
+        self.assertIn(
+            'DEBUG: Handling input for chpasswd as list.',
+            self.logs.getvalue())
+        self.assertEqual(1, chpasswd.call_count)
+        passwords, _ = chpasswd.call_args
+        user_pass = {
+            user: password
+            for user, password
+            in (line.split(":") for line in passwords[1].splitlines())
+        }
+
+        self.assertEqual(1, m_multi_log.call_count)
+        self.assertEqual(
+            mock.call(mock.ANY, stderr=False, fallback_to_stdout=False),
+            m_multi_log.call_args
+        )
+
+        self.assertEqual(set(["root", "ubuntu"]), set(user_pass.keys()))
+        written_lines = m_multi_log.call_args[0][0].splitlines()
+        for password in user_pass.values():
+            for line in written_lines:
+                if password in line:
+                    break
+            else:
+                self.fail("Password not emitted to console")
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_snap.py b/tests/unittests/config/test_cc_snap.py
new file mode 100644
index 00000000..e8113eca
--- /dev/null
+++ b/tests/unittests/config/test_cc_snap.py
@@ -0,0 +1,564 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+ +import re +from io import StringIO + +from cloudinit.config.cc_snap import ( + ASSERTIONS_FILE, add_assertions, handle, maybe_install_squashfuse, + run_commands, schema) +from cloudinit.config.schema import validate_cloudconfig_schema +from cloudinit import util +from tests.unittests.helpers import ( + CiTestCase, SchemaTestCaseMixin, mock, wrap_and_call, skipUnlessJsonSchema) + + +SYSTEM_USER_ASSERTION = """\ +type: system-user +authority-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp +brand-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp +email: foo@bar.com +password: $6$E5YiAuMIPAwX58jG$miomhVNui/vf7f/3ctB/f0RWSKFxG0YXzrJ9rtJ1ikvzt +series: +- 16 +since: 2016-09-10T16:34:00+03:00 +until: 2017-11-10T16:34:00+03:00 +username: baz +sign-key-sha3-384: RuVvnp4n52GilycjfbbTCI3_L8Y6QlIE75wxMc0KzGV3AUQqVd9GuXoj + +AcLBXAQAAQoABgUCV/UU1wAKCRBKnlMoJQLkZVeLD/9/+hIeVywtzsDA3oxl+P+u9D13y9s6svP +Jd6Wnf4FTw6sq1GjBE4ZA7lrwSaRCUJ9Vcsvf2q9OGPY7mOb2TBxaDe0PbUMjrSrqllSSQwhpNI +zG+NxkkKuxsUmLzFa+k9m6cyojNbw5LFhQZBQCGlr3JYqC0tIREq/UsZxj+90TUC87lDJwkU8GF +s4CR+rejZj4itIcDcVxCSnJH6hv6j2JrJskJmvObqTnoOlcab+JXdamXqbldSP3UIhWoyVjqzkj ++to7mXgx+cCUA9+ngNCcfUG+1huGGTWXPCYkZ78HvErcRlIdeo4d3xwtz1cl/w3vYnq9og1XwsP +Yfetr3boig2qs1Y+j/LpsfYBYncgWjeDfAB9ZZaqQz/oc8n87tIPZDJHrusTlBfop8CqcM4xsKS +d+wnEY8e/F24mdSOYmS1vQCIDiRU3MKb6x138Ud6oHXFlRBbBJqMMctPqWDunWzb5QJ7YR0I39q +BrnEqv5NE0G7w6HOJ1LSPG5Hae3P4T2ea+ATgkb03RPr3KnXnzXg4TtBbW1nytdlgoNc/BafE1H +f3NThcq9gwX4xWZ2PAWnqVPYdDMyCtzW3Ck+o6sIzx+dh4gDLPHIi/6TPe/pUuMop9CBpWwez7V +v1z+1+URx6Xlq3Jq18y5pZ6fY3IDJ6km2nQPMzcm4Q==""" + +ACCOUNT_ASSERTION = """\ +type: account-key +authority-id: canonical +revision: 2 +public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0 +account-id: canonical +name: store +since: 2016-04-01T00:00:00.0Z +body-length: 717 +sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswH + +AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9j +qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482 +vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJ +UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuK +Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQG +o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl +VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9 +2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7an +Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIc +vUvV7RjVzv17ut0AEQEAAQ== + +AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsM +WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/b +nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiL +3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kL +eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrY +inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ1 +rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+ +rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWE +aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQ +6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nO +haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpF +yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O9 
+HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi +skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PK +CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjde +ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OF +qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqR +IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3t +oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k""" + + +class FakeCloud(object): + def __init__(self, distro): + self.distro = distro + + +class TestAddAssertions(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestAddAssertions, self).setUp() + self.tmp = self.tmp_dir() + + @mock.patch('cloudinit.config.cc_snap.subp.subp') + def test_add_assertions_on_empty_list(self, m_subp): + """When provided with an empty list, add_assertions does nothing.""" + add_assertions([]) + self.assertEqual('', self.logs.getvalue()) + m_subp.assert_not_called() + + def test_add_assertions_on_non_list_or_dict(self): + """When provided an invalid type, add_assertions raises an error.""" + with self.assertRaises(TypeError) as context_manager: + add_assertions(assertions="I'm Not Valid") + self.assertEqual( + "assertion parameter was not a list or dict: I'm Not Valid", + str(context_manager.exception)) + + @mock.patch('cloudinit.config.cc_snap.subp.subp') + def test_add_assertions_adds_assertions_as_list(self, m_subp): + """When provided with a list, add_assertions adds all assertions.""" + self.assertEqual( + ASSERTIONS_FILE, '/var/lib/cloud/instance/snapd.assertions') + assert_file = self.tmp_path('snapd.assertions', dir=self.tmp) + assertions = [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION] + wrap_and_call( + 'cloudinit.config.cc_snap', + {'ASSERTIONS_FILE': {'new': assert_file}}, + add_assertions, assertions) + self.assertIn( + 'Importing user-provided snap assertions', self.logs.getvalue()) + self.assertIn( + 'sertions', self.logs.getvalue()) + self.assertEqual( + [mock.call(['snap', 'ack', assert_file], capture=True)], + m_subp.call_args_list) + compare_file = self.tmp_path('comparison', dir=self.tmp) + util.write_file(compare_file, '\n'.join(assertions).encode('utf-8')) + self.assertEqual( + util.load_file(compare_file), util.load_file(assert_file)) + + @mock.patch('cloudinit.config.cc_snap.subp.subp') + def test_add_assertions_adds_assertions_as_dict(self, m_subp): + """When provided with a dict, add_assertions adds all assertions.""" + self.assertEqual( + ASSERTIONS_FILE, '/var/lib/cloud/instance/snapd.assertions') + assert_file = self.tmp_path('snapd.assertions', dir=self.tmp) + assertions = {'00': SYSTEM_USER_ASSERTION, '01': ACCOUNT_ASSERTION} + wrap_and_call( + 'cloudinit.config.cc_snap', + {'ASSERTIONS_FILE': {'new': assert_file}}, + add_assertions, assertions) + self.assertIn( + 'Importing user-provided snap assertions', self.logs.getvalue()) + self.assertIn( + "DEBUG: Snap acking: ['type: system-user', 'authority-id: Lqv", + self.logs.getvalue()) + self.assertIn( + "DEBUG: Snap acking: ['type: account-key', 'authority-id: canonic", + self.logs.getvalue()) + self.assertEqual( + [mock.call(['snap', 'ack', assert_file], capture=True)], + m_subp.call_args_list) + compare_file = self.tmp_path('comparison', dir=self.tmp) + combined = '\n'.join(assertions.values()) + util.write_file(compare_file, combined.encode('utf-8')) + self.assertEqual( + util.load_file(compare_file), util.load_file(assert_file)) + + +class TestRunCommands(CiTestCase): + + 
with_logs = True + allowed_subp = [CiTestCase.SUBP_SHELL_TRUE] + + def setUp(self): + super(TestRunCommands, self).setUp() + self.tmp = self.tmp_dir() + + @mock.patch('cloudinit.config.cc_snap.subp.subp') + def test_run_commands_on_empty_list(self, m_subp): + """When provided with an empty list, run_commands does nothing.""" + run_commands([]) + self.assertEqual('', self.logs.getvalue()) + m_subp.assert_not_called() + + def test_run_commands_on_non_list_or_dict(self): + """When provided an invalid type, run_commands raises an error.""" + with self.assertRaises(TypeError) as context_manager: + run_commands(commands="I'm Not Valid") + self.assertEqual( + "commands parameter was not a list or dict: I'm Not Valid", + str(context_manager.exception)) + + def test_run_command_logs_commands_and_exit_codes_to_stderr(self): + """All exit codes are logged to stderr.""" + outfile = self.tmp_path('output.log', dir=self.tmp) + + cmd1 = 'echo "HI" >> %s' % outfile + cmd2 = 'bogus command' + cmd3 = 'echo "MOM" >> %s' % outfile + commands = [cmd1, cmd2, cmd3] + + mock_path = 'cloudinit.config.cc_snap.sys.stderr' + with mock.patch(mock_path, new_callable=StringIO) as m_stderr: + with self.assertRaises(RuntimeError) as context_manager: + run_commands(commands=commands) + + self.assertIsNotNone( + re.search(r'bogus: (command )?not found', + str(context_manager.exception)), + msg='Expected bogus command not found') + expected_stderr_log = '\n'.join([ + 'Begin run command: {cmd}'.format(cmd=cmd1), + 'End run command: exit(0)', + 'Begin run command: {cmd}'.format(cmd=cmd2), + 'ERROR: End run command: exit(127)', + 'Begin run command: {cmd}'.format(cmd=cmd3), + 'End run command: exit(0)\n']) + self.assertEqual(expected_stderr_log, m_stderr.getvalue()) + + def test_run_command_as_lists(self): + """When commands are specified as a list, run them in order.""" + outfile = self.tmp_path('output.log', dir=self.tmp) + + cmd1 = 'echo "HI" >> %s' % outfile + cmd2 = 'echo "MOM" >> %s' % outfile + commands = [cmd1, cmd2] + mock_path = 'cloudinit.config.cc_snap.sys.stderr' + with mock.patch(mock_path, new_callable=StringIO): + run_commands(commands=commands) + + self.assertIn( + 'DEBUG: Running user-provided snap commands', + self.logs.getvalue()) + self.assertEqual('HI\nMOM\n', util.load_file(outfile)) + self.assertIn( + 'WARNING: Non-snap commands in snap config:', self.logs.getvalue()) + + def test_run_command_dict_sorted_as_command_script(self): + """When commands are a dict, sort them and run.""" + outfile = self.tmp_path('output.log', dir=self.tmp) + cmd1 = 'echo "HI" >> %s' % outfile + cmd2 = 'echo "MOM" >> %s' % outfile + commands = {'02': cmd1, '01': cmd2} + mock_path = 'cloudinit.config.cc_snap.sys.stderr' + with mock.patch(mock_path, new_callable=StringIO): + run_commands(commands=commands) + + expected_messages = [ + 'DEBUG: Running user-provided snap commands'] + for message in expected_messages: + self.assertIn(message, self.logs.getvalue()) + self.assertEqual('MOM\nHI\n', util.load_file(outfile)) + + +@skipUnlessJsonSchema() +class TestSchema(CiTestCase, SchemaTestCaseMixin): + + with_logs = True + schema = schema + + def test_schema_warns_on_snap_not_as_dict(self): + """If the snap configuration is not a dict, emit a warning.""" + validate_cloudconfig_schema({'snap': 'wrong type'}, schema) + self.assertEqual( + "WARNING: Invalid config:\nsnap: 'wrong type' is not of type" + " 'object'\n", + self.logs.getvalue()) + + @mock.patch('cloudinit.config.cc_snap.run_commands') + def 
test_schema_disallows_unknown_keys(self, _):
+        """Unknown keys in the snap configuration emit warnings."""
+        validate_cloudconfig_schema(
+            {'snap': {'commands': ['ls'], 'invalid-key': ''}}, schema)
+        self.assertIn(
+            'WARNING: Invalid config:\nsnap: Additional properties are not'
+            " allowed ('invalid-key' was unexpected)",
+            self.logs.getvalue())
+
+    def test_warn_schema_requires_either_commands_or_assertions(self):
+        """Warn when snap configuration lacks both commands and assertions."""
+        validate_cloudconfig_schema(
+            {'snap': {}}, schema)
+        self.assertIn(
+            'WARNING: Invalid config:\nsnap: {} does not have enough'
+            ' properties',
+            self.logs.getvalue())
+
+    @mock.patch('cloudinit.config.cc_snap.run_commands')
+    def test_warn_schema_commands_is_not_list_or_dict(self, _):
+        """Warn when snap:commands config is not a list or dict."""
+        validate_cloudconfig_schema(
+            {'snap': {'commands': 'broken'}}, schema)
+        self.assertEqual(
+            "WARNING: Invalid config:\nsnap.commands: 'broken' is not of type"
+            " 'object', 'array'\n",
+            self.logs.getvalue())
+
+    @mock.patch('cloudinit.config.cc_snap.run_commands')
+    def test_warn_schema_when_commands_is_empty(self, _):
+        """Emit warnings when snap:commands is an empty list or dict."""
+        validate_cloudconfig_schema(
+            {'snap': {'commands': []}}, schema)
+        validate_cloudconfig_schema(
+            {'snap': {'commands': {}}}, schema)
+        self.assertEqual(
+            "WARNING: Invalid config:\nsnap.commands: [] is too short\n"
+            "WARNING: Invalid config:\nsnap.commands: {} does not have enough"
+            " properties\n",
+            self.logs.getvalue())
+
+    @mock.patch('cloudinit.config.cc_snap.run_commands')
+    def test_schema_when_commands_are_list_or_dict(self, _):
+        """No warnings when snap:commands are either a list or dict."""
+        validate_cloudconfig_schema(
+            {'snap': {'commands': ['valid']}}, schema)
+        validate_cloudconfig_schema(
+            {'snap': {'commands': {'01': 'also valid'}}}, schema)
+        self.assertEqual('', self.logs.getvalue())
+
+    @mock.patch('cloudinit.config.cc_snap.run_commands')
+    def test_schema_when_commands_values_are_invalid_type(self, _):
+        """Warnings when snap:commands values are invalid type (e.g. int)"""
+        validate_cloudconfig_schema(
+            {'snap': {'commands': [123]}}, schema)
+        validate_cloudconfig_schema(
+            {'snap': {'commands': {'01': 123}}}, schema)
+        self.assertEqual(
+            "WARNING: Invalid config:\n"
+            "snap.commands.0: 123 is not valid under any of the given"
+            " schemas\n"
+            "WARNING: Invalid config:\n"
+            "snap.commands.01: 123 is not valid under any of the given"
+            " schemas\n",
+            self.logs.getvalue())
+
+    @mock.patch('cloudinit.config.cc_snap.run_commands')
+    def test_schema_when_commands_list_values_are_invalid_type(self, _):
+        """Warnings when snap:commands list values are wrong type (e.g. int)"""
+        validate_cloudconfig_schema(
+            {'snap': {'commands': [["snap", "install", 123]]}}, schema)
+        validate_cloudconfig_schema(
+            {'snap': {'commands': {'01': ["snap", "install", 123]}}}, schema)
+        self.assertEqual(
+            "WARNING: Invalid config:\n"
+            "snap.commands.0: ['snap', 'install', 123] is not valid under any"
+            " of the given schemas\n"
+            "WARNING: Invalid config:\n"
+            "snap.commands.01: ['snap', 'install', 123] is not valid under"
+            " any of the given schemas\n",
+            self.logs.getvalue())
+
+    @mock.patch('cloudinit.config.cc_snap.run_commands')
+    def test_schema_when_assertions_values_are_invalid_type(self, _):
+        """Warnings when snap:assertions values are invalid type (e.g. 
int)""" + validate_cloudconfig_schema( + {'snap': {'assertions': [123]}}, schema) + validate_cloudconfig_schema( + {'snap': {'assertions': {'01': 123}}}, schema) + self.assertEqual( + "WARNING: Invalid config:\n" + "snap.assertions.0: 123 is not of type 'string'\n" + "WARNING: Invalid config:\n" + "snap.assertions.01: 123 is not of type 'string'\n", + self.logs.getvalue()) + + @mock.patch('cloudinit.config.cc_snap.add_assertions') + def test_warn_schema_assertions_is_not_list_or_dict(self, _): + """Warn when snap:assertions config is not a list or dict.""" + validate_cloudconfig_schema( + {'snap': {'assertions': 'broken'}}, schema) + self.assertEqual( + "WARNING: Invalid config:\nsnap.assertions: 'broken' is not of" + " type 'object', 'array'\n", + self.logs.getvalue()) + + @mock.patch('cloudinit.config.cc_snap.add_assertions') + def test_warn_schema_when_assertions_is_empty(self, _): + """Emit warnings when snap:assertions is an empty list or dict.""" + validate_cloudconfig_schema( + {'snap': {'assertions': []}}, schema) + validate_cloudconfig_schema( + {'snap': {'assertions': {}}}, schema) + self.assertEqual( + "WARNING: Invalid config:\nsnap.assertions: [] is too short\n" + "WARNING: Invalid config:\nsnap.assertions: {} does not have" + " enough properties\n", + self.logs.getvalue()) + + @mock.patch('cloudinit.config.cc_snap.add_assertions') + def test_schema_when_assertions_are_list_or_dict(self, _): + """No warnings when snap:assertions are a list or dict.""" + validate_cloudconfig_schema( + {'snap': {'assertions': ['valid']}}, schema) + validate_cloudconfig_schema( + {'snap': {'assertions': {'01': 'also valid'}}}, schema) + self.assertEqual('', self.logs.getvalue()) + + def test_duplicates_are_fine_array_array(self): + """Duplicated commands array/array entries are allowed.""" + self.assertSchemaValid( + {'commands': [["echo", "bye"], ["echo", "bye"]]}, + "command entries can be duplicate.") + + def test_duplicates_are_fine_array_string(self): + """Duplicated commands array/string entries are allowed.""" + self.assertSchemaValid( + {'commands': ["echo bye", "echo bye"]}, + "command entries can be duplicate.") + + def test_duplicates_are_fine_dict_array(self): + """Duplicated commands dict/array entries are allowed.""" + self.assertSchemaValid( + {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}}, + "command entries can be duplicate.") + + def test_duplicates_are_fine_dict_string(self): + """Duplicated commands dict/string entries are allowed.""" + self.assertSchemaValid( + {'commands': {'00': "echo bye", '01': "echo bye"}}, + "command entries can be duplicate.") + + +class TestHandle(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestHandle, self).setUp() + self.tmp = self.tmp_dir() + + @mock.patch('cloudinit.config.cc_snap.run_commands') + @mock.patch('cloudinit.config.cc_snap.add_assertions') + @mock.patch('cloudinit.config.cc_snap.validate_cloudconfig_schema') + def test_handle_no_config(self, m_schema, m_add, m_run): + """When no snap-related configuration is provided, nothing happens.""" + cfg = {} + handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertIn( + "DEBUG: Skipping module named snap, no 'snap' key in config", + self.logs.getvalue()) + m_schema.assert_not_called() + m_add.assert_not_called() + m_run.assert_not_called() + + @mock.patch('cloudinit.config.cc_snap.run_commands') + @mock.patch('cloudinit.config.cc_snap.add_assertions') + @mock.patch('cloudinit.config.cc_snap.maybe_install_squashfuse') + def 
test_handle_skips_squashfuse_when_unconfigured(self, m_squash, m_add,
+                                                       m_run):
+        """When squashfuse_in_container is unset, don't attempt to install."""
+        handle(
+            'snap', cfg={'snap': {}}, cloud=None, log=self.logger, args=None)
+        handle(
+            'snap', cfg={'snap': {'squashfuse_in_container': None}},
+            cloud=None, log=self.logger, args=None)
+        handle(
+            'snap', cfg={'snap': {'squashfuse_in_container': False}},
+            cloud=None, log=self.logger, args=None)
+        self.assertEqual([], m_squash.call_args_list)  # No calls
+        # snap configuration missing assertions and commands will default to []
+        self.assertIn(mock.call([]), m_add.call_args_list)
+        self.assertIn(mock.call([]), m_run.call_args_list)
+
+    @mock.patch('cloudinit.config.cc_snap.maybe_install_squashfuse')
+    def test_handle_tries_to_install_squashfuse(self, m_squash):
+        """If squashfuse_in_container is True, try installing squashfuse."""
+        cfg = {'snap': {'squashfuse_in_container': True}}
+        mycloud = FakeCloud(None)
+        handle('snap', cfg=cfg, cloud=mycloud, log=self.logger, args=None)
+        self.assertEqual(
+            [mock.call(mycloud)], m_squash.call_args_list)
+
+    def test_handle_runs_commands_provided(self):
+        """If commands are specified as a list, run them."""
+        outfile = self.tmp_path('output.log', dir=self.tmp)
+
+        cfg = {
+            'snap': {'commands': ['echo "HI" >> %s' % outfile,
+                                  'echo "MOM" >> %s' % outfile]}}
+        mock_path = 'cloudinit.config.cc_snap.sys.stderr'
+        with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]):
+            with mock.patch(mock_path, new_callable=StringIO):
+                handle('snap', cfg=cfg, cloud=None, log=self.logger, args=None)
+
+        self.assertEqual('HI\nMOM\n', util.load_file(outfile))
+
+    @mock.patch('cloudinit.config.cc_snap.subp.subp')
+    def test_handle_adds_assertions(self, m_subp):
+        """Any configured snap assertions are provided to add_assertions."""
+        assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
+        compare_file = self.tmp_path('comparison', dir=self.tmp)
+        cfg = {
+            'snap': {'assertions': [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION]}}
+        wrap_and_call(
+            'cloudinit.config.cc_snap',
+            {'ASSERTIONS_FILE': {'new': assert_file}},
+            handle, 'snap', cfg=cfg, cloud=None, log=self.logger, args=None)
+        content = '\n'.join(cfg['snap']['assertions'])
+        util.write_file(compare_file, content.encode('utf-8'))
+        self.assertEqual(
+            util.load_file(compare_file), util.load_file(assert_file))
+
+    @mock.patch('cloudinit.config.cc_snap.subp.subp')
+    @skipUnlessJsonSchema()
+    def test_handle_validates_schema(self, m_subp):
+        """Any provided configuration is run through
+        validate_cloudconfig_schema."""
+        assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
+        cfg = {'snap': {'invalid': ''}}  # Generates schema warning
+        wrap_and_call(
+            'cloudinit.config.cc_snap',
+            {'ASSERTIONS_FILE': {'new': assert_file}},
+            handle, 'snap', cfg=cfg, cloud=None, log=self.logger, args=None)
+        self.assertEqual(
+            "WARNING: Invalid config:\nsnap: Additional properties are not"
+            " allowed ('invalid' was unexpected)\n",
+            self.logs.getvalue())
+
+
+class TestMaybeInstallSquashFuse(CiTestCase):
+
+    with_logs = True
+
+    def setUp(self):
+        super(TestMaybeInstallSquashFuse, self).setUp()
+        self.tmp = self.tmp_dir()
+
+    @mock.patch('cloudinit.config.cc_snap.util.is_container')
+    def test_maybe_install_squashfuse_skips_non_containers(self, m_container):
+        """maybe_install_squashfuse does nothing when not on a container."""
+        m_container.return_value = False
+        maybe_install_squashfuse(cloud=FakeCloud(None))
+        self.assertEqual([mock.call()], m_container.call_args_list)
+        self.assertEqual('', self.logs.getvalue())
+
+    @mock.patch('cloudinit.config.cc_snap.util.is_container')
+    def test_maybe_install_squashfuse_raises_install_errors(self, m_container):
+        """maybe_install_squashfuse logs and raises package install errors."""
+        m_container.return_value = True
+        distro = mock.MagicMock()
+        distro.install_packages.side_effect = RuntimeError(
+            'Some apt error')
+        with self.assertRaises(RuntimeError) as context_manager:
+            maybe_install_squashfuse(cloud=FakeCloud(distro))
+        self.assertEqual('Some apt error', str(context_manager.exception))
+        self.assertIn(
+            'Failed to install squashfuse\nTraceback', self.logs.getvalue())
+
+    @mock.patch('cloudinit.config.cc_snap.util.is_container')
+    def test_maybe_install_squashfuse_raises_update_errors(self, m_container):
+        """maybe_install_squashfuse logs and raises package update errors."""
+        m_container.return_value = True
+        distro = mock.MagicMock()
+        distro.update_package_sources.side_effect = RuntimeError(
+            'Some apt error')
+        with self.assertRaises(RuntimeError) as context_manager:
+            maybe_install_squashfuse(cloud=FakeCloud(distro))
+        self.assertEqual('Some apt error', str(context_manager.exception))
+        self.assertIn('Package update failed\nTraceback', self.logs.getvalue())
+
+    @mock.patch('cloudinit.config.cc_snap.util.is_container')
+    def test_maybe_install_squashfuse_happy_path(self, m_container):
+        """maybe_install_squashfuse installs squashfuse inside a container."""
+        m_container.return_value = True
+        distro = mock.MagicMock()  # No errors raised
+        maybe_install_squashfuse(cloud=FakeCloud(distro))
+        self.assertEqual(
+            [mock.call()], distro.update_package_sources.call_args_list)
+        self.assertEqual(
+            [mock.call(['squashfuse'])],
+            distro.install_packages.call_args_list)
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_spacewalk.py b/tests/unittests/config/test_cc_spacewalk.py
new file mode 100644
index 00000000..96efccf0
--- /dev/null
+++ b/tests/unittests/config/test_cc_spacewalk.py
@@ -0,0 +1,42 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.config import cc_spacewalk
+from cloudinit import subp
+
+from tests.unittests import helpers
+
+import logging
+from unittest import mock
+
+LOG = logging.getLogger(__name__)
+
+
+class TestSpacewalk(helpers.TestCase):
+    space_cfg = {
+        'spacewalk': {
+            'server': 'localhost',
+            'profile_name': 'test',
+        }
+    }
+
+    @mock.patch("cloudinit.config.cc_spacewalk.subp.subp")
+    def test_not_is_registered(self, mock_subp):
+        mock_subp.side_effect = subp.ProcessExecutionError(exit_code=1)
+        self.assertFalse(cc_spacewalk.is_registered())
+
+    @mock.patch("cloudinit.config.cc_spacewalk.subp.subp")
+    def test_is_registered(self, mock_subp):
+        mock_subp.side_effect = None
+        self.assertTrue(cc_spacewalk.is_registered())
+
+    @mock.patch("cloudinit.config.cc_spacewalk.subp.subp")
+    def test_do_register(self, mock_subp):
+        cc_spacewalk.do_register(**self.space_cfg['spacewalk'])
+        mock_subp.assert_called_with([
+            'rhnreg_ks',
+            '--serverUrl', 'https://localhost/XMLRPC',
+            '--profilename', 'test',
+            '--sslCACert', cc_spacewalk.def_ca_cert_path,
+        ], capture=False)
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_ssh.py b/tests/unittests/config/test_cc_ssh.py
new file mode 100644
index 00000000..ba179bbf
--- /dev/null
+++ b/tests/unittests/config/test_cc_ssh.py
@@ -0,0 +1,405 @@
+# This file is part of cloud-init. See LICENSE file for license information.
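+
+# Illustrative user-data handled by this module (shapes taken from the
+# tests below):
+#
+#   disable_root: true             # apply DISABLE_USER_OPTS to root's keys
+#   allow_public_ssh_keys: false   # ignore datasource-provided keys
+#   ssh_publish_hostkeys:
+#     enabled: true
+#     blacklist: [dsa, rsa]        # key types never published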
+ +import os.path + +from cloudinit.config import cc_ssh +from cloudinit import ssh_util +from tests.unittests.helpers import CiTestCase, mock +import logging + +LOG = logging.getLogger(__name__) + +MODPATH = "cloudinit.config.cc_ssh." +KEY_NAMES_NO_DSA = [name for name in cc_ssh.GENERATE_KEY_NAMES + if name not in 'dsa'] + + +@mock.patch(MODPATH + "ssh_util.setup_user_keys") +class TestHandleSsh(CiTestCase): + """Test cc_ssh handling of ssh config.""" + + def _publish_hostkey_test_setup(self): + self.test_hostkeys = { + 'dsa': ('ssh-dss', 'AAAAB3NzaC1kc3MAAACB'), + 'ecdsa': ('ecdsa-sha2-nistp256', 'AAAAE2VjZ'), + 'ed25519': ('ssh-ed25519', 'AAAAC3NzaC1lZDI'), + 'rsa': ('ssh-rsa', 'AAAAB3NzaC1yc2EAAA'), + } + self.test_hostkey_files = [] + hostkey_tmpdir = self.tmp_dir() + for key_type in cc_ssh.GENERATE_KEY_NAMES: + key_data = self.test_hostkeys[key_type] + filename = 'ssh_host_%s_key.pub' % key_type + filepath = os.path.join(hostkey_tmpdir, filename) + self.test_hostkey_files.append(filepath) + with open(filepath, 'w') as f: + f.write(' '.join(key_data)) + + cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, 'ssh_host_%s_key') + + def test_apply_credentials_with_user(self, m_setup_keys): + """Apply keys for the given user and root.""" + keys = ["key1"] + user = "clouduser" + cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS) + self.assertEqual([mock.call(set(keys), user), + mock.call(set(keys), "root", options="")], + m_setup_keys.call_args_list) + + def test_apply_credentials_with_no_user(self, m_setup_keys): + """Apply keys for root only.""" + keys = ["key1"] + user = None + cc_ssh.apply_credentials(keys, user, False, ssh_util.DISABLE_USER_OPTS) + self.assertEqual([mock.call(set(keys), "root", options="")], + m_setup_keys.call_args_list) + + def test_apply_credentials_with_user_disable_root(self, m_setup_keys): + """Apply keys for the given user and disable root ssh.""" + keys = ["key1"] + user = "clouduser" + options = ssh_util.DISABLE_USER_OPTS + cc_ssh.apply_credentials(keys, user, True, options) + options = options.replace("$USER", user) + options = options.replace("$DISABLE_USER", "root") + self.assertEqual([mock.call(set(keys), user), + mock.call(set(keys), "root", options=options)], + m_setup_keys.call_args_list) + + def test_apply_credentials_with_no_user_disable_root(self, m_setup_keys): + """Apply keys no user and disable root ssh.""" + keys = ["key1"] + user = None + options = ssh_util.DISABLE_USER_OPTS + cc_ssh.apply_credentials(keys, user, True, options) + options = options.replace("$USER", "NONE") + options = options.replace("$DISABLE_USER", "root") + self.assertEqual([mock.call(set(keys), "root", options=options)], + m_setup_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_no_cfg(self, m_path_exists, m_nug, + m_glob, m_setup_keys): + """Test handle with no config ignores generating existing keyfiles.""" + cfg = {} + keys = ["key1"] + m_glob.return_value = [] # Return no matching keys to prevent removal + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ([], {}) + cc_ssh.PUBLISH_HOST_KEYS = False + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cc_ssh.handle("name", cfg, cloud, LOG, None) + options = ssh_util.DISABLE_USER_OPTS.replace("$USER", "NONE") + options = options.replace("$DISABLE_USER", "root") + 
m_glob.assert_called_once_with('/etc/ssh/ssh_host_*key*') + self.assertIn( + [mock.call('/etc/ssh/ssh_host_rsa_key'), + mock.call('/etc/ssh/ssh_host_dsa_key'), + mock.call('/etc/ssh/ssh_host_ecdsa_key'), + mock.call('/etc/ssh/ssh_host_ed25519_key')], + m_path_exists.call_args_list) + self.assertEqual([mock.call(set(keys), "root", options=options)], + m_setup_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_dont_allow_public_ssh_keys(self, m_path_exists, m_nug, + m_glob, m_setup_keys): + """Test allow_public_ssh_keys=False ignores ssh public keys from + platform. + """ + cfg = {"allow_public_ssh_keys": False} + keys = ["key1"] + user = "clouduser" + m_glob.return_value = [] # Return no matching keys to prevent removal + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cc_ssh.handle("name", cfg, cloud, LOG, None) + + options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user) + options = options.replace("$DISABLE_USER", "root") + self.assertEqual([mock.call(set(), user), + mock.call(set(), "root", options=options)], + m_setup_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_no_cfg_and_default_root(self, m_path_exists, m_nug, + m_glob, m_setup_keys): + """Test handle with no config and a default distro user.""" + cfg = {} + keys = ["key1"] + user = "clouduser" + m_glob.return_value = [] # Return no matching keys to prevent removal + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cc_ssh.handle("name", cfg, cloud, LOG, None) + + options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user) + options = options.replace("$DISABLE_USER", "root") + self.assertEqual([mock.call(set(keys), user), + mock.call(set(keys), "root", options=options)], + m_setup_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_cfg_with_explicit_disable_root(self, m_path_exists, m_nug, + m_glob, m_setup_keys): + """Test handle with explicit disable_root and a default distro user.""" + # This test is identical to test_handle_no_cfg_and_default_root, + # except this uses an explicit cfg value + cfg = {"disable_root": True} + keys = ["key1"] + user = "clouduser" + m_glob.return_value = [] # Return no matching keys to prevent removal + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cc_ssh.handle("name", cfg, cloud, LOG, None) + + options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user) + options = options.replace("$DISABLE_USER", "root") + self.assertEqual([mock.call(set(keys), user), + mock.call(set(keys), "root", options=options)], + m_setup_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + 
"os.path.exists") + def test_handle_cfg_without_disable_root(self, m_path_exists, m_nug, + m_glob, m_setup_keys): + """Test handle with disable_root == False.""" + # When disable_root == False, the ssh redirect for root is skipped + cfg = {"disable_root": False} + keys = ["key1"] + user = "clouduser" + m_glob.return_value = [] # Return no matching keys to prevent removal + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.get_public_ssh_keys = mock.Mock(return_value=keys) + cc_ssh.handle("name", cfg, cloud, LOG, None) + + self.assertEqual([mock.call(set(keys), user), + mock.call(set(keys), "root", options="")], + m_setup_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_default( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = True + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. + m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {} + expected_call = [self.test_hostkeys[key_type] for key_type + in KEY_NAMES_NO_DSA] + cc_ssh.handle("name", cfg, cloud, LOG, None) + self.assertEqual([mock.call(expected_call)], + cloud.datasource.publish_host_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_config_enable( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = False + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. + m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {'ssh_publish_hostkeys': {'enabled': True}} + expected_call = [self.test_hostkeys[key_type] for key_type + in KEY_NAMES_NO_DSA] + cc_ssh.handle("name", cfg, cloud, LOG, None) + self.assertEqual([mock.call(expected_call)], + cloud.datasource.publish_host_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_config_disable( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = True + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. 
+ m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exists to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {'ssh_publish_hostkeys': {'enabled': False}} + cc_ssh.handle("name", cfg, cloud, LOG, None) + cloud.datasource.publish_host_keys.assert_not_called() + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_config_blacklist( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Blacklisted key types are excluded from publishing.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = True + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. + m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exists to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {'ssh_publish_hostkeys': {'enabled': True, + 'blacklist': ['dsa', 'rsa']}} + expected_call = [self.test_hostkeys[key_type] for key_type + in ['ecdsa', 'ed25519']] + cc_ssh.handle("name", cfg, cloud, LOG, None) + self.assertEqual([mock.call(expected_call)], + cloud.datasource.publish_host_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_empty_blacklist( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """An empty blacklist publishes all generated key types.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = True + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. + m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exists to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {'ssh_publish_hostkeys': {'enabled': True, + 'blacklist': []}} + expected_call = [self.test_hostkeys[key_type] for key_type + in cc_ssh.GENERATE_KEY_NAMES] + cc_ssh.handle("name", cfg, cloud, LOG, None) + self.assertEqual([mock.call(expected_call)], + cloud.datasource.publish_host_keys.call_args_list) + + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "util.write_file") + def test_handle_ssh_keys_in_cfg(self, m_write_file, m_nug, m_setup_keys): + """Test handle with ssh keys and certificate.""" + # Populate a config dictionary to pass to handle() as well + # as the expected file-writing calls.
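+        # Each key type contributes a private key, a public key and a
+        # certificate; the key material is expected with mode 384 (the
+        # decimal form of 0o600), while sshd_config keeps its mode.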
+ cfg = {"ssh_keys": {}} + + expected_calls = [] + for key_type in cc_ssh.GENERATE_KEY_NAMES: + private_name = "{}_private".format(key_type) + public_name = "{}_public".format(key_type) + cert_name = "{}_certificate".format(key_type) + + # Actual key contents don't have to be realistic + private_value = "{}_PRIVATE_KEY".format(key_type) + public_value = "{}_PUBLIC_KEY".format(key_type) + cert_value = "{}_CERT_KEY".format(key_type) + + cfg["ssh_keys"][private_name] = private_value + cfg["ssh_keys"][public_name] = public_value + cfg["ssh_keys"][cert_name] = cert_value + + expected_calls.extend([ + mock.call( + '/etc/ssh/ssh_host_{}_key'.format(key_type), + private_value, + 384 + ), + mock.call( + '/etc/ssh/ssh_host_{}_key.pub'.format(key_type), + public_value, + 384 + ), + mock.call( + '/etc/ssh/ssh_host_{}_key-cert.pub'.format(key_type), + cert_value, + 384 + ), + mock.call( + '/etc/ssh/sshd_config', + ('HostCertificate /etc/ssh/ssh_host_{}_key-cert.pub' + '\n'.format(key_type)), + preserve_mode=True + ) + ]) + + # Run the handler. + m_nug.return_value = ([], {}) + with mock.patch(MODPATH + 'ssh_util.parse_ssh_config', + return_value=[]): + cc_ssh.handle("name", cfg, self.tmp_cloud(distro='ubuntu'), + LOG, None) + + # Check that all expected output has been done. + for call_ in expected_calls: + self.assertIn(call_, m_write_file.call_args_list) diff --git a/tests/unittests/config/test_cc_timezone.py b/tests/unittests/config/test_cc_timezone.py new file mode 100644 index 00000000..fb6aab5f --- /dev/null +++ b/tests/unittests/config/test_cc_timezone.py @@ -0,0 +1,54 @@ +# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. +# +# Author: Juerg Haefliger +# +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit.config import cc_timezone + +from cloudinit import util + + +import logging +import shutil +import tempfile +from configobj import ConfigObj +from io import BytesIO + +from tests.unittests import helpers as t_help + +from tests.unittests.util import get_cloud + +LOG = logging.getLogger(__name__) + + +class TestTimezone(t_help.FilesystemMockingTestCase): + def setUp(self): + super(TestTimezone, self).setUp() + self.new_root = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, self.new_root) + self.patchUtils(self.new_root) + self.patchOS(self.new_root) + + def test_set_timezone_sles(self): + + cfg = { + 'timezone': 'Tatooine/Bestine', + } + cc = get_cloud('sles') + + # Create a dummy timezone file + dummy_contents = '0123456789abcdefgh' + util.write_file('/usr/share/zoneinfo/%s' % cfg['timezone'], + dummy_contents) + + cc_timezone.handle('cc_timezone', cfg, cc, LOG, []) + + contents = util.load_file('/etc/sysconfig/clock', decode=False) + n_cfg = ConfigObj(BytesIO(contents)) + self.assertEqual({'TIMEZONE': cfg['timezone']}, dict(n_cfg)) + + contents = util.load_file('/etc/localtime') + self.assertEqual(dummy_contents, contents.strip()) + +# vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_ubuntu_advantage.py b/tests/unittests/config/test_cc_ubuntu_advantage.py new file mode 100644 index 00000000..8d0c9665 --- /dev/null +++ b/tests/unittests/config/test_cc_ubuntu_advantage.py @@ -0,0 +1,333 @@ +# This file is part of cloud-init. See LICENSE file for license information.
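+# These tests exercise cc_ubuntu_advantage, which shells out to the 'ua'
+# client: 'ua attach <token>' and then 'ua enable <service>' per requested
+# service. A rough cloud-config sketch, with keys as used in the tests below:
+#
+#   ubuntu_advantage:
+#     token: <contract token>
+#     enable: [esm, fips]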
+ +from cloudinit.config.cc_ubuntu_advantage import ( + configure_ua, handle, maybe_install_ua_tools, schema) +from cloudinit.config.schema import validate_cloudconfig_schema +from cloudinit import subp +from tests.unittests.helpers import ( + CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema) + + +# Module path used in mocks +MPATH = 'cloudinit.config.cc_ubuntu_advantage' + + +class FakeCloud(object): + def __init__(self, distro): + self.distro = distro + + +class TestConfigureUA(CiTestCase): + + with_logs = True + allowed_subp = [CiTestCase.SUBP_SHELL_TRUE] + + def setUp(self): + super(TestConfigureUA, self).setUp() + self.tmp = self.tmp_dir() + + @mock.patch('%s.subp.subp' % MPATH) + def test_configure_ua_attach_error(self, m_subp): + """Errors from ua attach command are raised.""" + m_subp.side_effect = subp.ProcessExecutionError( + 'Invalid token SomeToken') + with self.assertRaises(RuntimeError) as context_manager: + configure_ua(token='SomeToken') + self.assertEqual( + 'Failure attaching Ubuntu Advantage:\nUnexpected error while' + ' running command.\nCommand: -\nExit code: -\nReason: -\n' + 'Stdout: Invalid token SomeToken\nStderr: -', + str(context_manager.exception)) + + @mock.patch('%s.subp.subp' % MPATH) + def test_configure_ua_attach_with_token(self, m_subp): + """When token is provided, attach the machine to ua using the token.""" + configure_ua(token='SomeToken') + m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken']) + self.assertEqual( + 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', + self.logs.getvalue()) + + @mock.patch('%s.subp.subp' % MPATH) + def test_configure_ua_attach_on_service_error(self, m_subp): + """all services should be enabled and then any failures raised""" + + def fake_subp(cmd, capture=None): + fail_cmds = [['ua', 'enable', svc] for svc in ['esm', 'cc']] + if cmd in fail_cmds and capture: + svc = cmd[-1] + raise subp.ProcessExecutionError( + 'Invalid {} credentials'.format(svc.upper())) + + m_subp.side_effect = fake_subp + + with self.assertRaises(RuntimeError) as context_manager: + configure_ua(token='SomeToken', enable=['esm', 'cc', 'fips']) + self.assertEqual( + m_subp.call_args_list, + [mock.call(['ua', 'attach', 'SomeToken']), + mock.call(['ua', 'enable', 'esm'], capture=True), + mock.call(['ua', 'enable', 'cc'], capture=True), + mock.call(['ua', 'enable', 'fips'], capture=True)]) + self.assertIn( + 'WARNING: Failure enabling "esm":\nUnexpected error' + ' while running command.\nCommand: -\nExit code: -\nReason: -\n' + 'Stdout: Invalid ESM credentials\nStderr: -\n', + self.logs.getvalue()) + self.assertIn( + 'WARNING: Failure enabling "cc":\nUnexpected error' + ' while running command.\nCommand: -\nExit code: -\nReason: -\n' + 'Stdout: Invalid CC credentials\nStderr: -\n', + self.logs.getvalue()) + self.assertEqual( + 'Failure enabling Ubuntu Advantage service(s): "esm", "cc"', + str(context_manager.exception)) + + @mock.patch('%s.subp.subp' % MPATH) + def test_configure_ua_attach_with_empty_services(self, m_subp): + """When services is an empty list, do not auto-enable attach.""" + configure_ua(token='SomeToken', enable=[]) + m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken']) + self.assertEqual( + 'DEBUG: Attaching to Ubuntu Advantage. 
ua attach SomeToken\n', + self.logs.getvalue()) + + @mock.patch('%s.subp.subp' % MPATH) + def test_configure_ua_attach_with_specific_services(self, m_subp): + """When services is a list, only enable specific services.""" + configure_ua(token='SomeToken', enable=['fips']) + self.assertEqual( + m_subp.call_args_list, + [mock.call(['ua', 'attach', 'SomeToken']), + mock.call(['ua', 'enable', 'fips'], capture=True)]) + self.assertEqual( + 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', + self.logs.getvalue()) + + @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) + @mock.patch('%s.subp.subp' % MPATH) + def test_configure_ua_attach_with_string_services(self, m_subp): + """When services is a string, treat it as a singleton list and warn""" + configure_ua(token='SomeToken', enable='fips') + self.assertEqual( + m_subp.call_args_list, + [mock.call(['ua', 'attach', 'SomeToken']), + mock.call(['ua', 'enable', 'fips'], capture=True)]) + self.assertEqual( + 'WARNING: ubuntu_advantage: enable should be a list, not a' + ' string; treating as a single enable\n' + 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', + self.logs.getvalue()) + + @mock.patch('%s.subp.subp' % MPATH) + def test_configure_ua_attach_with_weird_services(self, m_subp): + """When services is neither a string nor a list, warn but attach""" + configure_ua(token='SomeToken', enable={'deffo': 'wont work'}) + self.assertEqual( + m_subp.call_args_list, + [mock.call(['ua', 'attach', 'SomeToken'])]) + self.assertEqual( + 'WARNING: ubuntu_advantage: enable should be a list, not a' + ' dict; skipping enabling services\n' + 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', + self.logs.getvalue()) + + +@skipUnlessJsonSchema() +class TestSchema(CiTestCase, SchemaTestCaseMixin): + + with_logs = True + schema = schema + + @mock.patch('%s.maybe_install_ua_tools' % MPATH) + @mock.patch('%s.configure_ua' % MPATH) + def test_schema_warns_on_ubuntu_advantage_not_dict(self, _cfg, _): + """If ubuntu_advantage configuration is not a dict, emit a warning.""" + validate_cloudconfig_schema({'ubuntu_advantage': 'wrong type'}, schema) + self.assertEqual( + "WARNING: Invalid config:\nubuntu_advantage: 'wrong type' is not" + " of type 'object'\n", + self.logs.getvalue()) + + @mock.patch('%s.maybe_install_ua_tools' % MPATH) + @mock.patch('%s.configure_ua' % MPATH) + def test_schema_disallows_unknown_keys(self, _cfg, _): + """Unknown keys in ubuntu_advantage configuration emit warnings.""" + validate_cloudconfig_schema( + {'ubuntu_advantage': {'token': 'winner', 'invalid-key': ''}}, + schema) + self.assertIn( + 'WARNING: Invalid config:\nubuntu_advantage: Additional properties' + " are not allowed ('invalid-key' was unexpected)", + self.logs.getvalue()) + + @mock.patch('%s.maybe_install_ua_tools' % MPATH) + @mock.patch('%s.configure_ua' % MPATH) + def test_warn_schema_requires_token(self, _cfg, _): + """Warn if ubuntu_advantage configuration lacks token.""" + validate_cloudconfig_schema( + {'ubuntu_advantage': {'enable': ['esm']}}, schema) + self.assertEqual( + "WARNING: Invalid config:\nubuntu_advantage:" + " 'token' is a required property\n", self.logs.getvalue()) + + @mock.patch('%s.maybe_install_ua_tools' % MPATH) + @mock.patch('%s.configure_ua' % MPATH) + def test_warn_schema_services_is_not_list_or_dict(self, _cfg, _): + """Warn when ubuntu_advantage:enable config is not a list.""" + validate_cloudconfig_schema( + {'ubuntu_advantage': {'enable': 'needslist'}}, schema) + self.assertEqual( + "WARNING: Invalid 
config:\nubuntu_advantage: 'token' is a" + " required property\nubuntu_advantage.enable: 'needslist'" + " is not of type 'array'\n", + self.logs.getvalue()) + + +class TestHandle(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestHandle, self).setUp() + self.tmp = self.tmp_dir() + + @mock.patch('%s.validate_cloudconfig_schema' % MPATH) + def test_handle_no_config(self, m_schema): + """When no ua-related configuration is provided, nothing happens.""" + cfg = {} + handle('ua-test', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertIn( + "DEBUG: Skipping module named ua-test, no 'ubuntu_advantage'" + ' configuration found', + self.logs.getvalue()) + m_schema.assert_not_called() + + @mock.patch('%s.configure_ua' % MPATH) + @mock.patch('%s.maybe_install_ua_tools' % MPATH) + def test_handle_tries_to_install_ubuntu_advantage_tools( + self, m_install, m_cfg): + """If ubuntu_advantage is provided, try installing ua-tools package.""" + cfg = {'ubuntu_advantage': {'token': 'valid'}} + mycloud = FakeCloud(None) + handle('nomatter', cfg=cfg, cloud=mycloud, log=self.logger, args=None) + m_install.assert_called_once_with(mycloud) + + @mock.patch('%s.configure_ua' % MPATH) + @mock.patch('%s.maybe_install_ua_tools' % MPATH) + def test_handle_passes_credentials_and_services_to_configure_ua( + self, m_install, m_configure_ua): + """All ubuntu_advantage config keys are passed to configure_ua.""" + cfg = {'ubuntu_advantage': {'token': 'token', 'enable': ['esm']}} + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + m_configure_ua.assert_called_once_with( + token='token', enable=['esm']) + + @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) + @mock.patch('%s.configure_ua' % MPATH) + def test_handle_warns_on_deprecated_ubuntu_advantage_key_w_config( + self, m_configure_ua): + """Warning when ubuntu-advantage key is present with new config""" + cfg = {'ubuntu-advantage': {'token': 'token', 'enable': ['esm']}} + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertEqual( + 'WARNING: Deprecated configuration key "ubuntu-advantage"' + ' provided. Expected underscore delimited "ubuntu_advantage";' + ' will attempt to continue.', + self.logs.getvalue().splitlines()[0]) + m_configure_ua.assert_called_once_with( + token='token', enable=['esm']) + + def test_handle_error_on_deprecated_commands_key_dashed(self): + """Error when commands is present in ubuntu-advantage key.""" + cfg = {'ubuntu-advantage': {'commands': 'nogo'}} + with self.assertRaises(RuntimeError) as context_manager: + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertEqual( + 'Deprecated configuration "ubuntu-advantage: commands" provided.' + ' Expected "token"', + str(context_manager.exception)) + + def test_handle_error_on_deprecated_commands_key_underscored(self): + """Error when commands is present in ubuntu_advantage key.""" + cfg = {'ubuntu_advantage': {'commands': 'nogo'}} + with self.assertRaises(RuntimeError) as context_manager: + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertEqual( + 'Deprecated configuration "ubuntu-advantage: commands" provided.' 
+ ' Expected "token"', + str(context_manager.exception)) + + @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) + @mock.patch('%s.configure_ua' % MPATH) + def test_handle_prefers_new_style_config( + self, m_configure_ua): + """ubuntu_advantage should be preferred over ubuntu-advantage""" + cfg = { + 'ubuntu-advantage': {'token': 'nope', 'enable': ['wrong']}, + 'ubuntu_advantage': {'token': 'token', 'enable': ['esm']}, + } + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertEqual( + 'WARNING: Deprecated configuration key "ubuntu-advantage"' + ' provided. Expected underscore delimited "ubuntu_advantage";' + ' will attempt to continue.', + self.logs.getvalue().splitlines()[0]) + m_configure_ua.assert_called_once_with( + token='token', enable=['esm']) + + +class TestMaybeInstallUATools(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestMaybeInstallUATools, self).setUp() + self.tmp = self.tmp_dir() + + @mock.patch('%s.subp.which' % MPATH) + def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which): + """Do nothing if ubuntu-advantage-tools already exists.""" + m_which.return_value = '/usr/bin/ua' # already installed + distro = mock.MagicMock() + distro.update_package_sources.side_effect = RuntimeError( + 'Some apt error') + maybe_install_ua_tools(cloud=FakeCloud(distro)) # No RuntimeError + + @mock.patch('%s.subp.which' % MPATH) + def test_maybe_install_ua_tools_raises_update_errors(self, m_which): + """maybe_install_ua_tools logs and raises apt update errors.""" + m_which.return_value = None + distro = mock.MagicMock() + distro.update_package_sources.side_effect = RuntimeError( + 'Some apt error') + with self.assertRaises(RuntimeError) as context_manager: + maybe_install_ua_tools(cloud=FakeCloud(distro)) + self.assertEqual('Some apt error', str(context_manager.exception)) + self.assertIn('Package update failed\nTraceback', self.logs.getvalue()) + + @mock.patch('%s.subp.which' % MPATH) + def test_maybe_install_ua_raises_install_errors(self, m_which): + """maybe_install_ua_tools logs and raises package install errors.""" + m_which.return_value = None + distro = mock.MagicMock() + distro.update_package_sources.return_value = None + distro.install_packages.side_effect = RuntimeError( + 'Some install error') + with self.assertRaises(RuntimeError) as context_manager: + maybe_install_ua_tools(cloud=FakeCloud(distro)) + self.assertEqual('Some install error', str(context_manager.exception)) + self.assertIn( + 'Failed to install ubuntu-advantage-tools\n', self.logs.getvalue()) + + @mock.patch('%s.subp.which' % MPATH) + def test_maybe_install_ua_tools_happy_path(self, m_which): + """maybe_install_ua_tools installs ubuntu-advantage-tools.""" + m_which.return_value = None + distro = mock.MagicMock() # No errors raised + maybe_install_ua_tools(cloud=FakeCloud(distro)) + distro.update_package_sources.assert_called_once_with() + distro.install_packages.assert_called_once_with( + ['ubuntu-advantage-tools']) + +# vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_ubuntu_drivers.py b/tests/unittests/config/test_cc_ubuntu_drivers.py new file mode 100644 index 00000000..d341fbfd --- /dev/null +++ b/tests/unittests/config/test_cc_ubuntu_drivers.py @@ -0,0 +1,244 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
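+# These tests cover cc_ubuntu_drivers: when the NVIDIA license is accepted,
+# the module installs ubuntu-drivers-common and then runs
+# 'ubuntu-drivers install --gpgpu nvidia[:<version>]'. A minimal user-data
+# sketch, with keys taken from the test configs below:
+#
+#   drivers:
+#     nvidia:
+#       license-accepted: true
+#       version: '123'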
+ +import copy +import os + +from tests.unittests.helpers import CiTestCase, skipUnlessJsonSchema, mock +from cloudinit.config.schema import ( + SchemaValidationError, validate_cloudconfig_schema) +from cloudinit.config import cc_ubuntu_drivers as drivers +from cloudinit.subp import ProcessExecutionError + +MPATH = "cloudinit.config.cc_ubuntu_drivers." +M_TMP_PATH = MPATH + "temp_utils.mkdtemp" +OLD_UBUNTU_DRIVERS_ERROR_STDERR = ( + "ubuntu-drivers: error: argument : invalid choice: 'install' " + "(choose from 'list', 'autoinstall', 'devices', 'debug')\n") + + +# The tests in this module call helper methods which are decorated with +# mock.patch. pylint doesn't understand that mock.patch passes parameters to +# the decorated function, so it incorrectly reports that we aren't passing +# values for all parameters. Instead of annotating every single call, we +# disable it for the entire module: +# pylint: disable=no-value-for-parameter + +class AnyTempScriptAndDebconfFile(object): + + def __init__(self, tmp_dir, debconf_file): + self.tmp_dir = tmp_dir + self.debconf_file = debconf_file + + def __eq__(self, cmd): + if not len(cmd) == 2: + return False + script, debconf_file = cmd + if bool(script.startswith(self.tmp_dir) and script.endswith('.sh')): + return debconf_file == self.debconf_file + return False + + +class TestUbuntuDrivers(CiTestCase): + cfg_accepted = {'drivers': {'nvidia': {'license-accepted': True}}} + install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia'] + + with_logs = True + + @skipUnlessJsonSchema() + def test_schema_requires_boolean_for_license_accepted(self): + with self.assertRaisesRegex( + SchemaValidationError, ".*license-accepted.*TRUE.*boolean"): + validate_cloudconfig_schema( + {'drivers': {'nvidia': {'license-accepted': "TRUE"}}}, + schema=drivers.schema, strict=True) + + @mock.patch(M_TMP_PATH) + @mock.patch(MPATH + "subp.subp", return_value=('', '')) + @mock.patch(MPATH + "subp.which", return_value=False) + def _assert_happy_path_taken( + self, config, m_which, m_subp, m_tmp): + """Positive path test through handle. 
Package should be installed.""" + tdir = self.tmp_dir() + debconf_file = os.path.join(tdir, 'nvidia.template') + m_tmp.return_value = tdir + myCloud = mock.MagicMock() + drivers.handle('ubuntu_drivers', config, myCloud, None, None) + self.assertEqual([mock.call(['ubuntu-drivers-common'])], + myCloud.distro.install_packages.call_args_list) + self.assertEqual( + [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), + mock.call(self.install_gpgpu)], + m_subp.call_args_list) + + def test_handle_does_package_install(self): + self._assert_happy_path_taken(self.cfg_accepted) + + def test_trueish_strings_are_considered_approval(self): + for true_value in ['yes', 'true', 'on', '1']: + new_config = copy.deepcopy(self.cfg_accepted) + new_config['drivers']['nvidia']['license-accepted'] = true_value + self._assert_happy_path_taken(new_config) + + @mock.patch(M_TMP_PATH) + @mock.patch(MPATH + "subp.subp") + @mock.patch(MPATH + "subp.which", return_value=False) + def test_handle_raises_error_if_no_drivers_found( + self, m_which, m_subp, m_tmp): + """If ubuntu-drivers doesn't install any drivers, raise an error.""" + tdir = self.tmp_dir() + debconf_file = os.path.join(tdir, 'nvidia.template') + m_tmp.return_value = tdir + myCloud = mock.MagicMock() + + def fake_subp(cmd): + if cmd[0].startswith(tdir): + return + raise ProcessExecutionError( + stdout='No drivers found for installation.\n', exit_code=1) + m_subp.side_effect = fake_subp + + with self.assertRaises(Exception): + drivers.handle( + 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None) + self.assertEqual([mock.call(['ubuntu-drivers-common'])], + myCloud.distro.install_packages.call_args_list) + self.assertEqual( + [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), + mock.call(self.install_gpgpu)], + m_subp.call_args_list) + self.assertIn('ubuntu-drivers found no drivers for installation', + self.logs.getvalue()) + + @mock.patch(MPATH + "subp.subp", return_value=('', '')) + @mock.patch(MPATH + "subp.which", return_value=False) + def _assert_inert_with_config(self, config, m_which, m_subp): + """Helper to reduce repetition when testing negative cases""" + myCloud = mock.MagicMock() + drivers.handle('ubuntu_drivers', config, myCloud, None, None) + self.assertEqual(0, myCloud.distro.install_packages.call_count) + self.assertEqual(0, m_subp.call_count) + + def test_handle_inert_if_license_not_accepted(self): + """Ensure we don't do anything if the license is rejected.""" + self._assert_inert_with_config( + {'drivers': {'nvidia': {'license-accepted': False}}}) + + def test_handle_inert_if_garbage_in_license_field(self): + """Ensure we don't do anything if unknown text is in license field.""" + self._assert_inert_with_config( + {'drivers': {'nvidia': {'license-accepted': 'garbage'}}}) + + def test_handle_inert_if_no_license_key(self): + """Ensure we don't do anything if no license key.""" + self._assert_inert_with_config({'drivers': {'nvidia': {}}}) + + def test_handle_inert_if_no_nvidia_key(self): + """Ensure we don't do anything if other license accepted.""" + self._assert_inert_with_config( + {'drivers': {'acme': {'license-accepted': True}}}) + + def test_handle_inert_if_string_given(self): + """Ensure we don't do anything if string refusal given.""" + for false_value in ['no', 'false', 'off', '0']: + self._assert_inert_with_config( + {'drivers': {'nvidia': {'license-accepted': false_value}}}) + + @mock.patch(MPATH + "install_drivers") + def test_handle_no_drivers_does_nothing(self, m_install_drivers): + """If no 'drivers' key in 
the config, nothing should be done.""" + myCloud = mock.MagicMock() + myLog = mock.MagicMock() + drivers.handle('ubuntu_drivers', {'foo': 'bzr'}, myCloud, myLog, None) + self.assertIn('Skipping module named', + myLog.debug.call_args_list[0][0][0]) + self.assertEqual(0, m_install_drivers.call_count) + + @mock.patch(M_TMP_PATH) + @mock.patch(MPATH + "subp.subp", return_value=('', '')) + @mock.patch(MPATH + "subp.which", return_value=True) + def test_install_drivers_no_install_if_present( + self, m_which, m_subp, m_tmp): + """If 'ubuntu-drivers' is present, no package install should occur.""" + tdir = self.tmp_dir() + debconf_file = os.path.join(tdir, 'nvidia.template') + m_tmp.return_value = tdir + pkg_install = mock.MagicMock() + drivers.install_drivers(self.cfg_accepted['drivers'], + pkg_install_func=pkg_install) + self.assertEqual(0, pkg_install.call_count) + self.assertEqual([mock.call('ubuntu-drivers')], + m_which.call_args_list) + self.assertEqual( + [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), + mock.call(self.install_gpgpu)], + m_subp.call_args_list) + + def test_install_drivers_rejects_invalid_config(self): + """install_drivers should raise TypeError if not given a config dict""" + pkg_install = mock.MagicMock() + with self.assertRaisesRegex(TypeError, ".*expected dict.*"): + drivers.install_drivers("mystring", pkg_install_func=pkg_install) + self.assertEqual(0, pkg_install.call_count) + + @mock.patch(M_TMP_PATH) + @mock.patch(MPATH + "subp.subp") + @mock.patch(MPATH + "subp.which", return_value=False) + def test_install_drivers_handles_old_ubuntu_drivers_gracefully( + self, m_which, m_subp, m_tmp): + """Older ubuntu-drivers versions should emit message and raise error""" + tdir = self.tmp_dir() + debconf_file = os.path.join(tdir, 'nvidia.template') + m_tmp.return_value = tdir + myCloud = mock.MagicMock() + + def fake_subp(cmd): + if cmd[0].startswith(tdir): + return + raise ProcessExecutionError( + stderr=OLD_UBUNTU_DRIVERS_ERROR_STDERR, exit_code=2) + m_subp.side_effect = fake_subp + + with self.assertRaises(Exception): + drivers.handle( + 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None) + self.assertEqual([mock.call(['ubuntu-drivers-common'])], + myCloud.distro.install_packages.call_args_list) + self.assertEqual( + [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), + mock.call(self.install_gpgpu)], + m_subp.call_args_list) + self.assertIn('WARNING: the available version of ubuntu-drivers is' + ' too old to perform requested driver installation', + self.logs.getvalue()) + + +# Sub-class TestUbuntuDrivers to run the same test cases, but with a version +class TestUbuntuDriversWithVersion(TestUbuntuDrivers): + cfg_accepted = { + 'drivers': {'nvidia': {'license-accepted': True, 'version': '123'}}} + install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:123'] + + @mock.patch(M_TMP_PATH) + @mock.patch(MPATH + "subp.subp", return_value=('', '')) + @mock.patch(MPATH + "subp.which", return_value=False) + def test_version_none_uses_latest(self, m_which, m_subp, m_tmp): + tdir = self.tmp_dir() + debconf_file = os.path.join(tdir, 'nvidia.template') + m_tmp.return_value = tdir + myCloud = mock.MagicMock() + version_none_cfg = { + 'drivers': {'nvidia': {'license-accepted': True, 'version': None}}} + drivers.handle( + 'ubuntu_drivers', version_none_cfg, myCloud, None, None) + self.assertEqual( + [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), + mock.call(['ubuntu-drivers', 'install', '--gpgpu', 'nvidia'])], + m_subp.call_args_list) + + 
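+    # A configured version is appended to the install target ('nvidia:123'
+    # in this class); a None version falls back to plain 'nvidia', as above.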
def test_specifying_a_version_doesnt_override_license_acceptance(self): + self._assert_inert_with_config({ + 'drivers': {'nvidia': {'license-accepted': False, + 'version': '123'}} + }) + +# vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_update_etc_hosts.py b/tests/unittests/config/test_cc_update_etc_hosts.py new file mode 100644 index 00000000..77a7f78f --- /dev/null +++ b/tests/unittests/config/test_cc_update_etc_hosts.py @@ -0,0 +1,70 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit.config import cc_update_etc_hosts + +from cloudinit import cloud +from cloudinit import distros +from cloudinit import helpers +from cloudinit import util + +from tests.unittests import helpers as t_help + +import logging +import os
+import shutil + +LOG = logging.getLogger(__name__) + + +class TestHostsFile(t_help.FilesystemMockingTestCase): + def setUp(self): + super(TestHostsFile, self).setUp() + self.tmp = self.tmp_dir() + + def _fetch_distro(self, kind): + cls = distros.fetch(kind) + paths = helpers.Paths({}) + return cls(kind, {}, paths) + + def test_write_etc_hosts_suse_localhost(self): + cfg = { + 'manage_etc_hosts': 'localhost', + 'hostname': 'cloud-init.test.us' + } + os.makedirs('%s/etc/' % self.tmp) + hosts_content = '192.168.1.1 blah.blah.us blah\n' + fout = open('%s/etc/hosts' % self.tmp, 'w') + fout.write(hosts_content) + fout.close() + distro = self._fetch_distro('sles') + distro.hosts_fn = '%s/etc/hosts' % self.tmp + paths = helpers.Paths({}) + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + cc_update_etc_hosts.handle('test', cfg, cc, LOG, []) + contents = util.load_file('%s/etc/hosts' % self.tmp) + if '127.0.1.1\tcloud-init.test.us\tcloud-init' not in contents: + self.fail('No entry for 127.0.1.1 in etc/hosts') + if '192.168.1.1\tblah.blah.us\tblah' not in contents: + self.fail('Default etc/hosts content modified') + + @t_help.skipUnlessJinja() + def test_write_etc_hosts_suse_template(self): + cfg = { + 'manage_etc_hosts': 'template', + 'hostname': 'cloud-init.test.us' + } + shutil.copytree('templates', '%s/etc/cloud/templates' % self.tmp) + distro = self._fetch_distro('sles') + paths = helpers.Paths({}) + paths.template_tpl = '%s' % self.tmp + '/etc/cloud/templates/%s.tmpl' + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + cc_update_etc_hosts.handle('test', cfg, cc, LOG, []) + contents = util.load_file('%s/etc/hosts' % self.tmp) + if '127.0.1.1 cloud-init.test.us cloud-init' not in contents: + self.fail('No entry for 127.0.1.1 in etc/hosts') + if '::1 cloud-init.test.us cloud-init' not in contents: + self.fail('No entry for ::1 in etc/hosts') diff --git a/tests/unittests/config/test_cc_users_groups.py b/tests/unittests/config/test_cc_users_groups.py new file mode 100644 index 00000000..4ef844cb --- /dev/null +++ b/tests/unittests/config/test_cc_users_groups.py @@ -0,0 +1,172 @@ +# This file is part of cloud-init. See LICENSE file for license information.
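+# cc_users_groups delegates user and group creation to the distro classes;
+# the tests below patch Distro.create_user/create_group (Ubuntu and FreeBSD)
+# to capture the calls instead of touching the system.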
+ + +from cloudinit.config import cc_users_groups +from tests.unittests.helpers import CiTestCase, mock + +MODPATH = "cloudinit.config.cc_users_groups" + + +@mock.patch('cloudinit.distros.ubuntu.Distro.create_group') +@mock.patch('cloudinit.distros.ubuntu.Distro.create_user') +class TestHandleUsersGroups(CiTestCase): + """Test cc_users_groups handling of config.""" + + with_logs = True + + def test_handle_no_cfg_creates_no_users_or_groups(self, m_user, m_group): + """Test handle with no config will not create users or groups.""" + cfg = {} # merged cloud-config + # System config defines a default user for the distro. + sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, + 'groups': ['lxd', 'sudo'], + 'shell': '/bin/bash'}} + metadata = {} + cloud = self.tmp_cloud( + distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + cc_users_groups.handle('modulename', cfg, cloud, None, None) + m_user.assert_not_called() + m_group.assert_not_called() + + def test_handle_users_in_cfg_calls_create_users(self, m_user, m_group): + """When users in config, create users with distro.create_user.""" + cfg = {'users': ['default', {'name': 'me2'}]} # merged cloud-config + # System config defines a default user for the distro. + sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, + 'groups': ['lxd', 'sudo'], + 'shell': '/bin/bash'}} + metadata = {} + cloud = self.tmp_cloud( + distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + cc_users_groups.handle('modulename', cfg, cloud, None, None) + self.assertCountEqual( + m_user.call_args_list, + [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, + shell='/bin/bash'), + mock.call('me2', default=False)]) + m_group.assert_not_called() + + @mock.patch('cloudinit.distros.freebsd.Distro.create_group') + @mock.patch('cloudinit.distros.freebsd.Distro.create_user') + def test_handle_users_in_cfg_calls_create_users_on_bsd( + self, + m_fbsd_user, + m_fbsd_group, + m_linux_user, + m_linux_group, + ): + """When users in config, create users with freebsd.create_user.""" + cfg = {'users': ['default', {'name': 'me2'}]} # merged cloud-config + # System config defines a default user for the distro. + sys_cfg = {'default_user': {'name': 'freebsd', 'lock_passwd': True, + 'groups': ['wheel'], + 'shell': '/bin/tcsh'}} + metadata = {} + cloud = self.tmp_cloud( + distro='freebsd', sys_cfg=sys_cfg, metadata=metadata) + cc_users_groups.handle('modulename', cfg, cloud, None, None) + self.assertCountEqual( + m_fbsd_user.call_args_list, + [mock.call('freebsd', groups='wheel', lock_passwd=True, + shell='/bin/tcsh'), + mock.call('me2', default=False)]) + m_fbsd_group.assert_not_called() + m_linux_group.assert_not_called() + m_linux_user.assert_not_called() + + def test_users_with_ssh_redirect_user_passes_keys(self, m_user, m_group): + """When ssh_redirect_user is True pass default user and cloud keys.""" + cfg = { + 'users': ['default', {'name': 'me2', 'ssh_redirect_user': True}]} + # System config defines a default user for the distro. 
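+        # ssh_redirect_user: True should resolve to the distro default user
+        # ('ubuntu' here) and pass the cloud's public keys through to
+        # create_user.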
+ sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, + 'groups': ['lxd', 'sudo'], + 'shell': '/bin/bash'}} + metadata = {'public-keys': ['key1']} + cloud = self.tmp_cloud( + distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + cc_users_groups.handle('modulename', cfg, cloud, None, None) + self.assertCountEqual( + m_user.call_args_list, + [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, + shell='/bin/bash'), + mock.call('me2', cloud_public_ssh_keys=['key1'], default=False, + ssh_redirect_user='ubuntu')]) + m_group.assert_not_called() + + def test_users_with_ssh_redirect_user_default_str(self, m_user, m_group): + """When ssh_redirect_user is 'default' pass default username.""" + cfg = { + 'users': ['default', {'name': 'me2', + 'ssh_redirect_user': 'default'}]} + # System config defines a default user for the distro. + sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, + 'groups': ['lxd', 'sudo'], + 'shell': '/bin/bash'}} + metadata = {'public-keys': ['key1']} + cloud = self.tmp_cloud( + distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + cc_users_groups.handle('modulename', cfg, cloud, None, None) + self.assertCountEqual( + m_user.call_args_list, + [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, + shell='/bin/bash'), + mock.call('me2', cloud_public_ssh_keys=['key1'], default=False, + ssh_redirect_user='ubuntu')]) + m_group.assert_not_called() + + def test_users_with_ssh_redirect_user_non_default(self, m_user, m_group): + """Warn when ssh_redirect_user is not 'default'.""" + cfg = { + 'users': ['default', {'name': 'me2', + 'ssh_redirect_user': 'snowflake'}]} + # System config defines a default user for the distro. + sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, + 'groups': ['lxd', 'sudo'], + 'shell': '/bin/bash'}} + metadata = {'public-keys': ['key1']} + cloud = self.tmp_cloud( + distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + with self.assertRaises(ValueError) as context_manager: + cc_users_groups.handle('modulename', cfg, cloud, None, None) + m_group.assert_not_called() + self.assertEqual( + 'Not creating user me2. Invalid value of ssh_redirect_user:' + ' snowflake. Expected values: true, default or false.', + str(context_manager.exception)) + + def test_users_with_ssh_redirect_user_default_false(self, m_user, m_group): + """When ssh_redirect_user is unspecified, it defaults to false and + no redirect is set up.""" + cfg = {'users': ['default', {'name': 'me2'}]} + # System config defines a default user for the distro. + sys_cfg = {'default_user': {'name': 'ubuntu', 'lock_passwd': True, + 'groups': ['lxd', 'sudo'], + 'shell': '/bin/bash'}} + metadata = {'public-keys': ['key1']} + cloud = self.tmp_cloud( + distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + cc_users_groups.handle('modulename', cfg, cloud, None, None) + self.assertCountEqual( + m_user.call_args_list, + [mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True, + shell='/bin/bash'), + mock.call('me2', default=False)]) + m_group.assert_not_called() + + def test_users_ssh_redirect_user_and_no_default(self, m_user, m_group): + """Warn when ssh_redirect_user is True and no default user present.""" + cfg = { + 'users': ['default', {'name': 'me2', 'ssh_redirect_user': True}]} + # System config defines *no* default user for the distro.
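+        # Without a default_user the redirect target cannot be resolved,
+        # so only a warning is expected and me2 is created without
+        # redirection.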
+ + sys_cfg = {} + metadata = {} # no public-keys defined + cloud = self.tmp_cloud( + distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata) + cc_users_groups.handle('modulename', cfg, cloud, None, None) + m_user.assert_called_once_with('me2', default=False) + m_group.assert_not_called() + self.assertEqual( + 'WARNING: Ignoring ssh_redirect_user: True for me2. No' + ' default_user defined. Perhaps missing' + ' cloud configuration users: [default, ..].\n', + self.logs.getvalue()) diff --git a/tests/unittests/config/test_cc_write_files.py b/tests/unittests/config/test_cc_write_files.py new file mode 100644 index 00000000..99248f74 --- /dev/null +++ b/tests/unittests/config/test_cc_write_files.py @@ -0,0 +1,246 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import base64 +import copy +import gzip +import io +import shutil +import tempfile + +from cloudinit.config.cc_write_files import ( + handle, decode_perms, write_files) +from cloudinit import log as logging +from cloudinit import util + +from tests.unittests.helpers import ( + CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema) + +LOG = logging.getLogger(__name__) + +YAML_TEXT = """ +write_files: + - encoding: gzip + content: !!binary | + H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA= + path: /usr/bin/hello + permissions: '0755' + - content: !!binary | + Zm9vYmFyCg== + path: /wark + permissions: '0755' + - content: | + hi mom line 1 + hi mom line 2 + path: /tmp/message +""" + +YAML_CONTENT_EXPECTED = { + '/usr/bin/hello': "#!/bin/sh\necho hello world\n", + '/wark': "foobar\n", + '/tmp/message': "hi mom line 1\nhi mom line 2\n", +} + +VALID_SCHEMA = { + 'write_files': [ + {'append': False, 'content': 'a', 'encoding': 'gzip', 'owner': 'jeff', + 'path': '/some', 'permissions': '0777'} + ] +} + +INVALID_SCHEMA = { # Dropped required path key + 'write_files': [ + {'append': False, 'content': 'a', 'encoding': 'gzip', 'owner': 'jeff', + 'permissions': '0777'} + ] +} + + +@skipUnlessJsonSchema() +@mock.patch('cloudinit.config.cc_write_files.write_files') +class TestWriteFilesSchema(CiTestCase): + + with_logs = True + + def test_schema_validation_warns_missing_path(self, m_write_files): + """The only required file item property is 'path'.""" + cc = self.tmp_cloud('ubuntu') + valid_config = {'write_files': [{'path': '/some/path'}]} + handle('cc_write_file', valid_config, cc, LOG, []) + self.assertNotIn('Invalid config:', self.logs.getvalue()) + handle('cc_write_file', INVALID_SCHEMA, cc, LOG, []) + self.assertIn('Invalid config:', self.logs.getvalue()) + self.assertIn("'path' is a required property", self.logs.getvalue()) + + def test_schema_validation_warns_non_string_type_for_files( + self, m_write_files): + """Schema validation warns of non-string values for each file item.""" + cc = self.tmp_cloud('ubuntu') + for key in VALID_SCHEMA['write_files'][0].keys(): + if key == 'append': + key_type = 'boolean' + else: + key_type = 'string' + invalid_config = copy.deepcopy(VALID_SCHEMA) + invalid_config['write_files'][0][key] = 1 + handle('cc_write_file', invalid_config, cc, LOG, []) + self.assertIn( + mock.call('cc_write_file', invalid_config['write_files']), + m_write_files.call_args_list) + self.assertIn( + 'write_files.0.%s: 1 is not of type \'%s\'' % (key, key_type), + self.logs.getvalue()) + self.assertIn('Invalid config:', self.logs.getvalue()) + + def test_schema_validation_warns_on_additional_undefined_properties( + self, m_write_files): + """Schema validation warns on additional 
undefined file properties.""" + cc = self.tmp_cloud('ubuntu') + invalid_config = copy.deepcopy(VALID_SCHEMA) + invalid_config['write_files'][0]['bogus'] = 'value' + handle('cc_write_file', invalid_config, cc, LOG, []) + self.assertIn( + "Invalid config:\nwrite_files.0: Additional properties" + " are not allowed ('bogus' was unexpected)", + self.logs.getvalue()) + + +class TestWriteFiles(FilesystemMockingTestCase): + + with_logs = True + + def setUp(self): + super(TestWriteFiles, self).setUp() + self.tmp = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, self.tmp) + + @skipUnlessJsonSchema() + def test_handler_schema_validation_warns_non_array_type(self): + """Schema validation warns of non-array value.""" + invalid_config = {'write_files': 1} + cc = self.tmp_cloud('ubuntu') + with self.assertRaises(TypeError): + handle('cc_write_file', invalid_config, cc, LOG, []) + self.assertIn( + 'Invalid config:\nwrite_files: 1 is not of type \'array\'', + self.logs.getvalue()) + + def test_simple(self): + self.patchUtils(self.tmp) + expected = "hello world\n" + filename = "/tmp/my.file" + write_files( + "test_simple", [{"content": expected, "path": filename}]) + self.assertEqual(util.load_file(filename), expected) + + def test_append(self): + self.patchUtils(self.tmp) + existing = "hello " + added = "world\n" + expected = existing + added + filename = "/tmp/append.file" + util.write_file(filename, existing) + write_files( + "test_append", + [{"content": added, "path": filename, "append": "true"}]) + self.assertEqual(util.load_file(filename), expected) + + def test_yaml_binary(self): + self.patchUtils(self.tmp) + data = util.load_yaml(YAML_TEXT) + write_files("testname", data['write_files']) + for path, content in YAML_CONTENT_EXPECTED.items(): + self.assertEqual(util.load_file(path), content) + + def test_all_decodings(self): + self.patchUtils(self.tmp) + + # build a 'files' array that has a dictionary of encodings + # for 'gz', 'gzip', 'gz+base64' ... + data = b"foobzr" + utf8_valid = b"foobzr" + utf8_invalid = b'ab\xaadef' + files = [] + expected = [] + + gz_aliases = ('gz', 'gzip') + gz_b64_aliases = ('gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64') + b64_aliases = ('base64', 'b64') + + datum = (("utf8", utf8_valid), ("no-utf8", utf8_invalid)) + for name, data in datum: + gz = (_gzip_bytes(data), gz_aliases) + gz_b64 = (base64.b64encode(_gzip_bytes(data)), gz_b64_aliases) + b64 = (base64.b64encode(data), b64_aliases) + for content, aliases in (gz, gz_b64, b64): + for enc in aliases: + cur = {'content': content, + 'path': '/tmp/file-%s-%s' % (name, enc), + 'encoding': enc} + files.append(cur) + expected.append((cur['path'], data)) + + write_files("test_decoding", files) + + for path, content in expected: + self.assertEqual(util.load_file(path, decode=False), content) + + # make sure we actually wrote *some* files. 
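+        # 8 encoding aliases (2 gz + 4 gz+b64 + 2 b64) across 2 payloads
+        # gives 16 files.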
+ flen_expected = ( + len(gz_aliases + gz_b64_aliases + b64_aliases) * len(datum)) + self.assertEqual(len(expected), flen_expected) + + def test_deferred(self): + self.patchUtils(self.tmp) + file_path = '/tmp/deferred.file' + config = { + 'write_files': [ + {'path': file_path, 'defer': True} + ] + } + cc = self.tmp_cloud('ubuntu') + handle('cc_write_file', config, cc, LOG, []) + with self.assertRaises(FileNotFoundError): + util.load_file(file_path) + + +class TestDecodePerms(CiTestCase): + + with_logs = True + + def test_none_returns_default(self): + """If None is passed as perms, then default should be returned.""" + default = object() + found = decode_perms(None, default) + self.assertEqual(default, found) + + def test_integer(self): + """A valid integer should return itself.""" + found = decode_perms(0o755, None) + self.assertEqual(0o755, found) + + def test_valid_octal_string(self): + """A string should be read as octal.""" + found = decode_perms("644", None) + self.assertEqual(0o644, found) + + def test_invalid_octal_string_returns_default_and_warns(self): + """A string with invalid octal should warn and return default.""" + found = decode_perms("999", None) + self.assertIsNone(found) + self.assertIn("WARNING: Undecodable", self.logs.getvalue()) + + +def _gzip_bytes(data): + buf = io.BytesIO() + fp = None + try: + fp = gzip.GzipFile(fileobj=buf, mode="wb") + fp.write(data) + fp.close() + return buf.getvalue() + finally: + if fp: + fp.close() + + +# vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_write_files_deferred.py b/tests/unittests/config/test_cc_write_files_deferred.py new file mode 100644 index 00000000..d33d250a --- /dev/null +++ b/tests/unittests/config/test_cc_write_files_deferred.py @@ -0,0 +1,77 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
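+# cc_write_files_deferred handles only the entries that cc_write_files
+# skipped, i.e. those marked 'defer: true'; its schema requires 'defer' to
+# be a boolean.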
+ +import tempfile +import shutil + +from cloudinit.config.cc_write_files_deferred import (handle) +from .test_cc_write_files import (VALID_SCHEMA) +from cloudinit import log as logging +from cloudinit import util + +from tests.unittests.helpers import ( + CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema) + +LOG = logging.getLogger(__name__) + + +@skipUnlessJsonSchema() +@mock.patch('cloudinit.config.cc_write_files_deferred.write_files') +class TestWriteFilesDeferredSchema(CiTestCase): + + with_logs = True + + def test_schema_validation_warns_invalid_value(self, + m_write_files_deferred): + """If 'defer' is defined, it must be of type 'bool'.""" + + valid_config = { + 'write_files': [ + {**VALID_SCHEMA.get('write_files')[0], 'defer': True} + ] + } + + invalid_config = { + 'write_files': [ + {**VALID_SCHEMA.get('write_files')[0], 'defer': str('no')} + ] + } + + cc = self.tmp_cloud('ubuntu') + handle('cc_write_files_deferred', valid_config, cc, LOG, []) + self.assertNotIn('Invalid config:', self.logs.getvalue()) + handle('cc_write_files_deferred', invalid_config, cc, LOG, []) + self.assertIn('Invalid config:', self.logs.getvalue()) + self.assertIn("defer: 'no' is not of type 'boolean'", + self.logs.getvalue()) + + +class TestWriteFilesDeferred(FilesystemMockingTestCase): + + with_logs = True + + def setUp(self): + super(TestWriteFilesDeferred, self).setUp() + self.tmp = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, self.tmp) + + def test_filtering_deferred_files(self): + self.patchUtils(self.tmp) + expected = "hello world\n" + config = { + 'write_files': [ + { + 'path': '/tmp/deferred.file', + 'defer': True, + 'content': expected + }, + {'path': '/tmp/not_deferred.file'} + ] + } + cc = self.tmp_cloud('ubuntu') + handle('cc_write_files_deferred', config, cc, LOG, []) + self.assertEqual(util.load_file('/tmp/deferred.file'), expected) + with self.assertRaises(FileNotFoundError): + util.load_file('/tmp/not_deferred.file') + + +# vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_yum_add_repo.py b/tests/unittests/config/test_cc_yum_add_repo.py new file mode 100644 index 00000000..2f11b96a --- /dev/null +++ b/tests/unittests/config/test_cc_yum_add_repo.py @@ -0,0 +1,111 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
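+# cc_yum_add_repo renders /etc/yum.repos.d/<id>.repo files, translating
+# dashes in repo ids to underscores (epel-testing -> epel_testing.repo).
+# A minimal cloud-config sketch mirroring the tests below:
+#
+#   yum_repos:
+#     epel-testing:
+#       baseurl: http://blah.org/pub/epel/testing/5/$basearch
+#       enabled: false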
+ +import configparser +import logging +import shutil +import tempfile + +from cloudinit import util +from cloudinit.config import cc_yum_add_repo +from tests.unittests import helpers + +LOG = logging.getLogger(__name__) + + +class TestConfig(helpers.FilesystemMockingTestCase): + def setUp(self): + super(TestConfig, self).setUp() + self.tmp = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, self.tmp) + + def test_bad_config(self): + cfg = { + 'yum_repos': { + 'epel-testing': { + 'name': 'Extra Packages for Enterprise Linux 5 - Testing', + # Missing this should cause the repo not to be written + # 'baseurl': 'http://blah.org/pub/epel/testing/5/$barch', + 'enabled': False, + 'gpgcheck': True, + 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL', + 'failovermethod': 'priority', + }, + }, + } + self.patchUtils(self.tmp) + cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, []) + self.assertRaises(IOError, util.load_file, + "/etc/yum.repos.d/epel_testing.repo") + + def test_write_config(self): + cfg = { + 'yum_repos': { + 'epel-testing': { + 'name': 'Extra Packages for Enterprise Linux 5 - Testing', + 'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch', + 'enabled': False, + 'gpgcheck': True, + 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL', + 'failovermethod': 'priority', + }, + }, + } + self.patchUtils(self.tmp) + cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, []) + contents = util.load_file("/etc/yum.repos.d/epel_testing.repo") + parser = configparser.ConfigParser() + parser.read_string(contents) + expected = { + 'epel_testing': { + 'name': 'Extra Packages for Enterprise Linux 5 - Testing', + 'failovermethod': 'priority', + 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL', + 'enabled': '0', + 'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch', + 'gpgcheck': '1', + } + } + for section in expected: + self.assertTrue(parser.has_section(section), + "Contains section {0}".format(section)) + for k, v in expected[section].items(): + self.assertEqual(parser.get(section, k), v) + + def test_write_config_array(self): + cfg = { + 'yum_repos': { + 'puppetlabs-products': { + 'name': 'Puppet Labs Products El 6 - $basearch', + 'baseurl': + 'http://yum.puppetlabs.com/el/6/products/$basearch', + 'gpgkey': [ + 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs', + 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet', + ], + 'enabled': True, + 'gpgcheck': True, + } + } + } + self.patchUtils(self.tmp) + cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, []) + contents = util.load_file("/etc/yum.repos.d/puppetlabs_products.repo") + parser = configparser.ConfigParser() + parser.read_string(contents) + expected = { + 'puppetlabs_products': { + 'name': 'Puppet Labs Products El 6 - $basearch', + 'baseurl': 'http://yum.puppetlabs.com/el/6/products/$basearch', + 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs\n' + 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet', + 'enabled': '1', + 'gpgcheck': '1', + } + } + for section in expected: + self.assertTrue(parser.has_section(section), + "Contains section {0}".format(section)) + for k, v in expected[section].items(): + self.assertEqual(parser.get(section, k), v) + +# vi: ts=4 expandtab diff --git a/tests/unittests/config/test_cc_zypper_add_repo.py b/tests/unittests/config/test_cc_zypper_add_repo.py new file mode 100644 index 00000000..4af04bee --- /dev/null +++ b/tests/unittests/config/test_cc_zypper_add_repo.py @@ -0,0 +1,231 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
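+# cc_zypper_add_repo writes /etc/zypp/repos.d/<id>.repo entries and appends
+# any 'config' keys to /etc/zypp/zypp.conf, skipping the 'configdir' key.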
+ +import configparser +import glob +import logging +import os + +from cloudinit import util +from cloudinit.config import cc_zypper_add_repo +from tests.unittests import helpers +from tests.unittests.helpers import mock + +LOG = logging.getLogger(__name__) + + +class TestConfig(helpers.FilesystemMockingTestCase): + def setUp(self): + super(TestConfig, self).setUp() + self.tmp = self.tmp_dir() + self.zypp_conf = 'etc/zypp/zypp.conf' + + def test_bad_repo_config(self): + """Config has no baseurl, no file should be written""" + cfg = { + 'repos': [ + { + 'id': 'foo', + 'name': 'suse-test', + 'enabled': '1' + }, + ] + } + self.patchUtils(self.tmp) + cc_zypper_add_repo._write_repos(cfg['repos'], '/etc/zypp/repos.d') + self.assertRaises(IOError, util.load_file, + "/etc/zypp/repos.d/foo.repo") + + def test_write_repos(self): + """Verify valid repos get written""" + cfg = self._get_base_config_repos() + root_d = self.tmp_dir() + cc_zypper_add_repo._write_repos(cfg['zypper']['repos'], root_d) + repos = glob.glob('%s/*.repo' % root_d) + expected_repos = ['testing-foo.repo', 'testing-bar.repo'] + if len(repos) != 2: + self.fail('Number of repos written is "%d" expected 2' % len(repos)) + for repo in repos: + repo_name = os.path.basename(repo) + if repo_name not in expected_repos: + self.fail('Found repo with name "%s"; unexpected' % repo_name) + # Validation that the content gets properly written is in another test + + def test_write_repo(self): + """Verify the content of a repo file""" + cfg = { + 'repos': [ + { + 'baseurl': 'http://foo', + 'name': 'test-foo', + 'id': 'testing-foo' + }, + ] + } + root_d = self.tmp_dir() + cc_zypper_add_repo._write_repos(cfg['repos'], root_d) + contents = util.load_file("%s/testing-foo.repo" % root_d) + parser = configparser.ConfigParser() + parser.read_string(contents) + expected = { + 'testing-foo': { + 'name': 'test-foo', + 'baseurl': 'http://foo', + 'enabled': '1', + 'autorefresh': '1' + } + } + for section in expected: + self.assertTrue(parser.has_section(section), + "Contains section {0}".format(section)) + for k, v in expected[section].items(): + self.assertEqual(parser.get(section, k), v) + + def test_config_write(self): + """Write valid configuration data""" + cfg = { + 'config': { + 'download.deltarpm': 'False', + 'reposdir': 'foo' + } + } + root_d = self.tmp_dir() + helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'}) + self.reRoot(root_d) + cc_zypper_add_repo._write_zypp_config(cfg['config']) + cfg_out = os.path.join(root_d, self.zypp_conf) + contents = util.load_file(cfg_out) + expected = [ + '# Zypp config', + '# Added via cloud.cfg', + 'download.deltarpm=False', + 'reposdir=foo' + ] + for item in contents.split('\n'): + if item not in expected: + self.assertIsNone(item) + + @mock.patch('cloudinit.log.logging') + def test_config_write_skip_configdir(self, mock_logging): + """Write configuration but skip writing 'configdir' setting""" + cfg = { + 'config': { + 'download.deltarpm': 'False', + 'reposdir': 'foo', + 'configdir': 'bar' + } + } + root_d = self.tmp_dir() + helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'}) + self.reRoot(root_d) + cc_zypper_add_repo._write_zypp_config(cfg['config']) + cfg_out = os.path.join(root_d, self.zypp_conf) + contents = util.load_file(cfg_out) + expected = [ + '# Zypp config', + '# Added via cloud.cfg', + 'download.deltarpm=False', + 'reposdir=foo' + ] + for item in contents.split('\n'): + if item not in expected: + self.assertIsNone(item) + # Not finding the right path for mocking :( + # assert 
+        # mock_logging.warning.called
+
+    def test_empty_config_section_no_new_data(self):
+        """When the config section is empty no new data should be written to
+        zypp.conf"""
+        cfg = self._get_base_config_repos()
+        cfg['zypper']['config'] = None
+        root_d = self.tmp_dir()
+        helpers.populate_dir(root_d, {self.zypp_conf: '# No data'})
+        self.reRoot(root_d)
+        cc_zypper_add_repo._write_zypp_config(cfg.get('config', {}))
+        cfg_out = os.path.join(root_d, self.zypp_conf)
+        contents = util.load_file(cfg_out)
+        self.assertEqual(contents, '# No data')
+
+    def test_empty_config_value_no_new_data(self):
+        """When the config section is not empty but there are no values
+        no new data should be written to zypp.conf"""
+        cfg = self._get_base_config_repos()
+        cfg['zypper']['config'] = {
+            'download.deltarpm': None
+        }
+        root_d = self.tmp_dir()
+        helpers.populate_dir(root_d, {self.zypp_conf: '# No data'})
+        self.reRoot(root_d)
+        cc_zypper_add_repo._write_zypp_config(cfg.get('config', {}))
+        cfg_out = os.path.join(root_d, self.zypp_conf)
+        contents = util.load_file(cfg_out)
+        self.assertEqual(contents, '# No data')
+
+    def test_handler_full_setup(self):
+        """Test that the handler ends up calling the renderers"""
+        cfg = self._get_base_config_repos()
+        cfg['zypper']['config'] = {
+            'download.deltarpm': 'False',
+        }
+        root_d = self.tmp_dir()
+        os.makedirs('%s/etc/zypp/repos.d' % root_d)
+        helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'})
+        self.reRoot(root_d)
+        cc_zypper_add_repo.handle('zypper_add_repo', cfg, None, LOG, [])
+        cfg_out = os.path.join(root_d, self.zypp_conf)
+        contents = util.load_file(cfg_out)
+        expected = [
+            '# Zypp config',
+            '# Added via cloud.cfg',
+            'download.deltarpm=False',
+        ]
+        for item in contents.split('\n'):
+            if item not in expected:
+                self.assertIsNone(item)
+        repos = glob.glob('%s/etc/zypp/repos.d/*.repo' % root_d)
+        expected_repos = ['testing-foo.repo', 'testing-bar.repo']
+        assert len(repos) == 2, (
+            'Number of repos written is "%d" expected 2' % len(repos))
+        for repo in repos:
+            repo_name = os.path.basename(repo)
+            assert repo_name in expected_repos, (
+                'Found repo with name "%s"; unexpected' % repo_name)
+
+    def test_no_config_section_no_new_data(self):
+        """When there is no config section no new data should be written to
+        zypp.conf"""
+        cfg = self._get_base_config_repos()
+        root_d = self.tmp_dir()
+        helpers.populate_dir(root_d, {self.zypp_conf: '# No data'})
+        self.reRoot(root_d)
+        cc_zypper_add_repo._write_zypp_config(cfg.get('config', {}))
+        cfg_out = os.path.join(root_d, self.zypp_conf)
+        contents = util.load_file(cfg_out)
+        self.assertEqual(contents, '# No data')
+
+    def test_no_repo_data(self):
+        """When there is no repo data nothing should happen"""
+        root_d = self.tmp_dir()
+        self.reRoot(root_d)
+        cc_zypper_add_repo._write_repos(None, root_d)
+        content = glob.glob('%s/*' % root_d)
+        self.assertEqual(len(content), 0)
+
+    def _get_base_config_repos(self):
+        """Basic valid repo configuration"""
+        cfg = {
+            'zypper': {
+                'repos': [
+                    {
+                        'baseurl': 'http://foo',
+                        'name': 'test-foo',
+                        'id': 'testing-foo'
+                    },
+                    {
+                        'baseurl': 'http://bar',
+                        'name': 'test-bar',
+                        'id': 'testing-bar'
+                    }
+                ]
+            }
+        }
+        return cfg
diff --git a/tests/unittests/config/test_schema.py b/tests/unittests/config/test_schema.py
new file mode 100644
index 00000000..b01f5eea
--- /dev/null
+++ b/tests/unittests/config/test_schema.py
@@ -0,0 +1,515 @@
+# This file is part of cloud-init. See LICENSE file for license information.
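+
+# Illustrative sketch (not part of the original tests): the assertions below
+# expect schema failures reported as (path, message) pairs. Assuming the
+# draft-04 validator that the schema's $schema key declares, python-jsonschema
+# can produce such pairs roughly like this (hypothetical helper, not the
+# cloudinit.config.schema implementation):
+
+
+def _iter_schema_errors(config, schema):
+    """Sketch: yield (dotted-path, message) pairs for schema violations."""
+    import jsonschema  # imported lazily, as the module under test does
+    validator = jsonschema.Draft4Validator(schema)
+    for error in sorted(validator.iter_errors(config), key=str):
+        yield ('.'.join(str(p) for p in error.path), error.message)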
+import cloudinit +from cloudinit.config.schema import ( + CLOUD_CONFIG_HEADER, SchemaValidationError, annotated_cloudconfig_file, + get_schema_doc, get_schema, validate_cloudconfig_file, + validate_cloudconfig_schema, main) +from cloudinit.util import write_file + +from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema + +from copy import copy +import itertools +import pytest +from pathlib import Path +from textwrap import dedent +from yaml import safe_load + + +class GetSchemaTest(CiTestCase): + + def test_get_schema_coalesces_known_schema(self): + """Every cloudconfig module with schema is listed in allOf keyword.""" + schema = get_schema() + self.assertCountEqual( + [ + 'cc_apk_configure', + 'cc_apt_configure', + 'cc_bootcmd', + 'cc_locale', + 'cc_ntp', + 'cc_resizefs', + 'cc_runcmd', + 'cc_snap', + 'cc_ubuntu_advantage', + 'cc_ubuntu_drivers', + 'cc_write_files', + 'cc_write_files_deferred', + 'cc_zypper_add_repo', + 'cc_chef', + 'cc_install_hotplug', + ], + [subschema['id'] for subschema in schema['allOf']]) + self.assertEqual('cloud-config-schema', schema['id']) + self.assertEqual( + 'http://json-schema.org/draft-04/schema#', + schema['$schema']) + # FULL_SCHEMA is updated by the get_schema call + from cloudinit.config.schema import FULL_SCHEMA + self.assertCountEqual(['id', '$schema', 'allOf'], FULL_SCHEMA.keys()) + + def test_get_schema_returns_global_when_set(self): + """When FULL_SCHEMA global is already set, get_schema returns it.""" + m_schema_path = 'cloudinit.config.schema.FULL_SCHEMA' + with mock.patch(m_schema_path, {'here': 'iam'}): + self.assertEqual({'here': 'iam'}, get_schema()) + + +class SchemaValidationErrorTest(CiTestCase): + """Test validate_cloudconfig_schema""" + + def test_schema_validation_error_expects_schema_errors(self): + """SchemaValidationError is initialized from schema_errors.""" + errors = (('key.path', 'unexpected key "junk"'), + ('key2.path', '"-123" is not a valid "hostname" format')) + exception = SchemaValidationError(schema_errors=errors) + self.assertIsInstance(exception, Exception) + self.assertEqual(exception.schema_errors, errors) + self.assertEqual( + 'Cloud config schema errors: key.path: unexpected key "junk", ' + 'key2.path: "-123" is not a valid "hostname" format', + str(exception)) + self.assertTrue(isinstance(exception, ValueError)) + + +class ValidateCloudConfigSchemaTest(CiTestCase): + """Tests for validate_cloudconfig_schema.""" + + with_logs = True + + @skipUnlessJsonSchema() + def test_validateconfig_schema_non_strict_emits_warnings(self): + """When strict is False validate_cloudconfig_schema emits warnings.""" + schema = {'properties': {'p1': {'type': 'string'}}} + validate_cloudconfig_schema({'p1': -1}, schema, strict=False) + self.assertIn( + "Invalid config:\np1: -1 is not of type 'string'\n", + self.logs.getvalue()) + + @skipUnlessJsonSchema() + def test_validateconfig_schema_emits_warning_on_missing_jsonschema(self): + """Warning from validate_cloudconfig_schema when missing jsonschema.""" + schema = {'properties': {'p1': {'type': 'string'}}} + with mock.patch.dict('sys.modules', **{'jsonschema': ImportError()}): + validate_cloudconfig_schema({'p1': -1}, schema, strict=True) + self.assertIn( + 'Ignoring schema validation. 
python-jsonschema is not present', + self.logs.getvalue()) + + @skipUnlessJsonSchema() + def test_validateconfig_schema_strict_raises_errors(self): + """When strict is True validate_cloudconfig_schema raises errors.""" + schema = {'properties': {'p1': {'type': 'string'}}} + with self.assertRaises(SchemaValidationError) as context_mgr: + validate_cloudconfig_schema({'p1': -1}, schema, strict=True) + self.assertEqual( + "Cloud config schema errors: p1: -1 is not of type 'string'", + str(context_mgr.exception)) + + @skipUnlessJsonSchema() + def test_validateconfig_schema_honors_formats(self): + """With strict True, validate_cloudconfig_schema errors on format.""" + schema = { + 'properties': {'p1': {'type': 'string', 'format': 'email'}}} + with self.assertRaises(SchemaValidationError) as context_mgr: + validate_cloudconfig_schema({'p1': '-1'}, schema, strict=True) + self.assertEqual( + "Cloud config schema errors: p1: '-1' is not a 'email'", + str(context_mgr.exception)) + + +class TestCloudConfigExamples: + schema = get_schema() + params = [ + (schema["id"], example) + for schema in schema["allOf"] for example in schema["examples"]] + + @pytest.mark.parametrize("schema_id,example", params) + @skipUnlessJsonSchema() + def test_validateconfig_schema_of_example(self, schema_id, example): + """ For a given example in a config module we test if it is valid + according to the unified schema of all config modules + """ + config_load = safe_load(example) + validate_cloudconfig_schema( + config_load, self.schema, strict=True) + + +class ValidateCloudConfigFileTest(CiTestCase): + """Tests for validate_cloudconfig_file.""" + + def setUp(self): + super(ValidateCloudConfigFileTest, self).setUp() + self.config_file = self.tmp_path('cloudcfg.yaml') + + def test_validateconfig_file_error_on_absent_file(self): + """On absent config_path, validate_cloudconfig_file errors.""" + with self.assertRaises(RuntimeError) as context_mgr: + validate_cloudconfig_file('/not/here', {}) + self.assertEqual( + 'Configfile /not/here does not exist', + str(context_mgr.exception)) + + def test_validateconfig_file_error_on_invalid_header(self): + """On invalid header, validate_cloudconfig_file errors. + + A SchemaValidationError is raised when the file doesn't begin with + CLOUD_CONFIG_HEADER. + """ + write_file(self.config_file, '#junk') + with self.assertRaises(SchemaValidationError) as context_mgr: + validate_cloudconfig_file(self.config_file, {}) + self.assertEqual( + 'Cloud config schema errors: format-l1.c1: File {0} needs to begin' + ' with "{1}"'.format( + self.config_file, CLOUD_CONFIG_HEADER.decode()), + str(context_mgr.exception)) + + def test_validateconfig_file_error_on_non_yaml_scanner_error(self): + """On non-yaml scan issues, validate_cloudconfig_file errors.""" + # Generate a scanner error by providing text on a single line with + # improper indent. 
+        write_file(self.config_file, '#cloud-config\nasdf:\nasdf')
+        with self.assertRaises(SchemaValidationError) as context_mgr:
+            validate_cloudconfig_file(self.config_file, {})
+        self.assertIn(
+            'schema errors: format-l3.c1: File {0} is not valid yaml.'.format(
+                self.config_file),
+            str(context_mgr.exception))
+
+    def test_validateconfig_file_error_on_non_yaml_parser_error(self):
+        """On non-yaml parser issues, validate_cloudconfig_file errors."""
+        write_file(self.config_file, '#cloud-config\n{}}')
+        with self.assertRaises(SchemaValidationError) as context_mgr:
+            validate_cloudconfig_file(self.config_file, {})
+        self.assertIn(
+            'schema errors: format-l2.c3: File {0} is not valid yaml.'.format(
+                self.config_file),
+            str(context_mgr.exception))
+
+    @skipUnlessJsonSchema()
+    def test_validateconfig_file_strictly_validates_schema(self):
+        """validate_cloudconfig_file raises errors on invalid schema."""
+        schema = {
+            'properties': {'p1': {'type': 'string', 'format': 'string'}}}
+        write_file(self.config_file, '#cloud-config\np1: -1')
+        with self.assertRaises(SchemaValidationError) as context_mgr:
+            validate_cloudconfig_file(self.config_file, schema)
+        self.assertEqual(
+            "Cloud config schema errors: p1: -1 is not of type 'string'",
+            str(context_mgr.exception))
+
+
+class GetSchemaDocTest(CiTestCase):
+    """Tests for get_schema_doc."""
+
+    def setUp(self):
+        super(GetSchemaDocTest, self).setUp()
+        self.required_schema = {
+            'title': 'title', 'description': 'description', 'id': 'id',
+            'name': 'name', 'frequency': 'frequency',
+            'distros': ['debian', 'rhel']}
+
+    def test_get_schema_doc_returns_restructured_text(self):
+        """get_schema_doc returns restructured text for a cloudinit schema."""
+        full_schema = copy(self.required_schema)
+        full_schema.update(
+            {'properties': {
+                'prop1': {'type': 'array', 'description': 'prop-description',
+                          'items': {'type': 'integer'}}}})
+        self.assertEqual(
+            dedent("""
+                name
+                ----
+                **Summary:** title
+
+                description
+
+                **Internal name:** ``id``
+
+                **Module frequency:** frequency
+
+                **Supported distros:** debian, rhel
+
+                **Config schema**:
+                    **prop1:** (array of integer) prop-description\n\n"""),
+            get_schema_doc(full_schema))
+
+    def test_get_schema_doc_handles_multiple_types(self):
+        """get_schema_doc delimits multiple property types with a '/'."""
+        full_schema = copy(self.required_schema)
+        full_schema.update(
+            {'properties': {
+                'prop1': {'type': ['string', 'integer'],
+                          'description': 'prop-description'}}})
+        self.assertIn(
+            '**prop1:** (string/integer) prop-description',
+            get_schema_doc(full_schema))
+
+    def test_get_schema_doc_handles_enum_types(self):
+        """get_schema_doc converts enum types to yaml and delimits with '/'."""
+        full_schema = copy(self.required_schema)
+        full_schema.update(
+            {'properties': {
+                'prop1': {'enum': [True, False, 'stuff'],
+                          'description': 'prop-description'}}})
+        self.assertIn(
+            '**prop1:** (true/false/stuff) prop-description',
+            get_schema_doc(full_schema))
+
+    def test_get_schema_doc_handles_nested_oneof_property_types(self):
+        """get_schema_doc describes array items oneOf declarations in type."""
+        full_schema = copy(self.required_schema)
+        full_schema.update(
+            {'properties': {
+                'prop1': {'type': 'array',
+                          'items': {
+                              'oneOf': [{'type': 'string'},
+                                        {'type': 'integer'}]},
+                          'description': 'prop-description'}}})
+        self.assertIn(
+            '**prop1:** (array of (string)/(integer)) prop-description',
+            get_schema_doc(full_schema))
+
+    def test_get_schema_doc_handles_string_examples(self):
+        """get_schema_doc properly
indented examples as a list of strings.""" + full_schema = copy(self.required_schema) + full_schema.update( + {'examples': ['ex1:\n [don\'t, expand, "this"]', 'ex2: true'], + 'properties': { + 'prop1': {'type': 'array', 'description': 'prop-description', + 'items': {'type': 'integer'}}}}) + self.assertIn( + dedent(""" + **Config schema**: + **prop1:** (array of integer) prop-description + + **Examples**:: + + ex1: + [don't, expand, "this"] + # --- Example2 --- + ex2: true + """), + get_schema_doc(full_schema)) + + def test_get_schema_doc_properly_parse_description(self): + """get_schema_doc description properly formatted""" + full_schema = copy(self.required_schema) + full_schema.update( + {'properties': { + 'p1': { + 'type': 'string', + 'description': dedent("""\ + This item + has the + following options: + + - option1 + - option2 + - option3 + + The default value is + option1""") + } + }} + ) + + self.assertIn( + dedent(""" + **Config schema**: + **p1:** (string) This item has the following options: + + - option1 + - option2 + - option3 + + The default value is option1 + """), + get_schema_doc(full_schema)) + + def test_get_schema_doc_raises_key_errors(self): + """get_schema_doc raises KeyErrors on missing keys.""" + for key in self.required_schema: + invalid_schema = copy(self.required_schema) + invalid_schema.pop(key) + with self.assertRaises(KeyError) as context_mgr: + get_schema_doc(invalid_schema) + self.assertIn(key, str(context_mgr.exception)) + + +class AnnotatedCloudconfigFileTest(CiTestCase): + maxDiff = None + + def test_annotated_cloudconfig_file_no_schema_errors(self): + """With no schema_errors, print the original content.""" + content = b'ntp:\n pools: [ntp1.pools.com]\n' + self.assertEqual( + content, + annotated_cloudconfig_file({}, content, schema_errors=[])) + + def test_annotated_cloudconfig_file_schema_annotates_and_adds_footer(self): + """With schema_errors, error lines are annotated and a footer added.""" + content = dedent("""\ + #cloud-config + # comment + ntp: + pools: [-99, 75] + """).encode() + expected = dedent("""\ + #cloud-config + # comment + ntp: # E1 + pools: [-99, 75] # E2,E3 + + # Errors: ------------- + # E1: Some type error + # E2: -99 is not a string + # E3: 75 is not a string + + """) + parsed_config = safe_load(content[13:]) + schema_errors = [ + ('ntp', 'Some type error'), ('ntp.pools.0', '-99 is not a string'), + ('ntp.pools.1', '75 is not a string')] + self.assertEqual( + expected, + annotated_cloudconfig_file(parsed_config, content, schema_errors)) + + def test_annotated_cloudconfig_file_annotates_separate_line_items(self): + """Errors are annotated for lists with items on separate lines.""" + content = dedent("""\ + #cloud-config + # comment + ntp: + pools: + - -99 + - 75 + """).encode() + expected = dedent("""\ + ntp: + pools: + - -99 # E1 + - 75 # E2 + """) + parsed_config = safe_load(content[13:]) + schema_errors = [ + ('ntp.pools.0', '-99 is not a string'), + ('ntp.pools.1', '75 is not a string')] + self.assertIn( + expected, + annotated_cloudconfig_file(parsed_config, content, schema_errors)) + + +class TestMain: + + exclusive_combinations = itertools.combinations( + ["--system", "--docs all", "--config-file something"], 2 + ) + + @pytest.mark.parametrize("params", exclusive_combinations) + def test_main_exclusive_args(self, params, capsys): + """Main exits non-zero and error on required exclusive args.""" + params = list(itertools.chain(*[a.split() for a in params])) + with mock.patch('sys.argv', ['mycmd'] + params): + with 
pytest.raises(SystemExit) as context_manager: + main() + assert 1 == context_manager.value.code + + _out, err = capsys.readouterr() + expected = ( + 'Expected one of --config-file, --system or --docs arguments\n' + ) + assert expected == err + + def test_main_missing_args(self, capsys): + """Main exits non-zero and reports an error on missing parameters.""" + with mock.patch('sys.argv', ['mycmd']): + with pytest.raises(SystemExit) as context_manager: + main() + assert 1 == context_manager.value.code + + _out, err = capsys.readouterr() + expected = ( + 'Expected one of --config-file, --system or --docs arguments\n' + ) + assert expected == err + + def test_main_absent_config_file(self, capsys): + """Main exits non-zero when config file is absent.""" + myargs = ['mycmd', '--annotate', '--config-file', 'NOT_A_FILE'] + with mock.patch('sys.argv', myargs): + with pytest.raises(SystemExit) as context_manager: + main() + assert 1 == context_manager.value.code + _out, err = capsys.readouterr() + assert 'Configfile NOT_A_FILE does not exist\n' == err + + def test_main_prints_docs(self, capsys): + """When --docs parameter is provided, main generates documentation.""" + myargs = ['mycmd', '--docs', 'all'] + with mock.patch('sys.argv', myargs): + assert 0 == main(), 'Expected 0 exit code' + out, _err = capsys.readouterr() + assert '\nNTP\n---\n' in out + assert '\nRuncmd\n------\n' in out + + def test_main_validates_config_file(self, tmpdir, capsys): + """When --config-file parameter is provided, main validates schema.""" + myyaml = tmpdir.join('my.yaml') + myargs = ['mycmd', '--config-file', myyaml.strpath] + myyaml.write(b'#cloud-config\nntp:') # shortest ntp schema + with mock.patch('sys.argv', myargs): + assert 0 == main(), 'Expected 0 exit code' + out, _err = capsys.readouterr() + assert 'Valid cloud-config: {0}\n'.format(myyaml) == out + + @mock.patch('cloudinit.config.schema.read_cfg_paths') + @mock.patch('cloudinit.config.schema.os.getuid', return_value=0) + def test_main_validates_system_userdata( + self, m_getuid, m_read_cfg_paths, capsys, paths + ): + """When --system is provided, main validates system userdata.""" + m_read_cfg_paths.return_value = paths + ud_file = paths.get_ipath_cur("userdata_raw") + write_file(ud_file, b'#cloud-config\nntp:') + myargs = ['mycmd', '--system'] + with mock.patch('sys.argv', myargs): + assert 0 == main(), 'Expected 0 exit code' + out, _err = capsys.readouterr() + assert 'Valid cloud-config: system userdata\n' == out + + @mock.patch('cloudinit.config.schema.os.getuid', return_value=1000) + def test_main_system_userdata_requires_root(self, m_getuid, capsys, paths): + """Non-root user can't use --system param""" + myargs = ['mycmd', '--system'] + with mock.patch('sys.argv', myargs): + with pytest.raises(SystemExit) as context_manager: + main() + assert 1 == context_manager.value.code + _out, err = capsys.readouterr() + expected = ( + 'Unable to read system userdata as non-root user. 
Try using sudo\n' + ) + assert expected == err + + +def _get_schema_doc_examples(): + examples_dir = Path( + cloudinit.__file__).parent.parent / 'doc' / 'examples' + assert examples_dir.is_dir() + + all_text_files = (f for f in examples_dir.glob('cloud-config*.txt') + if not f.name.startswith('cloud-config-archive')) + return all_text_files + + +class TestSchemaDocExamples: + schema = get_schema() + + @pytest.mark.parametrize("example_path", _get_schema_doc_examples()) + @skipUnlessJsonSchema() + def test_schema_doc_examples(self, example_path): + validate_cloudconfig_file(str(example_path), self.schema) + +# vi: ts=4 expandtab syntax=python diff --git a/tests/unittests/distros/__init__.py b/tests/unittests/distros/__init__.py new file mode 100644 index 00000000..5394aa56 --- /dev/null +++ b/tests/unittests/distros/__init__.py @@ -0,0 +1,21 @@ +# This file is part of cloud-init. See LICENSE file for license information. +import copy + +from cloudinit import distros +from cloudinit import helpers +from cloudinit import settings + + +def _get_distro(dtype, system_info=None): + """Return a Distro class of distro 'dtype'. + + cfg is format of CFG_BUILTIN['system_info']. + + example: _get_distro("debian") + """ + if system_info is None: + system_info = copy.deepcopy(settings.CFG_BUILTIN['system_info']) + system_info['distro'] = dtype + paths = helpers.Paths(system_info['paths']) + distro_cls = distros.fetch(dtype) + return distro_cls(dtype, system_info, paths) diff --git a/tests/unittests/distros/test_arch.py b/tests/unittests/distros/test_arch.py new file mode 100644 index 00000000..590ba00e --- /dev/null +++ b/tests/unittests/distros/test_arch.py @@ -0,0 +1,45 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit.distros.arch import _render_network +from cloudinit import util + +from tests.unittests.helpers import (CiTestCase, dir2dict) + +from . import _get_distro + + +class TestArch(CiTestCase): + + def test_get_distro(self): + distro = _get_distro("arch") + hostname = "myhostname" + hostfile = self.tmp_path("hostfile") + distro._write_hostname(hostname, hostfile) + self.assertEqual(hostname + "\n", util.load_file(hostfile)) + + +class TestRenderNetwork(CiTestCase): + def test_basic_static(self): + """Just the most basic static config. + + note 'lo' should not be rendered as an interface.""" + entries = {'eth0': {'auto': True, + 'dns-nameservers': ['8.8.8.8'], + 'bootproto': 'static', + 'address': '10.0.0.2', + 'gateway': '10.0.0.1', + 'netmask': '255.255.255.0'}, + 'lo': {'auto': True}} + target = self.tmp_dir() + devs = _render_network(entries, target=target) + files = dir2dict(target, prefix=target) + self.assertEqual(['eth0'], devs) + self.assertEqual( + {'/etc/netctl/eth0': '\n'.join([ + "Address=10.0.0.2/255.255.255.0", + "Connection=ethernet", + "DNS=('8.8.8.8')", + "Gateway=10.0.0.1", + "IP=static", + "Interface=eth0", ""]), + '/etc/resolv.conf': 'nameserver 8.8.8.8\n'}, files) diff --git a/tests/unittests/distros/test_bsd_utils.py b/tests/unittests/distros/test_bsd_utils.py new file mode 100644 index 00000000..55686dc9 --- /dev/null +++ b/tests/unittests/distros/test_bsd_utils.py @@ -0,0 +1,67 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
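+
+# Illustrative sketch (not part of the original tests): the quote handling
+# exercised by test_get_rc_config_value amounts to stripping one pair of
+# matching quotes from a key=value right-hand side, roughly (hypothetical
+# helper, not the cloudinit.distros.bsd_utils implementation):
+
+
+def _unquote_sketch(value):
+    """Sketch: strip one pair of matching single or double quotes."""
+    if len(value) >= 2 and value[0] == value[-1] and value[0] in '\'"':
+        return value[1:-1]
+    return value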
+
+import cloudinit.distros.bsd_utils as bsd_utils
+
+from tests.unittests.helpers import (CiTestCase, ExitStack, mock)
+
+RC_FILE = """
+if something; then
+    do something here
+fi
+hostname={hostname}
+"""
+
+
+class TestBsdUtils(CiTestCase):
+
+    def setUp(self):
+        super().setUp()
+        patches = ExitStack()
+        self.addCleanup(patches.close)
+
+        self.load_file = patches.enter_context(
+            mock.patch.object(bsd_utils.util, 'load_file'))
+
+        self.write_file = patches.enter_context(
+            mock.patch.object(bsd_utils.util, 'write_file'))
+
+    def test_get_rc_config_value(self):
+        self.load_file.return_value = 'hostname=foo\n'
+        self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo')
+        self.load_file.assert_called_with('/etc/rc.conf')
+
+        self.load_file.return_value = 'hostname=foo'
+        self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo')
+
+        self.load_file.return_value = 'hostname="foo"'
+        self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo')
+
+        self.load_file.return_value = "hostname='foo'"
+        self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo')
+
+        self.load_file.return_value = 'hostname=\'foo"'
+        self.assertEqual(bsd_utils.get_rc_config_value('hostname'), "'foo\"")
+
+        self.load_file.return_value = ''
+        self.assertEqual(bsd_utils.get_rc_config_value('hostname'), None)
+
+        self.load_file.return_value = RC_FILE.format(hostname='foo')
+        self.assertEqual(bsd_utils.get_rc_config_value('hostname'), "foo")
+
+    def test_set_rc_config_value_unchanged(self):
+        # The config file already carries the requested value, so no
+        # write should happen.
+        self.load_file.return_value = RC_FILE.format(hostname='foo')
+        bsd_utils.set_rc_config_value('hostname', 'foo')
+        self.write_file.assert_not_called()
+
+    def test_set_rc_config_value(self):
+        bsd_utils.set_rc_config_value('hostname', 'foo')
+        self.write_file.assert_called_with('/etc/rc.conf', 'hostname=foo\n')
+
+        self.load_file.return_value = RC_FILE.format(hostname='foo')
+        bsd_utils.set_rc_config_value('hostname', 'bar')
+        self.write_file.assert_called_with(
+            '/etc/rc.conf',
+            RC_FILE.format(hostname='bar')
+        )
diff --git a/tests/unittests/distros/test_create_users.py b/tests/unittests/distros/test_create_users.py
new file mode 100644
index 00000000..5baa8a4b
--- /dev/null
+++ b/tests/unittests/distros/test_create_users.py
@@ -0,0 +1,236 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import re
+
+from cloudinit import distros
+from cloudinit import ssh_util
+from tests.unittests.helpers import (CiTestCase, mock)
+from tests.unittests.util import abstract_to_concrete
+
+
+@mock.patch("cloudinit.distros.util.system_is_snappy", return_value=False)
+@mock.patch("cloudinit.distros.subp.subp")
+class TestCreateUser(CiTestCase):
+
+    with_logs = True
+
+    def setUp(self):
+        super(TestCreateUser, self).setUp()
+        self.dist = abstract_to_concrete(distros.Distro)(
+            name='test', cfg=None, paths=None
+        )
+
+    def _useradd2call(self, args):
+        # return a mock call for the useradd command in args
+        # with expected 'logstring'.
+        args = ['useradd'] + args
+        logcmd = [a for a in args]
+        for i in range(len(args)):
+            if args[i] in ('--password',):
+                logcmd[i + 1] = 'REDACTED'
+        return mock.call(args, logstring=logcmd)
+
+    def test_basic(self, m_subp, m_is_snappy):
+        user = 'foouser'
+        self.dist.create_user(user)
+        self.assertEqual(
+            m_subp.call_args_list,
+            [self._useradd2call([user, '-m']),
+             mock.call(['passwd', '-l', user])])
+
+    def test_no_home(self, m_subp, m_is_snappy):
+        user = 'foouser'
+        self.dist.create_user(user, no_create_home=True)
+        self.assertEqual(
+            m_subp.call_args_list,
+            [self._useradd2call([user, '-M']),
+             mock.call(['passwd', '-l', user])])
+
+    def test_system_user(self, m_subp, m_is_snappy):
+        # system user should have no home and get --system
+        user = 'foouser'
+        self.dist.create_user(user, system=True)
+        self.assertEqual(
+            m_subp.call_args_list,
+            [self._useradd2call([user, '--system', '-M']),
+             mock.call(['passwd', '-l', user])])
+
+    def test_explicit_no_home_false(self, m_subp, m_is_snappy):
+        user = 'foouser'
+        self.dist.create_user(user, no_create_home=False)
+        self.assertEqual(
+            m_subp.call_args_list,
+            [self._useradd2call([user, '-m']),
+             mock.call(['passwd', '-l', user])])
+
+    def test_unlocked(self, m_subp, m_is_snappy):
+        user = 'foouser'
+        self.dist.create_user(user, lock_passwd=False)
+        self.assertEqual(
+            m_subp.call_args_list,
+            [self._useradd2call([user, '-m'])])
+
+    def test_set_password(self, m_subp, m_is_snappy):
+        user = 'foouser'
+        password = 'passfoo'
+        self.dist.create_user(user, passwd=password)
+        self.assertEqual(
+            m_subp.call_args_list,
+            [self._useradd2call([user, '--password', password, '-m']),
+             mock.call(['passwd', '-l', user])])
+
+    @mock.patch("cloudinit.distros.util.is_group")
+    def test_group_added(self, m_is_group, m_subp, m_is_snappy):
+        m_is_group.return_value = False
+        user = 'foouser'
+        self.dist.create_user(user, groups=['group1'])
+        expected = [
+            mock.call(['groupadd', 'group1']),
+            self._useradd2call([user, '--groups', 'group1', '-m']),
+            mock.call(['passwd', '-l', user])]
+        self.assertEqual(m_subp.call_args_list, expected)
+
+    @mock.patch("cloudinit.distros.util.is_group")
+    def test_only_new_group_added(self, m_is_group, m_subp, m_is_snappy):
+        ex_groups = ['existing_group']
+        groups = ['group1', ex_groups[0]]
+        m_is_group.side_effect = lambda m: m in ex_groups
+        user = 'foouser'
+        self.dist.create_user(user, groups=groups)
+        expected = [
+            mock.call(['groupadd', 'group1']),
+            self._useradd2call([user, '--groups', ','.join(groups), '-m']),
+            mock.call(['passwd', '-l', user])]
+        self.assertEqual(m_subp.call_args_list, expected)
+
+    @mock.patch("cloudinit.distros.util.is_group")
+    def test_create_groups_with_whitespace_string(
+            self, m_is_group, m_subp, m_is_snappy):
+        # groups supported as a comma delimited string even with white space
+        m_is_group.return_value = False
+        user = 'foouser'
+        self.dist.create_user(user, groups='group1, group2')
+        expected = [
+            mock.call(['groupadd', 'group1']),
+            mock.call(['groupadd', 'group2']),
+            self._useradd2call([user, '--groups', 'group1,group2', '-m']),
+            mock.call(['passwd', '-l', user])]
+        self.assertEqual(m_subp.call_args_list, expected)
+
+    def test_explicit_sudo_false(self, m_subp, m_is_snappy):
+        user = 'foouser'
+        self.dist.create_user(user, sudo=False)
+        self.assertEqual(
+            m_subp.call_args_list,
+            [self._useradd2call([user, '-m']),
+             mock.call(['passwd', '-l', user])])
+
+    @mock.patch('cloudinit.ssh_util.setup_user_keys')
+    def test_setup_ssh_authorized_keys_with_string(
+            self,
m_setup_user_keys, m_subp, m_is_snappy): + """ssh_authorized_keys allows string and calls setup_user_keys.""" + user = 'foouser' + self.dist.create_user(user, ssh_authorized_keys='mykey') + self.assertEqual( + m_subp.call_args_list, + [self._useradd2call([user, '-m']), + mock.call(['passwd', '-l', user])]) + m_setup_user_keys.assert_called_once_with(set(['mykey']), user) + + @mock.patch('cloudinit.ssh_util.setup_user_keys') + def test_setup_ssh_authorized_keys_with_list( + self, m_setup_user_keys, m_subp, m_is_snappy): + """ssh_authorized_keys allows lists and calls setup_user_keys.""" + user = 'foouser' + self.dist.create_user(user, ssh_authorized_keys=['key1', 'key2']) + self.assertEqual( + m_subp.call_args_list, + [self._useradd2call([user, '-m']), + mock.call(['passwd', '-l', user])]) + m_setup_user_keys.assert_called_once_with(set(['key1', 'key2']), user) + + @mock.patch('cloudinit.ssh_util.setup_user_keys') + def test_setup_ssh_authorized_keys_with_integer( + self, m_setup_user_keys, m_subp, m_is_snappy): + """ssh_authorized_keys warns on non-iterable/string type.""" + user = 'foouser' + self.dist.create_user(user, ssh_authorized_keys=-1) + m_setup_user_keys.assert_called_once_with(set([]), user) + match = re.match( + r'.*WARNING: Invalid type \'<(type|class) \'int\'>\' detected for' + ' \'ssh_authorized_keys\'.*', + self.logs.getvalue(), + re.DOTALL) + self.assertIsNotNone( + match, 'Missing ssh_authorized_keys invalid type warning') + + @mock.patch('cloudinit.ssh_util.setup_user_keys') + def test_create_user_with_ssh_redirect_user_no_cloud_keys( + self, m_setup_user_keys, m_subp, m_is_snappy): + """Log a warning when trying to redirect a user no cloud ssh keys.""" + user = 'foouser' + self.dist.create_user(user, ssh_redirect_user='someuser') + self.assertIn( + 'WARNING: Unable to disable SSH logins for foouser given ' + 'ssh_redirect_user: someuser. 
No cloud public-keys present.\n', + self.logs.getvalue()) + m_setup_user_keys.assert_not_called() + + @mock.patch('cloudinit.ssh_util.setup_user_keys') + def test_create_user_with_ssh_redirect_user_with_cloud_keys( + self, m_setup_user_keys, m_subp, m_is_snappy): + """Disable ssh when ssh_redirect_user and cloud ssh keys are set.""" + user = 'foouser' + self.dist.create_user( + user, ssh_redirect_user='someuser', cloud_public_ssh_keys=['key1']) + disable_prefix = ssh_util.DISABLE_USER_OPTS + disable_prefix = disable_prefix.replace('$USER', 'someuser') + disable_prefix = disable_prefix.replace('$DISABLE_USER', user) + m_setup_user_keys.assert_called_once_with( + set(['key1']), 'foouser', options=disable_prefix) + + @mock.patch('cloudinit.ssh_util.setup_user_keys') + def test_create_user_with_ssh_redirect_user_does_not_disable_auth_keys( + self, m_setup_user_keys, m_subp, m_is_snappy): + """Do not disable ssh_authorized_keys when ssh_redirect_user is set.""" + user = 'foouser' + self.dist.create_user( + user, ssh_authorized_keys='auth1', ssh_redirect_user='someuser', + cloud_public_ssh_keys=['key1']) + disable_prefix = ssh_util.DISABLE_USER_OPTS + disable_prefix = disable_prefix.replace('$USER', 'someuser') + disable_prefix = disable_prefix.replace('$DISABLE_USER', user) + self.assertEqual( + m_setup_user_keys.call_args_list, + [mock.call(set(['auth1']), user), # not disabled + mock.call(set(['key1']), 'foouser', options=disable_prefix)]) + + @mock.patch("cloudinit.distros.subp.which") + def test_lock_with_usermod_if_no_passwd(self, m_which, m_subp, + m_is_snappy): + """Lock uses usermod --lock if no 'passwd' cmd available.""" + m_which.side_effect = lambda m: m in ('usermod',) + self.dist.lock_passwd("bob") + self.assertEqual( + [mock.call(['usermod', '--lock', 'bob'])], + m_subp.call_args_list) + + @mock.patch("cloudinit.distros.subp.which") + def test_lock_with_passwd_if_available(self, m_which, m_subp, + m_is_snappy): + """Lock with only passwd will use passwd.""" + m_which.side_effect = lambda m: m in ('passwd',) + self.dist.lock_passwd("bob") + self.assertEqual( + [mock.call(['passwd', '-l', 'bob'])], + m_subp.call_args_list) + + @mock.patch("cloudinit.distros.subp.which") + def test_lock_raises_runtime_if_no_commands(self, m_which, m_subp, + m_is_snappy): + """Lock with no commands available raises RuntimeError.""" + m_which.return_value = None + with self.assertRaises(RuntimeError): + self.dist.lock_passwd("bob") + +# vi: ts=4 expandtab diff --git a/tests/unittests/distros/test_debian.py b/tests/unittests/distros/test_debian.py new file mode 100644 index 00000000..3d0db145 --- /dev/null +++ b/tests/unittests/distros/test_debian.py @@ -0,0 +1,174 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
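+
+# Illustrative sketch (not part of the original tests): TestPackageCommand
+# below drives a wait-for-apt-lock loop that polls a lock predicate, sleeps
+# one second between polls, and raises TimeoutError on expiry. Under those
+# assumptions the loop looks roughly like this (hypothetical names, not the
+# cloudinit.distros.debian implementation):
+
+
+def _wait_for_lock_sketch(lock_available, timeout=30):
+    """Sketch: poll until lock_available() returns True or timeout expires."""
+    import time
+    start = time.time()
+    while time.time() - start < timeout:
+        if lock_available():
+            return
+        time.sleep(1)
+    raise TimeoutError('apt lock not acquired within %ss' % timeout)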
+from itertools import count, cycle +from unittest import mock + +import pytest + +from cloudinit import distros, util +from cloudinit.distros.debian import ( + APT_GET_COMMAND, + APT_GET_WRAPPER, +) +from tests.unittests.helpers import FilesystemMockingTestCase +from cloudinit import subp + + +@mock.patch("cloudinit.distros.debian.subp.subp") +class TestDebianApplyLocale(FilesystemMockingTestCase): + + def setUp(self): + super(TestDebianApplyLocale, self).setUp() + self.new_root = self.tmp_dir() + self.patchOS(self.new_root) + self.patchUtils(self.new_root) + self.spath = self.tmp_path('etc/default/locale', self.new_root) + cls = distros.fetch("debian") + self.distro = cls("debian", {}, None) + + def test_no_rerun(self, m_subp): + """If system has defined locale, no re-run is expected.""" + m_subp.return_value = (None, None) + locale = 'en_US.UTF-8' + util.write_file(self.spath, 'LANG=%s\n' % locale, omode="w") + self.distro.apply_locale(locale, out_fn=self.spath) + m_subp.assert_not_called() + + def test_no_regen_on_c_utf8(self, m_subp): + """If locale is set to C.UTF8, do not attempt to call locale-gen""" + m_subp.return_value = (None, None) + locale = 'C.UTF-8' + util.write_file(self.spath, 'LANG=%s\n' % 'en_US.UTF-8', omode="w") + self.distro.apply_locale(locale, out_fn=self.spath) + self.assertEqual( + [['update-locale', '--locale-file=' + self.spath, + 'LANG=%s' % locale]], + [p[0][0] for p in m_subp.call_args_list]) + + def test_rerun_if_different(self, m_subp): + """If system has different locale, locale-gen should be called.""" + m_subp.return_value = (None, None) + locale = 'en_US.UTF-8' + util.write_file(self.spath, 'LANG=fr_FR.UTF-8', omode="w") + self.distro.apply_locale(locale, out_fn=self.spath) + self.assertEqual( + [['locale-gen', locale], + ['update-locale', '--locale-file=' + self.spath, + 'LANG=%s' % locale]], + [p[0][0] for p in m_subp.call_args_list]) + + def test_rerun_if_no_file(self, m_subp): + """If system has no locale file, locale-gen should be called.""" + m_subp.return_value = (None, None) + locale = 'en_US.UTF-8' + self.distro.apply_locale(locale, out_fn=self.spath) + self.assertEqual( + [['locale-gen', locale], + ['update-locale', '--locale-file=' + self.spath, + 'LANG=%s' % locale]], + [p[0][0] for p in m_subp.call_args_list]) + + def test_rerun_on_unset_system_locale(self, m_subp): + """If system has unset locale, locale-gen should be called.""" + m_subp.return_value = (None, None) + locale = 'en_US.UTF-8' + util.write_file(self.spath, 'LANG=', omode="w") + self.distro.apply_locale(locale, out_fn=self.spath) + self.assertEqual( + [['locale-gen', locale], + ['update-locale', '--locale-file=' + self.spath, + 'LANG=%s' % locale]], + [p[0][0] for p in m_subp.call_args_list]) + + def test_rerun_on_mismatched_keys(self, m_subp): + """If key is LC_ALL and system has only LANG, rerun is expected.""" + m_subp.return_value = (None, None) + locale = 'en_US.UTF-8' + util.write_file(self.spath, 'LANG=', omode="w") + self.distro.apply_locale(locale, out_fn=self.spath, keyname='LC_ALL') + self.assertEqual( + [['locale-gen', locale], + ['update-locale', '--locale-file=' + self.spath, + 'LC_ALL=%s' % locale]], + [p[0][0] for p in m_subp.call_args_list]) + + def test_falseish_locale_raises_valueerror(self, m_subp): + """locale as None or "" is invalid and should raise ValueError.""" + + with self.assertRaises(ValueError) as ctext_m: + self.distro.apply_locale(None) + m_subp.assert_not_called() + + self.assertEqual( + 'Failed to provide locale value.', 
str(ctext_m.exception)) + + with self.assertRaises(ValueError) as ctext_m: + self.distro.apply_locale("") + m_subp.assert_not_called() + self.assertEqual( + 'Failed to provide locale value.', str(ctext_m.exception)) + + +@mock.patch.dict('os.environ', {}, clear=True) +@mock.patch("cloudinit.distros.debian.subp.which", return_value=True) +@mock.patch("cloudinit.distros.debian.subp.subp") +class TestPackageCommand: + distro = distros.fetch("debian")("debian", {}, None) + + @mock.patch("cloudinit.distros.debian.Distro._apt_lock_available", + return_value=True) + def test_simple_command(self, m_apt_avail, m_subp, m_which): + self.distro.package_command('update') + apt_args = [APT_GET_WRAPPER['command']] + apt_args.extend(APT_GET_COMMAND) + apt_args.append('update') + expected_call = { + 'args': apt_args, + 'capture': False, + 'env': {'DEBIAN_FRONTEND': 'noninteractive'}, + } + assert m_subp.call_args == mock.call(**expected_call) + + @mock.patch("cloudinit.distros.debian.Distro._apt_lock_available", + side_effect=[False, False, True]) + @mock.patch("cloudinit.distros.debian.time.sleep") + def test_wait_for_lock(self, m_sleep, m_apt_avail, m_subp, m_which): + self.distro._wait_for_apt_command("stub", {"args": "stub2"}) + assert m_sleep.call_args_list == [mock.call(1), mock.call(1)] + assert m_subp.call_args_list == [mock.call(args='stub2')] + + @mock.patch("cloudinit.distros.debian.Distro._apt_lock_available", + return_value=False) + @mock.patch("cloudinit.distros.debian.time.sleep") + @mock.patch("cloudinit.distros.debian.time.time", side_effect=count()) + def test_lock_wait_timeout( + self, m_time, m_sleep, m_apt_avail, m_subp, m_which + ): + with pytest.raises(TimeoutError): + self.distro._wait_for_apt_command("stub", "stub2", timeout=5) + assert m_subp.call_args_list == [] + + @mock.patch("cloudinit.distros.debian.Distro._apt_lock_available", + side_effect=cycle([True, False])) + @mock.patch("cloudinit.distros.debian.time.sleep") + def test_lock_exception_wait(self, m_sleep, m_apt_avail, m_subp, m_which): + exception = subp.ProcessExecutionError( + exit_code=100, stderr="Could not get apt lock" + ) + m_subp.side_effect = [exception, exception, "return_thing"] + ret = self.distro._wait_for_apt_command("stub", {"args": "stub2"}) + assert ret == "return_thing" + + @mock.patch("cloudinit.distros.debian.Distro._apt_lock_available", + side_effect=cycle([True, False])) + @mock.patch("cloudinit.distros.debian.time.sleep") + @mock.patch("cloudinit.distros.debian.time.time", side_effect=count()) + def test_lock_exception_timeout( + self, m_time, m_sleep, m_apt_avail, m_subp, m_which + ): + m_subp.side_effect = subp.ProcessExecutionError( + exit_code=100, stderr="Could not get apt lock" + ) + with pytest.raises(TimeoutError): + self.distro._wait_for_apt_command( + "stub", {"args": "stub2"}, timeout=5 + ) diff --git a/tests/unittests/distros/test_dragonflybsd.py b/tests/unittests/distros/test_dragonflybsd.py new file mode 100644 index 00000000..f0cd1b24 --- /dev/null +++ b/tests/unittests/distros/test_dragonflybsd.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python3 + + +import cloudinit.util +from tests.unittests.helpers import mock + + +def test_find_dragonflybsd_part(): + assert cloudinit.util.find_dragonflybsd_part("/dev/vbd0s3") == "vbd0s3" + + +@mock.patch("cloudinit.util.is_DragonFlyBSD") +@mock.patch("cloudinit.subp.subp") +def test_parse_mount(mock_subp, m_is_DragonFlyBSD): + mount_out = """ +vbd0s3 on / (hammer2, local) +devfs on /dev (devfs, nosymfollow, local) +/dev/vbd0s0a on /boot (ufs, local) 
+procfs on /proc (procfs, local)
+tmpfs on /var/run/shm (tmpfs, local)
+"""
+
+    mock_subp.return_value = (mount_out, "")
+    m_is_DragonFlyBSD.return_value = True
+    assert cloudinit.util.parse_mount("/") == ("vbd0s3", "hammer2", "/")
diff --git a/tests/unittests/distros/test_freebsd.py b/tests/unittests/distros/test_freebsd.py
new file mode 100644
index 00000000..0279e86f
--- /dev/null
+++ b/tests/unittests/distros/test_freebsd.py
@@ -0,0 +1,45 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.util import (find_freebsd_part, get_path_dev_freebsd)
+from tests.unittests.helpers import (CiTestCase, mock)
+
+import os
+
+
+class TestDeviceLookUp(CiTestCase):
+
+    @mock.patch('cloudinit.subp.subp')
+    def test_find_freebsd_part_label(self, mock_subp):
+        glabel_out = '''
+gptid/fa52d426-c337-11e6-8911-00155d4c5e47  N/A  da0p1
+                              label/rootfs  N/A  da0p2
+                                label/swap  N/A  da0p3
+'''
+        mock_subp.return_value = (glabel_out, "")
+        res = find_freebsd_part("/dev/label/rootfs")
+        self.assertEqual("da0p2", res)
+
+    @mock.patch('cloudinit.subp.subp')
+    def test_find_freebsd_part_gpt(self, mock_subp):
+        glabel_out = '''
+                                gpt/bootfs  N/A  vtbd0p1
+gptid/3f4cbe26-75da-11e8-a8f2-002590ec6166  N/A  vtbd0p1
+                                gpt/swapfs  N/A  vtbd0p2
+                                gpt/rootfs  N/A  vtbd0p3
+                            iso9660/cidata  N/A  vtbd2
+'''
+        mock_subp.return_value = (glabel_out, "")
+        res = find_freebsd_part("/dev/gpt/rootfs")
+        self.assertEqual("vtbd0p3", res)
+
+    def test_get_path_dev_freebsd_label(self):
+        mnt_list = '''
+/dev/label/rootfs  /  ufs  rw  1 1
+devfs  /dev  devfs  rw,multilabel  0 0
+fdescfs  /dev/fd  fdescfs  rw  0 0
+/dev/da1s1  /mnt/resource  ufs  rw  2 2
+'''
+        with mock.patch.object(os.path, 'exists',
+                               return_value=True):
+            res = get_path_dev_freebsd('/etc', mnt_list)
+            self.assertIsNotNone(res)
diff --git a/tests/unittests/distros/test_generic.py b/tests/unittests/distros/test_generic.py
new file mode 100644
index 00000000..e542c26f
--- /dev/null
+++ b/tests/unittests/distros/test_generic.py
@@ -0,0 +1,315 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import distros
+from cloudinit import util
+
+from tests.unittests import helpers
+
+import os
+import pytest
+import shutil
+import tempfile
+from unittest import mock
+
+unknown_arch_info = {
+    'arches': ['default'],
+    'failsafe': {'primary': 'http://fs-primary-default',
+                 'security': 'http://fs-security-default'}
+}
+
+package_mirrors = [
+    {'arches': ['i386', 'amd64'],
+     'failsafe': {'primary': 'http://fs-primary-intel',
+                  'security': 'http://fs-security-intel'},
+     'search': {
+         'primary': ['http://%(ec2_region)s.ec2/',
+                     'http://%(availability_zone)s.clouds/'],
+         'security': ['http://security-mirror1-intel',
+                      'http://security-mirror2-intel']}},
+    {'arches': ['armhf', 'armel'],
+     'failsafe': {'primary': 'http://fs-primary-arm',
+                  'security': 'http://fs-security-arm'}},
+    unknown_arch_info
+]
+
+gpmi = distros._get_package_mirror_info
+gapmi = distros._get_arch_package_mirror_info
+
+
+class TestGenericDistro(helpers.FilesystemMockingTestCase):
+
+    def setUp(self):
+        super(TestGenericDistro, self).setUp()
+        # Make a temp directory for tests to use.
+ self.tmp = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, self.tmp) + + def _write_load_sudoers(self, _user, rules): + cls = distros.fetch("ubuntu") + d = cls("ubuntu", {}, None) + os.makedirs(os.path.join(self.tmp, "etc")) + os.makedirs(os.path.join(self.tmp, "etc", 'sudoers.d')) + self.patchOS(self.tmp) + self.patchUtils(self.tmp) + d.write_sudo_rules("harlowja", rules) + contents = util.load_file(d.ci_sudoers_fn) + return contents + + def _count_in(self, lines_look_for, text_content): + found_amount = 0 + for e in lines_look_for: + for line in text_content.splitlines(): + line = line.strip() + if line == e: + found_amount += 1 + return found_amount + + def test_sudoers_ensure_rules(self): + rules = 'ALL=(ALL:ALL) ALL' + contents = self._write_load_sudoers('harlowja', rules) + expected = ['harlowja ALL=(ALL:ALL) ALL'] + self.assertEqual(len(expected), self._count_in(expected, contents)) + not_expected = [ + 'harlowja A', + 'harlowja L', + 'harlowja L', + ] + self.assertEqual(0, self._count_in(not_expected, contents)) + + def test_sudoers_ensure_rules_list(self): + rules = [ + 'ALL=(ALL:ALL) ALL', + 'B-ALL=(ALL:ALL) ALL', + 'C-ALL=(ALL:ALL) ALL', + ] + contents = self._write_load_sudoers('harlowja', rules) + expected = [ + 'harlowja ALL=(ALL:ALL) ALL', + 'harlowja B-ALL=(ALL:ALL) ALL', + 'harlowja C-ALL=(ALL:ALL) ALL', + ] + self.assertEqual(len(expected), self._count_in(expected, contents)) + not_expected = [ + 'harlowja A', + 'harlowja L', + 'harlowja L', + ] + self.assertEqual(0, self._count_in(not_expected, contents)) + + def test_sudoers_ensure_new(self): + cls = distros.fetch("ubuntu") + d = cls("ubuntu", {}, None) + self.patchOS(self.tmp) + self.patchUtils(self.tmp) + d.ensure_sudo_dir("/b") + contents = util.load_file("/etc/sudoers") + self.assertIn("includedir /b", contents) + self.assertTrue(os.path.isdir("/b")) + + def test_sudoers_ensure_append(self): + cls = distros.fetch("ubuntu") + d = cls("ubuntu", {}, None) + self.patchOS(self.tmp) + self.patchUtils(self.tmp) + util.write_file("/etc/sudoers", "josh, josh\n") + d.ensure_sudo_dir("/b") + contents = util.load_file("/etc/sudoers") + self.assertIn("includedir /b", contents) + self.assertTrue(os.path.isdir("/b")) + self.assertIn("josh", contents) + self.assertEqual(2, contents.count("josh")) + + def test_sudoers_ensure_only_one_includedir(self): + cls = distros.fetch("ubuntu") + d = cls("ubuntu", {}, None) + self.patchOS(self.tmp) + self.patchUtils(self.tmp) + for char in ['#', '@']: + util.write_file("/etc/sudoers", "{}includedir /b".format(char)) + d.ensure_sudo_dir("/b") + contents = util.load_file("/etc/sudoers") + self.assertIn("includedir /b", contents) + self.assertTrue(os.path.isdir("/b")) + self.assertEqual(1, contents.count("includedir /b")) + + def test_arch_package_mirror_info_unknown(self): + """for an unknown arch, we should get back that with arch 'default'.""" + arch_mirrors = gapmi(package_mirrors, arch="unknown") + self.assertEqual(unknown_arch_info, arch_mirrors) + + def test_arch_package_mirror_info_known(self): + arch_mirrors = gapmi(package_mirrors, arch="amd64") + self.assertEqual(package_mirrors[0], arch_mirrors) + + def test_systemd_in_use(self): + cls = distros.fetch("ubuntu") + d = cls("ubuntu", {}, None) + self.patchOS(self.tmp) + self.patchUtils(self.tmp) + os.makedirs('/run/systemd/system') + self.assertTrue(d.uses_systemd()) + + def test_systemd_not_in_use(self): + cls = distros.fetch("ubuntu") + d = cls("ubuntu", {}, None) + self.patchOS(self.tmp) + self.patchUtils(self.tmp) + 
self.assertFalse(d.uses_systemd()) + + def test_systemd_symlink(self): + cls = distros.fetch("ubuntu") + d = cls("ubuntu", {}, None) + self.patchOS(self.tmp) + self.patchUtils(self.tmp) + os.makedirs('/run/systemd') + os.symlink('/', '/run/systemd/system') + self.assertFalse(d.uses_systemd()) + + @mock.patch('cloudinit.distros.debian.read_system_locale') + def test_get_locale_ubuntu(self, m_locale): + """Test ubuntu distro returns locale set to C.UTF-8""" + m_locale.return_value = 'C.UTF-8' + cls = distros.fetch("ubuntu") + d = cls("ubuntu", {}, None) + locale = d.get_locale() + self.assertEqual('C.UTF-8', locale) + + def test_get_locale_rhel(self): + """Test rhel distro returns NotImplementedError exception""" + cls = distros.fetch("rhel") + d = cls("rhel", {}, None) + with self.assertRaises(NotImplementedError): + d.get_locale() + + def test_expire_passwd_uses_chpasswd(self): + """Test ubuntu.expire_passwd uses the passwd command.""" + for d_name in ("ubuntu", "rhel"): + cls = distros.fetch(d_name) + d = cls(d_name, {}, None) + with mock.patch("cloudinit.subp.subp") as m_subp: + d.expire_passwd("myuser") + m_subp.assert_called_once_with(["passwd", "--expire", "myuser"]) + + def test_expire_passwd_freebsd_uses_pw_command(self): + """Test FreeBSD.expire_passwd uses the pw command.""" + cls = distros.fetch("freebsd") + d = cls("freebsd", {}, None) + with mock.patch("cloudinit.subp.subp") as m_subp: + d.expire_passwd("myuser") + m_subp.assert_called_once_with( + ["pw", "usermod", "myuser", "-p", "01-Jan-1970"]) + + +class TestGetPackageMirrors: + + def return_first(self, mlist): + if not mlist: + return None + return mlist[0] + + def return_second(self, mlist): + if not mlist: + return None + + return mlist[1] if len(mlist) > 1 else None + + def return_none(self, _mlist): + return None + + def return_last(self, mlist): + if not mlist: + return None + return(mlist[-1]) + + @pytest.mark.parametrize( + "allow_ec2_mirror, platform_type, mirrors", + [ + (True, "ec2", [ + {'primary': 'http://us-east-1.ec2/', + 'security': 'http://security-mirror1-intel'}, + {'primary': 'http://us-east-1a.clouds/', + 'security': 'http://security-mirror2-intel'} + ]), + (True, "other", [ + {'primary': 'http://us-east-1.ec2/', + 'security': 'http://security-mirror1-intel'}, + {'primary': 'http://us-east-1a.clouds/', + 'security': 'http://security-mirror2-intel'} + ]), + (False, "ec2", [ + {'primary': 'http://us-east-1.ec2/', + 'security': 'http://security-mirror1-intel'}, + {'primary': 'http://us-east-1a.clouds/', + 'security': 'http://security-mirror2-intel'} + ]), + (False, "other", [ + {'primary': 'http://us-east-1a.clouds/', + 'security': 'http://security-mirror1-intel'}, + {'primary': 'http://fs-primary-intel', + 'security': 'http://security-mirror2-intel'} + ]) + ]) + def test_get_package_mirror_info_az_ec2(self, + allow_ec2_mirror, + platform_type, + mirrors): + flag_path = "cloudinit.distros." 
\ + "ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES" + with mock.patch(flag_path, allow_ec2_mirror): + arch_mirrors = gapmi(package_mirrors, arch="amd64") + data_source_mock = mock.Mock( + availability_zone="us-east-1a", + platform_type=platform_type) + + results = gpmi(arch_mirrors, data_source=data_source_mock, + mirror_filter=self.return_first) + assert(results == mirrors[0]) + + results = gpmi(arch_mirrors, data_source=data_source_mock, + mirror_filter=self.return_second) + assert(results == mirrors[1]) + + results = gpmi(arch_mirrors, data_source=data_source_mock, + mirror_filter=self.return_none) + assert(results == package_mirrors[0]['failsafe']) + + def test_get_package_mirror_info_az_non_ec2(self): + arch_mirrors = gapmi(package_mirrors, arch="amd64") + data_source_mock = mock.Mock(availability_zone="nova.cloudvendor") + + results = gpmi(arch_mirrors, data_source=data_source_mock, + mirror_filter=self.return_first) + assert(results == { + 'primary': 'http://nova.cloudvendor.clouds/', + 'security': 'http://security-mirror1-intel'} + ) + + results = gpmi(arch_mirrors, data_source=data_source_mock, + mirror_filter=self.return_last) + assert(results == { + 'primary': 'http://nova.cloudvendor.clouds/', + 'security': 'http://security-mirror2-intel'} + ) + + def test_get_package_mirror_info_none(self): + arch_mirrors = gapmi(package_mirrors, arch="amd64") + data_source_mock = mock.Mock(availability_zone=None) + + # because both search entries here replacement based on + # availability-zone, the filter will be called with an empty list and + # failsafe should be taken. + results = gpmi(arch_mirrors, data_source=data_source_mock, + mirror_filter=self.return_first) + assert(results == { + 'primary': 'http://fs-primary-intel', + 'security': 'http://security-mirror1-intel'} + ) + + results = gpmi(arch_mirrors, data_source=data_source_mock, + mirror_filter=self.return_last) + assert(results == { + 'primary': 'http://fs-primary-intel', + 'security': 'http://security-mirror2-intel'} + ) + +# vi: ts=4 expandtab diff --git a/tests/unittests/distros/test_gentoo.py b/tests/unittests/distros/test_gentoo.py new file mode 100644 index 00000000..4e4680b8 --- /dev/null +++ b/tests/unittests/distros/test_gentoo.py @@ -0,0 +1,26 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import util +from cloudinit import atomic_helper +from tests.unittests.helpers import CiTestCase +from . import _get_distro + + +class TestGentoo(CiTestCase): + + def test_write_hostname(self): + distro = _get_distro("gentoo") + hostname = "myhostname" + hostfile = self.tmp_path("hostfile") + distro._write_hostname(hostname, hostfile) + self.assertEqual('hostname="myhostname"\n', util.load_file(hostfile)) + + def test_write_existing_hostname_with_comments(self): + distro = _get_distro("gentoo") + hostname = "myhostname" + contents = '#This is the hostname\nhostname="localhost"' + hostfile = self.tmp_path("hostfile") + atomic_helper.write_file(hostfile, contents, omode="w") + distro._write_hostname(hostname, hostfile) + self.assertEqual('#This is the hostname\nhostname="myhostname"\n', + util.load_file(hostfile)) diff --git a/tests/unittests/distros/test_hostname.py b/tests/unittests/distros/test_hostname.py new file mode 100644 index 00000000..f6d4dbe5 --- /dev/null +++ b/tests/unittests/distros/test_hostname.py @@ -0,0 +1,42 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
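+
+# Illustrative usage sketch (not part of the original tests): the parser
+# under test preserves comments and blank lines while replacing the single
+# hostname entry, e.g.:
+#
+#     from cloudinit.distros.parsers import hostname
+#
+#     hn = hostname.HostnameConf('# comment\n\nmyhost\n')
+#     hn.set_hostname('newhost')
+#     assert hn.hostname == 'newhost'
+#     assert '# comment' in str(hn)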
+ +import unittest + +from cloudinit.distros.parsers import hostname + + +BASE_HOSTNAME = ''' +# My super-duper-hostname + +blahblah + +''' +BASE_HOSTNAME = BASE_HOSTNAME.strip() + + +class TestHostnameHelper(unittest.TestCase): + def test_parse_same(self): + hn = hostname.HostnameConf(BASE_HOSTNAME) + self.assertEqual(str(hn).strip(), BASE_HOSTNAME) + self.assertEqual(hn.hostname, 'blahblah') + + def test_no_adjust_hostname(self): + hn = hostname.HostnameConf(BASE_HOSTNAME) + prev_name = hn.hostname + hn.set_hostname("") + self.assertEqual(hn.hostname, prev_name) + + def test_adjust_hostname(self): + hn = hostname.HostnameConf(BASE_HOSTNAME) + prev_name = hn.hostname + self.assertEqual(prev_name, 'blahblah') + hn.set_hostname("bbbbd") + self.assertEqual(hn.hostname, 'bbbbd') + expected_out = ''' +# My super-duper-hostname + +bbbbd +''' + self.assertEqual(str(hn).strip(), expected_out.strip()) + +# vi: ts=4 expandtab diff --git a/tests/unittests/distros/test_hosts.py b/tests/unittests/distros/test_hosts.py new file mode 100644 index 00000000..8aaa6e48 --- /dev/null +++ b/tests/unittests/distros/test_hosts.py @@ -0,0 +1,45 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import unittest + +from cloudinit.distros.parsers import hosts + + +BASE_ETC = ''' +# Example +127.0.0.1 localhost +192.168.1.10 foo.mydomain.org foo +192.168.1.10 bar.mydomain.org bar +146.82.138.7 master.debian.org master +209.237.226.90 www.opensource.org +''' +BASE_ETC = BASE_ETC.strip() + + +class TestHostsHelper(unittest.TestCase): + def test_parse(self): + eh = hosts.HostsConf(BASE_ETC) + self.assertEqual(eh.get_entry('127.0.0.1'), [['localhost']]) + self.assertEqual(eh.get_entry('192.168.1.10'), + [['foo.mydomain.org', 'foo'], + ['bar.mydomain.org', 'bar']]) + eh = str(eh) + self.assertTrue(eh.startswith('# Example')) + + def test_add(self): + eh = hosts.HostsConf(BASE_ETC) + eh.add_entry('127.0.0.0', 'blah') + self.assertEqual(eh.get_entry('127.0.0.0'), [['blah']]) + eh.add_entry('127.0.0.3', 'blah', 'blah2', 'blah3') + self.assertEqual(eh.get_entry('127.0.0.3'), + [['blah', 'blah2', 'blah3']]) + + def test_del(self): + eh = hosts.HostsConf(BASE_ETC) + eh.add_entry('127.0.0.0', 'blah') + self.assertEqual(eh.get_entry('127.0.0.0'), [['blah']]) + + eh.del_entries('127.0.0.0') + self.assertEqual(eh.get_entry('127.0.0.0'), []) + +# vi: ts=4 expandtab diff --git a/tests/unittests/distros/test_init.py b/tests/unittests/distros/test_init.py new file mode 100644 index 00000000..fd64a322 --- /dev/null +++ b/tests/unittests/distros/test_init.py @@ -0,0 +1,161 @@ +# Copyright (C) 2020 Canonical Ltd. +# +# Author: Daniel Watkins +# +# This file is part of cloud-init. See LICENSE file for license information. +"""Tests for cloudinit/distros/__init__.py""" + +from unittest import mock + +import pytest + +from cloudinit.distros import _get_package_mirror_info, LDH_ASCII_CHARS + +# In newer versions of Python, these characters will be omitted instead +# of substituted because of security concerns. +# See https://bugs.python.org/issue43882 +SECURITY_URL_CHARS = '\n\r\t' + +# Define a set of characters we would expect to be replaced +INVALID_URL_CHARS = [ + chr(x) for x in range(127) + if chr(x) not in LDH_ASCII_CHARS + SECURITY_URL_CHARS +] +for separator in [":", ".", "/", "#", "?", "@", "[", "]"]: + # Remove from the set characters that either separate hostname parts (":", + # "."), terminate hostnames ("/", "#", "?", "@"), or cause Python to be + # unable to parse URLs ("[", "]"). 
+ INVALID_URL_CHARS.remove(separator) + + +class TestGetPackageMirrorInfo: + """ + Tests for cloudinit.distros._get_package_mirror_info. + + These supplement the tests in tests/unittests/test_distros/test_generic.py + which are more focused on testing a single production-like configuration. + These tests are more focused on specific aspects of the unit under test. + """ + + @pytest.mark.parametrize('mirror_info,expected', [ + # Empty info gives empty return + ({}, {}), + # failsafe values used if present + ({'failsafe': {'primary': 'http://value', 'security': 'http://other'}}, + {'primary': 'http://value', 'security': 'http://other'}), + # search values used if present + ({'search': {'primary': ['http://value'], + 'security': ['http://other']}}, + {'primary': ['http://value'], 'security': ['http://other']}), + # failsafe values used if search value not present + ({'search': {'primary': ['http://value']}, + 'failsafe': {'security': 'http://other'}}, + {'primary': ['http://value'], 'security': 'http://other'}) + ]) + def test_get_package_mirror_info_failsafe(self, mirror_info, expected): + """ + Test the interaction between search and failsafe inputs + + (This doesn't test the case where the mirror_filter removes all search + options; test_failsafe_used_if_all_search_results_filtered_out covers + that.) + """ + assert expected == _get_package_mirror_info(mirror_info, + mirror_filter=lambda x: x) + + def test_failsafe_used_if_all_search_results_filtered_out(self): + """Test the failsafe option used if all search options eliminated.""" + mirror_info = { + 'search': {'primary': ['http://value']}, + 'failsafe': {'primary': 'http://other'} + } + assert {'primary': 'http://other'} == _get_package_mirror_info( + mirror_info, mirror_filter=lambda x: False) + + @pytest.mark.parametrize('allow_ec2_mirror, platform_type', [ + (True, 'ec2') + ]) + @pytest.mark.parametrize('availability_zone,region,patterns,expected', ( + # Test ec2_region alone + ('fk-fake-1f', None, ['http://EC2-%(ec2_region)s/ubuntu'], + ['http://ec2-fk-fake-1/ubuntu']), + # Test availability_zone alone + ('fk-fake-1f', None, ['http://AZ-%(availability_zone)s/ubuntu'], + ['http://az-fk-fake-1f/ubuntu']), + # Test region alone + (None, 'fk-fake-1', ['http://RG-%(region)s/ubuntu'], + ['http://rg-fk-fake-1/ubuntu']), + # Test that ec2_region is not available for non-matching AZs + ('fake-fake-1f', None, + ['http://EC2-%(ec2_region)s/ubuntu', + 'http://AZ-%(availability_zone)s/ubuntu'], + ['http://az-fake-fake-1f/ubuntu']), + # Test that template order maintained + (None, 'fake-region', + ['http://RG-%(region)s-2/ubuntu', 'http://RG-%(region)s-1/ubuntu'], + ['http://rg-fake-region-2/ubuntu', 'http://rg-fake-region-1/ubuntu']), + # Test that non-ASCII hostnames are IDNA encoded; + # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q" + (None, 'ТεЅТ̣', ['http://www.IDNA-%(region)s.com/ubuntu'], + ['http://www.xn--idna--4kd53hh6aba3q.com/ubuntu']), + # Test that non-ASCII hostnames with a port are IDNA encoded; + # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q" + (None, 'ТεЅТ̣', ['http://www.IDNA-%(region)s.com:8080/ubuntu'], + ['http://www.xn--idna--4kd53hh6aba3q.com:8080/ubuntu']), + # Test that non-ASCII non-hostname parts of URLs are unchanged + (None, 'ТεЅТ̣', ['http://www.example.com/%(region)s/ubuntu'], + ['http://www.example.com/ТεЅТ̣/ubuntu']), + # Test that IPv4 addresses are unchanged + (None, 'fk-fake-1', ['http://192.168.1.1:8080/%(region)s/ubuntu'], + ['http://192.168.1.1:8080/fk-fake-1/ubuntu']), + # Test that 
IPv6 addresses are unchanged + (None, 'fk-fake-1', + ['http://[2001:67c:1360:8001::23]/%(region)s/ubuntu'], + ['http://[2001:67c:1360:8001::23]/fk-fake-1/ubuntu']), + # Test that unparseable URLs are filtered out of the mirror list + (None, 'inv[lid', + ['http://%(region)s.in.hostname/should/be/filtered', + 'http://but.not.in.the.path/%(region)s'], + ['http://but.not.in.the.path/inv[lid']), + (None, '-some-region-', + ['http://-lead-ing.%(region)s.trail-ing-.example.com/ubuntu'], + ['http://lead-ing.some-region.trail-ing.example.com/ubuntu']), + ) + tuple( + # Dynamically generate a test case for each non-LDH + # (Letters/Digits/Hyphen) ASCII character, testing that it is + # substituted with a hyphen + (None, 'fk{0}fake{0}1'.format(invalid_char), + ['http://%(region)s/ubuntu'], ['http://fk-fake-1/ubuntu']) + for invalid_char in INVALID_URL_CHARS + )) + def test_valid_substitution(self, + allow_ec2_mirror, + platform_type, + availability_zone, + region, + patterns, + expected): + """Test substitution works as expected.""" + flag_path = "cloudinit.distros." \ + "ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES" + + m_data_source = mock.Mock( + availability_zone=availability_zone, + region=region, + platform_type=platform_type + ) + mirror_info = {'search': {'primary': patterns}} + + with mock.patch(flag_path, allow_ec2_mirror): + ret = _get_package_mirror_info( + mirror_info, + data_source=m_data_source, + mirror_filter=lambda x: x + ) + print(allow_ec2_mirror) + print(platform_type) + print(availability_zone) + print(region) + print(patterns) + print(expected) + assert {'primary': expected} == ret diff --git a/tests/unittests/distros/test_manage_service.py b/tests/unittests/distros/test_manage_service.py new file mode 100644 index 00000000..6f1bd0b1 --- /dev/null +++ b/tests/unittests/distros/test_manage_service.py @@ -0,0 +1,38 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
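The test_valid_substitution cases above reduce to %-templating followed by hostname sanitization. A rough sketch of the transformation the expected values encode (the real logic lives in _get_package_mirror_info; this is illustrative only):

    pattern = 'http://EC2-%(ec2_region)s/ubuntu'
    candidate = pattern % {'ec2_region': 'fk-fake-1'}
    # candidate == 'http://EC2-fk-fake-1/ubuntu'; the hostname part is
    # then lowercased, non-LDH ASCII characters are replaced with
    # hyphens, and non-ASCII labels are IDNA-encoded, yielding
    # 'http://ec2-fk-fake-1/ubuntu'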
+ +from tests.unittests.helpers import (CiTestCase, mock) +from tests.unittests.util import MockDistro + + +class TestManageService(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestManageService, self).setUp() + self.dist = MockDistro() + + @mock.patch.object(MockDistro, 'uses_systemd', return_value=False) + @mock.patch("cloudinit.distros.subp.subp") + def test_manage_service_systemctl_initcmd(self, m_subp, m_sysd): + self.dist.init_cmd = ['systemctl'] + self.dist.manage_service('start', 'myssh') + m_subp.assert_called_with(['systemctl', 'start', 'myssh'], + capture=True) + + @mock.patch.object(MockDistro, 'uses_systemd', return_value=False) + @mock.patch("cloudinit.distros.subp.subp") + def test_manage_service_service_initcmd(self, m_subp, m_sysd): + self.dist.init_cmd = ['service'] + self.dist.manage_service('start', 'myssh') + m_subp.assert_called_with(['service', 'myssh', 'start'], capture=True) + + @mock.patch.object(MockDistro, 'uses_systemd', return_value=True) + @mock.patch("cloudinit.distros.subp.subp") + def test_manage_service_systemctl(self, m_subp, m_sysd): + self.dist.init_cmd = ['ignore'] + self.dist.manage_service('start', 'myssh') + m_subp.assert_called_with(['systemctl', 'start', 'myssh'], + capture=True) + +# vi: ts=4 sw=4 expandtab diff --git a/tests/unittests/distros/test_netbsd.py b/tests/unittests/distros/test_netbsd.py new file mode 100644 index 00000000..11a68d2a --- /dev/null +++ b/tests/unittests/distros/test_netbsd.py @@ -0,0 +1,17 @@ +import cloudinit.distros.netbsd + +import pytest +import unittest.mock as mock + + +@pytest.mark.parametrize('with_pkgin', (True, False)) +@mock.patch("cloudinit.distros.netbsd.os") +def test_init(m_os, with_pkgin): + print(with_pkgin) + m_os.path.exists.return_value = with_pkgin + cfg = {} + + distro = cloudinit.distros.netbsd.NetBSD("netbsd", cfg, None) + expectation = ['pkgin', '-y', 'full-upgrade'] if with_pkgin else None + assert distro.pkg_cmd_upgrade_prefix == expectation + assert [mock.call('/usr/pkg/bin/pkgin')] == m_os.path.exists.call_args_list diff --git a/tests/unittests/distros/test_netconfig.py b/tests/unittests/distros/test_netconfig.py new file mode 100644 index 00000000..90ac5578 --- /dev/null +++ b/tests/unittests/distros/test_netconfig.py @@ -0,0 +1,916 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
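The manage_service tests above pin down the argument order for each init system, which is easy to get backwards. For quick reference, the exact commands asserted:

    # systemd: action before unit
    ['systemctl', 'start', 'myssh']
    # sysvinit-style 'service': unit before action
    ['service', 'myssh', 'start']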
+ +import copy +import os +import re +from io import StringIO +from textwrap import dedent +from unittest import mock + +from cloudinit import distros +from cloudinit.distros.parsers.sys_conf import SysConf +from cloudinit import helpers +from cloudinit import settings +from tests.unittests.helpers import ( + FilesystemMockingTestCase, dir2dict) +from cloudinit import subp +from cloudinit import util +from cloudinit import safeyaml + +BASE_NET_CFG = ''' +auto lo +iface lo inet loopback + +auto eth0 +iface eth0 inet static + address 192.168.1.5 + broadcast 192.168.1.0 + gateway 192.168.1.254 + netmask 255.255.255.0 + network 192.168.0.0 + +auto eth1 +iface eth1 inet dhcp +''' + +BASE_NET_CFG_FROM_V2 = ''' +auto lo +iface lo inet loopback + +auto eth0 +iface eth0 inet static + address 192.168.1.5/24 + gateway 192.168.1.254 + +auto eth1 +iface eth1 inet dhcp +''' + +BASE_NET_CFG_IPV6 = ''' +auto lo +iface lo inet loopback + +auto eth0 +iface eth0 inet static + address 192.168.1.5 + netmask 255.255.255.0 + network 192.168.0.0 + broadcast 192.168.1.0 + gateway 192.168.1.254 + +iface eth0 inet6 static + address 2607:f0d0:1002:0011::2 + netmask 64 + gateway 2607:f0d0:1002:0011::1 + +iface eth1 inet static + address 192.168.1.6 + netmask 255.255.255.0 + network 192.168.0.0 + broadcast 192.168.1.0 + gateway 192.168.1.254 + +iface eth1 inet6 static + address 2607:f0d0:1002:0011::3 + netmask 64 + gateway 2607:f0d0:1002:0011::1 +''' + +V1_NET_CFG = {'config': [{'name': 'eth0', + + 'subnets': [{'address': '192.168.1.5', + 'broadcast': '192.168.1.0', + 'gateway': '192.168.1.254', + 'netmask': '255.255.255.0', + 'type': 'static'}], + 'type': 'physical'}, + {'name': 'eth1', + 'subnets': [{'control': 'auto', 'type': 'dhcp4'}], + 'type': 'physical'}], + 'version': 1} + +V1_NET_CFG_WITH_DUPS = """\ +# same value in interface specific dns and global dns +# should produce single entry in network file +version: 1 +config: + - type: physical + name: eth0 + subnets: + - type: static + address: 192.168.0.102/24 + dns_nameservers: [1.2.3.4] + dns_search: [test.com] + interface: eth0 + - type: nameserver + address: [1.2.3.4] + search: [test.com] +""" + +V1_NET_CFG_OUTPUT = """\ +# This file is generated from information provided by the datasource. Changes +# to it will not persist across an instance reboot. To disable cloud-init's +# network configuration capabilities, write a file +# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: +# network: {config: disabled} +auto lo +iface lo inet loopback + +auto eth0 +iface eth0 inet static + address 192.168.1.5/24 + broadcast 192.168.1.0 + gateway 192.168.1.254 + +auto eth1 +iface eth1 inet dhcp +""" + +V1_NET_CFG_IPV6_OUTPUT = """\ +# This file is generated from information provided by the datasource. Changes +# to it will not persist across an instance reboot. 
To disable cloud-init's +# network configuration capabilities, write a file +# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: +# network: {config: disabled} +auto lo +iface lo inet loopback + +auto eth0 +iface eth0 inet6 static + address 2607:f0d0:1002:0011::2/64 + gateway 2607:f0d0:1002:0011::1 + +auto eth1 +iface eth1 inet dhcp +""" + +V1_NET_CFG_IPV6 = {'config': [{'name': 'eth0', + 'subnets': [{'address': + '2607:f0d0:1002:0011::2', + 'gateway': + '2607:f0d0:1002:0011::1', + 'netmask': '64', + 'type': 'static6'}], + 'type': 'physical'}, + {'name': 'eth1', + 'subnets': [{'control': 'auto', + 'type': 'dhcp4'}], + 'type': 'physical'}], + 'version': 1} + + +V1_TO_V2_NET_CFG_OUTPUT = """\ +# This file is generated from information provided by the datasource. Changes +# to it will not persist across an instance reboot. To disable cloud-init's +# network configuration capabilities, write a file +# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: +# network: {config: disabled} +network: + version: 2 + ethernets: + eth0: + addresses: + - 192.168.1.5/24 + gateway4: 192.168.1.254 + eth1: + dhcp4: true +""" + +V1_TO_V2_NET_CFG_IPV6_OUTPUT = """\ +# This file is generated from information provided by the datasource. Changes +# to it will not persist across an instance reboot. To disable cloud-init's +# network configuration capabilities, write a file +# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: +# network: {config: disabled} +network: + version: 2 + ethernets: + eth0: + addresses: + - 2607:f0d0:1002:0011::2/64 + gateway6: 2607:f0d0:1002:0011::1 + eth1: + dhcp4: true +""" + +V2_NET_CFG = { + 'ethernets': { + 'eth7': { + 'addresses': ['192.168.1.5/24'], + 'gateway4': '192.168.1.254'}, + 'eth9': { + 'dhcp4': True} + }, + 'version': 2 +} + + +V2_TO_V2_NET_CFG_OUTPUT = """\ +# This file is generated from information provided by the datasource. Changes +# to it will not persist across an instance reboot. 
To disable cloud-init's +# network configuration capabilities, write a file +# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: +# network: {config: disabled} +network: + ethernets: + eth7: + addresses: + - 192.168.1.5/24 + gateway4: 192.168.1.254 + eth9: + dhcp4: true + version: 2 +""" + + +class WriteBuffer(object): + def __init__(self): + self.buffer = StringIO() + self.mode = None + self.omode = None + + def write(self, text): + self.buffer.write(text) + + def __str__(self): + return self.buffer.getvalue() + + +class TestNetCfgDistroBase(FilesystemMockingTestCase): + + def setUp(self): + super(TestNetCfgDistroBase, self).setUp() + self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy') + + def _get_distro(self, dname, renderers=None): + cls = distros.fetch(dname) + cfg = settings.CFG_BUILTIN + cfg['system_info']['distro'] = dname + if renderers: + cfg['system_info']['network'] = {'renderers': renderers} + paths = helpers.Paths({}) + return cls(dname, cfg.get('system_info'), paths) + + def assertCfgEquals(self, blob1, blob2): + b1 = dict(SysConf(blob1.strip().splitlines())) + b2 = dict(SysConf(blob2.strip().splitlines())) + self.assertEqual(b1, b2) + for (k, v) in b1.items(): + self.assertIn(k, b2) + for (k, v) in b2.items(): + self.assertIn(k, b1) + for (k, v) in b1.items(): + self.assertEqual(v, b2[k]) + + +class TestNetCfgDistroFreeBSD(TestNetCfgDistroBase): + + def setUp(self): + super(TestNetCfgDistroFreeBSD, self).setUp() + self.distro = self._get_distro('freebsd', renderers=['freebsd']) + + def _apply_and_verify_freebsd(self, apply_fn, config, expected_cfgs=None, + bringup=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.freebsd.available') as m_avail: + m_avail.return_value = True + with self.reRooted(tmpd) as tmpd: + util.ensure_dir('/etc') + util.ensure_file('/etc/rc.conf') + util.ensure_file('/etc/resolv.conf') + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + print("----------") + print(expected) + print("^^^^ expected | rendered VVVVVVV") + print(results[cfgpath]) + print("----------") + self.assertEqual( + set(expected.split('\n')), + set(results[cfgpath].split('\n'))) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + def test_apply_network_config_freebsd_standard(self, ifaces_mac): + ifaces_mac.return_value = { + '00:15:5d:4c:73:00': 'eth0', + } + rc_conf_expected = """\ +defaultrouter=192.168.1.254 +ifconfig_eth0='192.168.1.5 netmask 255.255.255.0' +ifconfig_eth1=DHCP +""" + + expected_cfgs = { + '/etc/rc.conf': rc_conf_expected, + '/etc/resolv.conf': '' + } + self._apply_and_verify_freebsd(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + def test_apply_network_config_freebsd_ifrename(self, ifaces_mac): + ifaces_mac.return_value = { + '00:15:5d:4c:73:00': 'vtnet0', + } + rc_conf_expected = """\ +ifconfig_vtnet0_name=eth0 +defaultrouter=192.168.1.254 +ifconfig_eth0='192.168.1.5 netmask 255.255.255.0' +ifconfig_eth1=DHCP +""" + + V1_NET_CFG_RENAME = copy.deepcopy(V1_NET_CFG) + V1_NET_CFG_RENAME['config'][0]['mac_address'] = '00:15:5d:4c:73:00' + + expected_cfgs = { + '/etc/rc.conf': rc_conf_expected, + '/etc/resolv.conf': '' + } + self._apply_and_verify_freebsd(self.distro.apply_network_config, + V1_NET_CFG_RENAME, + expected_cfgs=expected_cfgs.copy()) + 
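The ifrename case above hinges on matching by MAC address: when the kernel's name for a NIC differs from the name in the v1 config, the FreeBSD renderer emits a rename line before the address lines. A condensed sketch of that decision, using the values from the test:

    ifaces_by_mac = {'00:15:5d:4c:73:00': 'vtnet0'}  # kernel view
    cfg_mac, cfg_name = '00:15:5d:4c:73:00', 'eth0'  # v1 config view
    if ifaces_by_mac[cfg_mac] != cfg_name:
        # rc.conf gains: ifconfig_vtnet0_name=eth0, and the address
        # lines are then written against the renamed interface (eth0)
        print('ifconfig_%s_name=%s' % (ifaces_by_mac[cfg_mac], cfg_name))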
+ @mock.patch('cloudinit.net.get_interfaces_by_mac') + def test_apply_network_config_freebsd_nameserver(self, ifaces_mac): + ifaces_mac.return_value = { + '00:15:5d:4c:73:00': 'eth0', + } + + V1_NET_CFG_DNS = copy.deepcopy(V1_NET_CFG) + ns = ['1.2.3.4'] + V1_NET_CFG_DNS['config'][0]['subnets'][0]['dns_nameservers'] = ns + expected_cfgs = { + '/etc/resolv.conf': 'nameserver 1.2.3.4\n' + } + self._apply_and_verify_freebsd(self.distro.apply_network_config, + V1_NET_CFG_DNS, + expected_cfgs=expected_cfgs.copy()) + + +class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase): + + def setUp(self): + super(TestNetCfgDistroUbuntuEni, self).setUp() + self.distro = self._get_distro('ubuntu', renderers=['eni']) + + def eni_path(self): + return '/etc/network/interfaces.d/50-cloud-init.cfg' + + def _apply_and_verify_eni(self, apply_fn, config, expected_cfgs=None, + bringup=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.eni.available') as m_avail: + m_avail.return_value = True + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + print("----------") + print(expected) + print("^^^^ expected | rendered VVVVVVV") + print(results[cfgpath]) + print("----------") + self.assertEqual(expected, results[cfgpath]) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def test_apply_network_config_eni_ub(self): + expected_cfgs = { + self.eni_path(): V1_NET_CFG_OUTPUT, + } + # ub_distro.apply_network_config(V1_NET_CFG, False) + self._apply_and_verify_eni(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + def test_apply_network_config_ipv6_ub(self): + expected_cfgs = { + self.eni_path(): V1_NET_CFG_IPV6_OUTPUT + } + self._apply_and_verify_eni(self.distro.apply_network_config, + V1_NET_CFG_IPV6, + expected_cfgs=expected_cfgs.copy()) + + +class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): + def setUp(self): + super(TestNetCfgDistroUbuntuNetplan, self).setUp() + self.distro = self._get_distro('ubuntu', renderers=['netplan']) + self.devlist = ['eth0', 'lo'] + + def _apply_and_verify_netplan(self, apply_fn, config, expected_cfgs=None, + bringup=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.netplan.available', + return_value=True): + with mock.patch("cloudinit.net.netplan.get_devicelist", + return_value=self.devlist): + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + print("----------") + print(expected) + print("^^^^ expected | rendered VVVVVVV") + print(results[cfgpath]) + print("----------") + self.assertEqual(expected, results[cfgpath]) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def netplan_path(self): + return '/etc/netplan/50-cloud-init.yaml' + + def test_apply_network_config_v1_to_netplan_ub(self): + expected_cfgs = { + self.netplan_path(): V1_TO_V2_NET_CFG_OUTPUT, + } + + # ub_distro.apply_network_config(V1_NET_CFG, False) + self._apply_and_verify_netplan(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + def test_apply_network_config_v1_ipv6_to_netplan_ub(self): + expected_cfgs = { + self.netplan_path(): V1_TO_V2_NET_CFG_IPV6_OUTPUT, + } + + # ub_distro.apply_network_config(V1_NET_CFG_IPV6, False) + self._apply_and_verify_netplan(self.distro.apply_network_config, + 
V1_NET_CFG_IPV6, + expected_cfgs=expected_cfgs.copy()) + + def test_apply_network_config_v2_passthrough_ub(self): + expected_cfgs = { + self.netplan_path(): V2_TO_V2_NET_CFG_OUTPUT, + } + # ub_distro.apply_network_config(V2_NET_CFG, False) + self._apply_and_verify_netplan(self.distro.apply_network_config, + V2_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + +class TestNetCfgDistroRedhat(TestNetCfgDistroBase): + + def setUp(self): + super(TestNetCfgDistroRedhat, self).setUp() + self.distro = self._get_distro('rhel', renderers=['sysconfig']) + + def ifcfg_path(self, ifname): + return '/etc/sysconfig/network-scripts/ifcfg-%s' % ifname + + def control_path(self): + return '/etc/sysconfig/network' + + def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, + bringup=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.sysconfig.available') as m_avail: + m_avail.return_value = True + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + self.assertCfgEquals(expected, results[cfgpath]) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def test_apply_network_config_rh(self): + expected_cfgs = { + self.ifcfg_path('eth0'): dedent("""\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=eth0 + GATEWAY=192.168.1.254 + IPADDR=192.168.1.5 + NETMASK=255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.ifcfg_path('eth1'): dedent("""\ + BOOTPROTO=dhcp + DEVICE=eth1 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.control_path(): dedent("""\ + NETWORKING=yes + """), + } + # rh_distro.apply_network_config(V1_NET_CFG, False) + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + def test_apply_network_config_ipv6_rh(self): + expected_cfgs = { + self.ifcfg_path('eth0'): dedent("""\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=eth0 + IPV6ADDR=2607:f0d0:1002:0011::2/64 + IPV6INIT=yes + IPV6_AUTOCONF=no + IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 + IPV6_FORCE_ACCEPT_RA=no + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.ifcfg_path('eth1'): dedent("""\ + BOOTPROTO=dhcp + DEVICE=eth1 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.control_path(): dedent("""\ + NETWORKING=yes + NETWORKING_IPV6=yes + IPV6_AUTOCONF=no + """), + } + # rh_distro.apply_network_config(V1_NET_CFG_IPV6, False) + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG_IPV6, + expected_cfgs=expected_cfgs.copy()) + + def test_vlan_render_unsupported(self): + """Render officially unsupported vlan names.""" + cfg = { + 'version': 2, + 'ethernets': { + 'eth0': {'addresses': ["192.10.1.2/24"], + 'match': {'macaddress': "00:16:3e:60:7c:df"}}}, + 'vlans': { + 'infra0': {'addresses': ["10.0.1.2/16"], + 'id': 1001, 'link': 'eth0'}}, + } + expected_cfgs = { + self.ifcfg_path('eth0'): dedent("""\ + BOOTPROTO=none + DEVICE=eth0 + HWADDR=00:16:3e:60:7c:df + IPADDR=192.10.1.2 + NETMASK=255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.ifcfg_path('infra0'): dedent("""\ + BOOTPROTO=none + DEVICE=infra0 + IPADDR=10.0.1.2 + NETMASK=255.255.0.0 + NM_CONTROLLED=no + ONBOOT=yes + PHYSDEV=eth0 + USERCTL=no + VLAN=yes + """), + self.control_path(): dedent("""\ + NETWORKING=yes + """), + } + self._apply_and_verify( + self.distro.apply_network_config, cfg, + 
expected_cfgs=expected_cfgs) + + def test_vlan_render(self): + cfg = { + 'version': 2, + 'ethernets': { + 'eth0': {'addresses': ["192.10.1.2/24"]}}, + 'vlans': { + 'eth0.1001': {'addresses': ["10.0.1.2/16"], + 'id': 1001, 'link': 'eth0'}}, + } + expected_cfgs = { + self.ifcfg_path('eth0'): dedent("""\ + BOOTPROTO=none + DEVICE=eth0 + IPADDR=192.10.1.2 + NETMASK=255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + self.ifcfg_path('eth0.1001'): dedent("""\ + BOOTPROTO=none + DEVICE=eth0.1001 + IPADDR=10.0.1.2 + NETMASK=255.255.0.0 + NM_CONTROLLED=no + ONBOOT=yes + PHYSDEV=eth0 + USERCTL=no + VLAN=yes + """), + self.control_path(): dedent("""\ + NETWORKING=yes + """), + } + self._apply_and_verify( + self.distro.apply_network_config, cfg, + expected_cfgs=expected_cfgs) + + +class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): + + def setUp(self): + super(TestNetCfgDistroOpensuse, self).setUp() + self.distro = self._get_distro('opensuse', renderers=['sysconfig']) + + def ifcfg_path(self, ifname): + return '/etc/sysconfig/network/ifcfg-%s' % ifname + + def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, + bringup=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.sysconfig.available') as m_avail: + m_avail.return_value = True + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + self.assertCfgEquals(expected, results[cfgpath]) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def test_apply_network_config_opensuse(self): + """Opensuse uses apply_network_config and renders sysconfig""" + expected_cfgs = { + self.ifcfg_path('eth0'): dedent("""\ + BOOTPROTO=static + IPADDR=192.168.1.5 + NETMASK=255.255.255.0 + STARTMODE=auto + """), + self.ifcfg_path('eth1'): dedent("""\ + BOOTPROTO=dhcp4 + STARTMODE=auto + """), + } + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy()) + + def test_apply_network_config_ipv6_opensuse(self): + """Opensuse uses apply_network_config and renders sysconfig w/ipv6""" + expected_cfgs = { + self.ifcfg_path('eth0'): dedent("""\ + BOOTPROTO=static + IPADDR6=2607:f0d0:1002:0011::2/64 + STARTMODE=auto + """), + self.ifcfg_path('eth1'): dedent("""\ + BOOTPROTO=dhcp4 + STARTMODE=auto + """), + } + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG_IPV6, + expected_cfgs=expected_cfgs.copy()) + + +class TestNetCfgDistroArch(TestNetCfgDistroBase): + def setUp(self): + super(TestNetCfgDistroArch, self).setUp() + self.distro = self._get_distro('arch', renderers=['netplan']) + + def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, + bringup=False, with_netplan=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.netplan.available', + return_value=with_netplan): + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + print("----------") + print(expected) + print("^^^^ expected | rendered VVVVVVV") + print(results[cfgpath]) + print("----------") + self.assertEqual(expected, results[cfgpath]) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def netctl_path(self, iface): + return '/etc/netctl/%s' % iface + + def netplan_path(self): + return '/etc/netplan/50-cloud-init.yaml' + + def 
test_apply_network_config_v1_without_netplan(self): + # Note that this is in fact an invalid netctl config: + # "Address=None/None" + # But this is what the renderer has been writing out for a long time, + # and the test's purpose is to assert that the netctl renderer is + # still being used in absence of netplan, not the correctness of the + # rendered netctl config. + expected_cfgs = { + self.netctl_path('eth0'): dedent("""\ + Address=192.168.1.5/255.255.255.0 + Connection=ethernet + DNS=() + Gateway=192.168.1.254 + IP=static + Interface=eth0 + """), + self.netctl_path('eth1'): dedent("""\ + Address=None/None + Connection=ethernet + DNS=() + Gateway= + IP=dhcp + Interface=eth1 + """), + } + + # ub_distro.apply_network_config(V1_NET_CFG, False) + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy(), + with_netplan=False) + + def test_apply_network_config_v1_with_netplan(self): + expected_cfgs = { + self.netplan_path(): dedent("""\ + # generated by cloud-init + network: + version: 2 + ethernets: + eth0: + addresses: + - 192.168.1.5/24 + gateway4: 192.168.1.254 + eth1: + dhcp4: true + """), + } + + with mock.patch( + 'cloudinit.net.netplan.get_devicelist', + return_value=[] + ): + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy(), + with_netplan=True) + + +class TestNetCfgDistroPhoton(TestNetCfgDistroBase): + + def setUp(self): + super(TestNetCfgDistroPhoton, self).setUp() + self.distro = self._get_distro('photon', renderers=['networkd']) + + def create_conf_dict(self, contents): + content_dict = {} + for line in contents: + if line: + line = line.strip() + if line and re.search(r'^\[(.+)\]$', line): + content_dict[line] = [] + key = line + elif line: + assert key + content_dict[key].append(line) + + return content_dict + + def compare_dicts(self, actual, expected): + for k, v in actual.items(): + self.assertEqual(sorted(expected[k]), sorted(v)) + + def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, + bringup=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.networkd.available') as m_avail: + m_avail.return_value = True + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + actual = self.create_conf_dict(results[cfgpath].splitlines()) + self.compare_dicts(actual, expected) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def nwk_file_path(self, ifname): + return '/etc/systemd/network/10-cloud-init-%s.network' % ifname + + def net_cfg_1(self, ifname): + ret = """\ + [Match] + Name=%s + [Network] + DHCP=no + [Address] + Address=192.168.1.5/24 + [Route] + Gateway=192.168.1.254""" % ifname + return ret + + def net_cfg_2(self, ifname): + ret = """\ + [Match] + Name=%s + [Network] + DHCP=ipv4""" % ifname + return ret + + def test_photon_network_config_v1(self): + tmp = self.net_cfg_1('eth0').splitlines() + expected_eth0 = self.create_conf_dict(tmp) + + tmp = self.net_cfg_2('eth1').splitlines() + expected_eth1 = self.create_conf_dict(tmp) + + expected_cfgs = { + self.nwk_file_path('eth0'): expected_eth0, + self.nwk_file_path('eth1'): expected_eth1, + } + + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs.copy()) + + def test_photon_network_config_v2(self): + tmp = self.net_cfg_1('eth7').splitlines() + expected_eth7 = self.create_conf_dict(tmp) + + tmp = 
self.net_cfg_2('eth9').splitlines() + expected_eth9 = self.create_conf_dict(tmp) + + expected_cfgs = { + self.nwk_file_path('eth7'): expected_eth7, + self.nwk_file_path('eth9'): expected_eth9, + } + + self._apply_and_verify(self.distro.apply_network_config, + V2_NET_CFG, + expected_cfgs.copy()) + + def test_photon_network_config_v1_with_duplicates(self): + expected = """\ + [Match] + Name=eth0 + [Network] + DHCP=no + DNS=1.2.3.4 + Domains=test.com + [Address] + Address=192.168.0.102/24""" + + net_cfg = safeyaml.load(V1_NET_CFG_WITH_DUPS) + + expected = self.create_conf_dict(expected.splitlines()) + expected_cfgs = { + self.nwk_file_path('eth0'): expected, + } + + self._apply_and_verify(self.distro.apply_network_config, + net_cfg, + expected_cfgs.copy()) + + +def get_mode(path, target=None): + return os.stat(subp.target_path(target, path)).st_mode & 0o777 + +# vi: ts=4 expandtab diff --git a/tests/unittests/distros/test_networking.py b/tests/unittests/distros/test_networking.py new file mode 100644 index 00000000..ec508f4d --- /dev/null +++ b/tests/unittests/distros/test_networking.py @@ -0,0 +1,223 @@ +from unittest import mock + +import pytest + +from cloudinit import net +from cloudinit.distros.networking import ( + BSDNetworking, + LinuxNetworking, + Networking, +) + +# See https://docs.pytest.org/en/stable/example +# /parametrize.html#parametrizing-conditional-raising +from contextlib import ExitStack as does_not_raise + + +@pytest.yield_fixture +def generic_networking_cls(): + """Returns a direct Networking subclass which errors on /sys usage. + + This enables the direct testing of functionality only present on the + ``Networking`` super-class, and provides a check on accidentally using /sys + in that context. + """ + + class TestNetworking(Networking): + def is_physical(self, *args, **kwargs): + raise NotImplementedError + + def settle(self, *args, **kwargs): + raise NotImplementedError + + def try_set_link_up(self, *args, **kwargs): + raise NotImplementedError + + error = AssertionError("Unexpectedly used /sys in generic networking code") + with mock.patch( + "cloudinit.net.get_sys_class_path", side_effect=error, + ): + yield TestNetworking + + +@pytest.yield_fixture +def sys_class_net(tmpdir): + sys_class_net_path = tmpdir.join("sys/class/net") + sys_class_net_path.ensure_dir() + with mock.patch( + "cloudinit.net.get_sys_class_path", + return_value=sys_class_net_path.strpath + "/", + ): + yield sys_class_net_path + + +class TestBSDNetworkingIsPhysical: + def test_raises_notimplementederror(self): + with pytest.raises(NotImplementedError): + BSDNetworking().is_physical("eth0") + + +class TestLinuxNetworkingIsPhysical: + def test_returns_false_by_default(self, sys_class_net): + assert not LinuxNetworking().is_physical("eth0") + + def test_returns_false_if_devname_exists_but_not_physical( + self, sys_class_net + ): + devname = "eth0" + sys_class_net.join(devname).mkdir() + assert not LinuxNetworking().is_physical(devname) + + def test_returns_true_if_device_is_physical(self, sys_class_net): + devname = "eth0" + device_dir = sys_class_net.join(devname) + device_dir.mkdir() + device_dir.join("device").write("") + + assert LinuxNetworking().is_physical(devname) + + +class TestBSDNetworkingTrySetLinkUp: + def test_raises_notimplementederror(self): + with pytest.raises(NotImplementedError): + BSDNetworking().try_set_link_up("eth0") + + +@mock.patch("cloudinit.net.is_up") +@mock.patch("cloudinit.distros.networking.subp.subp") +class TestLinuxNetworkingTrySetLinkUp: + def 
test_calls_subp_return_true(self, m_subp, m_is_up): + devname = "eth0" + m_is_up.return_value = True + is_success = LinuxNetworking().try_set_link_up(devname) + + assert (mock.call(['ip', 'link', 'set', devname, 'up']) == + m_subp.call_args_list[-1]) + assert is_success + + def test_calls_subp_return_false(self, m_subp, m_is_up): + devname = "eth0" + m_is_up.return_value = False + is_success = LinuxNetworking().try_set_link_up(devname) + + assert (mock.call(['ip', 'link', 'set', devname, 'up']) == + m_subp.call_args_list[-1]) + assert not is_success + + +class TestBSDNetworkingSettle: + def test_settle_doesnt_error(self): + # This also implicitly tests that it doesn't use subp.subp + BSDNetworking().settle() + + +@pytest.mark.usefixtures("sys_class_net") +@mock.patch("cloudinit.distros.networking.util.udevadm_settle", autospec=True) +class TestLinuxNetworkingSettle: + def test_no_arguments(self, m_udevadm_settle): + LinuxNetworking().settle() + + assert [mock.call(exists=None)] == m_udevadm_settle.call_args_list + + def test_exists_argument(self, m_udevadm_settle): + LinuxNetworking().settle(exists="ens3") + + expected_path = net.sys_dev_path("ens3") + assert [ + mock.call(exists=expected_path) + ] == m_udevadm_settle.call_args_list + + +class TestNetworkingWaitForPhysDevs: + @pytest.fixture + def wait_for_physdevs_netcfg(self): + """This config is shared across all the tests in this class.""" + + def ethernet(mac, name, driver=None, device_id=None): + v2_cfg = {"set-name": name, "match": {"macaddress": mac}} + if driver: + v2_cfg["match"].update({"driver": driver}) + if device_id: + v2_cfg["match"].update({"device_id": device_id}) + + return v2_cfg + + physdevs = [ + ["aa:bb:cc:dd:ee:ff", "eth0", "virtio", "0x1000"], + ["00:11:22:33:44:55", "ens3", "e1000", "0x1643"], + ] + netcfg = { + "version": 2, + "ethernets": {args[1]: ethernet(*args) for args in physdevs}, + } + return netcfg + + def test_skips_settle_if_all_present( + self, generic_networking_cls, wait_for_physdevs_netcfg, + ): + networking = generic_networking_cls() + with mock.patch.object( + networking, "get_interfaces_by_mac" + ) as m_get_interfaces_by_mac: + m_get_interfaces_by_mac.side_effect = iter( + [{"aa:bb:cc:dd:ee:ff": "eth0", "00:11:22:33:44:55": "ens3"}] + ) + with mock.patch.object( + networking, "settle", autospec=True + ) as m_settle: + networking.wait_for_physdevs(wait_for_physdevs_netcfg) + assert 0 == m_settle.call_count + + def test_calls_udev_settle_on_missing( + self, generic_networking_cls, wait_for_physdevs_netcfg, + ): + networking = generic_networking_cls() + with mock.patch.object( + networking, "get_interfaces_by_mac" + ) as m_get_interfaces_by_mac: + m_get_interfaces_by_mac.side_effect = iter( + [ + { + "aa:bb:cc:dd:ee:ff": "eth0" + }, # first call ens3 is missing + { + "aa:bb:cc:dd:ee:ff": "eth0", + "00:11:22:33:44:55": "ens3", + }, # second call has both + ] + ) + with mock.patch.object( + networking, "settle", autospec=True + ) as m_settle: + networking.wait_for_physdevs(wait_for_physdevs_netcfg) + m_settle.assert_called_with(exists="ens3") + + @pytest.mark.parametrize( + "strict,expectation", + [(True, pytest.raises(RuntimeError)), (False, does_not_raise())], + ) + def test_retrying_and_strict_behaviour( + self, + strict, + expectation, + generic_networking_cls, + wait_for_physdevs_netcfg, + ): + networking = generic_networking_cls() + with mock.patch.object( + networking, "get_interfaces_by_mac" + ) as m_get_interfaces_by_mac: + m_get_interfaces_by_mac.return_value = {} + + with 
mock.patch.object( + networking, "settle", autospec=True + ) as m_settle: + with expectation: + networking.wait_for_physdevs( + wait_for_physdevs_netcfg, strict=strict + ) + + assert ( + 5 * len(wait_for_physdevs_netcfg["ethernets"]) + == m_settle.call_count + ) diff --git a/tests/unittests/distros/test_opensuse.py b/tests/unittests/distros/test_opensuse.py new file mode 100644 index 00000000..4ff26102 --- /dev/null +++ b/tests/unittests/distros/test_opensuse.py @@ -0,0 +1,12 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from tests.unittests.helpers import CiTestCase + +from . import _get_distro + + +class TestopenSUSE(CiTestCase): + + def test_get_distro(self): + distro = _get_distro("opensuse") + self.assertEqual(distro.osfamily, 'suse') diff --git a/tests/unittests/distros/test_photon.py b/tests/unittests/distros/test_photon.py new file mode 100644 index 00000000..3858f723 --- /dev/null +++ b/tests/unittests/distros/test_photon.py @@ -0,0 +1,68 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from . import _get_distro +from cloudinit import util +from tests.unittests.helpers import mock +from tests.unittests.helpers import CiTestCase + +SYSTEM_INFO = { + 'paths': { + 'cloud_dir': '/var/lib/cloud/', + 'templates_dir': '/etc/cloud/templates/', + }, + 'network': {'renderers': 'networkd'}, +} + + +class TestPhoton(CiTestCase): + with_logs = True + distro = _get_distro('photon', SYSTEM_INFO) + expected_log_line = 'Rely on PhotonOS default network config' + + def test_network_renderer(self): + self.assertEqual(self.distro._cfg['network']['renderers'], 'networkd') + + def test_get_distro(self): + self.assertEqual(self.distro.osfamily, 'photon') + + @mock.patch("cloudinit.distros.photon.subp.subp") + def test_write_hostname(self, m_subp): + hostname = 'myhostname' + hostfile = self.tmp_path('previous-hostname') + self.distro._write_hostname(hostname, hostfile) + self.assertEqual(hostname, util.load_file(hostfile)) + + ret = self.distro._read_hostname(hostfile) + self.assertEqual(ret, hostname) + + m_subp.return_value = (None, None) + hostfile += 'hostfile' + self.distro._write_hostname(hostname, hostfile) + + m_subp.return_value = (hostname, None) + ret = self.distro._read_hostname(hostfile) + self.assertEqual(ret, hostname) + + self.logs.truncate(0) + m_subp.return_value = (None, 'bla') + self.distro._write_hostname(hostname, None) + self.assertIn('Error while setting hostname', self.logs.getvalue()) + + @mock.patch('cloudinit.net.generate_fallback_config') + def test_fallback_netcfg(self, m_fallback_cfg): + + key = 'disable_fallback_netcfg' + # Don't use fallback if no setting given + self.logs.truncate(0) + assert(self.distro.generate_fallback_config() is None) + self.assertIn(self.expected_log_line, self.logs.getvalue()) + + self.logs.truncate(0) + self.distro._cfg[key] = True + assert(self.distro.generate_fallback_config() is None) + self.assertIn(self.expected_log_line, self.logs.getvalue()) + + self.logs.truncate(0) + self.distro._cfg[key] = False + assert(self.distro.generate_fallback_config() is not None) + self.assertNotIn(self.expected_log_line, self.logs.getvalue()) diff --git a/tests/unittests/distros/test_resolv.py b/tests/unittests/distros/test_resolv.py new file mode 100644 index 00000000..e7971627 --- /dev/null +++ b/tests/unittests/distros/test_resolv.py @@ -0,0 +1,65 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
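The photon fallback checks above encode a three-way switch on the disable_fallback_netcfg key; restated as a sketch of the behaviour the assertions pin down:

    distro._cfg['disable_fallback_netcfg'] = False
    # False      -> generate_fallback_config() returns a config
    #               (cloud-init renders the network itself)
    # True/unset -> returns None and logs that it relies on the
    #               PhotonOS default network config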
+ +from cloudinit.distros.parsers import resolv_conf + +from tests.unittests.helpers import TestCase + +import re + + +BASE_RESOLVE = ''' +; generated by /sbin/dhclient-script +search blah.yahoo.com yahoo.com +nameserver 10.15.44.14 +nameserver 10.15.30.92 +''' +BASE_RESOLVE = BASE_RESOLVE.strip() + + +class TestResolvHelper(TestCase): + def test_parse_same(self): + rp = resolv_conf.ResolvConf(BASE_RESOLVE) + rp_r = str(rp).strip() + self.assertEqual(BASE_RESOLVE, rp_r) + + def test_local_domain(self): + rp = resolv_conf.ResolvConf(BASE_RESOLVE) + self.assertIsNone(rp.local_domain) + + rp.local_domain = "bob" + self.assertEqual('bob', rp.local_domain) + self.assertIn('domain bob', str(rp)) + + def test_nameservers(self): + rp = resolv_conf.ResolvConf(BASE_RESOLVE) + self.assertIn('10.15.44.14', rp.nameservers) + self.assertIn('10.15.30.92', rp.nameservers) + rp.add_nameserver('10.2') + self.assertIn('10.2', rp.nameservers) + self.assertIn('nameserver 10.2', str(rp)) + self.assertNotIn('10.3', rp.nameservers) + self.assertEqual(len(rp.nameservers), 3) + rp.add_nameserver('10.2') + rp.add_nameserver('10.3') + self.assertNotIn('10.3', rp.nameservers) + + def test_search_domains(self): + rp = resolv_conf.ResolvConf(BASE_RESOLVE) + self.assertIn('yahoo.com', rp.search_domains) + self.assertIn('blah.yahoo.com', rp.search_domains) + rp.add_search_domain('bbb.y.com') + self.assertIn('bbb.y.com', rp.search_domains) + self.assertTrue(re.search(r'search(.*)bbb.y.com(.*)', str(rp))) + self.assertIn('bbb.y.com', rp.search_domains) + rp.add_search_domain('bbb.y.com') + self.assertEqual(len(rp.search_domains), 3) + rp.add_search_domain('bbb2.y.com') + self.assertEqual(len(rp.search_domains), 4) + rp.add_search_domain('bbb3.y.com') + self.assertEqual(len(rp.search_domains), 5) + rp.add_search_domain('bbb4.y.com') + self.assertEqual(len(rp.search_domains), 6) + self.assertRaises(ValueError, rp.add_search_domain, 'bbb5.y.com') + self.assertEqual(len(rp.search_domains), 6) + +# vi: ts=4 expandtab diff --git a/tests/unittests/distros/test_sles.py b/tests/unittests/distros/test_sles.py new file mode 100644 index 00000000..04514a19 --- /dev/null +++ b/tests/unittests/distros/test_sles.py @@ -0,0 +1,12 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from tests.unittests.helpers import CiTestCase + +from . import _get_distro + + +class TestSLES(CiTestCase): + + def test_get_distro(self): + distro = _get_distro("sles") + self.assertEqual(distro.osfamily, 'suse') diff --git a/tests/unittests/distros/test_sysconfig.py b/tests/unittests/distros/test_sysconfig.py new file mode 100644 index 00000000..4368496d --- /dev/null +++ b/tests/unittests/distros/test_sysconfig.py @@ -0,0 +1,86 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import re + +from cloudinit.distros.parsers.sys_conf import SysConf + +from tests.unittests.helpers import TestCase + + +# Lots of good examples @ +# http://content.hccfl.edu/pollock/AUnix1/SysconfigFilesDesc.txt + +class TestSysConfHelper(TestCase): + # This function was added in 2.7, make it work for 2.6 + def assertRegMatches(self, text, regexp): + regexp = re.compile(regexp) + self.assertTrue(regexp.search(text), + msg="%s must match %s!" 
% (text, regexp.pattern)) + + def test_parse_no_change(self): + contents = '''# A comment +USESMBAUTH=no +KEYTABLE=/usr/lib/kbd/keytables/us.map +SHORTDATE=$(date +%y:%m:%d:%H:%M) +HOSTNAME=blahblah +NETMASK0=255.255.255.0 +# Inline comment +LIST=$LOGROOT/incremental-list +IPV6TO4_ROUTING='eth0-:0004::1/64 eth1-:0005::1/64' +ETHTOOL_OPTS="-K ${DEVICE} tso on; -G ${DEVICE} rx 256 tx 256" +USEMD5=no''' + conf = SysConf(contents.splitlines()) + self.assertEqual(conf['HOSTNAME'], 'blahblah') + self.assertEqual(conf['SHORTDATE'], '$(date +%y:%m:%d:%H:%M)') + # Should be unquoted + self.assertEqual(conf['ETHTOOL_OPTS'], ('-K ${DEVICE} tso on; ' + '-G ${DEVICE} rx 256 tx 256')) + self.assertEqual(contents, str(conf)) + + def test_parse_shell_vars(self): + contents = 'USESMBAUTH=$XYZ' + conf = SysConf(contents.splitlines()) + self.assertEqual(contents, str(conf)) + conf = SysConf('') + conf['B'] = '${ZZ}d apples' + # Should be quoted + self.assertEqual('B="${ZZ}d apples"', str(conf)) + conf = SysConf('') + conf['B'] = '$? d apples' + self.assertEqual('B="$? d apples"', str(conf)) + contents = 'IPMI_WATCHDOG_OPTIONS="timeout=60"' + conf = SysConf(contents.splitlines()) + self.assertEqual('IPMI_WATCHDOG_OPTIONS=timeout=60', str(conf)) + + def test_parse_adjust(self): + contents = 'IPV6TO4_ROUTING="eth0-:0004::1/64 eth1-:0005::1/64"' + conf = SysConf(contents.splitlines()) + # Should be unquoted + self.assertEqual('eth0-:0004::1/64 eth1-:0005::1/64', + conf['IPV6TO4_ROUTING']) + conf['IPV6TO4_ROUTING'] = "blah \tblah" + contents2 = str(conf).strip() + # Should be requoted due to whitespace + self.assertRegMatches(contents2, + r'IPV6TO4_ROUTING=[\']blah\s+blah[\']') + + def test_parse_no_adjust_shell(self): + conf = SysConf(''.splitlines()) + conf['B'] = ' $(time)' + contents = str(conf) + self.assertEqual('B= $(time)', contents) + + def test_parse_empty(self): + contents = '' + conf = SysConf(contents.splitlines()) + self.assertEqual('', str(conf).strip()) + + def test_parse_add_new(self): + contents = 'BLAH=b' + conf = SysConf(contents.splitlines()) + conf['Z'] = 'd' + contents = str(conf) + self.assertIn("Z=d", contents) + self.assertIn("BLAH=b", contents) + +# vi: ts=4 expandtab diff --git a/tests/unittests/distros/test_user_data_normalize.py b/tests/unittests/distros/test_user_data_normalize.py new file mode 100644 index 00000000..bd8f2adb --- /dev/null +++ b/tests/unittests/distros/test_user_data_normalize.py @@ -0,0 +1,372 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
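The SysConf cases above all follow one quoting rule: values stay bare unless whitespace or shell metacharacters force quoting. A minimal round-trip sketch using the same parser:

    from cloudinit.distros.parsers.sys_conf import SysConf

    conf = SysConf(''.splitlines())
    conf['Z'] = 'd'                # simple value stays bare: Z=d
    conf['B'] = '${ZZ}d apples'    # whitespace -> quoted: B="${ZZ}d apples"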
+ +from unittest import mock + +from cloudinit import distros +from cloudinit.distros import ug_util +from cloudinit import helpers +from cloudinit import settings + +from tests.unittests.helpers import TestCase + + +bcfg = { + 'name': 'bob', + 'plain_text_passwd': 'ubuntu', + 'home': "/home/ubuntu", + 'shell': "/bin/bash", + 'lock_passwd': True, + 'gecos': "Ubuntu", + 'groups': ["foo"] +} + + +class TestUGNormalize(TestCase): + + def setUp(self): + super(TestUGNormalize, self).setUp() + self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy') + + def _make_distro(self, dtype, def_user=None): + cfg = dict(settings.CFG_BUILTIN) + cfg['system_info']['distro'] = dtype + paths = helpers.Paths(cfg['system_info']['paths']) + distro_cls = distros.fetch(dtype) + if def_user: + cfg['system_info']['default_user'] = def_user.copy() + distro = distro_cls(dtype, cfg['system_info'], paths) + return distro + + def _norm(self, cfg, distro): + return ug_util.normalize_users_groups(cfg, distro) + + def test_group_dict(self): + distro = self._make_distro('ubuntu') + g = {'groups': + [{'ubuntu': ['foo', 'bar'], + 'bob': 'users'}, + 'cloud-users', + {'bob': 'users2'}]} + (_users, groups) = self._norm(g, distro) + self.assertIn('ubuntu', groups) + ub_members = groups['ubuntu'] + self.assertEqual(sorted(['foo', 'bar']), sorted(ub_members)) + self.assertIn('bob', groups) + b_members = groups['bob'] + self.assertEqual(sorted(['users', 'users2']), + sorted(b_members)) + + def test_basic_groups(self): + distro = self._make_distro('ubuntu') + ug_cfg = { + 'groups': ['bob'], + } + (users, groups) = self._norm(ug_cfg, distro) + self.assertIn('bob', groups) + self.assertEqual({}, users) + + def test_csv_groups(self): + distro = self._make_distro('ubuntu') + ug_cfg = { + 'groups': 'bob,joe,steve', + } + (users, groups) = self._norm(ug_cfg, distro) + self.assertIn('bob', groups) + self.assertIn('joe', groups) + self.assertIn('steve', groups) + self.assertEqual({}, users) + + def test_more_groups(self): + distro = self._make_distro('ubuntu') + ug_cfg = { + 'groups': ['bob', 'joe', 'steve'] + } + (users, groups) = self._norm(ug_cfg, distro) + self.assertIn('bob', groups) + self.assertIn('joe', groups) + self.assertIn('steve', groups) + self.assertEqual({}, users) + + def test_member_groups(self): + distro = self._make_distro('ubuntu') + ug_cfg = { + 'groups': { + 'bob': ['s'], + 'joe': [], + 'steve': [], + } + } + (users, groups) = self._norm(ug_cfg, distro) + self.assertIn('bob', groups) + self.assertEqual(['s'], groups['bob']) + self.assertEqual([], groups['joe']) + self.assertIn('joe', groups) + self.assertIn('steve', groups) + self.assertEqual({}, users) + + def test_users_simple_dict(self): + distro = self._make_distro('ubuntu', bcfg) + ug_cfg = { + 'users': { + 'default': True, + } + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('bob', users) + ug_cfg = { + 'users': { + 'default': 'yes', + } + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('bob', users) + ug_cfg = { + 'users': { + 'default': '1', + } + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('bob', users) + + def test_users_simple_dict_no(self): + distro = self._make_distro('ubuntu', bcfg) + ug_cfg = { + 'users': { + 'default': False, + } + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertEqual({}, users) + ug_cfg = { + 'users': { + 'default': 'no', + } + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertEqual({}, users) + + def test_users_simple_csv(self): + distro = 
self._make_distro('ubuntu') + ug_cfg = { + 'users': 'joe,bob', + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('joe', users) + self.assertIn('bob', users) + self.assertEqual({'default': False}, users['joe']) + self.assertEqual({'default': False}, users['bob']) + + def test_users_simple(self): + distro = self._make_distro('ubuntu') + ug_cfg = { + 'users': [ + 'joe', + 'bob' + ], + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('joe', users) + self.assertIn('bob', users) + self.assertEqual({'default': False}, users['joe']) + self.assertEqual({'default': False}, users['bob']) + + def test_users_old_user(self): + distro = self._make_distro('ubuntu', bcfg) + ug_cfg = { + 'user': 'zetta', + 'users': 'default' + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertNotIn('bob', users) # Bob is not the default now, zetta is + self.assertIn('zetta', users) + self.assertTrue(users['zetta']['default']) + self.assertNotIn('default', users) + ug_cfg = { + 'user': 'zetta', + 'users': 'default, joe' + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertNotIn('bob', users) # Bob is not the default now, zetta is + self.assertIn('joe', users) + self.assertIn('zetta', users) + self.assertTrue(users['zetta']['default']) + self.assertNotIn('default', users) + ug_cfg = { + 'user': 'zetta', + 'users': ['bob', 'joe'] + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('bob', users) + self.assertIn('joe', users) + self.assertIn('zetta', users) + self.assertTrue(users['zetta']['default']) + ug_cfg = { + 'user': 'zetta', + 'users': { + 'bob': True, + 'joe': True, + } + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('bob', users) + self.assertIn('joe', users) + self.assertIn('zetta', users) + self.assertTrue(users['zetta']['default']) + ug_cfg = { + 'user': 'zetta', + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('zetta', users) + ug_cfg = {} + (users, groups) = self._norm(ug_cfg, distro) + self.assertEqual({}, users) + self.assertEqual({}, groups) + + def test_users_dict_default_additional(self): + distro = self._make_distro('ubuntu', bcfg) + ug_cfg = { + 'users': [ + {'name': 'default', 'blah': True} + ], + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('bob', users) + self.assertEqual(",".join(distro.get_default_user()['groups']), + users['bob']['groups']) + self.assertEqual(True, users['bob']['blah']) + self.assertEqual(True, users['bob']['default']) + + def test_users_dict_extract(self): + distro = self._make_distro('ubuntu', bcfg) + ug_cfg = { + 'users': [ + 'default', + ], + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('bob', users) + (name, config) = ug_util.extract_default(users) + self.assertEqual(name, 'bob') + expected_config = {} + def_config = None + try: + def_config = distro.get_default_user() + except NotImplementedError: + pass + if not def_config: + def_config = {} + expected_config.update(def_config) + + # Ignore these for now + expected_config.pop('name', None) + expected_config.pop('groups', None) + config.pop('groups', None) + self.assertEqual(config, expected_config) + + def test_users_dict_default(self): + distro = self._make_distro('ubuntu', bcfg) + ug_cfg = { + 'users': [ + 'default', + ], + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('bob', users) + self.assertEqual(",".join(distro.get_default_user()['groups']), + users['bob']['groups']) + self.assertEqual(True, users['bob']['default']) + + def 
test_users_dict_trans(self): + distro = self._make_distro('ubuntu') + ug_cfg = { + 'users': [ + {'name': 'joe', + 'tr-me': True}, + {'name': 'bob'}, + ], + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('joe', users) + self.assertIn('bob', users) + self.assertEqual({'tr_me': True, 'default': False}, users['joe']) + self.assertEqual({'default': False}, users['bob']) + + def test_users_dict(self): + distro = self._make_distro('ubuntu') + ug_cfg = { + 'users': [ + {'name': 'joe'}, + {'name': 'bob'}, + ], + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('joe', users) + self.assertIn('bob', users) + self.assertEqual({'default': False}, users['joe']) + self.assertEqual({'default': False}, users['bob']) + + @mock.patch('cloudinit.subp.subp') + def test_create_snap_user(self, mock_subp): + mock_subp.side_effect = [('{"username": "joe", "ssh-key-count": 1}\n', + '')] + distro = self._make_distro('ubuntu') + ug_cfg = { + 'users': [ + {'name': 'joe', 'snapuser': 'joe@joe.com'}, + ], + } + (users, _groups) = self._norm(ug_cfg, distro) + for (user, config) in users.items(): + print('user=%s config=%s' % (user, config)) + username = distro.create_user(user, **config) + + snapcmd = ['snap', 'create-user', '--sudoer', '--json', 'joe@joe.com'] + mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd) + self.assertEqual(username, 'joe') + + @mock.patch('cloudinit.subp.subp') + def test_create_snap_user_known(self, mock_subp): + mock_subp.side_effect = [('{"username": "joe", "ssh-key-count": 1}\n', + '')] + distro = self._make_distro('ubuntu') + ug_cfg = { + 'users': [ + {'name': 'joe', 'snapuser': 'joe@joe.com', 'known': True}, + ], + } + (users, _groups) = self._norm(ug_cfg, distro) + for (user, config) in users.items(): + print('user=%s config=%s' % (user, config)) + username = distro.create_user(user, **config) + + snapcmd = ['snap', 'create-user', '--sudoer', '--json', '--known', + 'joe@joe.com'] + mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd) + self.assertEqual(username, 'joe') + + @mock.patch('cloudinit.util.system_is_snappy') + @mock.patch('cloudinit.util.is_group') + @mock.patch('cloudinit.subp.subp') + def test_add_user_on_snappy_system(self, mock_subp, mock_isgrp, + mock_snappy): + mock_isgrp.return_value = False + mock_subp.return_value = True + mock_snappy.return_value = True + distro = self._make_distro('ubuntu') + ug_cfg = { + 'users': [ + {'name': 'joe', 'groups': 'users', 'create_groups': True}, + ], + } + (users, _groups) = self._norm(ug_cfg, distro) + for (user, config) in users.items(): + print('user=%s config=%s' % (user, config)) + distro.add_user(user, **config) + + groupcmd = ['groupadd', 'users', '--extrausers'] + addcmd = ['useradd', 'joe', '--extrausers', '--groups', 'users', '-m'] + + mock_subp.assert_any_call(groupcmd) + mock_subp.assert_any_call(addcmd, logstring=addcmd) + +# vi: ts=4 expandtab diff --git a/tests/unittests/filters/__init__.py b/tests/unittests/filters/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/filters/test_launch_index.py b/tests/unittests/filters/test_launch_index.py new file mode 100644 index 00000000..0b1a7067 --- /dev/null +++ b/tests/unittests/filters/test_launch_index.py @@ -0,0 +1,135 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
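The snapuser tests above fix the exact snap invocation, including where --known lands. For reference, the two asserted command lines:

    ['snap', 'create-user', '--sudoer', '--json', 'joe@joe.com']
    # with 'known': True, --known is inserted before the email:
    ['snap', 'create-user', '--sudoer', '--json', '--known', 'joe@joe.com']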
+ +import copy +from itertools import filterfalse + +from tests.unittests import helpers + +from cloudinit.filters import launch_index +from cloudinit import user_data as ud +from cloudinit import util + + +def count_messages(root): + am = 0 + for m in root.walk(): + if ud.is_skippable(m): + continue + am += 1 + return am + + +class TestLaunchFilter(helpers.ResourceUsingTestCase): + + def assertCounts(self, message, expected_counts): + orig_message = copy.deepcopy(message) + for (index, count) in expected_counts.items(): + index = util.safe_int(index) + filtered_message = launch_index.Filter(index).apply(message) + self.assertEqual(count_messages(filtered_message), count) + # Ensure original message still ok/not modified + self.assertTrue(self.equivalentMessage(message, orig_message)) + + def equivalentMessage(self, msg1, msg2): + msg1_count = count_messages(msg1) + msg2_count = count_messages(msg2) + if msg1_count != msg2_count: + return False + # Do some basic payload checking + msg1_msgs = [m for m in msg1.walk()] + msg1_msgs = [m for m in filterfalse(ud.is_skippable, msg1_msgs)] + msg2_msgs = [m for m in msg2.walk()] + msg2_msgs = [m for m in filterfalse(ud.is_skippable, msg2_msgs)] + for i in range(0, len(msg2_msgs)): + m1_msg = msg1_msgs[i] + m2_msg = msg2_msgs[i] + if m1_msg.get_charset() != m2_msg.get_charset(): + return False + if m1_msg.is_multipart() != m2_msg.is_multipart(): + return False + m1_py = m1_msg.get_payload(decode=True) + m2_py = m2_msg.get_payload(decode=True) + if m1_py != m2_py: + return False + return True + + def testMultiEmailIndex(self): + test_data = helpers.readResource('filter_cloud_multipart_2.email') + ud_proc = ud.UserDataProcessor(self.getCloudPaths()) + message = ud_proc.process(test_data) + self.assertTrue(count_messages(message) > 0) + # This file should have the following + # indexes -> amount mapping in it + expected_counts = { + 3: 1, + 2: 2, + None: 3, + -1: 0, + } + self.assertCounts(message, expected_counts) + + def testHeaderEmailIndex(self): + test_data = helpers.readResource('filter_cloud_multipart_header.email') + ud_proc = ud.UserDataProcessor(self.getCloudPaths()) + message = ud_proc.process(test_data) + self.assertTrue(count_messages(message) > 0) + # This file should have the following + # indexes -> amount mapping in it + expected_counts = { + 5: 1, + -1: 0, + 'c': 1, + None: 1, + } + self.assertCounts(message, expected_counts) + + def testConfigEmailIndex(self): + test_data = helpers.readResource('filter_cloud_multipart_1.email') + ud_proc = ud.UserDataProcessor(self.getCloudPaths()) + message = ud_proc.process(test_data) + self.assertTrue(count_messages(message) > 0) + # This file should have the following + # indexes -> amount mapping in it + expected_counts = { + 2: 1, + -1: 0, + None: 1, + } + self.assertCounts(message, expected_counts) + + def testNoneIndex(self): + test_data = helpers.readResource('filter_cloud_multipart.yaml') + ud_proc = ud.UserDataProcessor(self.getCloudPaths()) + message = ud_proc.process(test_data) + start_count = count_messages(message) + self.assertTrue(start_count > 0) + filtered_message = launch_index.Filter(None).apply(message) + self.assertTrue(self.equivalentMessage(message, filtered_message)) + + def testIndexes(self): + test_data = helpers.readResource('filter_cloud_multipart.yaml') + ud_proc = ud.UserDataProcessor(self.getCloudPaths()) + message = ud_proc.process(test_data) + start_count = count_messages(message) + self.assertTrue(start_count > 0) + # This file should have the following + # 
indexes -> amount mapping in it + expected_counts = { + 2: 2, + 3: 2, + 1: 2, + 0: 1, + 4: 1, + 7: 0, + -1: 0, + 100: 0, + # None should just give all back + None: start_count, + # Non ints should be ignored + 'c': start_count, + # Strings should be converted + '1': 2, + } + self.assertCounts(message, expected_counts) + +# vi: ts=4 expandtab diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py new file mode 100644 index 00000000..ccd56793 --- /dev/null +++ b/tests/unittests/helpers.py @@ -0,0 +1,507 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import functools +import httpretty +import io +import logging +import os +import random +import shutil +import string +import sys +import tempfile +import time +import unittest +from contextlib import ExitStack, contextmanager +from unittest import mock +from unittest.util import strclass + +from cloudinit.config.schema import ( + SchemaValidationError, validate_cloudconfig_schema) +from cloudinit import cloud +from cloudinit import distros +from cloudinit import helpers as ch +from cloudinit.sources import DataSourceNone +from cloudinit.templater import JINJA_AVAILABLE +from cloudinit import subp +from cloudinit import util + +_real_subp = subp.subp + +# Used for skipping tests +SkipTest = unittest.SkipTest +skipIf = unittest.skipIf + + +# Makes the old path start +# with new base instead of whatever +# it previously had +def rebase_path(old_path, new_base): + if old_path.startswith(new_base): + # Already handled... + return old_path + # Retarget the base of that path + # to the new base instead of the + # old one... + path = os.path.join(new_base, old_path.lstrip("/")) + path = os.path.abspath(path) + return path + + +# Can work on anything that takes a path as arguments +def retarget_many_wrapper(new_base, am, old_func): + def wrapper(*args, **kwds): + n_args = list(args) + nam = am + if am == -1: + nam = len(n_args) + for i in range(0, nam): + path = args[i] + # patchOS() wraps various os and os.path functions, however in + # Python 3 some of these now accept file-descriptors (integers). + # That breaks rebase_path() so in lieu of a better solution, just + # don't rebase if we get a fd. + if isinstance(path, str): + n_args[i] = rebase_path(path, new_base) + return old_func(*n_args, **kwds) + return wrapper + + +class TestCase(unittest.TestCase): + + def reset_global_state(self): + """Reset any global state to its original settings. + + cloudinit caches some values in cloudinit.util. Unit tests that + involved those cached paths were then subject to failure if the order + of invocation changed (LP: #1703697). + + This function resets any of these global state variables to their + initial state. + + In the future this should really be done with some registry that + can then be cleaned in a more obvious way. + """ + util.PROC_CMDLINE = None + util._DNS_REDIRECT_IP = None + util._LSB_RELEASE = {} + + def setUp(self): + super(TestCase, self).setUp() + self.reset_global_state() + + def shortDescription(self): + return strclass(self.__class__) + '.' 
+ self._testMethodName + + def add_patch(self, target, attr, *args, **kwargs): + """Patches specified target object and sets it as attr on test + instance also schedules cleanup""" + if 'autospec' not in kwargs: + kwargs['autospec'] = True + m = mock.patch(target, *args, **kwargs) + p = m.start() + self.addCleanup(m.stop) + setattr(self, attr, p) + + +class CiTestCase(TestCase): + """This is the preferred test case base class unless user + needs other test case classes below.""" + + # Subclass overrides for specific test behavior + # Whether or not a unit test needs logfile setup + with_logs = False + allowed_subp = False + SUBP_SHELL_TRUE = "shell=true" + + @contextmanager + def allow_subp(self, allowed_subp): + orig = self.allowed_subp + try: + self.allowed_subp = allowed_subp + yield + finally: + self.allowed_subp = orig + + def setUp(self): + super(CiTestCase, self).setUp() + if self.with_logs: + # Create a log handler so unit tests can search expected logs. + self.logger = logging.getLogger() + self.logs = io.StringIO() + formatter = logging.Formatter('%(levelname)s: %(message)s') + handler = logging.StreamHandler(self.logs) + handler.setFormatter(formatter) + self.old_handlers = self.logger.handlers + self.logger.handlers = [handler] + if self.allowed_subp is True: + subp.subp = _real_subp + else: + subp.subp = self._fake_subp + + def _fake_subp(self, *args, **kwargs): + if 'args' in kwargs: + cmd = kwargs['args'] + else: + if not args: + raise TypeError( + "subp() missing 1 required positional argument: 'args'") + cmd = args[0] + + if not isinstance(cmd, str): + cmd = cmd[0] + pass_through = False + if not isinstance(self.allowed_subp, (list, bool)): + raise TypeError("self.allowed_subp supports list or bool.") + if isinstance(self.allowed_subp, bool): + pass_through = self.allowed_subp + else: + pass_through = ( + (cmd in self.allowed_subp) or + (self.SUBP_SHELL_TRUE in self.allowed_subp and + kwargs.get('shell'))) + if pass_through: + return _real_subp(*args, **kwargs) + raise Exception( + "called subp. set self.allowed_subp=True to allow\n subp(%s)" % + ', '.join([str(repr(a)) for a in args] + + ["%s=%s" % (k, repr(v)) for k, v in kwargs.items()])) + + def tearDown(self): + if self.with_logs: + # Remove the handler we setup + logging.getLogger().handlers = self.old_handlers + logging.getLogger().setLevel(logging.NOTSET) + subp.subp = _real_subp + super(CiTestCase, self).tearDown() + + def tmp_dir(self, dir=None, cleanup=True): + # return a full path to a temporary directory that will be cleaned up. + if dir is None: + tmpd = tempfile.mkdtemp( + prefix="ci-%s." % self.__class__.__name__) + else: + tmpd = tempfile.mkdtemp(dir=dir) + self.addCleanup( + functools.partial(shutil.rmtree, tmpd, ignore_errors=True)) + return tmpd + + def tmp_path(self, path, dir=None): + # return an absolute path to 'path' under dir. + # if dir is None, one will be created with tmp_dir() + # the file is not created or modified. + if dir is None: + dir = self.tmp_dir() + return os.path.normpath(os.path.abspath(os.path.join(dir, path))) + + def tmp_cloud(self, distro, sys_cfg=None, metadata=None): + """Create a cloud with tmp working directory paths. + + @param distro: Name of the distro to attach to the cloud. + @param metadata: Optional metadata to set on the datasource. + + @return: The built cloud instance. 
+ """ + self.new_root = self.tmp_dir() + if not sys_cfg: + sys_cfg = {} + tmp_paths = {} + for var in ['templates_dir', 'run_dir', 'cloud_dir']: + tmp_paths[var] = self.tmp_path(var, dir=self.new_root) + util.ensure_dir(tmp_paths[var]) + self.paths = ch.Paths(tmp_paths) + cls = distros.fetch(distro) + mydist = cls(distro, sys_cfg, self.paths) + myds = DataSourceNone.DataSourceNone(sys_cfg, mydist, self.paths) + if metadata: + myds.metadata.update(metadata) + return cloud.Cloud(myds, self.paths, sys_cfg, mydist, None) + + @classmethod + def random_string(cls, length=8): + """ return a random lowercase string with default length of 8""" + return ''.join( + random.choice(string.ascii_lowercase) for _ in range(length)) + + +class ResourceUsingTestCase(CiTestCase): + + def setUp(self): + super(ResourceUsingTestCase, self).setUp() + self.resource_path = None + + def getCloudPaths(self, ds=None): + tmpdir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, tmpdir) + cp = ch.Paths({'cloud_dir': tmpdir, + 'templates_dir': resourceLocation()}, + ds=ds) + return cp + + +class FilesystemMockingTestCase(ResourceUsingTestCase): + + def setUp(self): + super(FilesystemMockingTestCase, self).setUp() + self.patched_funcs = ExitStack() + + def tearDown(self): + self.patched_funcs.close() + ResourceUsingTestCase.tearDown(self) + + def replicateTestRoot(self, example_root, target_root): + real_root = resourceLocation() + real_root = os.path.join(real_root, 'roots', example_root) + for (dir_path, _dirnames, filenames) in os.walk(real_root): + real_path = dir_path + make_path = rebase_path(real_path[len(real_root):], target_root) + util.ensure_dir(make_path) + for f in filenames: + real_path = util.abs_join(real_path, f) + make_path = util.abs_join(make_path, f) + shutil.copy(real_path, make_path) + + def patchUtils(self, new_root): + patch_funcs = { + util: [('write_file', 1), + ('append_file', 1), + ('load_file', 1), + ('ensure_dir', 1), + ('chmod', 1), + ('delete_dir_contents', 1), + ('del_file', 1), + ('sym_link', -1), + ('copy', -1)], + } + for (mod, funcs) in patch_funcs.items(): + for (f, am) in funcs: + func = getattr(mod, f) + trap_func = retarget_many_wrapper(new_root, am, func) + self.patched_funcs.enter_context( + mock.patch.object(mod, f, trap_func)) + + # Handle subprocess calls + func = getattr(subp, 'subp') + + def nsubp(*_args, **_kwargs): + return ('', '') + + self.patched_funcs.enter_context( + mock.patch.object(subp, 'subp', nsubp)) + + def null_func(*_args, **_kwargs): + return None + + for f in ['chownbyid', 'chownbyname']: + self.patched_funcs.enter_context( + mock.patch.object(util, f, null_func)) + + def patchOS(self, new_root): + patch_funcs = { + os.path: [('isfile', 1), ('exists', 1), + ('islink', 1), ('isdir', 1), ('lexists', 1)], + os: [('listdir', 1), ('mkdir', 1), + ('lstat', 1), ('symlink', 2), + ('stat', 1)] + } + + if hasattr(os, 'scandir'): + # py27 does not have scandir + patch_funcs[os].append(('scandir', 1)) + + for (mod, funcs) in patch_funcs.items(): + for f, nargs in funcs: + func = getattr(mod, f) + trap_func = retarget_many_wrapper(new_root, nargs, func) + self.patched_funcs.enter_context( + mock.patch.object(mod, f, trap_func)) + + def patchOpen(self, new_root): + trap_func = retarget_many_wrapper(new_root, 1, open) + self.patched_funcs.enter_context( + mock.patch('builtins.open', trap_func) + ) + + def patchStdoutAndStderr(self, stdout=None, stderr=None): + if stdout is not None: + self.patched_funcs.enter_context( + mock.patch.object(sys, 'stdout', stdout)) + if 
stderr is not None:
+            self.patched_funcs.enter_context(
+                mock.patch.object(sys, 'stderr', stderr))
+
+    def reRoot(self, root=None):
+        if root is None:
+            root = self.tmp_dir()
+        self.patchUtils(root)
+        self.patchOS(root)
+        self.patchOpen(root)
+        return root
+
+    @contextmanager
+    def reRooted(self, root=None):
+        try:
+            yield self.reRoot(root)
+        finally:
+            self.patched_funcs.close()
+
+
+class HttprettyTestCase(CiTestCase):
+    # necessary as http_proxy gets in the way of httpretty
+    # https://github.com/gabrielfalcao/HTTPretty/issues/122
+    # Also make sure that allow_net_connect is set to False.
+    # And make sure reset and enable/disable are done.
+
+    def setUp(self):
+        self.restore_proxy = os.environ.get('http_proxy')
+        if self.restore_proxy is not None:
+            del os.environ['http_proxy']
+        super(HttprettyTestCase, self).setUp()
+        httpretty.HTTPretty.allow_net_connect = False
+        httpretty.reset()
+        httpretty.enable()
+        # Stop the logging from HttpPretty so our logs don't get mixed
+        # up with its logs
+        logging.getLogger('httpretty.core').setLevel(logging.CRITICAL)
+
+    def tearDown(self):
+        httpretty.disable()
+        httpretty.reset()
+        if self.restore_proxy:
+            os.environ['http_proxy'] = self.restore_proxy
+        super(HttprettyTestCase, self).tearDown()
+
+
+class SchemaTestCaseMixin(unittest.TestCase):
+
+    def assertSchemaValid(self, cfg, msg="Valid Schema failed validation."):
+        """Assert the config is valid per self.schema.
+
+        If there is only one top level key in the schema properties, then
+        the cfg will be put under that key."""
+        props = list(self.schema.get('properties'))
+        # put cfg under top level key if there is only one in the schema
+        if len(props) == 1:
+            cfg = {props[0]: cfg}
+        try:
+            validate_cloudconfig_schema(cfg, self.schema, strict=True)
+        except SchemaValidationError:
+            self.fail(msg)
+
+
+def populate_dir(path, files):
+    if not os.path.exists(path):
+        os.makedirs(path)
+    ret = []
+    for (name, content) in files.items():
+        p = os.path.sep.join([path, name])
+        util.ensure_dir(os.path.dirname(p))
+        with open(p, "wb") as fp:
+            if isinstance(content, bytes):
+                fp.write(content)
+            else:
+                fp.write(content.encode('utf-8'))
+            fp.close()
+        ret.append(p)
+
+    return ret
+
+
+def populate_dir_with_ts(path, data):
+    """data is {'file': ('contents', mtime)}. mtime relative to now."""
+    populate_dir(path, dict((k, v[0]) for k, v in data.items()))
+    btime = time.time()
+    for fpath, (_contents, mtime) in data.items():
+        ts = btime + mtime if mtime else btime
+        os.utime(os.path.sep.join((path, fpath)), (ts, ts))
+
+
+def dir2dict(startdir, prefix=None):
+    flist = {}
+    if prefix is None:
+        prefix = startdir
+    for root, _dirs, files in os.walk(startdir):
+        for fname in files:
+            fpath = os.path.join(root, fname)
+            key = fpath[len(prefix):]
+            flist[key] = util.load_file(fpath)
+    return flist
+
+
+def wrap_and_call(prefix, mocks, func, *args, **kwargs):
+    """
+    call func(*args, **kwargs) with mocks applied, then unapplies the mocks;
+    nicer to read than repeating decorators on each function
+
+    prefix: prefix for mock names (e.g. 'cloudinit.stages.util') or None
+    mocks: dictionary of names (under 'prefix') to mock and either
+        a return value or a dictionary to pass to the mock.patch call
+    func: function to call with mocks applied
+    *args,**kwargs: arguments for 'func'
+
+    return_value: return from 'func'
+    """
+    delim = '.'
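+    # Example, mirroring the call made in tests/unittests/net/test_dhcp.py:
+    #   wrap_and_call(
+    #       'cloudinit.temp_utils',
+    #       {'_TMPDIR': {'new': None}, 'os.getuid': 0},
+    #       maybe_perform_dhcp_discovery)
+    # patches cloudinit.temp_utils._TMPDIR (dict form is passed through as
+    # mock.patch kwargs) and stubs cloudinit.temp_utils.os.getuid to return
+    # 0, runs the function, then stops both patchers.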
+ if prefix is None: + prefix = '' + prefix = prefix.rstrip(delim) + unwraps = [] + for fname, kw in mocks.items(): + if prefix: + fname = delim.join((prefix, fname)) + if not isinstance(kw, dict): + kw = {'return_value': kw} + p = mock.patch(fname, **kw) + p.start() + unwraps.append(p) + try: + return func(*args, **kwargs) + finally: + for p in unwraps: + p.stop() + + +def resourceLocation(subname=None): + path = os.path.join('tests', 'data') + if not subname: + return path + return os.path.join(path, subname) + + +def readResource(name, mode='r'): + with open(resourceLocation(name), mode) as fh: + return fh.read() + + +try: + import jsonschema + assert jsonschema # avoid pyflakes error F401: import unused + _missing_jsonschema_dep = False +except ImportError: + _missing_jsonschema_dep = True + + +def skipUnlessJsonSchema(): + return skipIf( + _missing_jsonschema_dep, "No python-jsonschema dependency present.") + + +def skipUnlessJinja(): + return skipIf(not JINJA_AVAILABLE, "No jinja dependency present.") + + +def skipIfJinja(): + return skipIf(JINJA_AVAILABLE, "Jinja dependency present.") + + +# older versions of mock do not have the useful 'assert_not_called' +if not hasattr(mock.Mock, 'assert_not_called'): + def __mock_assert_not_called(mmock): + if mmock.call_count != 0: + msg = ("[citest] Expected '%s' to not have been called. " + "Called %s times." % + (mmock._mock_name or 'mock', mmock.call_count)) + raise AssertionError(msg) + mock.Mock.assert_not_called = __mock_assert_not_called + +# vi: ts=4 expandtab diff --git a/tests/unittests/net/__init__.py b/tests/unittests/net/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/net/test_dhcp.py b/tests/unittests/net/test_dhcp.py new file mode 100644 index 00000000..d3da3981 --- /dev/null +++ b/tests/unittests/net/test_dhcp.py @@ -0,0 +1,647 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
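+#
+# Tests for cloudinit.net.dhcp: dhclient lease-file parsing, rfc3442
+# static-route decoding, the sandboxed dhcp_discovery flow, and
+# systemd-networkd lease loading.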
+ +import httpretty +import os +import signal +from textwrap import dedent + +import cloudinit.net as net +from cloudinit.net.dhcp import ( + InvalidDHCPLeaseFileError, maybe_perform_dhcp_discovery, + parse_dhcp_lease_file, dhcp_discovery, networkd_load_leases, + parse_static_routes) +from cloudinit.util import ensure_file, write_file +from tests.unittests.helpers import ( + CiTestCase, HttprettyTestCase, mock, populate_dir, wrap_and_call) + + +class TestParseDHCPLeasesFile(CiTestCase): + + def test_parse_empty_lease_file_errors(self): + """parse_dhcp_lease_file errors when file content is empty.""" + empty_file = self.tmp_path('leases') + ensure_file(empty_file) + with self.assertRaises(InvalidDHCPLeaseFileError) as context_manager: + parse_dhcp_lease_file(empty_file) + error = context_manager.exception + self.assertIn('Cannot parse empty dhcp lease file', str(error)) + + def test_parse_malformed_lease_file_content_errors(self): + """parse_dhcp_lease_file errors when file content isn't dhcp leases.""" + non_lease_file = self.tmp_path('leases') + write_file(non_lease_file, 'hi mom.') + with self.assertRaises(InvalidDHCPLeaseFileError) as context_manager: + parse_dhcp_lease_file(non_lease_file) + error = context_manager.exception + self.assertIn('Cannot parse dhcp lease file', str(error)) + + def test_parse_multiple_leases(self): + """parse_dhcp_lease_file returns a list of all leases within.""" + lease_file = self.tmp_path('leases') + content = dedent(""" + lease { + interface "wlp3s0"; + fixed-address 192.168.2.74; + filename "http://192.168.2.50/boot.php?mac=${netX}"; + option subnet-mask 255.255.255.0; + option routers 192.168.2.1; + renew 4 2017/07/27 18:02:30; + expire 5 2017/07/28 07:08:15; + } + lease { + interface "wlp3s0"; + fixed-address 192.168.2.74; + filename "http://192.168.2.50/boot.php?mac=${netX}"; + option subnet-mask 255.255.255.0; + option routers 192.168.2.1; + } + """) + expected = [ + {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', + 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', + 'renew': '4 2017/07/27 18:02:30', + 'expire': '5 2017/07/28 07:08:15', + 'filename': 'http://192.168.2.50/boot.php?mac=${netX}'}, + {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', + 'filename': 'http://192.168.2.50/boot.php?mac=${netX}', + 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}] + write_file(lease_file, content) + self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file)) + + +class TestDHCPRFC3442(CiTestCase): + + def test_parse_lease_finds_rfc3442_classless_static_routes(self): + """parse_dhcp_lease_file returns rfc3442-classless-static-routes.""" + lease_file = self.tmp_path('leases') + content = dedent(""" + lease { + interface "wlp3s0"; + fixed-address 192.168.2.74; + option subnet-mask 255.255.255.0; + option routers 192.168.2.1; + option rfc3442-classless-static-routes 0,130,56,240,1; + renew 4 2017/07/27 18:02:30; + expire 5 2017/07/28 07:08:15; + } + """) + expected = [ + {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', + 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', + 'rfc3442-classless-static-routes': '0,130,56,240,1', + 'renew': '4 2017/07/27 18:02:30', + 'expire': '5 2017/07/28 07:08:15'}] + write_file(lease_file, content) + self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file)) + + def test_parse_lease_finds_classless_static_routes(self): + """ + parse_dhcp_lease_file returns classless-static-routes + for Centos lease format. 
+ """ + lease_file = self.tmp_path('leases') + content = dedent(""" + lease { + interface "wlp3s0"; + fixed-address 192.168.2.74; + option subnet-mask 255.255.255.0; + option routers 192.168.2.1; + option classless-static-routes 0 130.56.240.1; + renew 4 2017/07/27 18:02:30; + expire 5 2017/07/28 07:08:15; + } + """) + expected = [ + {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', + 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', + 'classless-static-routes': '0 130.56.240.1', + 'renew': '4 2017/07/27 18:02:30', + 'expire': '5 2017/07/28 07:08:15'}] + write_file(lease_file, content) + self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file)) + + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + def test_obtain_lease_parses_static_routes(self, m_maybe, m_ipv4): + """EphemeralDHPCv4 parses rfc3442 routes for EphemeralIPv4Network""" + lease = [ + {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', + 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', + 'rfc3442-classless-static-routes': '0,130,56,240,1', + 'renew': '4 2017/07/27 18:02:30', + 'expire': '5 2017/07/28 07:08:15'}] + m_maybe.return_value = lease + eph = net.dhcp.EphemeralDHCPv4() + eph.obtain_lease() + expected_kwargs = { + 'interface': 'wlp3s0', + 'ip': '192.168.2.74', + 'prefix_or_mask': '255.255.255.0', + 'broadcast': '192.168.2.255', + 'static_routes': [('0.0.0.0/0', '130.56.240.1')], + 'router': '192.168.2.1'} + m_ipv4.assert_called_with(**expected_kwargs) + + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + def test_obtain_centos_lease_parses_static_routes(self, m_maybe, m_ipv4): + """ + EphemeralDHPCv4 parses rfc3442 routes for EphemeralIPv4Network + for Centos Lease format + """ + lease = [ + {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', + 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', + 'classless-static-routes': '0 130.56.240.1', + 'renew': '4 2017/07/27 18:02:30', + 'expire': '5 2017/07/28 07:08:15'}] + m_maybe.return_value = lease + eph = net.dhcp.EphemeralDHCPv4() + eph.obtain_lease() + expected_kwargs = { + 'interface': 'wlp3s0', + 'ip': '192.168.2.74', + 'prefix_or_mask': '255.255.255.0', + 'broadcast': '192.168.2.255', + 'static_routes': [('0.0.0.0/0', '130.56.240.1')], + 'router': '192.168.2.1'} + m_ipv4.assert_called_with(**expected_kwargs) + + +class TestDHCPParseStaticRoutes(CiTestCase): + + with_logs = True + + def parse_static_routes_empty_string(self): + self.assertEqual([], parse_static_routes("")) + + def test_parse_static_routes_invalid_input_returns_empty_list(self): + rfc3442 = "32,169,254,169,254,130,56,248" + self.assertEqual([], parse_static_routes(rfc3442)) + + def test_parse_static_routes_bogus_width_returns_empty_list(self): + rfc3442 = "33,169,254,169,254,130,56,248" + self.assertEqual([], parse_static_routes(rfc3442)) + + def test_parse_static_routes_single_ip(self): + rfc3442 = "32,169,254,169,254,130,56,248,255" + self.assertEqual([('169.254.169.254/32', '130.56.248.255')], + parse_static_routes(rfc3442)) + + def test_parse_static_routes_single_ip_handles_trailing_semicolon(self): + rfc3442 = "32,169,254,169,254,130,56,248,255;" + self.assertEqual([('169.254.169.254/32', '130.56.248.255')], + parse_static_routes(rfc3442)) + + def test_parse_static_routes_default_route(self): + rfc3442 = "0,130,56,240,1" + self.assertEqual([('0.0.0.0/0', '130.56.240.1')], + parse_static_routes(rfc3442)) + + 
def test_unspecified_gateway(self): + rfc3442 = "32,169,254,169,254,0,0,0,0" + self.assertEqual([('169.254.169.254/32', '0.0.0.0')], + parse_static_routes(rfc3442)) + + def test_parse_static_routes_class_c_b_a(self): + class_c = "24,192,168,74,192,168,0,4" + class_b = "16,172,16,172,16,0,4" + class_a = "8,10,10,0,0,4" + rfc3442 = ",".join([class_c, class_b, class_a]) + self.assertEqual(sorted([ + ("192.168.74.0/24", "192.168.0.4"), + ("172.16.0.0/16", "172.16.0.4"), + ("10.0.0.0/8", "10.0.0.4") + ]), sorted(parse_static_routes(rfc3442))) + + def test_parse_static_routes_logs_error_truncated(self): + bad_rfc3442 = { + "class_c": "24,169,254,169,10", + "class_b": "16,172,16,10", + "class_a": "8,10,10", + "gateway": "0,0", + "netlen": "33,0", + } + for rfc3442 in bad_rfc3442.values(): + self.assertEqual([], parse_static_routes(rfc3442)) + + logs = self.logs.getvalue() + self.assertEqual(len(bad_rfc3442.keys()), len(logs.splitlines())) + + def test_parse_static_routes_returns_valid_routes_until_parse_err(self): + class_c = "24,192,168,74,192,168,0,4" + class_b = "16,172,16,172,16,0,4" + class_a_error = "8,10,10,0,0" + rfc3442 = ",".join([class_c, class_b, class_a_error]) + self.assertEqual(sorted([ + ("192.168.74.0/24", "192.168.0.4"), + ("172.16.0.0/16", "172.16.0.4"), + ]), sorted(parse_static_routes(rfc3442))) + + logs = self.logs.getvalue() + self.assertIn(rfc3442, logs.splitlines()[0]) + + def test_redhat_format(self): + redhat_format = "24.191.168.128 192.168.128.1,0 192.168.128.1" + self.assertEqual(sorted([ + ("191.168.128.0/24", "192.168.128.1"), + ("0.0.0.0/0", "192.168.128.1") + ]), sorted(parse_static_routes(redhat_format))) + + def test_redhat_format_with_a_space_too_much_after_comma(self): + redhat_format = "24.191.168.128 192.168.128.1, 0 192.168.128.1" + self.assertEqual(sorted([ + ("191.168.128.0/24", "192.168.128.1"), + ("0.0.0.0/0", "192.168.128.1") + ]), sorted(parse_static_routes(redhat_format))) + + +class TestDHCPDiscoveryClean(CiTestCase): + with_logs = True + + @mock.patch('cloudinit.net.dhcp.find_fallback_nic') + def test_no_fallback_nic_found(self, m_fallback_nic): + """Log and do nothing when nic is absent and no fallback is found.""" + m_fallback_nic.return_value = None # No fallback nic found + self.assertEqual([], maybe_perform_dhcp_discovery()) + self.assertIn( + 'Skip dhcp_discovery: Unable to find fallback nic.', + self.logs.getvalue()) + + def test_provided_nic_does_not_exist(self): + """When the provided nic doesn't exist, log a message and no-op.""" + self.assertEqual([], maybe_perform_dhcp_discovery('idontexist')) + self.assertIn( + 'Skip dhcp_discovery: nic idontexist not found in get_devicelist.', + self.logs.getvalue()) + + @mock.patch('cloudinit.net.dhcp.subp.which') + @mock.patch('cloudinit.net.dhcp.find_fallback_nic') + def test_absent_dhclient_command(self, m_fallback, m_which): + """When dhclient doesn't exist in the OS, log the issue and no-op.""" + m_fallback.return_value = 'eth9' + m_which.return_value = None # dhclient isn't found + self.assertEqual([], maybe_perform_dhcp_discovery()) + self.assertIn( + 'Skip dhclient configuration: No dhclient command found.', + self.logs.getvalue()) + + @mock.patch('cloudinit.temp_utils.os.getuid') + @mock.patch('cloudinit.net.dhcp.dhcp_discovery') + @mock.patch('cloudinit.net.dhcp.subp.which') + @mock.patch('cloudinit.net.dhcp.find_fallback_nic') + def test_dhclient_run_with_tmpdir(self, m_fback, m_which, m_dhcp, m_uid): + """maybe_perform_dhcp_discovery passes tmpdir to dhcp_discovery.""" + 
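+        # wrap_and_call (from tests.unittests.helpers) patches
+        # cloudinit.temp_utils._TMPDIR and os.getuid just for this one call,
+        # so the dhclient sandbox lands under /var/tmp/cloud-init rather
+        # than the user's TMPDIR.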
m_uid.return_value = 0 # Fake root user for tmpdir + m_fback.return_value = 'eth9' + m_which.return_value = '/sbin/dhclient' + m_dhcp.return_value = {'address': '192.168.2.2'} + retval = wrap_and_call( + 'cloudinit.temp_utils', + {'_TMPDIR': {'new': None}, + 'os.getuid': 0}, + maybe_perform_dhcp_discovery) + self.assertEqual({'address': '192.168.2.2'}, retval) + self.assertEqual( + 1, m_dhcp.call_count, 'dhcp_discovery not called once') + call = m_dhcp.call_args_list[0] + self.assertEqual('/sbin/dhclient', call[0][0]) + self.assertEqual('eth9', call[0][1]) + self.assertIn('/var/tmp/cloud-init/cloud-init-dhcp-', call[0][2]) + + @mock.patch('time.sleep', mock.MagicMock()) + @mock.patch('cloudinit.net.dhcp.os.kill') + @mock.patch('cloudinit.net.dhcp.subp.subp') + def test_dhcp_discovery_run_in_sandbox_warns_invalid_pid(self, m_subp, + m_kill): + """dhcp_discovery logs a warning when pidfile contains invalid content. + + Lease processing still occurs and no proc kill is attempted. + """ + m_subp.return_value = ('', '') + tmpdir = self.tmp_dir() + dhclient_script = os.path.join(tmpdir, 'dhclient.orig') + script_content = '#!/bin/bash\necho fake-dhclient' + write_file(dhclient_script, script_content, mode=0o755) + write_file(self.tmp_path('dhclient.pid', tmpdir), '') # Empty pid '' + lease_content = dedent(""" + lease { + interface "eth9"; + fixed-address 192.168.2.74; + option subnet-mask 255.255.255.0; + option routers 192.168.2.1; + } + """) + write_file(self.tmp_path('dhcp.leases', tmpdir), lease_content) + + self.assertCountEqual( + [{'interface': 'eth9', 'fixed-address': '192.168.2.74', + 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}], + dhcp_discovery(dhclient_script, 'eth9', tmpdir)) + self.assertIn( + "dhclient(pid=, parentpid=unknown) failed " + "to daemonize after 10.0 seconds", + self.logs.getvalue()) + m_kill.assert_not_called() + + @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') + @mock.patch('cloudinit.net.dhcp.os.kill') + @mock.patch('cloudinit.net.dhcp.util.wait_for_files') + @mock.patch('cloudinit.net.dhcp.subp.subp') + def test_dhcp_discovery_run_in_sandbox_waits_on_lease_and_pid(self, + m_subp, + m_wait, + m_kill, + m_getppid): + """dhcp_discovery waits for the presence of pidfile and dhcp.leases.""" + m_subp.return_value = ('', '') + tmpdir = self.tmp_dir() + dhclient_script = os.path.join(tmpdir, 'dhclient.orig') + script_content = '#!/bin/bash\necho fake-dhclient' + write_file(dhclient_script, script_content, mode=0o755) + # Don't create pid or leases file + pidfile = self.tmp_path('dhclient.pid', tmpdir) + leasefile = self.tmp_path('dhcp.leases', tmpdir) + m_wait.return_value = [pidfile] # Return the missing pidfile wait for + m_getppid.return_value = 1 # Indicate that dhclient has daemonized + self.assertEqual([], dhcp_discovery(dhclient_script, 'eth9', tmpdir)) + self.assertEqual( + mock.call([pidfile, leasefile], maxwait=5, naplen=0.01), + m_wait.call_args_list[0]) + self.assertIn( + 'WARNING: dhclient did not produce expected files: dhclient.pid', + self.logs.getvalue()) + m_kill.assert_not_called() + + @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') + @mock.patch('cloudinit.net.dhcp.os.kill') + @mock.patch('cloudinit.net.dhcp.subp.subp') + def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill, m_getppid): + """dhcp_discovery brings up the interface and runs dhclient. + + It also returns the parsed dhcp.leases file generated in the sandbox. 
+ """ + m_subp.return_value = ('', '') + tmpdir = self.tmp_dir() + dhclient_script = os.path.join(tmpdir, 'dhclient.orig') + script_content = '#!/bin/bash\necho fake-dhclient' + write_file(dhclient_script, script_content, mode=0o755) + lease_content = dedent(""" + lease { + interface "eth9"; + fixed-address 192.168.2.74; + option subnet-mask 255.255.255.0; + option routers 192.168.2.1; + } + """) + lease_file = os.path.join(tmpdir, 'dhcp.leases') + write_file(lease_file, lease_content) + pid_file = os.path.join(tmpdir, 'dhclient.pid') + my_pid = 1 + write_file(pid_file, "%d\n" % my_pid) + m_getppid.return_value = 1 # Indicate that dhclient has daemonized + + self.assertCountEqual( + [{'interface': 'eth9', 'fixed-address': '192.168.2.74', + 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}], + dhcp_discovery(dhclient_script, 'eth9', tmpdir)) + # dhclient script got copied + with open(os.path.join(tmpdir, 'dhclient')) as stream: + self.assertEqual(script_content, stream.read()) + # Interface was brought up before dhclient called from sandbox + m_subp.assert_has_calls([ + mock.call( + ['ip', 'link', 'set', 'dev', 'eth9', 'up'], capture=True), + mock.call( + [os.path.join(tmpdir, 'dhclient'), '-1', '-v', '-lf', + lease_file, '-pf', os.path.join(tmpdir, 'dhclient.pid'), + 'eth9', '-sf', '/bin/true'], capture=True)]) + m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)]) + + @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') + @mock.patch('cloudinit.net.dhcp.os.kill') + @mock.patch('cloudinit.net.dhcp.subp.subp') + def test_dhcp_discovery_outside_sandbox(self, m_subp, m_kill, m_getppid): + """dhcp_discovery brings up the interface and runs dhclient. + + It also returns the parsed dhcp.leases file generated in the sandbox. + """ + m_subp.return_value = ('', '') + tmpdir = self.tmp_dir() + dhclient_script = os.path.join(tmpdir, 'dhclient.orig') + script_content = '#!/bin/bash\necho fake-dhclient' + write_file(dhclient_script, script_content, mode=0o755) + lease_content = dedent(""" + lease { + interface "eth9"; + fixed-address 192.168.2.74; + option subnet-mask 255.255.255.0; + option routers 192.168.2.1; + } + """) + lease_file = os.path.join(tmpdir, 'dhcp.leases') + write_file(lease_file, lease_content) + pid_file = os.path.join(tmpdir, 'dhclient.pid') + my_pid = 1 + write_file(pid_file, "%d\n" % my_pid) + m_getppid.return_value = 1 # Indicate that dhclient has daemonized + + with mock.patch('os.access', return_value=False): + self.assertCountEqual( + [{'interface': 'eth9', 'fixed-address': '192.168.2.74', + 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}], + dhcp_discovery(dhclient_script, 'eth9', tmpdir)) + # dhclient script got copied + with open(os.path.join(tmpdir, 'dhclient.orig')) as stream: + self.assertEqual(script_content, stream.read()) + # Interface was brought up before dhclient called from sandbox + m_subp.assert_has_calls([ + mock.call( + ['ip', 'link', 'set', 'dev', 'eth9', 'up'], capture=True), + mock.call( + [os.path.join(tmpdir, 'dhclient.orig'), '-1', '-v', '-lf', + lease_file, '-pf', os.path.join(tmpdir, 'dhclient.pid'), + 'eth9', '-sf', '/bin/true'], capture=True)]) + m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)]) + + @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') + @mock.patch('cloudinit.net.dhcp.os.kill') + @mock.patch('cloudinit.net.dhcp.subp.subp') + def test_dhcp_output_error_stream(self, m_subp, m_kill, m_getppid): + """"dhcp_log_func is called with the output and error streams of + dhclinet when the callable is 
passed.""" + dhclient_err = 'FAKE DHCLIENT ERROR' + dhclient_out = 'FAKE DHCLIENT OUT' + m_subp.return_value = (dhclient_out, dhclient_err) + tmpdir = self.tmp_dir() + dhclient_script = os.path.join(tmpdir, 'dhclient.orig') + script_content = '#!/bin/bash\necho fake-dhclient' + write_file(dhclient_script, script_content, mode=0o755) + lease_content = dedent(""" + lease { + interface "eth9"; + fixed-address 192.168.2.74; + option subnet-mask 255.255.255.0; + option routers 192.168.2.1; + } + """) + lease_file = os.path.join(tmpdir, 'dhcp.leases') + write_file(lease_file, lease_content) + pid_file = os.path.join(tmpdir, 'dhclient.pid') + my_pid = 1 + write_file(pid_file, "%d\n" % my_pid) + m_getppid.return_value = 1 # Indicate that dhclient has daemonized + + def dhcp_log_func(out, err): + self.assertEqual(out, dhclient_out) + self.assertEqual(err, dhclient_err) + + dhcp_discovery( + dhclient_script, 'eth9', tmpdir, dhcp_log_func=dhcp_log_func) + + +class TestSystemdParseLeases(CiTestCase): + + lxd_lease = dedent("""\ + # This is private data. Do not parse. + ADDRESS=10.75.205.242 + NETMASK=255.255.255.0 + ROUTER=10.75.205.1 + SERVER_ADDRESS=10.75.205.1 + NEXT_SERVER=10.75.205.1 + BROADCAST=10.75.205.255 + T1=1580 + T2=2930 + LIFETIME=3600 + DNS=10.75.205.1 + DOMAINNAME=lxd + HOSTNAME=a1 + CLIENTID=ffe617693400020000ab110c65a6a0866931c2 + """) + + lxd_parsed = { + 'ADDRESS': '10.75.205.242', + 'NETMASK': '255.255.255.0', + 'ROUTER': '10.75.205.1', + 'SERVER_ADDRESS': '10.75.205.1', + 'NEXT_SERVER': '10.75.205.1', + 'BROADCAST': '10.75.205.255', + 'T1': '1580', + 'T2': '2930', + 'LIFETIME': '3600', + 'DNS': '10.75.205.1', + 'DOMAINNAME': 'lxd', + 'HOSTNAME': 'a1', + 'CLIENTID': 'ffe617693400020000ab110c65a6a0866931c2', + } + + azure_lease = dedent("""\ + # This is private data. Do not parse. 
+        ADDRESS=10.132.0.5
+        NETMASK=255.255.255.255
+        ROUTER=10.132.0.1
+        SERVER_ADDRESS=169.254.169.254
+        NEXT_SERVER=10.132.0.1
+        MTU=1460
+        T1=43200
+        T2=75600
+        LIFETIME=86400
+        DNS=169.254.169.254
+        NTP=169.254.169.254
+        DOMAINNAME=c.ubuntu-foundations.internal
+        DOMAIN_SEARCH_LIST=c.ubuntu-foundations.internal google.internal
+        HOSTNAME=tribaal-test-171002-1349.c.ubuntu-foundations.internal
+        ROUTES=10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1
+        CLIENTID=ff405663a200020000ab11332859494d7a8b4c
+        OPTION_245=624c3620
+        """)
+
+    azure_parsed = {
+        'ADDRESS': '10.132.0.5',
+        'NETMASK': '255.255.255.255',
+        'ROUTER': '10.132.0.1',
+        'SERVER_ADDRESS': '169.254.169.254',
+        'NEXT_SERVER': '10.132.0.1',
+        'MTU': '1460',
+        'T1': '43200',
+        'T2': '75600',
+        'LIFETIME': '86400',
+        'DNS': '169.254.169.254',
+        'NTP': '169.254.169.254',
+        'DOMAINNAME': 'c.ubuntu-foundations.internal',
+        'DOMAIN_SEARCH_LIST': 'c.ubuntu-foundations.internal google.internal',
+        'HOSTNAME': 'tribaal-test-171002-1349.c.ubuntu-foundations.internal',
+        'ROUTES': '10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1',
+        'CLIENTID': 'ff405663a200020000ab11332859494d7a8b4c',
+        'OPTION_245': '624c3620'}
+
+    def setUp(self):
+        super(TestSystemdParseLeases, self).setUp()
+        self.lease_d = self.tmp_dir()
+
+    def test_no_leases_returns_empty_dict(self):
+        """A leases dir with no lease files should return empty dictionary."""
+        self.assertEqual({}, networkd_load_leases(self.lease_d))
+
+    def test_no_leases_dir_returns_empty_dict(self):
+        """A non-existing leases dir should return empty dict."""
+        enodir = os.path.join(self.lease_d, 'does-not-exist')
+        self.assertEqual({}, networkd_load_leases(enodir))
+
+    def test_single_leases_file(self):
+        """A leases dir with one leases file."""
+        populate_dir(self.lease_d, {'2': self.lxd_lease})
+        self.assertEqual(
+            {'2': self.lxd_parsed}, networkd_load_leases(self.lease_d))
+
+    def test_single_azure_leases_file(self):
+        """On Azure, option 245 should be present, verify it specifically."""
+        populate_dir(self.lease_d, {'1': self.azure_lease})
+        self.assertEqual(
+            {'1': self.azure_parsed}, networkd_load_leases(self.lease_d))
+
+    def test_multiple_files(self):
+        """Multiple lease files are each parsed and returned."""
+        self.maxDiff = None
+        populate_dir(self.lease_d, {'1': self.azure_lease,
+                                    '9': self.lxd_lease})
+        self.assertEqual({'1': self.azure_parsed, '9': self.lxd_parsed},
+                         networkd_load_leases(self.lease_d))
+
+
+class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase):
+
+    @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+    def test_ephemeral_dhcp_no_network_if_url_connectivity(self, m_dhcp):
+        """No EphemeralDhcp4 network setup when connectivity_url succeeds."""
+        url = 'http://example.org/index.html'
+
+        httpretty.register_uri(httpretty.GET, url)
+        with net.dhcp.EphemeralDHCPv4(
+            connectivity_url_data={'url': url},
+        ) as lease:
+            self.assertIsNone(lease)
+        # Ensure that no teardown happens:
+        m_dhcp.assert_not_called()
+
+    @mock.patch('cloudinit.net.dhcp.subp.subp')
+    @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+    def test_ephemeral_dhcp_setup_network_if_url_connectivity(
+            self, m_dhcp, m_subp):
+        """Perform EphemeralDhcp4 network setup when connectivity_url fails."""
+        url = 'http://example.org/index.html'
+        fake_lease = {
+            'interface': 'eth9', 'fixed-address': '192.168.2.2',
+            'subnet-mask': '255.255.0.0'}
+        m_dhcp.return_value = [fake_lease]
+        m_subp.return_value = ('', '')
+
+        httpretty.register_uri(httpretty.GET, url, body={}, status=404)
+        with net.dhcp.EphemeralDHCPv4(
+            connectivity_url_data={'url': url},
+        ) as lease:
+            self.assertEqual(fake_lease, lease)
+        # Ensure that dhcp discovery occurs
+        m_dhcp.assert_called_once()
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/net/test_init.py b/tests/unittests/net/test_init.py
new file mode 100644
index 00000000..666e8425
--- /dev/null
+++ b/tests/unittests/net/test_init.py
@@ -0,0 +1,1402 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import errno
+import ipaddress
+import os
+import textwrap
+from unittest import mock
+
+import httpretty
+import pytest
+import requests
+
+import cloudinit.net as net
+from cloudinit import safeyaml as yaml
+from tests.unittests.helpers import CiTestCase, HttprettyTestCase
+from cloudinit.subp import ProcessExecutionError
+from cloudinit.util import ensure_file, write_file
+
+
+class TestSysDevPath(CiTestCase):
+
+    def test_sys_dev_path(self):
+        """sys_dev_path returns a path under SYS_CLASS_NET for a device."""
+        dev = 'something'
+        path = 'attribute'
+        expected = net.SYS_CLASS_NET + dev + '/' + path
+        self.assertEqual(expected, net.sys_dev_path(dev, path))
+
+    def test_sys_dev_path_without_path(self):
+        """When path param isn't provided it defaults to empty string."""
+        dev = 'something'
+        expected = net.SYS_CLASS_NET + dev + '/'
+        self.assertEqual(expected, net.sys_dev_path(dev))
+
+
+class TestReadSysNet(CiTestCase):
+    with_logs = True
+
+    def setUp(self):
+        super(TestReadSysNet, self).setUp()
+        sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
+        self.m_sys_path = sys_mock.start()
+        self.sysdir = self.tmp_dir() + '/'
+        self.m_sys_path.return_value = self.sysdir
+        self.addCleanup(sys_mock.stop)
+
+    def test_read_sys_net_strips_contents_of_sys_path(self):
+        """read_sys_net strips whitespace from the contents of a sys file."""
+        content = 'some stuff with trailing whitespace\t\r\n'
+        write_file(os.path.join(self.sysdir, 'dev', 'attr'), content)
+        self.assertEqual(content.strip(), net.read_sys_net('dev', 'attr'))
+
+    def test_read_sys_net_reraises_oserror(self):
+        """read_sys_net raises OSError/IOError when the file doesn't exist."""
+        # Non-specific Exception because OSError vs IOError naming varies
+        # across Python versions.
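+        # (on Python >= 3.3 IOError is an alias of OSError, so asserting on
+        # the generic Exception stays correct on any supported version)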
+ with self.assertRaises(Exception) as context_manager: # noqa: H202 + net.read_sys_net('dev', 'attr') + error = context_manager.exception + self.assertIn('No such file or directory', str(error)) + + def test_read_sys_net_handles_error_with_on_enoent(self): + """read_sys_net handles OSError/IOError with on_enoent if provided.""" + handled_errors = [] + + def on_enoent(e): + handled_errors.append(e) + + net.read_sys_net('dev', 'attr', on_enoent=on_enoent) + error = handled_errors[0] + self.assertIsInstance(error, Exception) + self.assertIn('No such file or directory', str(error)) + + def test_read_sys_net_translates_content(self): + """read_sys_net translates content when translate dict is provided.""" + content = "you're welcome\n" + write_file(os.path.join(self.sysdir, 'dev', 'attr'), content) + translate = {"you're welcome": 'de nada'} + self.assertEqual( + 'de nada', + net.read_sys_net('dev', 'attr', translate=translate)) + + def test_read_sys_net_errors_on_translation_failures(self): + """read_sys_net raises a KeyError and logs details on failure.""" + content = "you're welcome\n" + write_file(os.path.join(self.sysdir, 'dev', 'attr'), content) + with self.assertRaises(KeyError) as context_manager: + net.read_sys_net('dev', 'attr', translate={}) + error = context_manager.exception + self.assertEqual('"you\'re welcome"', str(error)) + self.assertIn( + "Found unexpected (not translatable) value 'you're welcome' in " + "'{0}dev/attr".format(self.sysdir), + self.logs.getvalue()) + + def test_read_sys_net_handles_handles_with_onkeyerror(self): + """read_sys_net handles translation errors calling on_keyerror.""" + content = "you're welcome\n" + write_file(os.path.join(self.sysdir, 'dev', 'attr'), content) + handled_errors = [] + + def on_keyerror(e): + handled_errors.append(e) + + net.read_sys_net('dev', 'attr', translate={}, on_keyerror=on_keyerror) + error = handled_errors[0] + self.assertIsInstance(error, KeyError) + self.assertEqual('"you\'re welcome"', str(error)) + + def test_read_sys_net_safe_false_on_translate_failure(self): + """read_sys_net_safe returns False on translation failures.""" + content = "you're welcome\n" + write_file(os.path.join(self.sysdir, 'dev', 'attr'), content) + self.assertFalse(net.read_sys_net_safe('dev', 'attr', translate={})) + + def test_read_sys_net_safe_returns_false_on_noent_failure(self): + """read_sys_net_safe returns False on file not found failures.""" + self.assertFalse(net.read_sys_net_safe('dev', 'attr')) + + def test_read_sys_net_int_returns_none_on_error(self): + """read_sys_net_safe returns None on failures.""" + self.assertFalse(net.read_sys_net_int('dev', 'attr')) + + def test_read_sys_net_int_returns_none_on_valueerror(self): + """read_sys_net_safe returns None when content is not an int.""" + write_file(os.path.join(self.sysdir, 'dev', 'attr'), 'NOTINT\n') + self.assertFalse(net.read_sys_net_int('dev', 'attr')) + + def test_read_sys_net_int_returns_integer_from_content(self): + """read_sys_net_safe returns None on failures.""" + write_file(os.path.join(self.sysdir, 'dev', 'attr'), '1\n') + self.assertEqual(1, net.read_sys_net_int('dev', 'attr')) + + def test_is_up_true(self): + """is_up is True if sys/net/devname/operstate is 'up' or 'unknown'.""" + for state in ['up', 'unknown']: + write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state) + self.assertTrue(net.is_up('eth0')) + + def test_is_up_false(self): + """is_up is False if sys/net/devname/operstate is 'down' or invalid.""" + for state in ['down', 'incomprehensible']: + 
write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state) + self.assertFalse(net.is_up('eth0')) + + def test_is_bridge(self): + """is_bridge is True when /sys/net/devname/bridge exists.""" + self.assertFalse(net.is_bridge('eth0')) + ensure_file(os.path.join(self.sysdir, 'eth0', 'bridge')) + self.assertTrue(net.is_bridge('eth0')) + + def test_is_bond(self): + """is_bond is True when /sys/net/devname/bonding exists.""" + self.assertFalse(net.is_bond('eth0')) + ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding')) + self.assertTrue(net.is_bond('eth0')) + + def test_get_master(self): + """get_master returns the path when /sys/net/devname/master exists.""" + self.assertIsNone(net.get_master('enP1s1')) + master_path = os.path.join(self.sysdir, 'enP1s1', 'master') + ensure_file(master_path) + self.assertEqual(master_path, net.get_master('enP1s1')) + + def test_master_is_bridge_or_bond(self): + bridge_mac = 'aa:bb:cc:aa:bb:cc' + bond_mac = 'cc:bb:aa:cc:bb:aa' + + # No master => False + write_file(os.path.join(self.sysdir, 'eth1', 'address'), bridge_mac) + write_file(os.path.join(self.sysdir, 'eth2', 'address'), bond_mac) + + self.assertFalse(net.master_is_bridge_or_bond('eth1')) + self.assertFalse(net.master_is_bridge_or_bond('eth2')) + + # masters without bridge/bonding => False + write_file(os.path.join(self.sysdir, 'br0', 'address'), bridge_mac) + write_file(os.path.join(self.sysdir, 'bond0', 'address'), bond_mac) + + os.symlink('../br0', os.path.join(self.sysdir, 'eth1', 'master')) + os.symlink('../bond0', os.path.join(self.sysdir, 'eth2', 'master')) + + self.assertFalse(net.master_is_bridge_or_bond('eth1')) + self.assertFalse(net.master_is_bridge_or_bond('eth2')) + + # masters with bridge/bonding => True + write_file(os.path.join(self.sysdir, 'br0', 'bridge'), '') + write_file(os.path.join(self.sysdir, 'bond0', 'bonding'), '') + + self.assertTrue(net.master_is_bridge_or_bond('eth1')) + self.assertTrue(net.master_is_bridge_or_bond('eth2')) + + def test_master_is_openvswitch(self): + ovs_mac = 'bb:cc:aa:bb:cc:aa' + + # No master => False + write_file(os.path.join(self.sysdir, 'eth1', 'address'), ovs_mac) + + self.assertFalse(net.master_is_bridge_or_bond('eth1')) + + # masters without ovs-system => False + write_file(os.path.join(self.sysdir, 'ovs-system', 'address'), ovs_mac) + + os.symlink('../ovs-system', os.path.join(self.sysdir, 'eth1', + 'master')) + + self.assertFalse(net.master_is_openvswitch('eth1')) + + # masters with ovs-system => True + os.symlink('../ovs-system', os.path.join(self.sysdir, 'eth1', + 'upper_ovs-system')) + + self.assertTrue(net.master_is_openvswitch('eth1')) + + def test_is_vlan(self): + """is_vlan is True when /sys/net/devname/uevent has DEVTYPE=vlan.""" + ensure_file(os.path.join(self.sysdir, 'eth0', 'uevent')) + self.assertFalse(net.is_vlan('eth0')) + content = 'junk\nDEVTYPE=vlan\njunk\n' + write_file(os.path.join(self.sysdir, 'eth0', 'uevent'), content) + self.assertTrue(net.is_vlan('eth0')) + + +class TestGenerateFallbackConfig(CiTestCase): + + def setUp(self): + super(TestGenerateFallbackConfig, self).setUp() + sys_mock = mock.patch('cloudinit.net.get_sys_class_path') + self.m_sys_path = sys_mock.start() + self.sysdir = self.tmp_dir() + '/' + self.m_sys_path.return_value = self.sysdir + self.addCleanup(sys_mock.stop) + self.add_patch('cloudinit.net.util.is_container', 'm_is_container', + return_value=False) + self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle') + self.add_patch('cloudinit.net.is_netfailover', 'm_netfail', + 
return_value=False)
+        self.add_patch('cloudinit.net.is_netfail_master', 'm_netfail_master',
+                       return_value=False)
+
+    def test_generate_fallback_finds_connected_eth_with_mac(self):
+        """generate_fallback_config finds any connected device with a mac."""
+        write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1')
+        write_file(os.path.join(self.sysdir, 'eth1', 'carrier'), '1')
+        mac = 'aa:bb:cc:aa:bb:cc'
+        write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac)
+        expected = {
+            'ethernets': {'eth1': {'match': {'macaddress': mac},
+                                   'dhcp4': True, 'set-name': 'eth1'}},
+            'version': 2}
+        self.assertEqual(expected, net.generate_fallback_config())
+
+    def test_generate_fallback_finds_dormant_eth_with_mac(self):
+        """generate_fallback_config finds any dormant device with a mac."""
+        write_file(os.path.join(self.sysdir, 'eth0', 'dormant'), '1')
+        mac = 'aa:bb:cc:aa:bb:cc'
+        write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
+        expected = {
+            'ethernets': {'eth0': {'match': {'macaddress': mac}, 'dhcp4': True,
+                                   'set-name': 'eth0'}},
+            'version': 2}
+        self.assertEqual(expected, net.generate_fallback_config())
+
+    def test_generate_fallback_finds_eth_by_operstate(self):
+        """generate_fallback_config finds a device with any valid operstate."""
+        mac = 'aa:bb:cc:aa:bb:cc'
+        write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
+        expected = {
+            'ethernets': {
+                'eth0': {'dhcp4': True, 'match': {'macaddress': mac},
+                         'set-name': 'eth0'}},
+            'version': 2}
+        valid_operstates = ['dormant', 'down', 'lowerlayerdown', 'unknown']
+        for state in valid_operstates:
+            write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state)
+            self.assertEqual(expected, net.generate_fallback_config())
+        write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), 'noworky')
+        self.assertIsNone(net.generate_fallback_config())
+
+    def test_generate_fallback_config_skips_veth(self):
+        """generate_fallback_config will skip any veth interfaces."""
+        # A connected veth which gets ignored
+        write_file(os.path.join(self.sysdir, 'veth0', 'carrier'), '1')
+        self.assertIsNone(net.generate_fallback_config())
+
+    def test_generate_fallback_config_skips_bridges(self):
+        """generate_fallback_config will skip any bridge interfaces."""
+        # A connected interface which gets ignored because it is a bridge
+        write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1')
+        mac = 'aa:bb:cc:aa:bb:cc'
+        write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
+        ensure_file(os.path.join(self.sysdir, 'eth0', 'bridge'))
+        self.assertIsNone(net.generate_fallback_config())
+
+    def test_generate_fallback_config_skips_bonds(self):
+        """generate_fallback_config will skip any bonded interfaces."""
+        # A connected interface which gets ignored because it is bonded
+        write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1')
+        mac = 'aa:bb:cc:aa:bb:cc'
+        write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
+        ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding'))
+        self.assertIsNone(net.generate_fallback_config())
+
+    def test_generate_fallback_config_skips_netfail_devs(self):
+        """generate_fallback_config ignores netfail primary/standby devices;
+        only the master is kept and it is matched by name, not mac."""
+        mac = 'aa:bb:cc:aa:bb:cc'  # netfailover devs share the same mac
+        for iface in ['ens3', 'ens3sby', 'enP0s1f3']:
+            write_file(os.path.join(self.sysdir, iface, 'carrier'), '1')
+            write_file(
+                os.path.join(self.sysdir, iface, 'addr_assign_type'), '0')
+            write_file(
+                os.path.join(self.sysdir, iface, 'address'), mac)
+
+        def is_netfail(iface, _driver=None):
+            # ens3 is the master
+            if iface == 'ens3':
+                return False
+            return True
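+        # side_effect lets the shared mocks answer per interface: ens3 is
+        # reported as the master to keep, while ens3sby and enP0s1f3 are
+        # treated as failover primary/standby devices to skip.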
+ self.m_netfail.side_effect = is_netfail + + def is_netfail_master(iface, _driver=None): + # ens3 is the master + if iface == 'ens3': + return True + return False + self.m_netfail_master.side_effect = is_netfail_master + expected = { + 'ethernets': { + 'ens3': {'dhcp4': True, 'match': {'name': 'ens3'}, + 'set-name': 'ens3'}}, + 'version': 2} + result = net.generate_fallback_config() + self.assertEqual(expected, result) + + +class TestNetFindFallBackNic(CiTestCase): + + def setUp(self): + super(TestNetFindFallBackNic, self).setUp() + sys_mock = mock.patch('cloudinit.net.get_sys_class_path') + self.m_sys_path = sys_mock.start() + self.sysdir = self.tmp_dir() + '/' + self.m_sys_path.return_value = self.sysdir + self.addCleanup(sys_mock.stop) + self.add_patch('cloudinit.net.util.is_container', 'm_is_container', + return_value=False) + self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle') + + def test_generate_fallback_finds_first_connected_eth_with_mac(self): + """find_fallback_nic finds any connected device with a mac.""" + write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1') + write_file(os.path.join(self.sysdir, 'eth1', 'carrier'), '1') + mac = 'aa:bb:cc:aa:bb:cc' + write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac) + self.assertEqual('eth1', net.find_fallback_nic()) + + +class TestGetDeviceList(CiTestCase): + + def setUp(self): + super(TestGetDeviceList, self).setUp() + sys_mock = mock.patch('cloudinit.net.get_sys_class_path') + self.m_sys_path = sys_mock.start() + self.sysdir = self.tmp_dir() + '/' + self.m_sys_path.return_value = self.sysdir + self.addCleanup(sys_mock.stop) + + def test_get_devicelist_raise_oserror(self): + """get_devicelist raise any non-ENOENT OSerror.""" + error = OSError('Can not do it') + error.errno = errno.EPERM # Set non-ENOENT + self.m_sys_path.side_effect = error + with self.assertRaises(OSError) as context_manager: + net.get_devicelist() + exception = context_manager.exception + self.assertEqual('Can not do it', str(exception)) + + def test_get_devicelist_empty_without_sys_net(self): + """get_devicelist returns empty list when missing SYS_CLASS_NET.""" + self.m_sys_path.return_value = 'idontexist' + self.assertEqual([], net.get_devicelist()) + + def test_get_devicelist_empty_with_no_devices_in_sys_net(self): + """get_devicelist returns empty directoty listing for SYS_CLASS_NET.""" + self.assertEqual([], net.get_devicelist()) + + def test_get_devicelist_lists_any_subdirectories_in_sys_net(self): + """get_devicelist returns a directory listing for SYS_CLASS_NET.""" + write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), 'up') + write_file(os.path.join(self.sysdir, 'eth1', 'operstate'), 'up') + self.assertCountEqual(['eth0', 'eth1'], net.get_devicelist()) + + +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False), +) +class TestGetInterfaceMAC(CiTestCase): + + def setUp(self): + super(TestGetInterfaceMAC, self).setUp() + sys_mock = mock.patch('cloudinit.net.get_sys_class_path') + self.m_sys_path = sys_mock.start() + self.sysdir = self.tmp_dir() + '/' + self.m_sys_path.return_value = self.sysdir + self.addCleanup(sys_mock.stop) + + def test_get_interface_mac_false_with_no_mac(self): + """get_device_list returns False when no mac is reported.""" + ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding')) + mac_path = os.path.join(self.sysdir, 'eth0', 'address') + self.assertFalse(os.path.exists(mac_path)) + self.assertFalse(net.get_interface_mac('eth0')) + + def 
test_get_interface_mac(self): + """get_interface_mac returns the mac from SYS_CLASS_NET/dev/address.""" + mac = 'aa:bb:cc:aa:bb:cc' + write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac) + self.assertEqual(mac, net.get_interface_mac('eth1')) + + def test_get_interface_mac_grabs_bonding_address(self): + """get_interface_mac returns the source device mac for bonded devices.""" + source_dev_mac = 'aa:bb:cc:aa:bb:cc' + bonded_mac = 'dd:ee:ff:dd:ee:ff' + write_file(os.path.join(self.sysdir, 'eth1', 'address'), bonded_mac) + write_file( + os.path.join(self.sysdir, 'eth1', 'bonding_slave', 'perm_hwaddr'), + source_dev_mac) + self.assertEqual(source_dev_mac, net.get_interface_mac('eth1')) + + def test_get_interfaces_empty_list_without_sys_net(self): + """get_interfaces returns an empty list when missing SYS_CLASS_NET.""" + self.m_sys_path.return_value = 'idontexist' + self.assertEqual([], net.get_interfaces()) + + def test_get_interfaces_by_mac_skips_empty_mac(self): + """Ignore 00:00:00:00:00:00 addresses from get_interfaces_by_mac.""" + empty_mac = '00:00:00:00:00:00' + mac = 'aa:bb:cc:aa:bb:cc' + write_file(os.path.join(self.sysdir, 'eth1', 'address'), empty_mac) + write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '0') + write_file(os.path.join(self.sysdir, 'eth2', 'addr_assign_type'), '0') + write_file(os.path.join(self.sysdir, 'eth2', 'address'), mac) + expected = [('eth2', 'aa:bb:cc:aa:bb:cc', None, None)] + self.assertEqual(expected, net.get_interfaces()) + + def test_get_interfaces_by_mac_skips_missing_mac(self): + """Ignore interfaces without an address from get_interfaces_by_mac.""" + write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '0') + address_path = os.path.join(self.sysdir, 'eth1', 'address') + self.assertFalse(os.path.exists(address_path)) + mac = 'aa:bb:cc:aa:bb:cc' + write_file(os.path.join(self.sysdir, 'eth2', 'addr_assign_type'), '0') + write_file(os.path.join(self.sysdir, 'eth2', 'address'), mac) + expected = [('eth2', 'aa:bb:cc:aa:bb:cc', None, None)] + self.assertEqual(expected, net.get_interfaces()) + + def test_get_interfaces_by_mac_skips_master_devs(self): + """Ignore interfaces with a master device which would have dup mac.""" + mac1 = mac2 = 'aa:bb:cc:aa:bb:cc' + write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '0') + write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac1) + write_file(os.path.join(self.sysdir, 'eth1', 'master'), "blah") + write_file(os.path.join(self.sysdir, 'eth2', 'addr_assign_type'), '0') + write_file(os.path.join(self.sysdir, 'eth2', 'address'), mac2) + expected = [('eth2', mac2, None, None)] + self.assertEqual(expected, net.get_interfaces()) + + @mock.patch('cloudinit.net.is_netfailover') + def test_get_interfaces_by_mac_skips_netfailover(self, m_netfail): + """Ignore interfaces if netfailover primary or standby.""" + mac = 'aa:bb:cc:aa:bb:cc' # netfailover devs share the same mac + for iface in ['ens3', 'ens3sby', 'enP0s1f3']: + write_file( + os.path.join(self.sysdir, iface, 'addr_assign_type'), '0') + write_file( + os.path.join(self.sysdir, iface, 'address'), mac) + + def is_netfail(iface, _driver=None): + # ens3 is the master + if iface == 'ens3': + return False + else: + return True + m_netfail.side_effect = is_netfail + expected = [('ens3', mac, None, None)] + self.assertEqual(expected, net.get_interfaces()) + + def test_get_interfaces_does_not_skip_phys_members_of_bridges_and_bonds( + self + ): + bridge_mac = 'aa:bb:cc:aa:bb:cc' + bond_mac = 'cc:bb:aa:cc:bb:aa' + ovs_mac = 
'bb:cc:aa:bb:cc:aa' + + write_file(os.path.join(self.sysdir, 'br0', 'address'), bridge_mac) + write_file(os.path.join(self.sysdir, 'br0', 'bridge'), '') + + write_file(os.path.join(self.sysdir, 'bond0', 'address'), bond_mac) + write_file(os.path.join(self.sysdir, 'bond0', 'bonding'), '') + + write_file(os.path.join(self.sysdir, 'ovs-system', 'address'), + ovs_mac) + + write_file(os.path.join(self.sysdir, 'eth1', 'address'), bridge_mac) + os.symlink('../br0', os.path.join(self.sysdir, 'eth1', 'master')) + + write_file(os.path.join(self.sysdir, 'eth2', 'address'), bond_mac) + os.symlink('../bond0', os.path.join(self.sysdir, 'eth2', 'master')) + + write_file(os.path.join(self.sysdir, 'eth3', 'address'), ovs_mac) + os.symlink('../ovs-system', os.path.join(self.sysdir, 'eth3', + 'master')) + os.symlink('../ovs-system', os.path.join(self.sysdir, 'eth3', + 'upper_ovs-system')) + + interface_names = [interface[0] for interface in net.get_interfaces()] + self.assertEqual(['eth1', 'eth2', 'eth3', 'ovs-system'], + sorted(interface_names)) + + +class TestInterfaceHasOwnMAC(CiTestCase): + + def setUp(self): + super(TestInterfaceHasOwnMAC, self).setUp() + sys_mock = mock.patch('cloudinit.net.get_sys_class_path') + self.m_sys_path = sys_mock.start() + self.sysdir = self.tmp_dir() + '/' + self.m_sys_path.return_value = self.sysdir + self.addCleanup(sys_mock.stop) + + def test_interface_has_own_mac_false_when_stolen(self): + """Return False from interface_has_own_mac when address is stolen.""" + write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '2') + self.assertFalse(net.interface_has_own_mac('eth1')) + + def test_interface_has_own_mac_true_when_not_stolen(self): + """Return True from interface_has_own_mac when mac isn't stolen.""" + valid_assign_types = ['0', '1', '3'] + assign_path = os.path.join(self.sysdir, 'eth1', 'addr_assign_type') + for _type in valid_assign_types: + write_file(assign_path, _type) + self.assertTrue(net.interface_has_own_mac('eth1')) + + def test_interface_has_own_mac_strict_errors_on_absent_assign_type(self): + """When addr_assign_type is absent, interface_has_own_mac errors.""" + with self.assertRaises(ValueError): + net.interface_has_own_mac('eth1', strict=True) + + +@mock.patch('cloudinit.net.subp.subp') +class TestEphemeralIPV4Network(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestEphemeralIPV4Network, self).setUp() + sys_mock = mock.patch('cloudinit.net.get_sys_class_path') + self.m_sys_path = sys_mock.start() + self.sysdir = self.tmp_dir() + '/' + self.m_sys_path.return_value = self.sysdir + self.addCleanup(sys_mock.stop) + + def test_ephemeral_ipv4_network_errors_on_missing_params(self, m_subp): + """No required params for EphemeralIPv4Network can be None.""" + required_params = { + 'interface': 'eth0', 'ip': '192.168.2.2', + 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255'} + for key in required_params.keys(): + params = copy.deepcopy(required_params) + params[key] = None + with self.assertRaises(ValueError) as context_manager: + net.EphemeralIPv4Network(**params) + error = context_manager.exception + self.assertIn('Cannot init network on', str(error)) + self.assertEqual(0, m_subp.call_count) + + def test_ephemeral_ipv4_network_errors_invalid_mask_prefix(self, m_subp): + """Raise an error when prefix_or_mask is not a netmask or prefix.""" + params = { + 'interface': 'eth0', 'ip': '192.168.2.2', + 'broadcast': '192.168.2.255'} + invalid_masks = ('invalid', 'invalid.', '123.123.123') + for error_val in invalid_masks: + 
params['prefix_or_mask'] = error_val + with self.assertRaises(ValueError) as context_manager: + with net.EphemeralIPv4Network(**params): + pass + error = context_manager.exception + self.assertIn('Cannot setup network: netmask', str(error)) + self.assertEqual(0, m_subp.call_count) + + def test_ephemeral_ipv4_network_performs_teardown(self, m_subp): + """EphemeralIPv4Network performs teardown on the device if setup.""" + expected_setup_calls = [ + mock.call( + ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24', + 'broadcast', '192.168.2.255', 'dev', 'eth0'], + capture=True, update_env={'LANG': 'C'}), + mock.call( + ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'], + capture=True)] + expected_teardown_calls = [ + mock.call( + ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', + 'down'], capture=True), + mock.call( + ['ip', '-family', 'inet', 'addr', 'del', '192.168.2.2/24', + 'dev', 'eth0'], capture=True)] + params = { + 'interface': 'eth0', 'ip': '192.168.2.2', + 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255'} + with net.EphemeralIPv4Network(**params): + self.assertEqual(expected_setup_calls, m_subp.call_args_list) + m_subp.assert_has_calls(expected_teardown_calls) + + @mock.patch('cloudinit.net.readurl') + def test_ephemeral_ipv4_no_network_if_url_connectivity( + self, m_readurl, m_subp): + """No network setup is performed if we can successfully connect to + connectivity_url.""" + params = { + 'interface': 'eth0', 'ip': '192.168.2.2', + 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255', + 'connectivity_url_data': {'url': 'http://example.org/index.html'} + } + + with net.EphemeralIPv4Network(**params): + self.assertEqual( + [mock.call(url='http://example.org/index.html', timeout=5)], + m_readurl.call_args_list + ) + # Ensure that no teardown happens: + m_subp.assert_has_calls([]) + + def test_ephemeral_ipv4_network_noop_when_configured(self, m_subp): + """EphemeralIPv4Network handles exception when address is setup. + + It performs no cleanup as the interface was already setup. 
+ """ + params = { + 'interface': 'eth0', 'ip': '192.168.2.2', + 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255'} + m_subp.side_effect = ProcessExecutionError( + '', 'RTNETLINK answers: File exists', 2) + expected_calls = [ + mock.call( + ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24', + 'broadcast', '192.168.2.255', 'dev', 'eth0'], + capture=True, update_env={'LANG': 'C'})] + with net.EphemeralIPv4Network(**params): + pass + self.assertEqual(expected_calls, m_subp.call_args_list) + self.assertIn( + 'Skip ephemeral network setup, eth0 already has address', + self.logs.getvalue()) + + def test_ephemeral_ipv4_network_with_prefix(self, m_subp): + """EphemeralIPv4Network takes a valid prefix to setup the network.""" + params = { + 'interface': 'eth0', 'ip': '192.168.2.2', + 'prefix_or_mask': '24', 'broadcast': '192.168.2.255'} + for prefix_val in ['24', 16]: # prefix can be int or string + params['prefix_or_mask'] = prefix_val + with net.EphemeralIPv4Network(**params): + pass + m_subp.assert_has_calls([mock.call( + ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24', + 'broadcast', '192.168.2.255', 'dev', 'eth0'], + capture=True, update_env={'LANG': 'C'})]) + m_subp.assert_has_calls([mock.call( + ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/16', + 'broadcast', '192.168.2.255', 'dev', 'eth0'], + capture=True, update_env={'LANG': 'C'})]) + + def test_ephemeral_ipv4_network_with_new_default_route(self, m_subp): + """Add the route when router is set and no default route exists.""" + params = { + 'interface': 'eth0', 'ip': '192.168.2.2', + 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255', + 'router': '192.168.2.1'} + m_subp.return_value = '', '' # Empty response from ip route gw check + expected_setup_calls = [ + mock.call( + ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24', + 'broadcast', '192.168.2.255', 'dev', 'eth0'], + capture=True, update_env={'LANG': 'C'}), + mock.call( + ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'], + capture=True), + mock.call( + ['ip', 'route', 'show', '0.0.0.0/0'], capture=True), + mock.call(['ip', '-4', 'route', 'add', '192.168.2.1', + 'dev', 'eth0', 'src', '192.168.2.2'], capture=True), + mock.call( + ['ip', '-4', 'route', 'add', 'default', 'via', + '192.168.2.1', 'dev', 'eth0'], capture=True)] + expected_teardown_calls = [ + mock.call(['ip', '-4', 'route', 'del', 'default', 'dev', 'eth0'], + capture=True), + mock.call(['ip', '-4', 'route', 'del', '192.168.2.1', + 'dev', 'eth0', 'src', '192.168.2.2'], capture=True), + ] + + with net.EphemeralIPv4Network(**params): + self.assertEqual(expected_setup_calls, m_subp.call_args_list) + m_subp.assert_has_calls(expected_teardown_calls) + + def test_ephemeral_ipv4_network_with_rfc3442_static_routes(self, m_subp): + params = { + 'interface': 'eth0', 'ip': '192.168.2.2', + 'prefix_or_mask': '255.255.255.255', 'broadcast': '192.168.2.255', + 'static_routes': [('192.168.2.1/32', '0.0.0.0'), + ('169.254.169.254/32', '192.168.2.1'), + ('0.0.0.0/0', '192.168.2.1')], + 'router': '192.168.2.1'} + expected_setup_calls = [ + mock.call( + ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/32', + 'broadcast', '192.168.2.255', 'dev', 'eth0'], + capture=True, update_env={'LANG': 'C'}), + mock.call( + ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'], + capture=True), + mock.call( + ['ip', '-4', 'route', 'add', '192.168.2.1/32', + 'dev', 'eth0'], capture=True), + mock.call( + ['ip', '-4', 'route', 'add', '169.254.169.254/32', + 'via', 
'192.168.2.1', 'dev', 'eth0'], capture=True), + mock.call( + ['ip', '-4', 'route', 'add', '0.0.0.0/0', + 'via', '192.168.2.1', 'dev', 'eth0'], capture=True)] + expected_teardown_calls = [ + mock.call( + ['ip', '-4', 'route', 'del', '0.0.0.0/0', + 'via', '192.168.2.1', 'dev', 'eth0'], capture=True), + mock.call( + ['ip', '-4', 'route', 'del', '169.254.169.254/32', + 'via', '192.168.2.1', 'dev', 'eth0'], capture=True), + mock.call( + ['ip', '-4', 'route', 'del', '192.168.2.1/32', + 'dev', 'eth0'], capture=True), + mock.call( + ['ip', '-family', 'inet', 'link', 'set', 'dev', + 'eth0', 'down'], capture=True), + mock.call( + ['ip', '-family', 'inet', 'addr', 'del', + '192.168.2.2/32', 'dev', 'eth0'], capture=True) + ] + with net.EphemeralIPv4Network(**params): + self.assertEqual(expected_setup_calls, m_subp.call_args_list) + m_subp.assert_has_calls(expected_setup_calls + expected_teardown_calls) + + +class TestApplyNetworkCfgNames(CiTestCase): + V1_CONFIG = textwrap.dedent("""\ + version: 1 + config: + - type: physical + name: interface0 + mac_address: "52:54:00:12:34:00" + subnets: + - type: static + address: 10.0.2.15 + netmask: 255.255.255.0 + gateway: 10.0.2.2 + """) + V2_CONFIG = textwrap.dedent("""\ + version: 2 + ethernets: + interface0: + match: + macaddress: "52:54:00:12:34:00" + addresses: + - 10.0.2.15/24 + gateway4: 10.0.2.2 + set-name: interface0 + """) + + V2_CONFIG_NO_SETNAME = textwrap.dedent("""\ + version: 2 + ethernets: + interface0: + match: + macaddress: "52:54:00:12:34:00" + addresses: + - 10.0.2.15/24 + gateway4: 10.0.2.2 + """) + + V2_CONFIG_NO_MAC = textwrap.dedent("""\ + version: 2 + ethernets: + interface0: + match: + driver: virtio-net + addresses: + - 10.0.2.15/24 + gateway4: 10.0.2.2 + set-name: interface0 + """) + + @mock.patch('cloudinit.net.device_devid') + @mock.patch('cloudinit.net.device_driver') + @mock.patch('cloudinit.net._rename_interfaces') + def test_apply_v1_renames(self, m_rename_interfaces, m_device_driver, + m_device_devid): + m_device_driver.return_value = 'virtio_net' + m_device_devid.return_value = '0x15d8' + + net.apply_network_config_names(yaml.load(self.V1_CONFIG)) + + call = ['52:54:00:12:34:00', 'interface0', 'virtio_net', '0x15d8'] + m_rename_interfaces.assert_called_with([call]) + + @mock.patch('cloudinit.net.device_devid') + @mock.patch('cloudinit.net.device_driver') + @mock.patch('cloudinit.net._rename_interfaces') + def test_apply_v2_renames(self, m_rename_interfaces, m_device_driver, + m_device_devid): + m_device_driver.return_value = 'virtio_net' + m_device_devid.return_value = '0x15d8' + + net.apply_network_config_names(yaml.load(self.V2_CONFIG)) + + call = ['52:54:00:12:34:00', 'interface0', 'virtio_net', '0x15d8'] + m_rename_interfaces.assert_called_with([call]) + + @mock.patch('cloudinit.net._rename_interfaces') + def test_apply_v2_renames_skips_without_setname(self, m_rename_interfaces): + net.apply_network_config_names(yaml.load(self.V2_CONFIG_NO_SETNAME)) + m_rename_interfaces.assert_called_with([]) + + @mock.patch('cloudinit.net._rename_interfaces') + def test_apply_v2_renames_skips_without_mac(self, m_rename_interfaces): + net.apply_network_config_names(yaml.load(self.V2_CONFIG_NO_MAC)) + m_rename_interfaces.assert_called_with([]) + + def test_apply_v2_renames_raises_runtime_error_on_unknown_version(self): + with self.assertRaises(RuntimeError): + net.apply_network_config_names(yaml.load("version: 3")) + + +class TestHasURLConnectivity(HttprettyTestCase): + + def setUp(self): + super(TestHasURLConnectivity, self).setUp() + 
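# The TestApplyNetworkCfgNames cases above pin down when a rename is
# attempted: only v2 ethernets that carry both `set-name` and a
# `match.macaddress` produce a rename tuple, and unknown versions raise.
# A minimal sketch of that selection; lookup_driver/lookup_devid stand in
# for the mocked device_driver/device_devid helpers and, like the
# function name, are assumptions for illustration:
def v2_rename_candidates(netcfg, lookup_driver, lookup_devid):
    candidates = []
    for name, cfg in netcfg.get('ethernets', {}).items():
        mac = cfg.get('match', {}).get('macaddress')
        new_name = cfg.get('set-name')
        if not (mac and new_name):
            continue  # skipped, as the no-setname and no-mac tests assert
        candidates.append(
            [mac, new_name, lookup_driver(new_name), lookup_devid(new_name)])
    return candidates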
self.url = 'http://fake/' + self.kwargs = {'allow_redirects': True, 'timeout': 5.0} + + @mock.patch('cloudinit.net.readurl') + def test_url_timeout_on_connectivity_check(self, m_readurl): + """A timeout of 5 seconds is provided when reading a url.""" + self.assertTrue( + net.has_url_connectivity({'url': self.url}), + 'Expected True on url connect') + + def test_true_on_url_connectivity_success(self): + httpretty.register_uri(httpretty.GET, self.url) + self.assertTrue( + net.has_url_connectivity({'url': self.url}), + 'Expected True on url connect') + + @mock.patch('requests.Session.request') + def test_false_on_url_connectivity_timeout(self, m_request): + """A timeout raised accessing the url will return False.""" + m_request.side_effect = requests.Timeout('Fake Connection Timeout') + self.assertFalse( + net.has_url_connectivity({'url': self.url}), + 'Expected False on url timeout') + + def test_false_on_url_connectivity_failure(self): + httpretty.register_uri(httpretty.GET, self.url, body={}, status=404) + self.assertFalse( + net.has_url_connectivity({'url': self.url}), + 'Expected False on url fail') + + +def _mk_v1_phys(mac, name, driver, device_id): + v1_cfg = {'type': 'physical', 'name': name, 'mac_address': mac} + params = {} + if driver: + params.update({'driver': driver}) + if device_id: + params.update({'device_id': device_id}) + + if params: + v1_cfg.update({'params': params}) + + return v1_cfg + + +def _mk_v2_phys(mac, name, driver=None, device_id=None): + v2_cfg = {'set-name': name, 'match': {'macaddress': mac}} + if driver: + v2_cfg['match'].update({'driver': driver}) + if device_id: + v2_cfg['match'].update({'device_id': device_id}) + + return v2_cfg + + +class TestExtractPhysdevs(CiTestCase): + + def setUp(self): + super(TestExtractPhysdevs, self).setUp() + self.add_patch('cloudinit.net.device_driver', 'm_driver') + self.add_patch('cloudinit.net.device_devid', 'm_devid') + + def test_extract_physdevs_looks_up_driver_v1(self): + driver = 'virtio' + self.m_driver.return_value = driver + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', None, '0x1000'], + ] + netcfg = { + 'version': 1, + 'config': [_mk_v1_phys(*args) for args in physdevs], + } + # insert the driver value for verification + physdevs[0][2] = driver + self.assertEqual(sorted(physdevs), + sorted(net.extract_physdevs(netcfg))) + self.m_driver.assert_called_with('eth0') + + def test_extract_physdevs_looks_up_driver_v2(self): + driver = 'virtio' + self.m_driver.return_value = driver + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', None, '0x1000'], + ] + netcfg = { + 'version': 2, + 'ethernets': {args[1]: _mk_v2_phys(*args) for args in physdevs}, + } + # insert the driver value for verification + physdevs[0][2] = driver + self.assertEqual(sorted(physdevs), + sorted(net.extract_physdevs(netcfg))) + self.m_driver.assert_called_with('eth0') + + def test_extract_physdevs_looks_up_devid_v1(self): + devid = '0x1000' + self.m_devid.return_value = devid + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', None], + ] + netcfg = { + 'version': 1, + 'config': [_mk_v1_phys(*args) for args in physdevs], + } + # insert the devid value for verification + physdevs[0][3] = devid + self.assertEqual(sorted(physdevs), + sorted(net.extract_physdevs(netcfg))) + self.m_devid.assert_called_with('eth0') + + def test_extract_physdevs_looks_up_devid_v2(self): + devid = '0x1000' + self.m_devid.return_value = devid + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', None], + ] + netcfg = { + 'version': 2, + 'ethernets': {args[1]: 
_mk_v2_phys(*args) for args in physdevs}, + } + # insert the devid value for verification + physdevs[0][3] = devid + self.assertEqual(sorted(physdevs), + sorted(net.extract_physdevs(netcfg))) + self.m_devid.assert_called_with('eth0') + + def test_get_v1_type_physical(self): + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'], + ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'], + ['09:87:65:43:21:10', 'ens0p1', 'mlx4_core', '0:0:1000'], + ] + netcfg = { + 'version': 1, + 'config': [_mk_v1_phys(*args) for args in physdevs], + } + self.assertEqual(sorted(physdevs), + sorted(net.extract_physdevs(netcfg))) + + def test_get_v2_type_physical(self): + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'], + ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'], + ['09:87:65:43:21:10', 'ens0p1', 'mlx4_core', '0:0:1000'], + ] + netcfg = { + 'version': 2, + 'ethernets': {args[1]: _mk_v2_phys(*args) for args in physdevs}, + } + self.assertEqual(sorted(physdevs), + sorted(net.extract_physdevs(netcfg))) + + def test_get_v2_type_physical_skips_if_no_set_name(self): + netcfg = { + 'version': 2, + 'ethernets': { + 'ens3': { + 'match': {'macaddress': '00:11:22:33:44:55'}, + } + } + } + self.assertEqual([], net.extract_physdevs(netcfg)) + + def test_runtime_error_on_unknown_netcfg_version(self): + with self.assertRaises(RuntimeError): + net.extract_physdevs({'version': 3, 'awesome_config': []}) + + +class TestNetFailOver(CiTestCase): + + def setUp(self): + super(TestNetFailOver, self).setUp() + self.add_patch('cloudinit.net.util', 'm_util') + self.add_patch('cloudinit.net.read_sys_net', 'm_read_sys_net') + self.add_patch('cloudinit.net.device_driver', 'm_device_driver') + + def test_get_dev_features(self): + devname = self.random_string() + features = self.random_string() + self.m_read_sys_net.return_value = features + + self.assertEqual(features, net.get_dev_features(devname)) + self.assertEqual(1, self.m_read_sys_net.call_count) + self.assertEqual(mock.call(devname, 'device/features'), + self.m_read_sys_net.call_args_list[0]) + + def test_get_dev_features_none_returns_empty_string(self): + devname = self.random_string() + self.m_read_sys_net.side_effect = Exception('error') + self.assertEqual('', net.get_dev_features(devname)) + self.assertEqual(1, self.m_read_sys_net.call_count) + self.assertEqual(mock.call(devname, 'device/features'), + self.m_read_sys_net.call_args_list[0]) + + @mock.patch('cloudinit.net.get_dev_features') + def test_has_netfail_standby_feature(self, m_dev_features): + devname = self.random_string() + standby_features = ('0' * 62) + '1' + '0' + m_dev_features.return_value = standby_features + self.assertTrue(net.has_netfail_standby_feature(devname)) + + @mock.patch('cloudinit.net.get_dev_features') + def test_has_netfail_standby_feature_short_is_false(self, m_dev_features): + devname = self.random_string() + standby_features = self.random_string() + m_dev_features.return_value = standby_features + self.assertFalse(net.has_netfail_standby_feature(devname)) + + @mock.patch('cloudinit.net.get_dev_features') + def test_has_netfail_standby_feature_not_present_is_false(self, + m_dev_features): + devname = self.random_string() + standby_features = '0' * 64 + m_dev_features.return_value = standby_features + self.assertFalse(net.has_netfail_standby_feature(devname)) + + @mock.patch('cloudinit.net.get_dev_features') + def test_has_netfail_standby_feature_no_features_is_false(self, + m_dev_features): + devname = self.random_string() + standby_features = None + 
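# The surrounding cases treat the virtio feature flags as a 64-character
# bit string read from sysfs (device/features), with the STANDBY bit at
# index 62, hence the ('0' * 62) + '1' + '0' fixture above. A minimal
# check consistent with those cases; the constant and function names are
# illustrative assumptions:
VIRTIO_NET_F_STANDBY_BIT = 62

def has_standby_feature(features):
    # Short, empty, or None feature strings cannot carry the flag.
    if not features or len(features) < 64:
        return False
    return features[VIRTIO_NET_F_STANDBY_BIT] == '1'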
m_dev_features.return_value = standby_features + self.assertFalse(net.has_netfail_standby_feature(devname)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_master(self, m_exists, m_standby): + devname = self.random_string() + driver = 'virtio_net' + m_exists.return_value = False # no master sysfs attr + m_standby.return_value = True # has standby feature flag + self.assertTrue(net.is_netfail_master(devname, driver)) + + @mock.patch('cloudinit.net.sys_dev_path') + def test_is_netfail_master_checks_master_attr(self, m_sysdev): + devname = self.random_string() + driver = 'virtio_net' + m_sysdev.return_value = self.random_string() + self.assertFalse(net.is_netfail_master(devname, driver)) + self.assertEqual(1, m_sysdev.call_count) + self.assertEqual(mock.call(devname, path='master'), + m_sysdev.call_args_list[0]) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_master_wrong_driver(self, m_exists, m_standby): + devname = self.random_string() + driver = self.random_string() + self.assertFalse(net.is_netfail_master(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_master_has_master_attr(self, m_exists, m_standby): + devname = self.random_string() + driver = 'virtio_net' + m_exists.return_value = True # has master sysfs attr + self.assertFalse(net.is_netfail_master(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_master_no_standby_feat(self, m_exists, m_standby): + devname = self.random_string() + driver = 'virtio_net' + m_exists.return_value = False # no master sysfs attr + m_standby.return_value = False # no standby feature flag + self.assertFalse(net.is_netfail_master(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + @mock.patch('cloudinit.net.sys_dev_path') + def test_is_netfail_primary(self, m_sysdev, m_exists, m_standby): + devname = self.random_string() + driver = self.random_string() # device not virtio_net + master_devname = self.random_string() + m_sysdev.return_value = "%s/%s" % (self.random_string(), + master_devname) + m_exists.return_value = True # has master sysfs attr + self.m_device_driver.return_value = 'virtio_net' # master virtio_net + m_standby.return_value = True # has standby feature flag + self.assertTrue(net.is_netfail_primary(devname, driver)) + self.assertEqual(1, self.m_device_driver.call_count) + self.assertEqual(mock.call(master_devname), + self.m_device_driver.call_args_list[0]) + self.assertEqual(1, m_standby.call_count) + self.assertEqual(mock.call(master_devname), + m_standby.call_args_list[0]) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + @mock.patch('cloudinit.net.sys_dev_path') + def test_is_netfail_primary_wrong_driver(self, m_sysdev, m_exists, + m_standby): + devname = self.random_string() + driver = 'virtio_net' + self.assertFalse(net.is_netfail_primary(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + @mock.patch('cloudinit.net.sys_dev_path') + def test_is_netfail_primary_no_master(self, m_sysdev, m_exists, m_standby): + devname = self.random_string() + driver = self.random_string() # device not 
virtio_net + m_exists.return_value = False # no master sysfs attr + self.assertFalse(net.is_netfail_primary(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + @mock.patch('cloudinit.net.sys_dev_path') + def test_is_netfail_primary_bad_master(self, m_sysdev, m_exists, + m_standby): + devname = self.random_string() + driver = self.random_string() # device not virtio_net + master_devname = self.random_string() + m_sysdev.return_value = "%s/%s" % (self.random_string(), + master_devname) + m_exists.return_value = True # has master sysfs attr + self.m_device_driver.return_value = 'XXXX' # master not virtio_net + self.assertFalse(net.is_netfail_primary(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + @mock.patch('cloudinit.net.sys_dev_path') + def test_is_netfail_primary_no_standby(self, m_sysdev, m_exists, + m_standby): + devname = self.random_string() + driver = self.random_string() # device not virtio_net + master_devname = self.random_string() + m_sysdev.return_value = "%s/%s" % (self.random_string(), + master_devname) + m_exists.return_value = True # has master sysfs attr + self.m_device_driver.return_value = 'virtio_net' # master virtio_net + m_standby.return_value = False # master has no standby feature flag + self.assertFalse(net.is_netfail_primary(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_standby(self, m_exists, m_standby): + devname = self.random_string() + driver = 'virtio_net' + m_exists.return_value = True # has master sysfs attr + m_standby.return_value = True # has standby feature flag + self.assertTrue(net.is_netfail_standby(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_standby_wrong_driver(self, m_exists, m_standby): + devname = self.random_string() + driver = self.random_string() + self.assertFalse(net.is_netfail_standby(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_standby_no_master(self, m_exists, m_standby): + devname = self.random_string() + driver = 'virtio_net' + m_exists.return_value = False # no master sysfs attr + self.assertFalse(net.is_netfail_standby(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_standby_no_standby_feature(self, m_exists, m_standby): + devname = self.random_string() + driver = 'virtio_net' + m_exists.return_value = True # has master sysfs attr + m_standby.return_value = False # no standby feature flag + self.assertFalse(net.is_netfail_standby(devname, driver)) + + @mock.patch('cloudinit.net.is_netfail_standby') + @mock.patch('cloudinit.net.is_netfail_primary') + def test_is_netfailover_primary(self, m_primary, m_standby): + devname = self.random_string() + driver = self.random_string() + m_primary.return_value = True + m_standby.return_value = False + self.assertTrue(net.is_netfailover(devname, driver)) + + @mock.patch('cloudinit.net.is_netfail_standby') + @mock.patch('cloudinit.net.is_netfail_primary') + def test_is_netfailover_standby(self, m_primary, m_standby): + devname = self.random_string() + driver = self.random_string() + m_primary.return_value = False + m_standby.return_value = True + 
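# Read together, the netfail tests encode a small decision table over
# three observations: the device driver, whether a `master` sysfs attr
# exists, and whether the standby feature bit is set (on the device, or
# on its master for primaries). A sketch of that table; the function and
# parameter names are assumptions, not cloud-init's API:
def classify_netfail(driver, has_master, has_standby_feature,
                     master_is_standby_virtio):
    if driver == 'virtio_net' and not has_master and has_standby_feature:
        return 'master'
    if driver == 'virtio_net' and has_master and has_standby_feature:
        return 'standby'
    if driver != 'virtio_net' and has_master and master_is_standby_virtio:
        return 'primary'
    return None  # not a netfailover device

# is_netfailover(dev) then reduces to: classify_netfail(...) in
# ('primary', 'standby'), matching the three cases asserted here.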
self.assertTrue(net.is_netfailover(devname, driver)) + + @mock.patch('cloudinit.net.is_netfail_standby') + @mock.patch('cloudinit.net.is_netfail_primary') + def test_is_netfailover_returns_false(self, m_primary, m_standby): + devname = self.random_string() + driver = self.random_string() + m_primary.return_value = False + m_standby.return_value = False + self.assertFalse(net.is_netfailover(devname, driver)) + + +class TestOpenvswitchIsInstalled: + """Test cloudinit.net.openvswitch_is_installed. + + Uses the ``clear_lru_cache`` local autouse fixture to allow us to test + despite the ``lru_cache`` decorator on the unit under test. + """ + + @pytest.fixture(autouse=True) + def clear_lru_cache(self): + net.openvswitch_is_installed.cache_clear() + + @pytest.mark.parametrize( + "expected,which_return", [(True, "/some/path"), (False, None)] + ) + @mock.patch("cloudinit.net.subp.which") + def test_mirrors_which_result(self, m_which, expected, which_return): + m_which.return_value = which_return + assert expected == net.openvswitch_is_installed() + + @mock.patch("cloudinit.net.subp.which") + def test_only_calls_which_once(self, m_which): + net.openvswitch_is_installed() + net.openvswitch_is_installed() + assert 1 == m_which.call_count + + +@mock.patch("cloudinit.net.subp.subp", return_value=("", "")) +class TestGetOVSInternalInterfaces: + """Test cloudinit.net.get_ovs_internal_interfaces. + + Uses the ``clear_lru_cache`` local autouse fixture to allow us to test + despite the ``lru_cache`` decorator on the unit under test. + """ + @pytest.fixture(autouse=True) + def clear_lru_cache(self): + net.get_ovs_internal_interfaces.cache_clear() + + def test_command_used(self, m_subp): + """Test we use the correct command when we call subp""" + net.get_ovs_internal_interfaces() + + assert [ + mock.call(net.OVS_INTERNAL_INTERFACE_LOOKUP_CMD) + ] == m_subp.call_args_list + + def test_subp_contents_split_and_returned(self, m_subp): + """Test that the command output is appropriately mangled.""" + stdout = "iface1\niface2\niface3\n" + m_subp.return_value = (stdout, "") + + assert [ + "iface1", + "iface2", + "iface3", + ] == net.get_ovs_internal_interfaces() + + def test_database_connection_error_handled_gracefully(self, m_subp): + """Test that the error indicating OVS is down is handled gracefully.""" + m_subp.side_effect = ProcessExecutionError( + stderr="database connection failed" + ) + + assert [] == net.get_ovs_internal_interfaces() + + def test_other_errors_raised(self, m_subp): + """Test that only database connection errors are handled.""" + m_subp.side_effect = ProcessExecutionError() + + with pytest.raises(ProcessExecutionError): + net.get_ovs_internal_interfaces() + + def test_only_runs_once(self, m_subp): + """Test that we cache the value.""" + net.get_ovs_internal_interfaces() + net.get_ovs_internal_interfaces() + + assert 1 == m_subp.call_count + + +@mock.patch("cloudinit.net.get_ovs_internal_interfaces") +@mock.patch("cloudinit.net.openvswitch_is_installed") +class TestIsOpenVSwitchInternalInterface: + def test_false_if_ovs_not_installed( + self, m_openvswitch_is_installed, _m_get_ovs_internal_interfaces + ): + """Test that OVS' absence returns False.""" + m_openvswitch_is_installed.return_value = False + + assert not net.is_openvswitch_internal_interface("devname") + + @pytest.mark.parametrize( + "detected_interfaces,devname,expected_return", + [ + ([], "devname", False), + (["notdevname"], "devname", False), + (["devname"], "devname", True), + (["some", "other", "devices", "and", "ours"], "ours", 
True), + ], + ) + def test_return_value_based_on_detected_interfaces( + self, + m_openvswitch_is_installed, + m_get_ovs_internal_interfaces, + detected_interfaces, + devname, + expected_return, + ): + """Test that the detected interfaces are used correctly.""" + m_openvswitch_is_installed.return_value = True + m_get_ovs_internal_interfaces.return_value = detected_interfaces + assert expected_return == net.is_openvswitch_internal_interface( + devname + ) + + +class TestIsIpAddress: + """Tests for net.is_ip_address. + + Instead of testing with values we rely on the ipaddress stdlib module to + handle all values correctly, so simply test that is_ip_address defers to + the ipaddress module correctly. + """ + + @pytest.mark.parametrize('ip_address_side_effect,expected_return', ( + (ValueError, False), + (lambda _: ipaddress.IPv4Address('192.168.0.1'), True), + (lambda _: ipaddress.IPv6Address('2001:db8::'), True), + )) + def test_is_ip_address(self, ip_address_side_effect, expected_return): + with mock.patch('cloudinit.net.ipaddress.ip_address', + side_effect=ip_address_side_effect) as m_ip_address: + ret = net.is_ip_address(mock.sentinel.ip_address_in) + assert expected_return == ret + expected_call = mock.call(mock.sentinel.ip_address_in) + assert [expected_call] == m_ip_address.call_args_list + + +class TestIsIpv4Address: + """Tests for net.is_ipv4_address. + + Instead of testing with values we rely on the ipaddress stdlib module to + handle all values correctly, so simply test that is_ipv4_address defers to + the ipaddress module correctly. + """ + + @pytest.mark.parametrize('ipv4address_mock,expected_return', ( + (mock.Mock(side_effect=ValueError), False), + (mock.Mock(return_value=ipaddress.IPv4Address('192.168.0.1')), True), + )) + def test_is_ipv4_address(self, ipv4address_mock, expected_return): + with mock.patch('cloudinit.net.ipaddress.IPv4Address', + ipv4address_mock) as m_ipv4address: + ret = net.is_ipv4_address(mock.sentinel.ip_address_in) + assert expected_return == ret + expected_call = mock.call(mock.sentinel.ip_address_in) + assert [expected_call] == m_ipv4address.call_args_list + + +# vi: ts=4 expandtab diff --git a/tests/unittests/net/test_network_state.py b/tests/unittests/net/test_network_state.py new file mode 100644 index 00000000..fdcd5296 --- /dev/null +++ b/tests/unittests/net/test_network_state.py @@ -0,0 +1,164 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
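# TestIsIpAddress and TestIsIpv4Address (just above this file boundary)
# assert pure delegation to the stdlib: a successful parse maps to True
# and ValueError maps to False. The unit under test can therefore be as
# small as this sketch, re-derived from the tests rather than copied
# from cloudinit.net:
import ipaddress

def is_ip_address(value):
    try:
        ipaddress.ip_address(value)
    except ValueError:
        return False
    return True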
+ +from unittest import mock + +import pytest + +from cloudinit import safeyaml +from cloudinit.net import network_state +from tests.unittests.helpers import CiTestCase + +netstate_path = 'cloudinit.net.network_state' + + +_V1_CONFIG_NAMESERVERS = """\ +network: + version: 1 + config: + - type: nameserver + interface: {iface} + address: + - 192.168.1.1 + - 8.8.8.8 + search: + - spam.local + - type: nameserver + address: + - 192.168.1.0 + - 4.4.4.4 + search: + - eggs.local + - type: physical + name: eth0 + mac_address: '00:11:22:33:44:55' + - type: physical + name: eth1 + mac_address: '66:77:88:99:00:11' +""" + +V1_CONFIG_NAMESERVERS_VALID = _V1_CONFIG_NAMESERVERS.format(iface='eth1') +V1_CONFIG_NAMESERVERS_INVALID = _V1_CONFIG_NAMESERVERS.format(iface='eth90') + +V2_CONFIG_NAMESERVERS = """\ +network: + version: 2 + ethernets: + eth0: + match: + macaddress: '00:11:22:33:44:55' + nameservers: + search: [spam.local, eggs.local] + addresses: [8.8.8.8] + eth1: + match: + macaddress: '66:77:88:99:00:11' + set-name: "ens92" + nameservers: + search: [foo.local, bar.local] + addresses: [4.4.4.4] +""" + + +class TestNetworkStateParseConfig(CiTestCase): + + def setUp(self): + super(TestNetworkStateParseConfig, self).setUp() + nsi_path = netstate_path + '.NetworkStateInterpreter' + self.add_patch(nsi_path, 'm_nsi') + + def test_missing_version_raises_runtime_error(self): + ncfg = {} + with self.assertRaises(RuntimeError): + network_state.parse_net_config_data(ncfg) + + def test_unknown_version_raises_runtime_error(self): + ncfg = {'version': 13.2} + with self.assertRaises(RuntimeError): + network_state.parse_net_config_data(ncfg) + + def test_version_2_passes_self_as_config(self): + ncfg = {'version': 2, 'otherconfig': {}, 'somemore': [1, 2, 3]} + network_state.parse_net_config_data(ncfg) + self.assertEqual([mock.call(version=2, config=ncfg)], + self.m_nsi.call_args_list) + + def test_valid_config_gets_network_state(self): + ncfg = {'version': 2, 'otherconfig': {}, 'somemore': [1, 2, 3]} + result = network_state.parse_net_config_data(ncfg) + self.assertNotEqual(None, result) + + def test_empty_v1_config_gets_network_state(self): + ncfg = {'version': 1, 'config': []} + result = network_state.parse_net_config_data(ncfg) + self.assertNotEqual(None, result) + + def test_empty_v2_config_gets_network_state(self): + ncfg = {'version': 2} + result = network_state.parse_net_config_data(ncfg) + self.assertNotEqual(None, result) + + +class TestNetworkStateParseConfigV2(CiTestCase): + + def test_version_2_ignores_renderer_key(self): + ncfg = {'version': 2, 'renderer': 'networkd', 'ethernets': {}} + nsi = network_state.NetworkStateInterpreter(version=ncfg['version'], + config=ncfg) + nsi.parse_config(skip_broken=False) + self.assertEqual(ncfg, nsi.as_dict()['config']) + + +class TestNetworkStateParseNameservers: + def _parse_network_state_from_config(self, config): + yaml = safeyaml.load(config) + return network_state.parse_net_config_data(yaml['network']) + + def test_v1_nameservers_valid(self): + config = self._parse_network_state_from_config( + V1_CONFIG_NAMESERVERS_VALID) + + # If an interface was specified, DNS shouldn't be in the global list + assert ['192.168.1.0', '4.4.4.4'] == sorted( + config.dns_nameservers) + assert ['eggs.local'] == config.dns_searchdomains + + # If an interface was specified, DNS should be part of the interface + for iface in config.iter_interfaces(): + if iface['name'] == 'eth1': + assert iface['dns']['addresses'] == ['192.168.1.1', '8.8.8.8'] + assert iface['dns']['search'] == ['spam.local'] + 
else: + assert 'dns' not in iface + + def test_v1_nameservers_invalid(self): + with pytest.raises(ValueError): + self._parse_network_state_from_config( + V1_CONFIG_NAMESERVERS_INVALID) + + def test_v2_nameservers(self): + config = self._parse_network_state_from_config(V2_CONFIG_NAMESERVERS) + + # Ensure DNS defined on interface exists on interface + for iface in config.iter_interfaces(): + if iface['name'] == 'eth0': + assert iface['dns'] == { + 'nameservers': ['8.8.8.8'], + 'search': ['spam.local', 'eggs.local'], + } + else: + assert iface['dns'] == { + 'nameservers': ['4.4.4.4'], + 'search': ['foo.local', 'bar.local'] + } + + # Ensure DNS defined on interface also exists globally (since there + # is no global DNS definitions in v2) + assert ['4.4.4.4', '8.8.8.8'] == sorted(config.dns_nameservers) + assert [ + 'bar.local', + 'eggs.local', + 'foo.local', + 'spam.local', + ] == sorted(config.dns_searchdomains) + +# vi: ts=4 expandtab diff --git a/tests/unittests/net/test_networkd.py b/tests/unittests/net/test_networkd.py new file mode 100644 index 00000000..8dc90b48 --- /dev/null +++ b/tests/unittests/net/test_networkd.py @@ -0,0 +1,64 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import safeyaml +from cloudinit.net import networkd, network_state + +V2_CONFIG_SET_NAME = """\ +network: + version: 2 + ethernets: + eth0: + match: + macaddress: '00:11:22:33:44:55' + nameservers: + search: [spam.local, eggs.local] + addresses: [8.8.8.8] + eth1: + match: + macaddress: '66:77:88:99:00:11' + set-name: "ens92" + nameservers: + search: [foo.local, bar.local] + addresses: [4.4.4.4] +""" + +V2_CONFIG_SET_NAME_RENDERED_ETH0 = """[Match] +MACAddress=00:11:22:33:44:55 +Name=eth0 + +[Network] +DHCP=no +DNS=8.8.8.8 +Domains=spam.local eggs.local + +""" + +V2_CONFIG_SET_NAME_RENDERED_ETH1 = """[Match] +MACAddress=66:77:88:99:00:11 +Name=ens92 + +[Network] +DHCP=no +DNS=4.4.4.4 +Domains=foo.local bar.local + +""" + + +class TestNetworkdRenderState: + def _parse_network_state_from_config(self, config): + yaml = safeyaml.load(config) + return network_state.parse_net_config_data(yaml["network"]) + + def test_networkd_render_with_set_name(self): + ns = self._parse_network_state_from_config(V2_CONFIG_SET_NAME) + renderer = networkd.Renderer() + rendered_content = renderer._render_content(ns) + + assert "eth0" in rendered_content + assert rendered_content["eth0"] == V2_CONFIG_SET_NAME_RENDERED_ETH0 + assert "ens92" in rendered_content + assert rendered_content["ens92"] == V2_CONFIG_SET_NAME_RENDERED_ETH1 + + +# vi: ts=4 expandtab diff --git a/tests/unittests/runs/__init__.py b/tests/unittests/runs/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/runs/test_merge_run.py b/tests/unittests/runs/test_merge_run.py new file mode 100644 index 00000000..29439c8a --- /dev/null +++ b/tests/unittests/runs/test_merge_run.py @@ -0,0 +1,60 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
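# TestNetworkdRenderState above fixes the exact unit text the networkd
# renderer emits, including the detail that [Match] Name= uses the
# set-name value (ens92) when present and falls back to the config key
# (eth0) otherwise. A minimal renderer for just the asserted fields;
# the helper name is an assumption:
def render_networkd_unit(mac, name, dns, domains):
    return (
        '[Match]\nMACAddress=%s\nName=%s\n\n'
        '[Network]\nDHCP=no\nDNS=%s\nDomains=%s\n\n'
        % (mac, name, ' '.join(dns), ' '.join(domains)))

# render_networkd_unit('66:77:88:99:00:11', 'ens92', ['4.4.4.4'],
#                      ['foo.local', 'bar.local']) reproduces the
# V2_CONFIG_SET_NAME_RENDERED_ETH1 fixture byte for byte.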
+ +import os +import shutil +import tempfile + +from tests.unittests import helpers + +from cloudinit.settings import PER_INSTANCE +from cloudinit import safeyaml +from cloudinit import stages +from cloudinit import util + + +class TestMergeRun(helpers.FilesystemMockingTestCase): + def _patchIn(self, root): + self.patchOS(root) + self.patchUtils(root) + + def test_none_ds(self): + new_root = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, new_root) + self.replicateTestRoot('simple_ubuntu', new_root) + cfg = { + 'datasource_list': ['None'], + 'cloud_init_modules': ['write-files'], + 'system_info': {'paths': {'run_dir': new_root}} + } + ud = helpers.readResource('user_data.1.txt') + cloud_cfg = safeyaml.dumps(cfg) + util.ensure_dir(os.path.join(new_root, 'etc', 'cloud')) + util.write_file(os.path.join(new_root, 'etc', + 'cloud', 'cloud.cfg'), cloud_cfg) + self._patchIn(new_root) + + # Now start verifying whats created + initer = stages.Init() + initer.read_cfg() + initer.initialize() + initer.fetch() + initer.datasource.userdata_raw = ud + initer.instancify() + initer.update() + initer.cloudify().run('consume_data', + initer.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE) + mirrors = initer.distro.get_option('package_mirrors') + self.assertEqual(1, len(mirrors)) + mirror = mirrors[0] + self.assertEqual(mirror['arches'], ['i386', 'amd64', 'blah']) + mods = stages.Modules(initer) + (which_ran, failures) = mods.run_section('cloud_init_modules') + self.assertTrue(len(failures) == 0) + self.assertTrue(os.path.exists('/etc/blah.ini')) + self.assertIn('write-files', which_ran) + contents = util.load_file('/etc/blah.ini') + self.assertEqual(contents, 'blah') + +# vi: ts=4 expandtab diff --git a/tests/unittests/runs/test_simple_run.py b/tests/unittests/runs/test_simple_run.py new file mode 100644 index 00000000..aa78dda3 --- /dev/null +++ b/tests/unittests/runs/test_simple_run.py @@ -0,0 +1,182 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
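# test_merge_run.py above and test_simple_run.py below both drive the
# same ordered boot protocol before exercising config modules. Factored
# out, the sequence the tests repeat is the following; the helper name
# is an assumption, but the calls are taken verbatim from the tests:
from cloudinit import stages
from cloudinit.settings import PER_INSTANCE

def run_init_stage():
    initer = stages.Init()
    initer.read_cfg()      # load /etc/cloud/cloud.cfg
    initer.initialize()    # create the /var/lib/cloud tree
    initer.fetch()         # locate a datasource
    initer.instancify()    # pin the instance id
    initer.update()        # write instance symlinks and data
    initer.cloudify().run('consume_data', initer.consume_data,
                          args=[PER_INSTANCE], freq=PER_INSTANCE)
    return initer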
+ +import copy +import os + + +from cloudinit.settings import PER_INSTANCE +from cloudinit import safeyaml +from cloudinit import stages +from tests.unittests import helpers +from cloudinit import util + + +class TestSimpleRun(helpers.FilesystemMockingTestCase): + + with_logs = True + + def setUp(self): + super(TestSimpleRun, self).setUp() + self.new_root = self.tmp_dir() + self.replicateTestRoot('simple_ubuntu', self.new_root) + + # Seed cloud.cfg file for our tests + self.cfg = { + 'datasource_list': ['None'], + 'runcmd': ['ls /etc'], # test ALL_DISTROS + 'spacewalk': {}, # test non-ubuntu distros module definition + 'system_info': {'paths': {'run_dir': self.new_root}}, + 'write_files': [ + { + 'path': '/etc/blah.ini', + 'content': 'blah', + 'permissions': 0o755, + }, + ], + 'cloud_init_modules': ['write-files', 'spacewalk', 'runcmd'], + } + cloud_cfg = safeyaml.dumps(self.cfg) + util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) + util.write_file(os.path.join(self.new_root, 'etc', + 'cloud', 'cloud.cfg'), cloud_cfg) + self.patchOS(self.new_root) + self.patchUtils(self.new_root) + + def test_none_ds_populates_var_lib_cloud(self): + """Init and run_section default behavior creates appropriate dirs.""" + # Now start verifying what's created + initer = stages.Init() + initer.read_cfg() + initer.initialize() + self.assertTrue(os.path.exists("/var/lib/cloud")) + for d in ['scripts', 'seed', 'instances', 'handlers', 'sem', 'data']: + self.assertTrue(os.path.isdir(os.path.join("/var/lib/cloud", d))) + + initer.fetch() + iid = initer.instancify() + self.assertEqual(iid, 'iid-datasource-none') + initer.update() + self.assertTrue(os.path.islink("var/lib/cloud/instance")) + + def test_none_ds_runs_modules_which_do_not_define_distros(self): + """Any modules which do not define a distros attribute are run.""" + initer = stages.Init() + initer.read_cfg() + initer.initialize() + initer.fetch() + initer.instancify() + initer.update() + initer.cloudify().run('consume_data', initer.consume_data, + args=[PER_INSTANCE], freq=PER_INSTANCE) + + mods = stages.Modules(initer) + (which_ran, failures) = mods.run_section('cloud_init_modules') + self.assertTrue(len(failures) == 0) + self.assertTrue(os.path.exists('/etc/blah.ini')) + self.assertIn('write-files', which_ran) + contents = util.load_file('/etc/blah.ini') + self.assertEqual(contents, 'blah') + self.assertNotIn( + "Skipping modules ['write-files'] because they are not verified on" + " distro 'ubuntu'", + self.logs.getvalue()) + + def test_none_ds_skips_modules_which_define_unmatched_distros(self): + """Skip modules which define distros which don't match the current.""" + initer = stages.Init() + initer.read_cfg() + initer.initialize() + initer.fetch() + initer.instancify() + initer.update() + initer.cloudify().run('consume_data', initer.consume_data, + args=[PER_INSTANCE], freq=PER_INSTANCE) + + mods = stages.Modules(initer) + (which_ran, failures) = mods.run_section('cloud_init_modules') + self.assertTrue(len(failures) == 0) + self.assertIn( + "Skipping modules 'spacewalk' because they are not verified on" + " distro 'ubuntu'", + self.logs.getvalue()) + self.assertNotIn('spacewalk', which_ran) + + def test_none_ds_runs_modules_which_distros_all(self): + """Run modules which define the distros attribute as supporting 'all'. + + This is done in the module with the declaration: + distros = [ALL_DISTROS]. runcmd is an example. 
+ """ + initer = stages.Init() + initer.read_cfg() + initer.initialize() + initer.fetch() + initer.instancify() + initer.update() + initer.cloudify().run('consume_data', initer.consume_data, + args=[PER_INSTANCE], freq=PER_INSTANCE) + + mods = stages.Modules(initer) + (which_ran, failures) = mods.run_section('cloud_init_modules') + self.assertTrue(len(failures) == 0) + self.assertIn('runcmd', which_ran) + self.assertNotIn( + "Skipping modules 'runcmd' because they are not verified on" + " distro 'ubuntu'", + self.logs.getvalue()) + + def test_none_ds_forces_run_via_unverified_modules(self): + """run_section forced skipped modules by using unverified_modules.""" + + # re-write cloud.cfg with unverified_modules override + cfg = copy.deepcopy(self.cfg) + cfg['unverified_modules'] = ['spacewalk'] # Would have skipped + cloud_cfg = safeyaml.dumps(cfg) + util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) + util.write_file(os.path.join(self.new_root, 'etc', + 'cloud', 'cloud.cfg'), cloud_cfg) + + initer = stages.Init() + initer.read_cfg() + initer.initialize() + initer.fetch() + initer.instancify() + initer.update() + initer.cloudify().run('consume_data', initer.consume_data, + args=[PER_INSTANCE], freq=PER_INSTANCE) + + mods = stages.Modules(initer) + (which_ran, failures) = mods.run_section('cloud_init_modules') + self.assertTrue(len(failures) == 0) + self.assertIn('spacewalk', which_ran) + self.assertIn( + "running unverified_modules: 'spacewalk'", + self.logs.getvalue()) + + def test_none_ds_run_with_no_config_modules(self): + """run_section will report no modules run when none are configured.""" + + # re-write cloud.cfg with unverified_modules override + cfg = copy.deepcopy(self.cfg) + # Represent empty configuration in /etc/cloud/cloud.cfg + cfg['cloud_init_modules'] = None + cloud_cfg = safeyaml.dumps(cfg) + util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) + util.write_file(os.path.join(self.new_root, 'etc', + 'cloud', 'cloud.cfg'), cloud_cfg) + + initer = stages.Init() + initer.read_cfg() + initer.initialize() + initer.fetch() + initer.instancify() + initer.update() + initer.cloudify().run('consume_data', initer.consume_data, + args=[PER_INSTANCE], freq=PER_INSTANCE) + + mods = stages.Modules(initer) + (which_ran, failures) = mods.run_section('cloud_init_modules') + self.assertTrue(len(failures) == 0) + self.assertEqual([], which_ran) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/__init__.py b/tests/unittests/sources/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/sources/helpers/test_netlink.py b/tests/unittests/sources/helpers/test_netlink.py new file mode 100644 index 00000000..478ce375 --- /dev/null +++ b/tests/unittests/sources/helpers/test_netlink.py @@ -0,0 +1,480 @@ +# Author: Tamilmani Manoharan +# +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from tests.unittests.helpers import CiTestCase, mock +import socket +import struct +import codecs +from cloudinit.sources.helpers.netlink import ( + NetlinkCreateSocketError, create_bound_netlink_socket, read_netlink_socket, + read_rta_oper_state, unpack_rta_attr, wait_for_media_disconnect_connect, + wait_for_nic_attach_event, wait_for_nic_detach_event, + OPER_DOWN, OPER_UP, OPER_DORMANT, OPER_LOWERLAYERDOWN, OPER_NOTPRESENT, + OPER_TESTING, OPER_UNKNOWN, RTATTR_START_OFFSET, RTM_NEWLINK, RTM_DELLINK, + RTM_SETLINK, RTM_GETLINK, MAX_SIZE) + + +def int_to_bytes(i): + '''convert integer to binary: eg: 1 to \x01''' + hex_value = '{0:x}'.format(i) + hex_value = '0' * (len(hex_value) % 2) + hex_value + return codecs.decode(hex_value, 'hex_codec') + + +class TestCreateBoundNetlinkSocket(CiTestCase): + + @mock.patch('cloudinit.sources.helpers.netlink.socket.socket') + def test_socket_error_on_create(self, m_socket): + '''create_bound_netlink_socket raises NetlinkCreateSocketError when socket creation errors.''' + m_socket.side_effect = socket.error("Fake socket failure") + with self.assertRaises(NetlinkCreateSocketError) as ctx_mgr: + create_bound_netlink_socket() + self.assertEqual( + 'Exception during netlink socket create: Fake socket failure', + str(ctx_mgr.exception)) + + +class TestReadNetlinkSocket(CiTestCase): + + @mock.patch('cloudinit.sources.helpers.netlink.socket.socket') + @mock.patch('cloudinit.sources.helpers.netlink.select.select') + def test_read_netlink_socket(self, m_select, m_socket): + '''read_netlink_socket is able to receive data''' + data = 'netlinktest' + m_select.return_value = [m_socket], None, None + m_socket.recv.return_value = data + recv_data = read_netlink_socket(m_socket, 2) + m_select.assert_called_with([m_socket], [], [], 2) + m_socket.recv.assert_called_with(MAX_SIZE) + self.assertIsNotNone(recv_data) + self.assertEqual(recv_data, data) + + @mock.patch('cloudinit.sources.helpers.netlink.socket.socket') + @mock.patch('cloudinit.sources.helpers.netlink.select.select') + def test_netlink_read_timeout(self, m_select, m_socket): + '''read_netlink_socket should timeout if nothing to read''' + m_select.return_value = [], None, None + data = read_netlink_socket(m_socket, 1) + m_select.assert_called_with([m_socket], [], [], 1) + self.assertEqual(m_socket.recv.call_count, 0) + self.assertIsNone(data) + + def test_read_invalid_socket(self): + '''read_netlink_socket raises assert error if socket is invalid''' + socket = None + with self.assertRaises(AssertionError) as context: + read_netlink_socket(socket, 1) + self.assertTrue('netlink socket is none' in str(context.exception)) + + +class TestParseNetlinkMessage(CiTestCase): + + def test_read_rta_oper_state(self): + '''read_rta_oper_state can parse a netlink message and extract data''' + ifname = "eth0" + bytes = ifname.encode("utf-8") + buf = bytearray(48) + struct.pack_into("HH4sHHc", buf, RTATTR_START_OFFSET, 8, 3, bytes, 5, + 16, int_to_bytes(OPER_DOWN)) + interface_state = read_rta_oper_state(buf) + self.assertEqual(interface_state.ifname, ifname) + self.assertEqual(interface_state.operstate, OPER_DOWN) + + def test_read_none_data(self): + '''read_rta_oper_state raises assert error if data is none''' + data = None + with self.assertRaises(AssertionError) as context: + read_rta_oper_state(data) + self.assertEqual('data is none', str(context.exception)) + + def test_read_invalid_rta_operstate_none(self): + '''read_rta_oper_state returns none if operstate is none''' + ifname 
= "eth0" + buf = bytearray(40) + bytes = ifname.encode("utf-8") + struct.pack_into("HH4s", buf, RTATTR_START_OFFSET, 8, 3, bytes) + interface_state = read_rta_oper_state(buf) + self.assertIsNone(interface_state) + + def test_read_invalid_rta_ifname_none(self): + '''read_rta_oper_state returns none if ifname is none''' + buf = bytearray(40) + struct.pack_into("HHc", buf, RTATTR_START_OFFSET, 5, 16, + int_to_bytes(OPER_DOWN)) + interface_state = read_rta_oper_state(buf) + self.assertIsNone(interface_state) + + def test_read_invalid_data_len(self): + '''raise assert error if data size is smaller than required size''' + buf = bytearray(32) + with self.assertRaises(AssertionError) as context: + read_rta_oper_state(buf) + self.assertTrue('length of data is smaller than RTATTR_START_OFFSET' in + str(context.exception)) + + def test_unpack_rta_attr_none_data(self): + '''unpack_rta_attr raises assert error if data is none''' + data = None + with self.assertRaises(AssertionError) as context: + unpack_rta_attr(data, RTATTR_START_OFFSET) + self.assertTrue('data is none' in str(context.exception)) + + def test_unpack_rta_attr_invalid_offset(self): + '''unpack_rta_attr raises assert error if offset is invalid''' + data = bytearray(48) + with self.assertRaises(AssertionError) as context: + unpack_rta_attr(data, "offset") + self.assertTrue('offset is not integer' in str(context.exception)) + with self.assertRaises(AssertionError) as context: + unpack_rta_attr(data, 31) + self.assertTrue('rta offset is less than expected length' in + str(context.exception)) + + +@mock.patch('cloudinit.sources.helpers.netlink.socket.socket') +@mock.patch('cloudinit.sources.helpers.netlink.read_netlink_socket') +class TestNicAttachDetach(CiTestCase): + with_logs = True + + def _media_switch_data(self, ifname, msg_type, operstate): + '''construct netlink data with specified fields''' + if ifname and operstate is not None: + data = bytearray(48) + bytes = ifname.encode("utf-8") + struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3, + bytes, 5, 16, int_to_bytes(operstate)) + elif ifname: + data = bytearray(40) + bytes = ifname.encode("utf-8") + struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes) + elif operstate: + data = bytearray(40) + struct.pack_into("HHc", data, RTATTR_START_OFFSET, 5, 16, + int_to_bytes(operstate)) + struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0) + return data + + def test_nic_attached_oper_down(self, m_read_netlink_socket, m_socket): + '''Test for a new nic attached''' + ifname = "eth0" + data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) + m_read_netlink_socket.side_effect = [data_op_down] + ifread = wait_for_nic_attach_event(m_socket, []) + self.assertEqual(m_read_netlink_socket.call_count, 1) + self.assertEqual(ifname, ifread) + + def test_nic_attached_oper_up(self, m_read_netlink_socket, m_socket): + '''Test for a new nic attached''' + ifname = "eth0" + data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) + m_read_netlink_socket.side_effect = [data_op_up] + ifread = wait_for_nic_attach_event(m_socket, []) + self.assertEqual(m_read_netlink_socket.call_count, 1) + self.assertEqual(ifname, ifread) + + def test_nic_attach_ignore_existing(self, m_read_netlink_socket, m_socket): + '''Test that we read only the interfaces we are interested in.''' + data_eth0 = self._media_switch_data("eth0", RTM_NEWLINK, OPER_DOWN) + data_eth1 = self._media_switch_data("eth1", RTM_NEWLINK, OPER_DOWN) + m_read_netlink_socket.side_effect = [data_eth0, 
data_eth1] + ifread = wait_for_nic_attach_event(m_socket, ["eth0"]) + self.assertEqual(m_read_netlink_socket.call_count, 2) + self.assertEqual("eth1", ifread) + + def test_nic_attach_read_first(self, m_read_netlink_socket, m_socket): + '''Test that we read only the interfaces we are interested in.''' + data_eth0 = self._media_switch_data("eth0", RTM_NEWLINK, OPER_DOWN) + data_eth1 = self._media_switch_data("eth1", RTM_NEWLINK, OPER_DOWN) + m_read_netlink_socket.side_effect = [data_eth0, data_eth1] + ifread = wait_for_nic_attach_event(m_socket, ["eth1"]) + self.assertEqual(m_read_netlink_socket.call_count, 1) + self.assertEqual("eth0", ifread) + + def test_nic_detached(self, m_read_netlink_socket, m_socket): + '''Test for an existing nic detached''' + ifname = "eth0" + data_op_down = self._media_switch_data(ifname, RTM_DELLINK, OPER_DOWN) + m_read_netlink_socket.side_effect = [data_op_down] + ifread = wait_for_nic_detach_event(m_socket) + self.assertEqual(m_read_netlink_socket.call_count, 1) + self.assertEqual(ifname, ifread) + + +@mock.patch('cloudinit.sources.helpers.netlink.socket.socket') +@mock.patch('cloudinit.sources.helpers.netlink.read_netlink_socket') +class TestWaitForMediaDisconnectConnect(CiTestCase): + with_logs = True + + def _media_switch_data(self, ifname, msg_type, operstate): + '''construct netlink data with specified fields''' + if ifname and operstate is not None: + data = bytearray(48) + bytes = ifname.encode("utf-8") + struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3, + bytes, 5, 16, int_to_bytes(operstate)) + elif ifname: + data = bytearray(40) + bytes = ifname.encode("utf-8") + struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes) + elif operstate: + data = bytearray(40) + struct.pack_into("HHc", data, RTATTR_START_OFFSET, 5, 16, + int_to_bytes(operstate)) + struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0) + return data + + def test_media_down_up_scenario(self, m_read_netlink_socket, + m_socket): + '''Test for media down up sequence for required interface name''' + ifname = "eth0" + # construct data for Oper State down + data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) + # construct data for Oper State up + data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) + m_read_netlink_socket.side_effect = [data_op_down, data_op_up] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 2) + + def test_wait_for_media_switch_diff_interface(self, m_read_netlink_socket, + m_socket): + '''wait_for_media_disconnect_connect ignores unexpected interfaces. + + The first two messages are for other interfaces and last two are for + expected interface. 
So the function exits only after receiving the last
+        2 messages, and therefore the call count for m_read_netlink_socket
+        has to be 4.
+        '''
+        other_ifname = "eth1"
+        expected_ifname = "eth0"
+        data_op_down_eth1 = self._media_switch_data(
+            other_ifname, RTM_NEWLINK, OPER_DOWN
+        )
+        data_op_up_eth1 = self._media_switch_data(
+            other_ifname, RTM_NEWLINK, OPER_UP
+        )
+        data_op_down_eth0 = self._media_switch_data(
+            expected_ifname, RTM_NEWLINK, OPER_DOWN
+        )
+        data_op_up_eth0 = self._media_switch_data(
+            expected_ifname, RTM_NEWLINK, OPER_UP)
+        m_read_netlink_socket.side_effect = [
+            data_op_down_eth1,
+            data_op_up_eth1,
+            data_op_down_eth0,
+            data_op_up_eth0
+        ]
+        wait_for_media_disconnect_connect(m_socket, expected_ifname)
+        self.assertIn('Ignored netlink event on interface %s' % other_ifname,
+                      self.logs.getvalue())
+        self.assertEqual(m_read_netlink_socket.call_count, 4)
+
+    def test_invalid_msgtype_getlink(self, m_read_netlink_socket, m_socket):
+        '''wait_for_media_disconnect_connect ignores GETLINK events.
+
+        The first two messages are oper down and up with the RTM_GETLINK
+        type, which the netlink module will ignore. The last 2 messages are
+        RTM_NEWLINK with oper state down and up. Therefore the call count
+        for m_read_netlink_socket has to be 4, ignoring the first 2
+        RTM_GETLINK messages.
+        '''
+        ifname = "eth0"
+        data_getlink_down = self._media_switch_data(
+            ifname, RTM_GETLINK, OPER_DOWN
+        )
+        data_getlink_up = self._media_switch_data(
+            ifname, RTM_GETLINK, OPER_UP
+        )
+        data_newlink_down = self._media_switch_data(
+            ifname, RTM_NEWLINK, OPER_DOWN
+        )
+        data_newlink_up = self._media_switch_data(
+            ifname, RTM_NEWLINK, OPER_UP
+        )
+        m_read_netlink_socket.side_effect = [
+            data_getlink_down,
+            data_getlink_up,
+            data_newlink_down,
+            data_newlink_up
+        ]
+        wait_for_media_disconnect_connect(m_socket, ifname)
+        self.assertEqual(m_read_netlink_socket.call_count, 4)
+
+    def test_invalid_msgtype_setlink(self, m_read_netlink_socket, m_socket):
+        '''wait_for_media_disconnect_connect ignores SETLINK events.
+
+        The first two messages are oper down and up with the RTM_SETLINK
+        type, which it will ignore. The 3rd and 4th messages are RTM_NEWLINK
+        with down and up. The function should exit after the 4th message,
+        since it then sees the down->up scenario. So the call count for
+        m_read_netlink_socket has to be 4: the first 2 RTM_SETLINK messages
+        are read and ignored, and the last 2 RTM_NEWLINK entries in
+        side_effect are never consumed.
+        '''
+        ifname = "eth0"
+        data_setlink_down = self._media_switch_data(
+            ifname, RTM_SETLINK, OPER_DOWN
+        )
+        data_setlink_up = self._media_switch_data(
+            ifname, RTM_SETLINK, OPER_UP
+        )
+        data_newlink_down = self._media_switch_data(
+            ifname, RTM_NEWLINK, OPER_DOWN
+        )
+        data_newlink_up = self._media_switch_data(
+            ifname, RTM_NEWLINK, OPER_UP
+        )
+        m_read_netlink_socket.side_effect = [
+            data_setlink_down,
+            data_setlink_up,
+            data_newlink_down,
+            data_newlink_up,
+            data_newlink_down,
+            data_newlink_up
+        ]
+        wait_for_media_disconnect_connect(m_socket, ifname)
+        self.assertEqual(m_read_netlink_socket.call_count, 4)
+
+    def test_netlink_invalid_switch_scenario(self, m_read_netlink_socket,
+                                             m_socket):
+        '''Returns only after receiving an UP event following a DOWN event.'''
+        ifname = "eth0"
+        data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
+        data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
+        data_op_dormant = self._media_switch_data(
+            ifname, RTM_NEWLINK, OPER_DORMANT
+        )
+        data_op_notpresent = self._media_switch_data(
+            ifname, RTM_NEWLINK, OPER_NOTPRESENT
+        )
+        data_op_lowerdown = self._media_switch_data(
+            ifname, RTM_NEWLINK, OPER_LOWERLAYERDOWN
+        )
+        data_op_testing = self._media_switch_data(
+            ifname, RTM_NEWLINK, OPER_TESTING
+        )
+        data_op_unknown = self._media_switch_data(
+            ifname, RTM_NEWLINK, OPER_UNKNOWN
+        )
+        m_read_netlink_socket.side_effect = [
+            data_op_up, data_op_up,
+            data_op_dormant, data_op_up,
+            data_op_notpresent, data_op_up,
+            data_op_lowerdown, data_op_up,
+            data_op_testing, data_op_up,
+            data_op_unknown, data_op_up,
+            data_op_down, data_op_up
+        ]
+        wait_for_media_disconnect_connect(m_socket, ifname)
+        self.assertEqual(m_read_netlink_socket.call_count, 14)
+
+    def test_netlink_valid_inbetween_transitions(self, m_read_netlink_socket,
+                                                 m_socket):
+        '''wait_for_media_disconnect_connect handles in-between transitions.'''
+        ifname = "eth0"
+        data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
+        data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
+        data_op_dormant = self._media_switch_data(
+            ifname, RTM_NEWLINK, OPER_DORMANT)
+        data_op_unknown = self._media_switch_data(
+            ifname, RTM_NEWLINK, OPER_UNKNOWN)
+        m_read_netlink_socket.side_effect = [
+            data_op_down, data_op_dormant,
+            data_op_unknown, data_op_up
+        ]
+        wait_for_media_disconnect_connect(m_socket, ifname)
+        self.assertEqual(m_read_netlink_socket.call_count, 4)
+
+    def test_netlink_invalid_operstate(self, m_read_netlink_socket, m_socket):
+        '''wait_for_media_disconnect_connect should handle invalid operstates.
+
+        The function should not fail and should return even if it receives
+        invalid operstates; it always waits for the down->up sequence.
+ ''' + ifname = "eth0" + data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) + data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) + data_op_invalid = self._media_switch_data(ifname, RTM_NEWLINK, 7) + m_read_netlink_socket.side_effect = [ + data_op_invalid, data_op_up, + data_op_down, data_op_invalid, + data_op_up + ] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 5) + + def test_wait_invalid_socket(self, m_read_netlink_socket, m_socket): + '''wait_for_media_disconnect_connect handle none netlink socket.''' + socket = None + ifname = "eth0" + with self.assertRaises(AssertionError) as context: + wait_for_media_disconnect_connect(socket, ifname) + self.assertTrue('netlink socket is none' in str(context.exception)) + + def test_wait_invalid_ifname(self, m_read_netlink_socket, m_socket): + '''wait_for_media_disconnect_connect handle none interface name''' + ifname = None + with self.assertRaises(AssertionError) as context: + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertTrue('interface name is none' in str(context.exception)) + ifname = "" + with self.assertRaises(AssertionError) as context: + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertTrue('interface name cannot be empty' in + str(context.exception)) + + def test_wait_invalid_rta_attr(self, m_read_netlink_socket, m_socket): + ''' wait_for_media_disconnect_connect handles invalid rta data''' + ifname = "eth0" + data_invalid1 = self._media_switch_data(None, RTM_NEWLINK, OPER_DOWN) + data_invalid2 = self._media_switch_data(ifname, RTM_NEWLINK, None) + data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) + data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) + m_read_netlink_socket.side_effect = [ + data_invalid1, data_invalid2, data_op_down, data_op_up + ] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 4) + + def test_read_multiple_netlink_msgs(self, m_read_netlink_socket, m_socket): + '''Read multiple messages in single receive call''' + ifname = "eth0" + bytes = ifname.encode("utf-8") + data = bytearray(96) + struct.pack_into("=LHHLL", data, 0, 48, RTM_NEWLINK, 0, 0, 0) + struct.pack_into( + "HH4sHHc", data, RTATTR_START_OFFSET, 8, 3, + bytes, 5, 16, int_to_bytes(OPER_DOWN) + ) + struct.pack_into("=LHHLL", data, 48, 48, RTM_NEWLINK, 0, 0, 0) + struct.pack_into( + "HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8, + 3, bytes, 5, 16, int_to_bytes(OPER_UP) + ) + m_read_netlink_socket.return_value = data + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 1) + + def test_read_partial_netlink_msgs(self, m_read_netlink_socket, m_socket): + '''Read partial messages in receive call''' + ifname = "eth0" + bytes = ifname.encode("utf-8") + data1 = bytearray(112) + data2 = bytearray(32) + struct.pack_into("=LHHLL", data1, 0, 48, RTM_NEWLINK, 0, 0, 0) + struct.pack_into( + "HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3, + bytes, 5, 16, int_to_bytes(OPER_DOWN) + ) + struct.pack_into("=LHHLL", data1, 48, 48, RTM_NEWLINK, 0, 0, 0) + struct.pack_into( + "HH4sHHc", data1, 80, 8, 3, bytes, 5, 16, int_to_bytes(OPER_DOWN) + ) + struct.pack_into("=LHHLL", data1, 96, 48, RTM_NEWLINK, 0, 0, 0) + struct.pack_into( + "HH4sHHc", data2, 16, 8, 3, bytes, 5, 16, int_to_bytes(OPER_UP) + ) + m_read_netlink_socket.side_effect = [data1, data2] + wait_for_media_disconnect_connect(m_socket, ifname) + 
self.assertEqual(m_read_netlink_socket.call_count, 2) diff --git a/tests/unittests/sources/helpers/test_openstack.py b/tests/unittests/sources/helpers/test_openstack.py new file mode 100644 index 00000000..74743e7c --- /dev/null +++ b/tests/unittests/sources/helpers/test_openstack.py @@ -0,0 +1,49 @@ +# This file is part of cloud-init. See LICENSE file for license information. +# ./cloudinit/sources/helpers/tests/test_openstack.py +from unittest import mock + +from cloudinit.sources.helpers import openstack +from tests.unittests import helpers as test_helpers + + +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) +class TestConvertNetJson(test_helpers.CiTestCase): + + def test_phy_types(self): + """Verify the different known physical types are handled.""" + # network_data.json example from + # https://docs.openstack.org/nova/latest/user/metadata.html + mac0 = "fa:16:3e:9c:bf:3d" + net_json = { + "links": [ + {"ethernet_mac_address": mac0, "id": "tapcd9f6d46-4a", + "mtu": None, "type": "bridge", + "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc"} + ], + "networks": [ + {"id": "network0", "link": "tapcd9f6d46-4a", + "network_id": "99e88329-f20d-4741-9593-25bf07847b16", + "type": "ipv4_dhcp"} + ], + "services": [{"address": "8.8.8.8", "type": "dns"}] + } + macs = {mac0: 'eth0'} + + expected = { + 'version': 1, + 'config': [ + {'mac_address': 'fa:16:3e:9c:bf:3d', + 'mtu': None, 'name': 'eth0', + 'subnets': [{'type': 'dhcp4'}], + 'type': 'physical'}, + {'address': '8.8.8.8', 'type': 'nameserver'}]} + + for t in openstack.KNOWN_PHYSICAL_TYPES: + net_json["links"][0]["type"] = t + self.assertEqual( + expected, + openstack.convert_net_json(network_json=net_json, + known_macs=macs)) diff --git a/tests/unittests/sources/test_aliyun.py b/tests/unittests/sources/test_aliyun.py new file mode 100644 index 00000000..00209913 --- /dev/null +++ b/tests/unittests/sources/test_aliyun.py @@ -0,0 +1,248 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
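+# register_mock_metaserver (below) recursively registers one httpretty GET
+# handler per node of a nested dict, mimicking the EC2-style metadata tree
+# the AliYun datasource crawls. As a hypothetical illustration, the dict
+# {'meta-data': {'instance-id': 'i-123'}} rooted at http://md.invalid would
+# register roughly:
+#   GET http://md.invalid/meta-data/instance-id  -> 'i-123'
+#   GET http://md.invalid/meta-data/             -> 'instance-id\n'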
+ +import functools +import httpretty +import os +from unittest import mock + +from cloudinit import helpers +from cloudinit.sources import DataSourceAliYun as ay +from cloudinit.sources.DataSourceEc2 import convert_ec2_metadata_network_config +from tests.unittests import helpers as test_helpers + +DEFAULT_METADATA = { + 'instance-id': 'aliyun-test-vm-00', + 'eipv4': '10.0.0.1', + 'hostname': 'test-hostname', + 'image-id': 'm-test', + 'launch-index': '0', + 'mac': '00:16:3e:00:00:00', + 'network-type': 'vpc', + 'private-ipv4': '192.168.0.1', + 'serial-number': 'test-string', + 'vpc-cidr-block': '192.168.0.0/16', + 'vpc-id': 'test-vpc', + 'vswitch-id': 'test-vpc', + 'vswitch-cidr-block': '192.168.0.0/16', + 'zone-id': 'test-zone-1', + 'ntp-conf': {'ntp_servers': [ + 'ntp1.aliyun.com', + 'ntp2.aliyun.com', + 'ntp3.aliyun.com']}, + 'source-address': ['http://mirrors.aliyun.com', + 'http://mirrors.aliyuncs.com'], + 'public-keys': {'key-pair-1': {'openssh-key': 'ssh-rsa AAAAB3...'}, + 'key-pair-2': {'openssh-key': 'ssh-rsa AAAAB3...'}} +} + +DEFAULT_USERDATA = """\ +#cloud-config + +hostname: localhost""" + + +def register_mock_metaserver(base_url, data): + def register_helper(register, base_url, body): + if isinstance(body, str): + register(base_url, body) + elif isinstance(body, list): + register(base_url.rstrip('/'), '\n'.join(body) + '\n') + elif isinstance(body, dict): + if not body: + register(base_url.rstrip('/') + '/', 'not found', + status_code=404) + vals = [] + for k, v in body.items(): + if isinstance(v, (str, list)): + suffix = k.rstrip('/') + else: + suffix = k.rstrip('/') + '/' + vals.append(suffix) + url = base_url.rstrip('/') + '/' + suffix + register_helper(register, url, v) + register(base_url, '\n'.join(vals) + '\n') + + register = functools.partial(httpretty.register_uri, httpretty.GET) + register_helper(register, base_url, data) + + +class TestAliYunDatasource(test_helpers.HttprettyTestCase): + def setUp(self): + super(TestAliYunDatasource, self).setUp() + cfg = {'datasource': {'AliYun': {'timeout': '1', 'max_wait': '1'}}} + distro = {} + paths = helpers.Paths({'run_dir': self.tmp_dir()}) + self.ds = ay.DataSourceAliYun(cfg, distro, paths) + self.metadata_address = self.ds.metadata_urls[0] + + @property + def default_metadata(self): + return DEFAULT_METADATA + + @property + def default_userdata(self): + return DEFAULT_USERDATA + + @property + def metadata_url(self): + return os.path.join( + self.metadata_address, + self.ds.min_metadata_version, 'meta-data') + '/' + + @property + def userdata_url(self): + return os.path.join( + self.metadata_address, + self.ds.min_metadata_version, 'user-data') + + # EC2 provides an instance-identity document which must return 404 here + # for this test to pass. 
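+    # register_mock_metaserver maps an empty dict to a 404 ('not found')
+    # registration, so returning {} below makes the EC2 crawler treat the
+    # instance-identity document as absent rather than parsing bogus data.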
+ @property + def default_identity(self): + return {} + + @property + def identity_url(self): + return os.path.join(self.metadata_address, + self.ds.min_metadata_version, + 'dynamic', 'instance-identity') + + def regist_default_server(self): + register_mock_metaserver(self.metadata_url, self.default_metadata) + register_mock_metaserver(self.userdata_url, self.default_userdata) + register_mock_metaserver(self.identity_url, self.default_identity) + + def _test_get_data(self): + self.assertEqual(self.ds.metadata, self.default_metadata) + self.assertEqual(self.ds.userdata_raw, + self.default_userdata.encode('utf8')) + + def _test_get_sshkey(self): + pub_keys = [v['openssh-key'] for (_, v) in + self.default_metadata['public-keys'].items()] + self.assertEqual(self.ds.get_public_ssh_keys(), pub_keys) + + def _test_get_iid(self): + self.assertEqual(self.default_metadata['instance-id'], + self.ds.get_instance_id()) + + def _test_host_name(self): + self.assertEqual(self.default_metadata['hostname'], + self.ds.get_hostname()) + + @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") + def test_with_mock_server(self, m_is_aliyun): + m_is_aliyun.return_value = True + self.regist_default_server() + ret = self.ds.get_data() + self.assertEqual(True, ret) + self.assertEqual(1, m_is_aliyun.call_count) + self._test_get_data() + self._test_get_sshkey() + self._test_get_iid() + self._test_host_name() + self.assertEqual('aliyun', self.ds.cloud_name) + self.assertEqual('ec2', self.ds.platform) + self.assertEqual( + 'metadata (http://100.100.100.200)', self.ds.subplatform) + + @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") + def test_returns_false_when_not_on_aliyun(self, m_is_aliyun): + """If is_aliyun returns false, then get_data should return False.""" + m_is_aliyun.return_value = False + self.regist_default_server() + ret = self.ds.get_data() + self.assertEqual(1, m_is_aliyun.call_count) + self.assertEqual(False, ret) + + def test_parse_public_keys(self): + public_keys = {} + self.assertEqual(ay.parse_public_keys(public_keys), []) + + public_keys = {'key-pair-0': 'ssh-key-0'} + self.assertEqual(ay.parse_public_keys(public_keys), + [public_keys['key-pair-0']]) + + public_keys = {'key-pair-0': 'ssh-key-0', 'key-pair-1': 'ssh-key-1'} + self.assertEqual(set(ay.parse_public_keys(public_keys)), + set([public_keys['key-pair-0'], + public_keys['key-pair-1']])) + + public_keys = {'key-pair-0': ['ssh-key-0', 'ssh-key-1']} + self.assertEqual(ay.parse_public_keys(public_keys), + public_keys['key-pair-0']) + + public_keys = {'key-pair-0': {'openssh-key': []}} + self.assertEqual(ay.parse_public_keys(public_keys), []) + + public_keys = {'key-pair-0': {'openssh-key': 'ssh-key-0'}} + self.assertEqual(ay.parse_public_keys(public_keys), + [public_keys['key-pair-0']['openssh-key']]) + + public_keys = {'key-pair-0': {'openssh-key': ['ssh-key-0', + 'ssh-key-1']}} + self.assertEqual(ay.parse_public_keys(public_keys), + public_keys['key-pair-0']['openssh-key']) + + def test_route_metric_calculated_without_device_number(self): + """Test that route-metric code works without `device-number` + + `device-number` is part of EC2 metadata, but not supported on aliyun. + Attempting to access it will raise a KeyError. 
+ + LP: #1917875 + """ + netcfg = convert_ec2_metadata_network_config( + {"interfaces": {"macs": { + "06:17:04:d7:26:09": { + "interface-id": "eni-e44ef49e", + }, + "06:17:04:d7:26:08": { + "interface-id": "eni-e44ef49f", + } + }}}, + macs_to_nics={ + '06:17:04:d7:26:09': 'eth0', + '06:17:04:d7:26:08': 'eth1', + } + ) + + met0 = netcfg['ethernets']['eth0']['dhcp4-overrides']['route-metric'] + met1 = netcfg['ethernets']['eth1']['dhcp4-overrides']['route-metric'] + + # route-metric numbers should be 100 apart + assert 100 == abs(met0 - met1) + + +class TestIsAliYun(test_helpers.CiTestCase): + ALIYUN_PRODUCT = 'Alibaba Cloud ECS' + read_dmi_data_expected = [mock.call('system-product-name')] + + @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data") + def test_true_on_aliyun_product(self, m_read_dmi_data): + """Should return true if the dmi product data has expected value.""" + m_read_dmi_data.return_value = self.ALIYUN_PRODUCT + ret = ay._is_aliyun() + self.assertEqual(self.read_dmi_data_expected, + m_read_dmi_data.call_args_list) + self.assertEqual(True, ret) + + @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data") + def test_false_on_empty_string(self, m_read_dmi_data): + """Should return false on empty value returned.""" + m_read_dmi_data.return_value = "" + ret = ay._is_aliyun() + self.assertEqual(self.read_dmi_data_expected, + m_read_dmi_data.call_args_list) + self.assertEqual(False, ret) + + @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data") + def test_false_on_unknown_string(self, m_read_dmi_data): + """Should return false on an unrelated string.""" + m_read_dmi_data.return_value = "cubs win" + ret = ay._is_aliyun() + self.assertEqual(self.read_dmi_data_expected, + m_read_dmi_data.call_args_list) + self.assertEqual(False, ret) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_altcloud.py b/tests/unittests/sources/test_altcloud.py new file mode 100644 index 00000000..7384c104 --- /dev/null +++ b/tests/unittests/sources/test_altcloud.py @@ -0,0 +1,450 @@ +# Copyright (C) 2009-2010 Canonical Ltd. +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joe VLcek +# +# This file is part of cloud-init. See LICENSE file for license information. + +''' +This test file exercises the code in sources DataSourceAltCloud.py +''' + +import os +import shutil +import tempfile + +from cloudinit import dmi +from cloudinit import helpers +from cloudinit import subp +from cloudinit import util + +from tests.unittests.helpers import CiTestCase, mock + +import cloudinit.sources.DataSourceAltCloud as dsac + +OS_UNAME_ORIG = getattr(os, 'uname') + + +def _write_user_data_files(mount_dir, value): + ''' + Populate the deltacloud_user_data_file the user_data_file + which would be populated with user data. 
+ ''' + deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt' + user_data_file = mount_dir + '/user-data.txt' + + udfile = open(deltacloud_user_data_file, 'w') + udfile.write(value) + udfile.close() + os.chmod(deltacloud_user_data_file, 0o664) + + udfile = open(user_data_file, 'w') + udfile.write(value) + udfile.close() + os.chmod(user_data_file, 0o664) + + +def _remove_user_data_files(mount_dir, + dc_file=True, + non_dc_file=True): + ''' + Remove the test files: deltacloud_user_data_file and + user_data_file + ''' + deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt' + user_data_file = mount_dir + '/user-data.txt' + + # Ignore any failures removeing files that are already gone. + if dc_file: + try: + os.remove(deltacloud_user_data_file) + except OSError: + pass + + if non_dc_file: + try: + os.remove(user_data_file) + except OSError: + pass + + +def _dmi_data(expected): + ''' + Spoof the data received over DMI + ''' + def _data(key): + return expected + + return _data + + +class TestGetCloudType(CiTestCase): + '''Test to exercise method: DataSourceAltCloud.get_cloud_type()''' + + with_logs = True + + def setUp(self): + '''Set up.''' + super(TestGetCloudType, self).setUp() + self.tmp = self.tmp_dir() + self.paths = helpers.Paths({'cloud_dir': self.tmp}) + self.dmi_data = dmi.read_dmi_data + # We have a different code path for arm to deal with LP1243287 + # We have to switch arch to x86_64 to avoid test failure + force_arch('x86_64') + + def tearDown(self): + # Reset + dmi.read_dmi_data = self.dmi_data + force_arch() + + def test_cloud_info_file_ioerror(self): + """Return UNKNOWN when /etc/sysconfig/cloud-info exists but errors.""" + self.assertEqual('/etc/sysconfig/cloud-info', dsac.CLOUD_INFO_FILE) + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + # Attempting to read the directory generates IOError + with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.tmp): + self.assertEqual('UNKNOWN', dsrc.get_cloud_type()) + self.assertIn( + "[Errno 21] Is a directory: '%s'" % self.tmp, + self.logs.getvalue()) + + def test_cloud_info_file(self): + """Return uppercase stripped content from /etc/sysconfig/cloud-info.""" + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + cloud_info = self.tmp_path('cloud-info', dir=self.tmp) + util.write_file(cloud_info, ' OverRiDdeN CloudType ') + # Attempting to read the directory generates IOError + with mock.patch.object(dsac, 'CLOUD_INFO_FILE', cloud_info): + self.assertEqual('OVERRIDDEN CLOUDTYPE', dsrc.get_cloud_type()) + + def test_rhev(self): + ''' + Test method get_cloud_type() for RHEVm systems. + Forcing read_dmi_data return to match a RHEVm system: RHEV Hypervisor + ''' + dmi.read_dmi_data = _dmi_data('RHEV') + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + self.assertEqual('RHEV', dsrc.get_cloud_type()) + + def test_vsphere(self): + ''' + Test method get_cloud_type() for vSphere systems. + Forcing read_dmi_data return to match a vSphere system: RHEV Hypervisor + ''' + dmi.read_dmi_data = _dmi_data('VMware Virtual Platform') + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + self.assertEqual('VSPHERE', dsrc.get_cloud_type()) + + def test_unknown(self): + ''' + Test method get_cloud_type() for unknown systems. + Forcing read_dmi_data return to match an unrecognized return. 
+ ''' + dmi.read_dmi_data = _dmi_data('Unrecognized Platform') + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + self.assertEqual('UNKNOWN', dsrc.get_cloud_type()) + + +class TestGetDataCloudInfoFile(CiTestCase): + ''' + Test to exercise method: DataSourceAltCloud.get_data() + With a contrived CLOUD_INFO_FILE + ''' + def setUp(self): + '''Set up.''' + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) + self.cloud_info_file = self.tmp_path('cloud-info', dir=self.tmp) + + def test_rhev(self): + '''Success Test module get_data() forcing RHEV.''' + + util.write_file(self.cloud_info_file, 'RHEV') + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + dsrc.user_data_rhevm = lambda: True + with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): + self.assertEqual(True, dsrc.get_data()) + self.assertEqual('altcloud', dsrc.cloud_name) + self.assertEqual('altcloud', dsrc.platform_type) + self.assertEqual('rhev (/dev/fd0)', dsrc.subplatform) + + def test_vsphere(self): + '''Success Test module get_data() forcing VSPHERE.''' + + util.write_file(self.cloud_info_file, 'VSPHERE') + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + dsrc.user_data_vsphere = lambda: True + with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): + self.assertEqual(True, dsrc.get_data()) + self.assertEqual('altcloud', dsrc.cloud_name) + self.assertEqual('altcloud', dsrc.platform_type) + self.assertEqual('vsphere (unknown)', dsrc.subplatform) + + def test_fail_rhev(self): + '''Failure Test module get_data() forcing RHEV.''' + + util.write_file(self.cloud_info_file, 'RHEV') + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + dsrc.user_data_rhevm = lambda: False + with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): + self.assertEqual(False, dsrc.get_data()) + + def test_fail_vsphere(self): + '''Failure Test module get_data() forcing VSPHERE.''' + + util.write_file(self.cloud_info_file, 'VSPHERE') + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + dsrc.user_data_vsphere = lambda: False + with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): + self.assertEqual(False, dsrc.get_data()) + + def test_unrecognized(self): + '''Failure Test module get_data() forcing unrecognized.''' + + util.write_file(self.cloud_info_file, 'unrecognized') + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): + self.assertEqual(False, dsrc.get_data()) + + +class TestGetDataNoCloudInfoFile(CiTestCase): + ''' + Test to exercise method: DataSourceAltCloud.get_data() + Without a CLOUD_INFO_FILE + ''' + def setUp(self): + '''Set up.''' + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) + self.dmi_data = dmi.read_dmi_data + dsac.CLOUD_INFO_FILE = \ + 'no such file' + # We have a different code path for arm to deal with LP1243287 + # We have to switch arch to x86_64 to avoid test failure + force_arch('x86_64') + + def tearDown(self): + # Reset + dsac.CLOUD_INFO_FILE = \ + '/etc/sysconfig/cloud-info' + dmi.read_dmi_data = self.dmi_data + # Return back to original arch + force_arch() + + def test_rhev_no_cloud_file(self): + '''Test No cloud info file module get_data() forcing RHEV.''' + + dmi.read_dmi_data = _dmi_data('RHEV Hypervisor') + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + dsrc.user_data_rhevm = lambda: True + self.assertEqual(True, dsrc.get_data()) + + def 
test_vsphere_no_cloud_file(self): + '''Test No cloud info file module get_data() forcing VSPHERE.''' + + dmi.read_dmi_data = _dmi_data('VMware Virtual Platform') + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + dsrc.user_data_vsphere = lambda: True + self.assertEqual(True, dsrc.get_data()) + + def test_failure_no_cloud_file(self): + '''Test No cloud info file module get_data() forcing unrecognized.''' + + dmi.read_dmi_data = _dmi_data('Unrecognized Platform') + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + self.assertEqual(False, dsrc.get_data()) + + +class TestUserDataRhevm(CiTestCase): + ''' + Test to exercise method: DataSourceAltCloud.user_data_rhevm() + ''' + def setUp(self): + '''Set up.''' + self.paths = helpers.Paths({'cloud_dir': '/tmp'}) + self.mount_dir = self.tmp_dir() + _write_user_data_files(self.mount_dir, 'test user data') + self.add_patch( + 'cloudinit.sources.DataSourceAltCloud.modprobe_floppy', + 'm_modprobe_floppy', return_value=None) + self.add_patch( + 'cloudinit.sources.DataSourceAltCloud.util.udevadm_settle', + 'm_udevadm_settle', return_value=('', '')) + self.add_patch( + 'cloudinit.sources.DataSourceAltCloud.util.mount_cb', + 'm_mount_cb') + + def test_mount_cb_fails(self): + '''Test user_data_rhevm() where mount_cb fails.''' + + self.m_mount_cb.side_effect = util.MountFailedError("Failed Mount") + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + self.assertEqual(False, dsrc.user_data_rhevm()) + + def test_modprobe_fails(self): + '''Test user_data_rhevm() where modprobe fails.''' + + self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError( + "Failed modprobe") + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + self.assertEqual(False, dsrc.user_data_rhevm()) + + def test_no_modprobe_cmd(self): + '''Test user_data_rhevm() with no modprobe command.''' + + self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError( + "No such file or dir") + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + self.assertEqual(False, dsrc.user_data_rhevm()) + + def test_udevadm_fails(self): + '''Test user_data_rhevm() where udevadm fails.''' + + self.m_udevadm_settle.side_effect = subp.ProcessExecutionError( + "Failed settle.") + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + self.assertEqual(False, dsrc.user_data_rhevm()) + + def test_no_udevadm_cmd(self): + '''Test user_data_rhevm() with no udevadm command.''' + + self.m_udevadm_settle.side_effect = OSError("No such file or dir") + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + self.assertEqual(False, dsrc.user_data_rhevm()) + + +class TestUserDataVsphere(CiTestCase): + ''' + Test to exercise method: DataSourceAltCloud.user_data_vsphere() + ''' + def setUp(self): + '''Set up.''' + self.tmp = self.tmp_dir() + self.paths = helpers.Paths({'cloud_dir': self.tmp}) + self.mount_dir = tempfile.mkdtemp() + + _write_user_data_files(self.mount_dir, 'test user data') + + def tearDown(self): + # Reset + + _remove_user_data_files(self.mount_dir) + + # Attempt to remove the temp dir ignoring errors + try: + shutil.rmtree(self.mount_dir) + except OSError: + pass + + dsac.CLOUD_INFO_FILE = \ + '/etc/sysconfig/cloud-info' + + @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with") + @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb") + def test_user_data_vsphere_no_cdrom(self, m_mount_cb, m_find_devs_with): + '''Test user_data_vsphere() where mount_cb fails.''' + + m_mount_cb.return_value = [] + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + 
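+        # Note: m_find_devs_with is left as a bare MagicMock and iterating
+        # a MagicMock yields nothing by default, so no candidate devices
+        # are found and mount_cb is never reached; the m_mount_cb
+        # return_value set above appears to be unused on this path.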
self.assertEqual(False, dsrc.user_data_vsphere()) + self.assertEqual(0, m_mount_cb.call_count) + + @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with") + @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb") + def test_user_data_vsphere_mcb_fail(self, m_mount_cb, m_find_devs_with): + '''Test user_data_vsphere() where mount_cb fails.''' + + m_find_devs_with.return_value = ["/dev/mock/cdrom"] + m_mount_cb.side_effect = util.MountFailedError("Unable To mount") + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + self.assertEqual(False, dsrc.user_data_vsphere()) + self.assertEqual(1, m_find_devs_with.call_count) + self.assertEqual(1, m_mount_cb.call_count) + + @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with") + @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb") + def test_user_data_vsphere_success(self, m_mount_cb, m_find_devs_with): + """Test user_data_vsphere() where successful.""" + m_find_devs_with.return_value = ["/dev/mock/cdrom"] + m_mount_cb.return_value = 'raw userdata from cdrom' + dsrc = dsac.DataSourceAltCloud({}, None, self.paths) + cloud_info = self.tmp_path('cloud-info', dir=self.tmp) + util.write_file(cloud_info, 'VSPHERE') + self.assertEqual(True, dsrc.user_data_vsphere()) + m_find_devs_with.assert_called_once_with('LABEL=CDROM') + m_mount_cb.assert_called_once_with( + '/dev/mock/cdrom', dsac.read_user_data_callback) + with mock.patch.object(dsrc, 'get_cloud_type', return_value='VSPHERE'): + self.assertEqual('vsphere (/dev/mock/cdrom)', dsrc.subplatform) + + +class TestReadUserDataCallback(CiTestCase): + ''' + Test to exercise method: DataSourceAltCloud.read_user_data_callback() + ''' + def setUp(self): + '''Set up.''' + self.paths = helpers.Paths({'cloud_dir': '/tmp'}) + self.mount_dir = tempfile.mkdtemp() + + _write_user_data_files(self.mount_dir, 'test user data') + + def tearDown(self): + # Reset + + _remove_user_data_files(self.mount_dir) + + # Attempt to remove the temp dir ignoring errors + try: + shutil.rmtree(self.mount_dir) + except OSError: + pass + + def test_callback_both(self): + '''Test read_user_data_callback() with both files.''' + + self.assertEqual('test user data', + dsac.read_user_data_callback(self.mount_dir)) + + def test_callback_dc(self): + '''Test read_user_data_callback() with only DC file.''' + + _remove_user_data_files(self.mount_dir, + dc_file=False, + non_dc_file=True) + + self.assertEqual('test user data', + dsac.read_user_data_callback(self.mount_dir)) + + def test_callback_non_dc(self): + '''Test read_user_data_callback() with only non-DC file.''' + + _remove_user_data_files(self.mount_dir, + dc_file=True, + non_dc_file=False) + + self.assertEqual('test user data', + dsac.read_user_data_callback(self.mount_dir)) + + def test_callback_none(self): + '''Test read_user_data_callback() no files are found.''' + + _remove_user_data_files(self.mount_dir) + self.assertIsNone(dsac.read_user_data_callback(self.mount_dir)) + + +def force_arch(arch=None): + + def _os_uname(): + return ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', arch) + + if arch: + setattr(os, 'uname', _os_uname) + elif arch is None: + setattr(os, 'uname', OS_UNAME_ORIG) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py new file mode 100644 index 00000000..b221a0d7 --- /dev/null +++ b/tests/unittests/sources/test_azure.py @@ -0,0 +1,3394 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
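+# These tests stub out both Azure metadata channels: the IMDS HTTP endpoint
+# (get_metadata_from_imds) and the fabric/wireserver protocol
+# (get_metadata_from_fabric), so no real network access takes place.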
+ +from cloudinit import distros +from cloudinit import helpers +from cloudinit import url_helper +from cloudinit.sources import ( + UNSET, DataSourceAzure as dsaz, InvalidMetaDataException) +from cloudinit.util import (b64e, decode_binary, load_file, write_file, + MountFailedError, json_dumps, load_json) +from cloudinit.version import version_string as vs +from tests.unittests.helpers import ( + HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, + ExitStack, resourceLocation) +from cloudinit.sources.helpers import netlink + +import copy +import crypt +import httpretty +import json +import os +import requests +import stat +import xml.etree.ElementTree as ET +import yaml + + +def construct_valid_ovf_env(data=None, pubkeys=None, + userdata=None, platform_settings=None): + if data is None: + data = {'HostName': 'FOOHOST'} + if pubkeys is None: + pubkeys = {} + + content = """ + + + 1.0 + + LinuxProvisioningConfiguration + """ + for key, dval in data.items(): + if isinstance(dval, dict): + val = dict(dval).get('text') + attrs = ' ' + ' '.join(["%s='%s'" % (k, v) for k, v + in dict(dval).items() if k != 'text']) + else: + val = dval + attrs = "" + content += "<%s%s>%s\n" % (key, attrs, val, key) + + if userdata: + content += "%s\n" % (b64e(userdata)) + + if pubkeys: + content += "\n" + for fp, path, value in pubkeys: + content += " " + if fp and path: + content += ("%s%s" % + (fp, path)) + if value: + content += "%s" % value + content += "\n" + content += "" + content += """ + + + 1.0 + + kms.core.windows.net + false + """ + if platform_settings: + for k, v in platform_settings.items(): + content += "<%s>%s\n" % (k, v, k) + if "PreprovisionedVMType" not in platform_settings: + content += """""" + content += """ +""" + + return content + + +NETWORK_METADATA = { + "compute": { + "location": "eastus2", + "name": "my-hostname", + "offer": "UbuntuServer", + "osType": "Linux", + "placementGroupId": "", + "platformFaultDomain": "0", + "platformUpdateDomain": "0", + "publisher": "Canonical", + "resourceGroupName": "srugroup1", + "sku": "19.04-DAILY", + "subscriptionId": "12aad61c-6de4-4e53-a6c6-5aff52a83777", + "tags": "", + "version": "19.04.201906190", + "vmId": "ff702a6b-cb6a-4fcd-ad68-b4ce38227642", + "vmScaleSetName": "", + "vmSize": "Standard_DS1_v2", + "zone": "", + "publicKeys": [ + { + "keyData": "ssh-rsa key1", + "path": "path1" + } + ] + }, + "network": { + "interface": [ + { + "macAddress": "000D3A047598", + "ipv6": { + "ipAddress": [] + }, + "ipv4": { + "subnet": [ + { + "prefix": "24", + "address": "10.0.0.0" + } + ], + "ipAddress": [ + { + "privateIpAddress": "10.0.0.4", + "publicIpAddress": "104.46.124.81" + } + ] + } + } + ] + } +} + +SECONDARY_INTERFACE = { + "macAddress": "220D3A047598", + "ipv6": { + "ipAddress": [] + }, + "ipv4": { + "subnet": [ + { + "prefix": "24", + "address": "10.0.1.0" + } + ], + "ipAddress": [ + { + "privateIpAddress": "10.0.1.5", + } + ] + } +} + +SECONDARY_INTERFACE_NO_IP = { + "macAddress": "220D3A047598", + "ipv6": { + "ipAddress": [] + }, + "ipv4": { + "subnet": [ + { + "prefix": "24", + "address": "10.0.1.0" + } + ], + "ipAddress": [] + } +} + +IMDS_NETWORK_METADATA = { + "interface": [ + { + "macAddress": "000D3A047598", + "ipv6": { + "ipAddress": [] + }, + "ipv4": { + "subnet": [ + { + "prefix": "24", + "address": "10.0.0.0" + } + ], + "ipAddress": [ + { + "privateIpAddress": "10.0.0.4", + "publicIpAddress": "104.46.124.81" + } + ] + } + } + ] +} + +MOCKPATH = 'cloudinit.sources.DataSourceAzure.' 
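+# MOCKPATH follows the "patch where it is looked up, not where it is
+# defined" rule: names are patched as imported into DataSourceAzure, e.g.
+#   @mock.patch(MOCKPATH + 'net.is_up', autospec=True)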
+EXAMPLE_UUID = 'd0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8' + + +class TestParseNetworkConfig(CiTestCase): + + maxDiff = None + fallback_config = { + 'version': 1, + 'config': [{ + 'type': 'physical', 'name': 'eth0', + 'mac_address': '00:11:22:33:44:55', + 'params': {'driver': 'hv_netsvc'}, + 'subnets': [{'type': 'dhcp'}], + }] + } + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + def test_single_ipv4_nic_configuration(self, m_driver): + """parse_network_config emits dhcp on single nic with ipv4""" + expected = {'ethernets': { + 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, 'version': 2} + self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA)) + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + def test_increases_route_metric_for_non_primary_nics(self, m_driver): + """parse_network_config increases route-metric for each nic""" + expected = {'ethernets': { + 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}, + 'eth1': {'set-name': 'eth1', + 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 200}}, + 'eth2': {'set-name': 'eth2', + 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 300}}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['network']['interface'].append(SECONDARY_INTERFACE) + third_intf = copy.deepcopy(SECONDARY_INTERFACE) + third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') + third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' + third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' + imds_data['network']['interface'].append(third_intf) + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + def test_ipv4_and_ipv6_route_metrics_match_for_nics(self, m_driver): + """parse_network_config emits matching ipv4 and ipv6 route-metrics.""" + expected = {'ethernets': { + 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/128'], + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 100}, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}, + 'eth1': {'set-name': 'eth1', + 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp6': False, + 'dhcp4-overrides': {'route-metric': 200}}, + 'eth2': {'set-name': 'eth2', + 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 300}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 300}}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + nic1 = imds_data['network']['interface'][0] + nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + + nic1['ipv6'] = { + "subnet": [{"address": "2001:dead:beef::16"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}, + {"privateIpAddress": "2001:dead:beef::2"}] + } + imds_data['network']['interface'].append(SECONDARY_INTERFACE) + third_intf = copy.deepcopy(SECONDARY_INTERFACE) + third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') + third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' + 
third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' + third_intf['ipv6'] = { + "subnet": [{"prefix": "64", "address": "2001:dead:beef::2"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}] + } + imds_data['network']['interface'].append(third_intf) + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + def test_ipv4_secondary_ips_will_be_static_addrs(self, m_driver): + """parse_network_config emits primary ipv4 as dhcp others are static""" + expected = {'ethernets': { + 'eth0': {'addresses': ['10.0.0.5/24'], + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 100}, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + nic1 = imds_data['network']['interface'][0] + nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + + nic1['ipv6'] = { + "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}] + } + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + def test_ipv6_secondary_ips_will_be_static_cidrs(self, m_driver): + """parse_network_config emits primary ipv6 as dhcp others are static""" + expected = {'ethernets': { + 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/10'], + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 100}, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + nic1 = imds_data['network']['interface'][0] + nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + + # Secondary ipv6 addresses currently ignored/unconfigured + nic1['ipv6'] = { + "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}, + {"privateIpAddress": "2001:dead:beef::2"}] + } + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value='hv_netvsc') + def test_match_driver_for_netvsc(self, m_driver): + """parse_network_config emits driver when using netvsc.""" + expected = {'ethernets': { + 'eth0': { + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': { + 'macaddress': '00:0d:3a:04:75:98', + 'driver': 'hv_netvsc', + }, + 'set-name': 'eth0' + }}, 'version': 2} + self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA)) + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + @mock.patch('cloudinit.net.generate_fallback_config') + def test_parse_network_config_uses_fallback_cfg_when_no_network_metadata( + self, m_fallback_config, m_driver): + """parse_network_config generates fallback network config when the + IMDS instance metadata is corrupted/invalid, such as when + network metadata is not present. 
+ """ + imds_metadata_missing_network_metadata = copy.deepcopy( + NETWORK_METADATA) + del imds_metadata_missing_network_metadata['network'] + m_fallback_config.return_value = self.fallback_config + self.assertEqual( + self.fallback_config, + dsaz.parse_network_config( + imds_metadata_missing_network_metadata)) + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + @mock.patch('cloudinit.net.generate_fallback_config') + def test_parse_network_config_uses_fallback_cfg_when_no_interface_metadata( + self, m_fallback_config, m_driver): + """parse_network_config generates fallback network config when the + IMDS instance metadata is corrupted/invalid, such as when + network interface metadata is not present. + """ + imds_metadata_missing_interface_metadata = copy.deepcopy( + NETWORK_METADATA) + del imds_metadata_missing_interface_metadata['network']['interface'] + m_fallback_config.return_value = self.fallback_config + self.assertEqual( + self.fallback_config, + dsaz.parse_network_config( + imds_metadata_missing_interface_metadata)) + + +class TestGetMetadataFromIMDS(HttprettyTestCase): + + with_logs = True + + def setUp(self): + super(TestGetMetadataFromIMDS, self).setUp() + self.network_md_url = "{}/instance?api-version=2019-06-01".format( + dsaz.IMDS_URL + ) + + @mock.patch(MOCKPATH + 'readurl') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4', autospec=True) + @mock.patch(MOCKPATH + 'net.is_up', autospec=True) + def test_get_metadata_does_not_dhcp_if_network_is_up( + self, m_net_is_up, m_dhcp, m_readurl): + """Do not perform DHCP setup when nic is already up.""" + m_net_is_up.return_value = True + m_readurl.return_value = url_helper.StringResponse( + json.dumps(NETWORK_METADATA).encode('utf-8')) + self.assertEqual( + NETWORK_METADATA, + dsaz.get_metadata_from_imds('eth9', retries=3)) + + m_net_is_up.assert_called_with('eth9') + m_dhcp.assert_not_called() + self.assertIn( + "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time + self.logs.getvalue()) + + @mock.patch(MOCKPATH + 'readurl', autospec=True) + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + @mock.patch(MOCKPATH + 'net.is_up') + def test_get_metadata_uses_instance_url( + self, m_net_is_up, m_dhcp, m_readurl): + """Make sure readurl is called with the correct url when accessing + metadata""" + m_net_is_up.return_value = True + m_readurl.return_value = url_helper.StringResponse( + json.dumps(IMDS_NETWORK_METADATA).encode('utf-8')) + + dsaz.get_metadata_from_imds( + 'eth0', retries=3, md_type=dsaz.metadata_type.all) + m_readurl.assert_called_with( + "http://169.254.169.254/metadata/instance?api-version=" + "2019-06-01", exception_cb=mock.ANY, + headers=mock.ANY, retries=mock.ANY, + timeout=mock.ANY, infinite=False) + + @mock.patch(MOCKPATH + 'readurl', autospec=True) + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + @mock.patch(MOCKPATH + 'net.is_up') + def test_get_network_metadata_uses_network_url( + self, m_net_is_up, m_dhcp, m_readurl): + """Make sure readurl is called with the correct url when accessing + network metadata""" + m_net_is_up.return_value = True + m_readurl.return_value = url_helper.StringResponse( + json.dumps(IMDS_NETWORK_METADATA).encode('utf-8')) + + dsaz.get_metadata_from_imds( + 'eth0', retries=3, md_type=dsaz.metadata_type.network) + m_readurl.assert_called_with( + "http://169.254.169.254/metadata/instance/network?api-version=" + "2019-06-01", exception_cb=mock.ANY, + headers=mock.ANY, retries=mock.ANY, + timeout=mock.ANY, infinite=False) + + @mock.patch(MOCKPATH + 'readurl', 
autospec=True) + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + @mock.patch(MOCKPATH + 'net.is_up') + def test_get_default_metadata_uses_instance_url( + self, m_net_is_up, m_dhcp, m_readurl): + """Make sure readurl is called with the correct url when accessing + metadata""" + m_net_is_up.return_value = True + m_readurl.return_value = url_helper.StringResponse( + json.dumps(IMDS_NETWORK_METADATA).encode('utf-8')) + + dsaz.get_metadata_from_imds( + 'eth0', retries=3) + m_readurl.assert_called_with( + "http://169.254.169.254/metadata/instance?api-version=" + "2019-06-01", exception_cb=mock.ANY, + headers=mock.ANY, retries=mock.ANY, + timeout=mock.ANY, infinite=False) + + @mock.patch(MOCKPATH + 'readurl', autospec=True) + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + @mock.patch(MOCKPATH + 'net.is_up') + def test_get_metadata_uses_extended_url( + self, m_net_is_up, m_dhcp, m_readurl): + """Make sure readurl is called with the correct url when accessing + metadata""" + m_net_is_up.return_value = True + m_readurl.return_value = url_helper.StringResponse( + json.dumps(IMDS_NETWORK_METADATA).encode('utf-8')) + + dsaz.get_metadata_from_imds( + 'eth0', retries=3, md_type=dsaz.metadata_type.all, + api_version="2021-08-01") + m_readurl.assert_called_with( + "http://169.254.169.254/metadata/instance?api-version=" + "2021-08-01&extended=true", exception_cb=mock.ANY, + headers=mock.ANY, retries=mock.ANY, + timeout=mock.ANY, infinite=False) + + @mock.patch(MOCKPATH + 'readurl', autospec=True) + @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting', autospec=True) + @mock.patch(MOCKPATH + 'net.is_up', autospec=True) + def test_get_metadata_performs_dhcp_when_network_is_down( + self, m_net_is_up, m_dhcp, m_readurl): + """Perform DHCP setup when nic is not up.""" + m_net_is_up.return_value = False + m_readurl.return_value = url_helper.StringResponse( + json.dumps(NETWORK_METADATA).encode('utf-8')) + + self.assertEqual( + NETWORK_METADATA, + dsaz.get_metadata_from_imds('eth9', retries=2)) + + m_net_is_up.assert_called_with('eth9') + m_dhcp.assert_called_with(mock.ANY, 'eth9') + self.assertIn( + "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time + self.logs.getvalue()) + + m_readurl.assert_called_with( + self.network_md_url, exception_cb=mock.ANY, + headers={'Metadata': 'true'}, retries=2, + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, infinite=False) + + @mock.patch('cloudinit.url_helper.time.sleep') + @mock.patch(MOCKPATH + 'net.is_up', autospec=True) + def test_get_metadata_from_imds_empty_when_no_imds_present( + self, m_net_is_up, m_sleep): + """Return empty dict when IMDS network metadata is absent.""" + httpretty.register_uri( + httpretty.GET, + dsaz.IMDS_URL + '/instance?api-version=2017-12-01', + body={}, status=404) + + m_net_is_up.return_value = True # skips dhcp + + self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=2)) + + m_net_is_up.assert_called_with('eth9') + self.assertEqual([mock.call(1), mock.call(1)], m_sleep.call_args_list) + self.assertIn( + "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time + self.logs.getvalue()) + + @mock.patch('requests.Session.request') + @mock.patch('cloudinit.url_helper.time.sleep') + @mock.patch(MOCKPATH + 'net.is_up', autospec=True) + def test_get_metadata_from_imds_retries_on_timeout( + self, m_net_is_up, m_sleep, m_request): + """Retry IMDS network metadata on timeout errors.""" + + self.attempt = 0 + m_request.side_effect = requests.Timeout('Fake Connection Timeout') + + def retry_callback(request, uri, headers): + 
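+            # httpretty invokes body callbacks as (request, uri, headers);
+            # with requests.Session.request mocked to raise Timeout above,
+            # this callback likely never fires and the retries are driven
+            # by the mocked exception alone.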
self.attempt += 1 + raise requests.Timeout('Fake connection timeout') + + httpretty.register_uri( + httpretty.GET, + dsaz.IMDS_URL + 'instance?api-version=2017-12-01', + body=retry_callback) + + m_net_is_up.return_value = True # skips dhcp + + self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=3)) + + m_net_is_up.assert_called_with('eth9') + self.assertEqual([mock.call(1)]*3, m_sleep.call_args_list) + self.assertIn( + "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time + self.logs.getvalue()) + + +class TestAzureDataSource(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestAzureDataSource, self).setUp() + self.tmp = self.tmp_dir() + + # patch cloud_dir, so our 'seed_dir' is guaranteed empty + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) + self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') + + self.patches = ExitStack() + self.addCleanup(self.patches.close) + + self.patches.enter_context(mock.patch.object( + dsaz, '_get_random_seed', return_value='wild')) + self.m_get_metadata_from_imds = self.patches.enter_context( + mock.patch.object( + dsaz, 'get_metadata_from_imds', + mock.MagicMock(return_value=NETWORK_METADATA))) + self.m_fallback_nic = self.patches.enter_context( + mock.patch('cloudinit.sources.net.find_fallback_nic', + return_value='eth9')) + self.m_remove_ubuntu_network_scripts = self.patches.enter_context( + mock.patch.object( + dsaz, 'maybe_remove_ubuntu_network_config_scripts', + mock.MagicMock())) + super(TestAzureDataSource, self).setUp() + + def apply_patches(self, patches): + for module, name, new in patches: + self.patches.enter_context(mock.patch.object(module, name, new)) + + def _get_mockds(self): + sysctl_out = "dev.storvsc.3.%pnpinfo: "\ + "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\ + "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n" + sysctl_out += "dev.storvsc.2.%pnpinfo: "\ + "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\ + "deviceid=f8b3781a-1e82-4818-a1c3-63d806ec15bb\n" + sysctl_out += "dev.storvsc.1.%pnpinfo: "\ + "classid=32412632-86cb-44a2-9b5c-50d1417354f5 "\ + "deviceid=00000000-0001-8899-0000-000000000000\n" + camctl_devbus = """ +scbus0 on ata0 bus 0 +scbus1 on ata1 bus 0 +scbus2 on blkvsc0 bus 0 +scbus3 on blkvsc1 bus 0 +scbus4 on storvsc2 bus 0 +scbus5 on storvsc3 bus 0 +scbus-1 on xpt0 bus 0 + """ + camctl_dev = """ + at scbus1 target 0 lun 0 (cd0,pass0) + at scbus2 target 0 lun 0 (da0,pass1) + at scbus3 target 1 lun 0 (da1,pass2) + """ + self.apply_patches([ + (dsaz, 'get_dev_storvsc_sysctl', mock.MagicMock( + return_value=sysctl_out)), + (dsaz, 'get_camcontrol_dev_bus', mock.MagicMock( + return_value=camctl_devbus)), + (dsaz, 'get_camcontrol_dev', mock.MagicMock( + return_value=camctl_dev)) + ]) + return dsaz + + def _get_ds(self, data, distro='ubuntu', + apply_network=None, instance_id=None): + + def _wait_for_files(flist, _maxwait=None, _naplen=None): + data['waited'] = flist + return [] + + def _load_possible_azure_ds(seed_dir, cache_dir): + yield seed_dir + yield dsaz.DEFAULT_PROVISIONING_ISO_DEV + yield from data.get('dsdevs', []) + if cache_dir: + yield cache_dir + + seed_dir = os.path.join(self.paths.seed_dir, "azure") + if data.get('ovfcontent') is not None: + populate_dir(seed_dir, + {'ovf-env.xml': data['ovfcontent']}) + + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + + self.m_is_platform_viable = mock.MagicMock(autospec=True) + self.m_get_metadata_from_fabric = mock.MagicMock( + return_value={'public-keys': []}) + 
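+        # get_metadata_from_fabric normally talks to the Azure wireserver;
+        # stubbing it to return an empty public-keys list lets
+        # crawl_metadata complete without any network traffic.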
self.m_report_failure_to_fabric = mock.MagicMock(autospec=True) + self.m_ephemeral_dhcpv4 = mock.MagicMock() + self.m_ephemeral_dhcpv4_with_reporting = mock.MagicMock() + self.m_list_possible_azure_ds = mock.MagicMock( + side_effect=_load_possible_azure_ds) + + if instance_id: + self.instance_id = instance_id + else: + self.instance_id = EXAMPLE_UUID + + def _dmi_mocks(key): + if key == 'system-uuid': + return self.instance_id + elif key == 'chassis-asset-tag': + return '7783-7084-3265-9085-8269-3286-77' + + self.apply_patches([ + (dsaz, 'list_possible_azure_ds', + self.m_list_possible_azure_ds), + (dsaz, 'perform_hostname_bounce', mock.MagicMock()), + (dsaz, 'get_hostname', mock.MagicMock()), + (dsaz, 'set_hostname', mock.MagicMock()), + (dsaz, '_is_platform_viable', + self.m_is_platform_viable), + (dsaz, 'get_metadata_from_fabric', + self.m_get_metadata_from_fabric), + (dsaz, 'report_failure_to_fabric', + self.m_report_failure_to_fabric), + (dsaz, 'EphemeralDHCPv4', self.m_ephemeral_dhcpv4), + (dsaz, 'EphemeralDHCPv4WithReporting', + self.m_ephemeral_dhcpv4_with_reporting), + (dsaz, 'get_boot_telemetry', mock.MagicMock()), + (dsaz, 'get_system_info', mock.MagicMock()), + (dsaz.subp, 'which', lambda x: True), + (dsaz.dmi, 'read_dmi_data', mock.MagicMock( + side_effect=_dmi_mocks)), + (dsaz.util, 'wait_for_files', mock.MagicMock( + side_effect=_wait_for_files)), + ]) + + if isinstance(distro, str): + distro_cls = distros.fetch(distro) + distro = distro_cls(distro, data.get('sys_cfg', {}), self.paths) + dsrc = dsaz.DataSourceAzure( + data.get('sys_cfg', {}), distro=distro, paths=self.paths) + if apply_network is not None: + dsrc.ds_cfg['apply_network_config'] = apply_network + + return dsrc + + def _get_and_setup(self, dsrc): + ret = dsrc.get_data() + if ret: + dsrc.setup(True) + return ret + + def xml_equals(self, oxml, nxml): + """Compare two sets of XML to make sure they are equal""" + + def create_tag_index(xml): + et = ET.fromstring(xml) + ret = {} + for x in et.iter(): + ret[x.tag] = x + return ret + + def tags_exists(x, y): + for tag in x.keys(): + assert tag in y + for tag in y.keys(): + assert tag in x + + def tags_equal(x, y): + for x_val in x.values(): + y_val = y.get(x_val.tag) + assert x_val.text == y_val.text + + old_cnt = create_tag_index(oxml) + new_cnt = create_tag_index(nxml) + tags_exists(old_cnt, new_cnt) + tags_equal(old_cnt, new_cnt) + + def xml_notequals(self, oxml, nxml): + try: + self.xml_equals(oxml, nxml) + except AssertionError: + return + raise AssertionError("XML is the same") + + def test_get_resource_disk(self): + ds = self._get_mockds() + dev = ds.get_resource_disk_on_freebsd(1) + self.assertEqual("da1", dev) + + def test_not_is_platform_viable_seed_should_return_no_datasource(self): + """Check seed_dir using _is_platform_viable and return False.""" + # Return a non-matching asset tag value + data = {} + dsrc = self._get_ds(data) + self.m_is_platform_viable.return_value = False + with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ + mock.patch.object(dsrc, '_report_failure') as m_report_failure: + ret = dsrc.get_data() + self.m_is_platform_viable.assert_called_with(dsrc.seed_dir) + self.assertFalse(ret) + # Assert that for non viable platforms, + # there is no communication with the Azure datasource. 
+ self.assertEqual( + 0, + m_crawl_metadata.call_count) + self.assertEqual( + 0, + m_report_failure.call_count) + + def test_platform_viable_but_no_devs_should_return_no_datasource(self): + """For platforms where the Azure platform is viable + (which is indicated by the matching asset tag), + the absence of any devs at all (devs == candidate sources + for crawling Azure datasource) is NOT expected. + Report failure to Azure as this is an unexpected fatal error. + """ + data = {} + dsrc = self._get_ds(data) + with mock.patch.object(dsrc, '_report_failure') as m_report_failure: + self.m_is_platform_viable.return_value = True + ret = dsrc.get_data() + self.m_is_platform_viable.assert_called_with(dsrc.seed_dir) + self.assertFalse(ret) + self.assertEqual( + 1, + m_report_failure.call_count) + + def test_crawl_metadata_exception_returns_no_datasource(self): + data = {} + dsrc = self._get_ds(data) + self.m_is_platform_viable.return_value = True + with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: + m_crawl_metadata.side_effect = Exception + ret = dsrc.get_data() + self.m_is_platform_viable.assert_called_with(dsrc.seed_dir) + self.assertEqual( + 1, + m_crawl_metadata.call_count) + self.assertFalse(ret) + + def test_crawl_metadata_exception_should_report_failure_with_msg(self): + data = {} + dsrc = self._get_ds(data) + self.m_is_platform_viable.return_value = True + with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ + mock.patch.object(dsrc, '_report_failure') as m_report_failure: + m_crawl_metadata.side_effect = Exception + dsrc.get_data() + self.assertEqual( + 1, + m_crawl_metadata.call_count) + m_report_failure.assert_called_once_with( + description=dsaz.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE) + + def test_crawl_metadata_exc_should_log_could_not_crawl_msg(self): + data = {} + dsrc = self._get_ds(data) + self.m_is_platform_viable.return_value = True + with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: + m_crawl_metadata.side_effect = Exception + dsrc.get_data() + self.assertEqual( + 1, + m_crawl_metadata.call_count) + self.assertIn( + "Could not crawl Azure metadata", + self.logs.getvalue()) + + def test_basic_seed_dir(self): + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': {}} + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(dsrc.userdata_raw, "") + self.assertEqual(dsrc.metadata['local-hostname'], odata['HostName']) + self.assertTrue(os.path.isfile( + os.path.join(self.waagent_d, 'ovf-env.xml'))) + self.assertEqual('azure', dsrc.cloud_name) + self.assertEqual('azure', dsrc.platform_type) + self.assertEqual( + 'seed-dir (%s/seed/azure)' % self.tmp, dsrc.subplatform) + + def test_basic_dev_file(self): + """When a device path is used, present that in subplatform.""" + data = {'sys_cfg': {}, 'dsdevs': ['/dev/cd0']} + dsrc = self._get_ds(data) + # DSAzure will attempt to mount /dev/sr0 first, which should + # fail with mount error since the list of devices doesn't have + # /dev/sr0 + with mock.patch(MOCKPATH + 'util.mount_cb') as m_mount_cb: + m_mount_cb.side_effect = [ + MountFailedError("fail"), + ({'local-hostname': 'me'}, 'ud', {'cfg': ''}, {}) + ] + self.assertTrue(dsrc.get_data()) + self.assertEqual(dsrc.userdata_raw, 'ud') + self.assertEqual(dsrc.metadata['local-hostname'], 'me') + self.assertEqual('azure', dsrc.cloud_name) + self.assertEqual('azure', dsrc.platform_type) + self.assertEqual('config-disk 
(/dev/cd0)', dsrc.subplatform)
+
+    def test_get_data_non_ubuntu_will_not_remove_network_scripts(self):
+        """get_data on non-Ubuntu will not remove ubuntu net scripts."""
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': {}}
+
+        dsrc = self._get_ds(data, distro='debian')
+        dsrc.get_data()
+        self.m_remove_ubuntu_network_scripts.assert_not_called()
+
+    def test_get_data_on_ubuntu_will_remove_network_scripts(self):
+        """get_data will remove ubuntu net scripts on Ubuntu distro."""
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': sys_cfg}
+
+        dsrc = self._get_ds(data, distro='ubuntu')
+        dsrc.get_data()
+        self.m_remove_ubuntu_network_scripts.assert_called_once_with()
+
+    def test_get_data_on_ubuntu_will_not_remove_network_scripts_disabled(self):
+        """When apply_network_config false, do not remove scripts on Ubuntu."""
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}}
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': sys_cfg}
+
+        dsrc = self._get_ds(data, distro='ubuntu')
+        dsrc.get_data()
+        self.m_remove_ubuntu_network_scripts.assert_not_called()
+
+    def test_crawl_metadata_returns_structured_data_and_caches_nothing(self):
+        """Return all structured metadata and cache no class attributes."""
+        yaml_cfg = ""
+        odata = {'HostName': "myhost", 'UserName': "myuser",
+                 'UserData': {'text': 'FOOBAR', 'encoding': 'plain'},
+                 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': {}}
+        dsrc = self._get_ds(data)
+        expected_cfg = {
+            'PreprovisionedVMType': None,
+            'PreprovisionedVm': False,
+            'datasource': {'Azure': {}},
+            'system_info': {'default_user': {'name': 'myuser'}}}
+        expected_metadata = {
+            'azure_data': {
+                'configurationsettype': 'LinuxProvisioningConfiguration'},
+            'imds': NETWORK_METADATA,
+            'instance-id': EXAMPLE_UUID,
+            'local-hostname': 'myhost',
+            'random_seed': 'wild'}
+
+        crawled_metadata = dsrc.crawl_metadata()
+
+        self.assertCountEqual(
+            crawled_metadata.keys(),
+            ['cfg', 'files', 'metadata', 'userdata_raw'])
+        self.assertEqual(crawled_metadata['cfg'], expected_cfg)
+        self.assertEqual(
+            list(crawled_metadata['files'].keys()), ['ovf-env.xml'])
+        self.assertIn(
+            b'myhost',
+            crawled_metadata['files']['ovf-env.xml'])
+        self.assertEqual(crawled_metadata['metadata'], expected_metadata)
+        self.assertEqual(crawled_metadata['userdata_raw'], 'FOOBAR')
+        self.assertEqual(dsrc.userdata_raw, None)
+        self.assertEqual(dsrc.metadata, {})
+        self.assertEqual(dsrc._metadata_imds, UNSET)
+        self.assertFalse(os.path.isfile(
+            os.path.join(self.waagent_d, 'ovf-env.xml')))
+
+    def test_crawl_metadata_raises_invalid_metadata_on_error(self):
+        """crawl_metadata raises an exception on invalid ovf-env.xml."""
+        data = {'ovfcontent': "BOGUS", 'sys_cfg': {}}
+        dsrc = self._get_ds(data)
+        error_msg = ('BrokenAzureDataSource: Invalid ovf-env.xml:'
+                     ' syntax error: line 1, column 0')
+        with self.assertRaises(InvalidMetaDataException) as cm:
+            dsrc.crawl_metadata()
+        self.assertEqual(str(cm.exception), error_msg)
+
+    def test_crawl_metadata_call_imds_once_no_reprovision(self):
+        """If not reprovisioning, IMDS metadata is fetched only once."""
+        ovfenv = construct_valid_ovf_env(
+            platform_settings={"PreprovisionedVm": "False"}
+        )
+
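+        # PreprovisionedVm is "False" here, so crawl_metadata() should not
+        # poll IMDS again; exactly one IMDS call is expected below.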
+        data = {
+            'ovfcontent': ovfenv,
+            'sys_cfg': {}
+        }
+        dsrc = self._get_ds(data)
+        dsrc.crawl_metadata()
+        self.assertEqual(1, self.m_get_metadata_from_imds.call_count)
+
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
+    @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
+    @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
+    def test_crawl_metadata_call_imds_twice_with_reprovision(
+            self, poll_imds_func, m_report_ready, m_write, m_dhcp
+    ):
+        """If reprovisioning, imds metadata will be fetched twice"""
+        ovfenv = construct_valid_ovf_env(
+            platform_settings={"PreprovisionedVm": "True"}
+        )
+
+        data = {
+            'ovfcontent': ovfenv,
+            'sys_cfg': {}
+        }
+        dsrc = self._get_ds(data)
+        poll_imds_func.return_value = ovfenv
+        dsrc.crawl_metadata()
+        self.assertEqual(2, self.m_get_metadata_from_imds.call_count)
+
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
+    @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
+    @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
+    def test_crawl_metadata_on_reprovision_reports_ready(
+            self, poll_imds_func, m_report_ready, m_write, m_dhcp
+    ):
+        """If reprovisioning, report ready at the end"""
+        ovfenv = construct_valid_ovf_env(
+            platform_settings={"PreprovisionedVm": "True"}
+        )
+
+        data = {
+            'ovfcontent': ovfenv,
+            'sys_cfg': {}
+        }
+        dsrc = self._get_ds(data)
+        poll_imds_func.return_value = ovfenv
+        dsrc.crawl_metadata()
+        self.assertEqual(1, m_report_ready.call_count)
+
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
+    @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
+    @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.DataSourceAzure.'
+        '_wait_for_all_nics_ready')
+    def test_crawl_metadata_waits_for_nic_on_savable_vms(
+            self, detect_nics, poll_imds_func, report_ready_func, m_write,
+            m_dhcp
+    ):
+        """If reprovisioning a Savable VM, wait for nics to be attached."""
+        ovfenv = construct_valid_ovf_env(
+            platform_settings={"PreprovisionedVMType": "Savable",
+                               "PreprovisionedVm": "True"}
+        )
+
+        data = {
+            'ovfcontent': ovfenv,
+            'sys_cfg': {}
+        }
+        dsrc = self._get_ds(data)
+        poll_imds_func.return_value = ovfenv
+        dsrc.crawl_metadata()
+        self.assertEqual(1, report_ready_func.call_count)
+        self.assertEqual(1, detect_nics.call_count)
+
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting')
+    @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file')
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready')
+    @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds')
+    @mock.patch(
+        'cloudinit.sources.DataSourceAzure.DataSourceAzure.'
+ '_wait_for_all_nics_ready') + @mock.patch('os.path.isfile') + def test_detect_nics_when_marker_present( + self, is_file, detect_nics, poll_imds_func, report_ready_func, m_write, + m_dhcp): + """If reprovisioning, wait for nic attach if marker present""" + + def is_file_ret(key): + return key == dsaz.REPROVISION_NIC_ATTACH_MARKER_FILE + + is_file.side_effect = is_file_ret + ovfenv = construct_valid_ovf_env() + + data = { + 'ovfcontent': ovfenv, + 'sys_cfg': {} + } + + dsrc = self._get_ds(data) + poll_imds_func.return_value = ovfenv + dsrc.crawl_metadata() + self.assertEqual(1, report_ready_func.call_count) + self.assertEqual(1, detect_nics.call_count) + + @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') + @mock.patch('cloudinit.sources.helpers.netlink.' + 'wait_for_media_disconnect_connect') + @mock.patch( + 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') + @mock.patch('cloudinit.sources.DataSourceAzure.readurl') + def test_crawl_metadata_on_reprovision_reports_ready_using_lease( + self, m_readurl, m_report_ready, + m_media_switch, m_write + ): + """If reprovisioning, report ready using the obtained lease""" + ovfenv = construct_valid_ovf_env( + platform_settings={"PreprovisionedVm": "True"} + ) + + data = { + 'ovfcontent': ovfenv, + 'sys_cfg': {} + } + dsrc = self._get_ds(data) + + with mock.patch.object(dsrc.distro.networking, 'is_up') \ + as m_dsrc_distro_networking_is_up: + + # For this mock, net should not be up, + # so that cached ephemeral won't be used. + # This is so that a NEW ephemeral dhcp lease will be discovered + # and used instead. + m_dsrc_distro_networking_is_up.return_value = False + + lease = { + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'unknown-245': '624c3620'} + self.m_ephemeral_dhcpv4_with_reporting.return_value \ + .__enter__.return_value = lease + m_media_switch.return_value = None + + reprovision_ovfenv = construct_valid_ovf_env() + m_readurl.return_value = url_helper.StringResponse( + reprovision_ovfenv.encode('utf-8')) + + dsrc.crawl_metadata() + self.assertEqual(2, m_report_ready.call_count) + m_report_ready.assert_called_with(lease=lease) + + def test_waagent_d_has_0700_perms(self): + # we expect /var/lib/waagent to be created 0700 + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertTrue(os.path.isdir(self.waagent_d)) + self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0o700) + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + def test_network_config_set_from_imds(self, m_driver): + """Datasource.network_config returns IMDS network data.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + expected_network_config = { + 'ethernets': { + 'eth0': {'set-name': 'eth0', + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}}}, + 'version': 2} + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual(expected_network_config, dsrc.network_config) + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + def test_network_config_set_from_imds_route_metric_for_secondary_nic( + self, m_driver): + """Datasource.network_config adds route-metric to secondary nics.""" + sys_cfg = {'datasource': {'Azure': 
{'apply_network_config': True}}}
+        odata = {}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': sys_cfg}
+        expected_network_config = {
+            'ethernets': {
+                'eth0': {'set-name': 'eth0',
+                         'match': {'macaddress': '00:0d:3a:04:75:98'},
+                         'dhcp6': False,
+                         'dhcp4': True,
+                         'dhcp4-overrides': {'route-metric': 100}},
+                'eth1': {'set-name': 'eth1',
+                         'match': {'macaddress': '22:0d:3a:04:75:98'},
+                         'dhcp6': False,
+                         'dhcp4': True,
+                         'dhcp4-overrides': {'route-metric': 200}},
+                'eth2': {'set-name': 'eth2',
+                         'match': {'macaddress': '33:0d:3a:04:75:98'},
+                         'dhcp6': False,
+                         'dhcp4': True,
+                         'dhcp4-overrides': {'route-metric': 300}}},
+            'version': 2}
+        imds_data = copy.deepcopy(NETWORK_METADATA)
+        imds_data['network']['interface'].append(SECONDARY_INTERFACE)
+        third_intf = copy.deepcopy(SECONDARY_INTERFACE)
+        third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33')
+        third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0'
+        third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6'
+        imds_data['network']['interface'].append(third_intf)
+
+        self.m_get_metadata_from_imds.return_value = imds_data
+        dsrc = self._get_ds(data)
+        dsrc.get_data()
+        self.assertEqual(expected_network_config, dsrc.network_config)
+
+    @mock.patch('cloudinit.sources.DataSourceAzure.device_driver',
+                return_value=None)
+    def test_network_config_set_from_imds_for_secondary_nic_no_ip(
+            self, m_driver):
+        """If an IP address is empty then there should be no config for it."""
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+        odata = {}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': sys_cfg}
+        expected_network_config = {
+            'ethernets': {
+                'eth0': {'set-name': 'eth0',
+                         'match': {'macaddress': '00:0d:3a:04:75:98'},
+                         'dhcp6': False,
+                         'dhcp4': True,
+                         'dhcp4-overrides': {'route-metric': 100}}},
+            'version': 2}
+        imds_data = copy.deepcopy(NETWORK_METADATA)
+        imds_data['network']['interface'].append(SECONDARY_INTERFACE_NO_IP)
+        self.m_get_metadata_from_imds.return_value = imds_data
+        dsrc = self._get_ds(data)
+        dsrc.get_data()
+        self.assertEqual(expected_network_config, dsrc.network_config)
+
+    def test_availability_zone_set_from_imds(self):
+        """Datasource.availability_zone returns IMDS platformFaultDomain."""
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+        odata = {}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': sys_cfg}
+        dsrc = self._get_ds(data)
+        dsrc.get_data()
+        self.assertEqual('0', dsrc.availability_zone)
+
+    def test_region_set_from_imds(self):
+        """Datasource.region returns IMDS region location."""
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+        odata = {}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': sys_cfg}
+        dsrc = self._get_ds(data)
+        dsrc.get_data()
+        self.assertEqual('eastus2', dsrc.region)
+
+    def test_sys_cfg_set_never_destroy_ntfs(self):
+        sys_cfg = {'datasource': {'Azure': {
+            'never_destroy_ntfs': 'user-supplied-value'}}}
+        data = {'ovfcontent': construct_valid_ovf_env(data={}),
+                'sys_cfg': sys_cfg}
+
+        dsrc = self._get_ds(data)
+        ret = self._get_and_setup(dsrc)
+        self.assertTrue(ret)
+        self.assertEqual(dsrc.ds_cfg.get(dsaz.DS_CFG_KEY_PRESERVE_NTFS),
+                         'user-supplied-value')
+
+    def test_username_used(self):
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+        dsrc = self._get_ds(data)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
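+        # The OVF UserName should surface as the default user in the
+        # generated system_info configuration.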
+        self.assertEqual(dsrc.cfg['system_info']['default_user']['name'],
+                         "myuser")
+
+    def test_password_given(self):
+        odata = {'HostName': "myhost", 'UserName': "myuser",
+                 'UserPassword': "mypass"}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+        dsrc = self._get_ds(data)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertIn('default_user', dsrc.cfg['system_info'])
+        defuser = dsrc.cfg['system_info']['default_user']
+
+        # the default user should be the updated username and not locked.
+        self.assertEqual(defuser['name'], odata['UserName'])
+        self.assertFalse(defuser['lock_passwd'])
+        # passwd is a crypt-formatted string: $id$salt$encrypted.
+        # Encrypting the plaintext with the salt (everything up to the
+        # final '$') should reproduce the stored hash.
+        pos = defuser['passwd'].rfind("$") + 1
+        self.assertEqual(defuser['passwd'],
+                         crypt.crypt(odata['UserPassword'],
+                                     defuser['passwd'][0:pos]))
+
+        # the same hashed value should also be present in cfg['password']
+        self.assertEqual(defuser['passwd'], dsrc.cfg['password'])
+
+    def test_user_not_locked_if_password_redacted(self):
+        odata = {'HostName': "myhost", 'UserName': "myuser",
+                 'UserPassword': dsaz.DEF_PASSWD_REDACTION}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+        dsrc = self._get_ds(data)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertIn('default_user', dsrc.cfg['system_info'])
+        defuser = dsrc.cfg['system_info']['default_user']
+
+        # the default user should be the updated username and not locked.
+        self.assertEqual(defuser['name'], odata['UserName'])
+        self.assertIn('lock_passwd', defuser)
+        self.assertFalse(defuser['lock_passwd'])
+
+    def test_userdata_plain(self):
+        mydata = "FOOBAR"
+        odata = {'UserData': {'text': mydata, 'encoding': 'plain'}}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+        dsrc = self._get_ds(data)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(decode_binary(dsrc.userdata_raw), mydata)
+
+    def test_userdata_found(self):
+        mydata = "FOOBAR"
+        odata = {'UserData': {'text': b64e(mydata), 'encoding': 'base64'}}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+        dsrc = self._get_ds(data)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(dsrc.userdata_raw, mydata.encode('utf-8'))
+
+    def test_default_ephemeral_configs_ephemeral_exists(self):
+        # make sure the ephemeral configs are correct if disk present
+        odata = {}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': {}}
+
+        orig_exists = dsaz.os.path.exists
+
+        def changed_exists(path):
+            return True if path == dsaz.RESOURCE_DISK_PATH else orig_exists(
+                path)
+
+        with mock.patch(MOCKPATH + 'os.path.exists', new=changed_exists):
+            dsrc = self._get_ds(data)
+            ret = dsrc.get_data()
+            self.assertTrue(ret)
+            cfg = dsrc.get_config_obj()
+
+            self.assertEqual(dsrc.device_name_to_device("ephemeral0"),
+                             dsaz.RESOURCE_DISK_PATH)
+            assert 'disk_setup' in cfg
+            assert 'fs_setup' in cfg
+            self.assertIsInstance(cfg['disk_setup'], dict)
+            self.assertIsInstance(cfg['fs_setup'], list)
+
+    def test_default_ephemeral_configs_ephemeral_does_not_exist(self):
+        # make sure the ephemeral configs are correct if disk not present
+        odata = {}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+                'sys_cfg': {}}
+
+        orig_exists = dsaz.os.path.exists
+
+        def changed_exists(path):
+            return False if path == dsaz.RESOURCE_DISK_PATH else orig_exists(
+                path)
+
+        with mock.patch(MOCKPATH + 'os.path.exists', new=changed_exists):
+            dsrc = self._get_ds(data)
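+            # With RESOURCE_DISK_PATH reported absent, no ephemeral
+            # disk_setup or fs_setup entries should be generated.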
+ ret = dsrc.get_data() + self.assertTrue(ret) + cfg = dsrc.get_config_obj() + + assert 'disk_setup' not in cfg + assert 'fs_setup' not in cfg + + def test_provide_disk_aliases(self): + # Make sure that user can affect disk aliases + dscfg = {'disk_aliases': {'ephemeral0': '/dev/sdc'}} + odata = {'HostName': "myhost", 'UserName': "myuser", + 'dscfg': {'text': b64e(yaml.dump(dscfg)), + 'encoding': 'base64'}} + usercfg = {'disk_setup': {'/dev/sdc': {'something': '...'}, + 'ephemeral0': False}} + userdata = '#cloud-config' + yaml.dump(usercfg) + "\n" + + ovfcontent = construct_valid_ovf_env(data=odata, userdata=userdata) + data = {'ovfcontent': ovfcontent, 'sys_cfg': {}} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + cfg = dsrc.get_config_obj() + self.assertTrue(cfg) + + def test_userdata_arrives(self): + userdata = "This is my user-data" + xml = construct_valid_ovf_env(data={}, userdata=userdata) + data = {'ovfcontent': xml} + dsrc = self._get_ds(data) + dsrc.get_data() + + self.assertEqual(userdata.encode('us-ascii'), dsrc.userdata_raw) + + def test_password_redacted_in_ovf(self): + odata = {'HostName': "myhost", 'UserName': "myuser", + 'UserPassword': "mypass"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + dsrc = self._get_ds(data) + ret = dsrc.get_data() + + self.assertTrue(ret) + ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml') + + # The XML should not be same since the user password is redacted + on_disk_ovf = load_file(ovf_env_path) + self.xml_notequals(data['ovfcontent'], on_disk_ovf) + + # Make sure that the redacted password on disk is not used by CI + self.assertNotEqual(dsrc.cfg.get('password'), + dsaz.DEF_PASSWD_REDACTION) + + # Make sure that the password was really encrypted + et = ET.fromstring(on_disk_ovf) + for elem in et.iter(): + if 'UserPassword' in elem.tag: + self.assertEqual(dsaz.DEF_PASSWD_REDACTION, elem.text) + + def test_ovf_env_arrives_in_waagent_dir(self): + xml = construct_valid_ovf_env(data={}, userdata="FOODATA") + dsrc = self._get_ds({'ovfcontent': xml}) + dsrc.get_data() + + # 'data_dir' is '/var/lib/waagent' (walinux-agent's state dir) + # we expect that the ovf-env.xml file is copied there. 
+ ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml') + self.assertTrue(os.path.exists(ovf_env_path)) + self.xml_equals(xml, load_file(ovf_env_path)) + + def test_ovf_can_include_unicode(self): + xml = construct_valid_ovf_env(data={}) + xml = '\ufeff{0}'.format(xml) + dsrc = self._get_ds({'ovfcontent': xml}) + dsrc.get_data() + + def test_dsaz_report_ready_returns_true_when_report_succeeds( + self): + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + self.assertTrue(dsrc._report_ready(lease=mock.MagicMock())) + + def test_dsaz_report_ready_returns_false_and_does_not_propagate_exc( + self): + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + self.m_get_metadata_from_fabric.side_effect = Exception + self.assertFalse(dsrc._report_ready(lease=mock.MagicMock())) + + def test_dsaz_report_failure_returns_true_when_report_succeeds(self): + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + + with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: + # mock crawl metadata failure to cause report failure + m_crawl_metadata.side_effect = Exception + + self.assertTrue(dsrc._report_failure()) + self.assertEqual( + 1, + self.m_report_failure_to_fabric.call_count) + + def test_dsaz_report_failure_returns_false_and_does_not_propagate_exc( + self): + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + + with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ + mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \ + as m_ephemeral_dhcp_ctx, \ + mock.patch.object(dsrc.distro.networking, 'is_up') \ + as m_dsrc_distro_networking_is_up: + # mock crawl metadata failure to cause report failure + m_crawl_metadata.side_effect = Exception + + # setup mocks to allow using cached ephemeral dhcp lease + m_dsrc_distro_networking_is_up.return_value = True + test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245' + test_lease = {'unknown-245': test_lease_dhcp_option_245} + m_ephemeral_dhcp_ctx.lease = test_lease + + # We expect 3 calls to report_failure_to_fabric, + # because we try 3 different methods of calling report failure. + # The different methods are attempted in the following order: + # 1. Using cached ephemeral dhcp context to report failure to Azure + # 2. Using new ephemeral dhcp to report failure to Azure + # 3. 
Using fallback lease to report failure to Azure + self.m_report_failure_to_fabric.side_effect = Exception + self.assertFalse(dsrc._report_failure()) + self.assertEqual( + 3, + self.m_report_failure_to_fabric.call_count) + + def test_dsaz_report_failure_description_msg(self): + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + + with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: + # mock crawl metadata failure to cause report failure + m_crawl_metadata.side_effect = Exception + + test_msg = 'Test report failure description message' + self.assertTrue(dsrc._report_failure(description=test_msg)) + self.m_report_failure_to_fabric.assert_called_once_with( + dhcp_opts=mock.ANY, description=test_msg) + + def test_dsaz_report_failure_no_description_msg(self): + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + + with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: + m_crawl_metadata.side_effect = Exception + + self.assertTrue(dsrc._report_failure()) # no description msg + self.m_report_failure_to_fabric.assert_called_once_with( + dhcp_opts=mock.ANY, description=None) + + def test_dsaz_report_failure_uses_cached_ephemeral_dhcp_ctx_lease(self): + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + + with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ + mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \ + as m_ephemeral_dhcp_ctx, \ + mock.patch.object(dsrc.distro.networking, 'is_up') \ + as m_dsrc_distro_networking_is_up: + # mock crawl metadata failure to cause report failure + m_crawl_metadata.side_effect = Exception + + # setup mocks to allow using cached ephemeral dhcp lease + m_dsrc_distro_networking_is_up.return_value = True + test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245' + test_lease = {'unknown-245': test_lease_dhcp_option_245} + m_ephemeral_dhcp_ctx.lease = test_lease + + self.assertTrue(dsrc._report_failure()) + + # ensure called with cached ephemeral dhcp lease option 245 + self.m_report_failure_to_fabric.assert_called_once_with( + description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245) + + # ensure cached ephemeral is cleaned + self.assertEqual( + 1, + m_ephemeral_dhcp_ctx.clean_network.call_count) + + def test_dsaz_report_failure_no_net_uses_new_ephemeral_dhcp_lease(self): + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + + with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ + mock.patch.object(dsrc.distro.networking, 'is_up') \ + as m_dsrc_distro_networking_is_up: + # mock crawl metadata failure to cause report failure + m_crawl_metadata.side_effect = Exception + + # net is not up and cannot use cached ephemeral dhcp + m_dsrc_distro_networking_is_up.return_value = False + # setup ephemeral dhcp lease discovery mock + test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245' + test_lease = {'unknown-245': test_lease_dhcp_option_245} + self.m_ephemeral_dhcpv4_with_reporting.return_value \ + .__enter__.return_value = test_lease + + self.assertTrue(dsrc._report_failure()) + + # ensure called with the newly discovered + # ephemeral dhcp lease option 245 + self.m_report_failure_to_fabric.assert_called_once_with( + description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245) + + def test_dsaz_report_failure_no_net_and_no_dhcp_uses_fallback_lease( + self): + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + + with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ + mock.patch.object(dsrc.distro.networking, 'is_up') \ + as 
m_dsrc_distro_networking_is_up: + # mock crawl metadata failure to cause report failure + m_crawl_metadata.side_effect = Exception + + # net is not up and cannot use cached ephemeral dhcp + m_dsrc_distro_networking_is_up.return_value = False + # ephemeral dhcp discovery failure, + # so cannot use a new ephemeral dhcp + self.m_ephemeral_dhcpv4_with_reporting.return_value \ + .__enter__.side_effect = Exception + + self.assertTrue(dsrc._report_failure()) + + # ensure called with fallback lease + self.m_report_failure_to_fabric.assert_called_once_with( + description=mock.ANY, + fallback_lease_file=dsrc.dhclient_lease_file) + + def test_exception_fetching_fabric_data_doesnt_propagate(self): + """Errors communicating with fabric should warn, but return True.""" + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + self.m_get_metadata_from_fabric.side_effect = Exception + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + + def test_fabric_data_included_in_metadata(self): + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + self.m_get_metadata_from_fabric.return_value = {'test': 'value'} + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + self.assertEqual('value', dsrc.metadata['test']) + + def test_instance_id_case_insensitive(self): + """Return the previous iid when current is a case-insensitive match.""" + lower_iid = EXAMPLE_UUID.lower() + upper_iid = EXAMPLE_UUID.upper() + # lowercase current UUID + ds = self._get_ds( + {'ovfcontent': construct_valid_ovf_env()}, instance_id=lower_iid + ) + # UPPERCASE previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + upper_iid) + ds.get_data() + self.assertEqual(upper_iid, ds.metadata['instance-id']) + + # UPPERCASE current UUID + ds = self._get_ds( + {'ovfcontent': construct_valid_ovf_env()}, instance_id=upper_iid + ) + # lowercase previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + lower_iid) + ds.get_data() + self.assertEqual(lower_iid, ds.metadata['instance-id']) + + def test_instance_id_endianness(self): + """Return the previous iid when dmi uuid is the byteswapped iid.""" + ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + # byte-swapped previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') + ds.get_data() + self.assertEqual( + '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8', ds.metadata['instance-id']) + # not byte-swapped previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + '644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') + ds.get_data() + self.assertEqual(self.instance_id, ds.metadata['instance-id']) + + def test_instance_id_from_dmidecode_used(self): + ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + ds.get_data() + self.assertEqual(self.instance_id, ds.metadata['instance-id']) + + def test_instance_id_from_dmidecode_used_for_builtin(self): + ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + ds.get_data() + self.assertEqual(self.instance_id, ds.metadata['instance-id']) + + @mock.patch(MOCKPATH + 'util.is_FreeBSD') + @mock.patch(MOCKPATH + '_check_freebsd_cdrom') + def test_list_possible_azure_ds(self, m_check_fbsd_cdrom, + m_is_FreeBSD): + """On FreeBSD, possible devs should show /dev/cd0.""" + m_is_FreeBSD.return_value = True + m_check_fbsd_cdrom.return_value = True + possible_ds = [] + for src in dsaz.list_possible_azure_ds( + "seed_dir", "cache_dir"): + possible_ds.append(src) + self.assertEqual(possible_ds, 
["seed_dir", + dsaz.DEFAULT_PROVISIONING_ISO_DEV, + "/dev/cd0", + "cache_dir"]) + self.assertEqual( + [mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list) + + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + @mock.patch('cloudinit.net.generate_fallback_config') + def test_imds_network_config(self, mock_fallback, m_driver): + """Network config is generated from IMDS network data when present.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + + expected_cfg = { + 'ethernets': { + 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, + 'version': 2} + + self.assertEqual(expected_cfg, dsrc.network_config) + mock_fallback.assert_not_called() + + @mock.patch('cloudinit.net.get_interface_mac') + @mock.patch('cloudinit.net.get_devicelist') + @mock.patch('cloudinit.net.device_driver') + @mock.patch('cloudinit.net.generate_fallback_config') + def test_imds_network_ignored_when_apply_network_config_false( + self, mock_fallback, mock_dd, mock_devlist, mock_get_mac): + """When apply_network_config is False, use fallback instead of IMDS.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + fallback_config = { + 'version': 1, + 'config': [{ + 'type': 'physical', 'name': 'eth0', + 'mac_address': '00:11:22:33:44:55', + 'params': {'driver': 'hv_netsvc'}, + 'subnets': [{'type': 'dhcp'}], + }] + } + mock_fallback.return_value = fallback_config + + mock_devlist.return_value = ['eth0'] + mock_dd.return_value = ['hv_netsvc'] + mock_get_mac.return_value = '00:11:22:33:44:55' + + dsrc = self._get_ds(data) + self.assertTrue(dsrc.get_data()) + self.assertEqual(dsrc.network_config, fallback_config) + + @mock.patch('cloudinit.net.get_interface_mac') + @mock.patch('cloudinit.net.get_devicelist') + @mock.patch('cloudinit.net.device_driver') + @mock.patch('cloudinit.net.generate_fallback_config', autospec=True) + def test_fallback_network_config(self, mock_fallback, mock_dd, + mock_devlist, mock_get_mac): + """On absent IMDS network data, generate network fallback config.""" + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': {}} + + fallback_config = { + 'version': 1, + 'config': [{ + 'type': 'physical', 'name': 'eth0', + 'mac_address': '00:11:22:33:44:55', + 'params': {'driver': 'hv_netsvc'}, + 'subnets': [{'type': 'dhcp'}], + }] + } + mock_fallback.return_value = fallback_config + + mock_devlist.return_value = ['eth0'] + mock_dd.return_value = ['hv_netsvc'] + mock_get_mac.return_value = '00:11:22:33:44:55' + + dsrc = self._get_ds(data) + # Represent empty response from network imds + self.m_get_metadata_from_imds.return_value = {} + ret = dsrc.get_data() + self.assertTrue(ret) + + netconfig = dsrc.network_config + self.assertEqual(netconfig, fallback_config) + mock_fallback.assert_called_with( + blacklist_drivers=['mlx4_core', 'mlx5_core'], + config_driver=True) + + @mock.patch(MOCKPATH + 'net.get_interfaces', autospec=True) + def test_blacklist_through_distro( + self, m_net_get_interfaces): + """Verify 
Azure DS updates blacklist drivers in the distro's + networking object.""" + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': {}} + + distro_cls = distros.fetch('ubuntu') + distro = distro_cls('ubuntu', {}, self.paths) + dsrc = self._get_ds(data, distro=distro) + dsrc.get_data() + self.assertEqual(distro.networking.blacklist_drivers, + dsaz.BLACKLIST_DRIVERS) + + distro.networking.get_interfaces_by_mac() + m_net_get_interfaces.assert_called_with( + blacklist_drivers=dsaz.BLACKLIST_DRIVERS) + + @mock.patch(MOCKPATH + 'subp.subp', autospec=True) + def test_get_hostname_with_no_args(self, m_subp): + dsaz.get_hostname() + m_subp.assert_called_once_with(("hostname",), capture=True) + + @mock.patch(MOCKPATH + 'subp.subp', autospec=True) + def test_get_hostname_with_string_arg(self, m_subp): + dsaz.get_hostname(hostname_command="hostname") + m_subp.assert_called_once_with(("hostname",), capture=True) + + @mock.patch(MOCKPATH + 'subp.subp', autospec=True) + def test_get_hostname_with_iterable_arg(self, m_subp): + dsaz.get_hostname(hostname_command=("hostname",)) + m_subp.assert_called_once_with(("hostname",), capture=True) + + @mock.patch( + 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates') + def test_get_public_ssh_keys_with_imds(self, m_parse_certificates): + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + dsrc = self._get_ds(data) + dsrc.get_data() + dsrc.setup(True) + ssh_keys = dsrc.get_public_ssh_keys() + self.assertEqual(ssh_keys, ["ssh-rsa key1"]) + self.assertEqual(m_parse_certificates.call_count, 0) + + def test_key_without_crlf_valid(self): + test_key = 'ssh-rsa somerandomkeystuff some comment' + assert True is dsaz._key_is_openssh_formatted(test_key) + + def test_key_with_crlf_invalid(self): + test_key = 'ssh-rsa someran\r\ndomkeystuff some comment' + assert False is dsaz._key_is_openssh_formatted(test_key) + + def test_key_endswith_crlf_valid(self): + test_key = 'ssh-rsa somerandomkeystuff some comment\r\n' + assert True is dsaz._key_is_openssh_formatted(test_key) + + @mock.patch( + 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates') + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_get_public_ssh_keys_with_no_openssh_format( + self, + m_get_metadata_from_imds, + m_parse_certificates): + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['compute']['publicKeys'][0]['keyData'] = 'no-openssh-format' + m_get_metadata_from_imds.return_value = imds_data + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + dsrc = self._get_ds(data) + dsrc.get_data() + dsrc.setup(True) + ssh_keys = dsrc.get_public_ssh_keys() + self.assertEqual(ssh_keys, []) + self.assertEqual(m_parse_certificates.call_count, 0) + + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_get_public_ssh_keys_without_imds( + self, + m_get_metadata_from_imds): + m_get_metadata_from_imds.return_value = dict() + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + dsrc = self._get_ds(data) + 
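+        # IMDS returned no metadata above, so public keys should come from
+        # the fabric (wireserver) response instead.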
dsaz.get_metadata_from_fabric.return_value = {'public-keys': ['key2']} + dsrc.get_data() + dsrc.setup(True) + ssh_keys = dsrc.get_public_ssh_keys() + self.assertEqual(ssh_keys, ['key2']) + + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_imds_api_version_wanted_nonexistent( + self, + m_get_metadata_from_imds): + def get_metadata_from_imds_side_eff(*args, **kwargs): + if kwargs['api_version'] == dsaz.IMDS_VER_WANT: + raise url_helper.UrlError("No IMDS version", code=400) + return NETWORK_METADATA + m_get_metadata_from_imds.side_effect = get_metadata_from_imds_side_eff + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertIsNotNone(dsrc.metadata) + self.assertTrue(dsrc.failed_desired_api_version) + + @mock.patch( + MOCKPATH + 'get_metadata_from_imds', return_value=NETWORK_METADATA) + def test_imds_api_version_wanted_exists(self, m_get_metadata_from_imds): + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertIsNotNone(dsrc.metadata) + self.assertFalse(dsrc.failed_desired_api_version) + + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_hostname_from_imds(self, m_get_metadata_from_imds): + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA) + imds_data_with_os_profile["compute"]["osProfile"] = dict( + adminUsername="username1", + computerName="hostname1", + disablePasswordAuthentication="true" + ) + m_get_metadata_from_imds.return_value = imds_data_with_os_profile + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual(dsrc.metadata["local-hostname"], "hostname1") + + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_username_from_imds(self, m_get_metadata_from_imds): + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA) + imds_data_with_os_profile["compute"]["osProfile"] = dict( + adminUsername="username1", + computerName="hostname1", + disablePasswordAuthentication="true" + ) + m_get_metadata_from_imds.return_value = imds_data_with_os_profile + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual( + dsrc.cfg["system_info"]["default_user"]["name"], + "username1" + ) + + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_disable_password_from_imds(self, m_get_metadata_from_imds): + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA) + imds_data_with_os_profile["compute"]["osProfile"] = dict( + adminUsername="username1", + computerName="hostname1", + disablePasswordAuthentication="true" + ) + m_get_metadata_from_imds.return_value = 
imds_data_with_os_profile + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertTrue(dsrc.metadata["disable_password"]) + + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_userdata_from_imds(self, m_get_metadata_from_imds): + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + userdata = "userdataImds" + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data["compute"]["osProfile"] = dict( + adminUsername="username1", + computerName="hostname1", + disablePasswordAuthentication="true", + ) + imds_data["compute"]["userData"] = b64e(userdata) + m_get_metadata_from_imds.return_value = imds_data + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(dsrc.userdata_raw, userdata.encode('utf-8')) + + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_userdata_from_imds_with_customdata_from_OVF( + self, m_get_metadata_from_imds): + userdataOVF = "userdataOVF" + odata = { + 'HostName': "myhost", 'UserName': "myuser", + 'UserData': {'text': b64e(userdataOVF), 'encoding': 'base64'} + } + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + + userdataImds = "userdataImds" + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data["compute"]["osProfile"] = dict( + adminUsername="username1", + computerName="hostname1", + disablePasswordAuthentication="true", + ) + imds_data["compute"]["userData"] = b64e(userdataImds) + m_get_metadata_from_imds.return_value = imds_data + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(dsrc.userdata_raw, userdataOVF.encode('utf-8')) + + +class TestAzureBounce(CiTestCase): + + with_logs = True + + def mock_out_azure_moving_parts(self): + + def _load_possible_azure_ds(seed_dir, cache_dir): + yield seed_dir + yield dsaz.DEFAULT_PROVISIONING_ISO_DEV + if cache_dir: + yield cache_dir + + self.patches.enter_context( + mock.patch.object(dsaz.util, 'wait_for_files')) + self.patches.enter_context( + mock.patch.object( + dsaz, 'list_possible_azure_ds', + mock.MagicMock(side_effect=_load_possible_azure_ds))) + self.patches.enter_context( + mock.patch.object(dsaz, 'get_metadata_from_fabric', + mock.MagicMock(return_value={}))) + self.patches.enter_context( + mock.patch.object(dsaz, 'get_metadata_from_imds', + mock.MagicMock(return_value={}))) + self.patches.enter_context( + mock.patch.object(dsaz.subp, 'which', lambda x: True)) + self.patches.enter_context(mock.patch.object( + dsaz, '_get_random_seed', return_value='wild')) + + def _dmi_mocks(key): + if key == 'system-uuid': + return 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' + elif key == 'chassis-asset-tag': + return '7783-7084-3265-9085-8269-3286-77' + raise RuntimeError('should not get here') + + self.patches.enter_context( + mock.patch.object(dsaz.dmi, 'read_dmi_data', + mock.MagicMock(side_effect=_dmi_mocks))) + + def setUp(self): + super(TestAzureBounce, self).setUp() + self.tmp = self.tmp_dir() + self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + self.patches = ExitStack() + self.mock_out_azure_moving_parts() + self.get_hostname = self.patches.enter_context( + mock.patch.object(dsaz, 'get_hostname')) + 
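+        # Stub the hostname getter/setter so bounce tests can assert on
+        # their calls without touching the host running the tests.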
self.set_hostname = self.patches.enter_context( + mock.patch.object(dsaz, 'set_hostname')) + self.subp = self.patches.enter_context( + mock.patch(MOCKPATH + 'subp.subp')) + self.find_fallback_nic = self.patches.enter_context( + mock.patch('cloudinit.net.find_fallback_nic', return_value='eth9')) + + def tearDown(self): + self.patches.close() + super(TestAzureBounce, self).tearDown() + + def _get_ds(self, ovfcontent=None): + if ovfcontent is not None: + populate_dir(os.path.join(self.paths.seed_dir, "azure"), + {'ovf-env.xml': ovfcontent}) + dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) + return dsrc + + def _get_and_setup(self, dsrc): + ret = dsrc.get_data() + if ret: + dsrc.setup(True) + return ret + + def get_ovf_env_with_dscfg(self, hostname, cfg): + odata = { + 'HostName': hostname, + 'dscfg': { + 'text': b64e(yaml.dump(cfg)), + 'encoding': 'base64' + } + } + return construct_valid_ovf_env(data=odata) + + def test_disabled_bounce_does_not_change_hostname(self): + cfg = {'hostname_bounce': {'policy': 'off'}} + ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)) + ds.get_data() + self.assertEqual(0, self.set_hostname.call_count) + + @mock.patch(MOCKPATH + 'perform_hostname_bounce') + def test_disabled_bounce_does_not_perform_bounce( + self, perform_hostname_bounce): + cfg = {'hostname_bounce': {'policy': 'off'}} + ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)) + ds.get_data() + self.assertEqual(0, perform_hostname_bounce.call_count) + + def test_same_hostname_does_not_change_hostname(self): + host_name = 'unchanged-host-name' + self.get_hostname.return_value = host_name + cfg = {'hostname_bounce': {'policy': 'yes'}} + ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) + ds.get_data() + self.assertEqual(0, self.set_hostname.call_count) + + @mock.patch(MOCKPATH + 'perform_hostname_bounce') + def test_unchanged_hostname_does_not_perform_bounce( + self, perform_hostname_bounce): + host_name = 'unchanged-host-name' + self.get_hostname.return_value = host_name + cfg = {'hostname_bounce': {'policy': 'yes'}} + ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) + ds.get_data() + self.assertEqual(0, perform_hostname_bounce.call_count) + + @mock.patch(MOCKPATH + 'perform_hostname_bounce') + def test_force_performs_bounce_regardless(self, perform_hostname_bounce): + host_name = 'unchanged-host-name' + self.get_hostname.return_value = host_name + cfg = {'hostname_bounce': {'policy': 'force'}} + dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + self.assertEqual(1, perform_hostname_bounce.call_count) + + def test_bounce_skipped_on_ifupdown_absent(self): + host_name = 'unchanged-host-name' + self.get_hostname.return_value = host_name + cfg = {'hostname_bounce': {'policy': 'force'}} + dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) + patch_path = MOCKPATH + 'subp.which' + with mock.patch(patch_path) as m_which: + m_which.return_value = None + ret = self._get_and_setup(dsrc) + self.assertEqual([mock.call('ifup')], m_which.call_args_list) + self.assertTrue(ret) + self.assertIn( + "Skipping network bounce: ifupdown utils aren't present.", + self.logs.getvalue()) + + def test_different_hostnames_sets_hostname(self): + expected_hostname = 'azure-expected-host-name' + self.get_hostname.return_value = 'default-host-name' + dsrc = self._get_ds( + self.get_ovf_env_with_dscfg(expected_hostname, {})) + ret = self._get_and_setup(dsrc) + 
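+        # The first set_hostname call should carry the OVF-provided name.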
self.assertTrue(ret) + self.assertEqual(expected_hostname, + self.set_hostname.call_args_list[0][0][0]) + + @mock.patch(MOCKPATH + 'perform_hostname_bounce') + def test_different_hostnames_performs_bounce( + self, perform_hostname_bounce): + expected_hostname = 'azure-expected-host-name' + self.get_hostname.return_value = 'default-host-name' + dsrc = self._get_ds( + self.get_ovf_env_with_dscfg(expected_hostname, {})) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + self.assertEqual(1, perform_hostname_bounce.call_count) + + def test_different_hostnames_sets_hostname_back(self): + initial_host_name = 'default-host-name' + self.get_hostname.return_value = initial_host_name + dsrc = self._get_ds( + self.get_ovf_env_with_dscfg('some-host-name', {})) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + self.assertEqual(initial_host_name, + self.set_hostname.call_args_list[-1][0][0]) + + @mock.patch(MOCKPATH + 'perform_hostname_bounce') + def test_failure_in_bounce_still_resets_host_name( + self, perform_hostname_bounce): + perform_hostname_bounce.side_effect = Exception + initial_host_name = 'default-host-name' + self.get_hostname.return_value = initial_host_name + dsrc = self._get_ds( + self.get_ovf_env_with_dscfg('some-host-name', {})) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + self.assertEqual(initial_host_name, + self.set_hostname.call_args_list[-1][0][0]) + + @mock.patch.object(dsaz, 'get_boot_telemetry') + def test_environment_correct_for_bounce_command( + self, mock_get_boot_telemetry): + interface = 'int0' + hostname = 'my-new-host' + old_hostname = 'my-old-host' + self.get_hostname.return_value = old_hostname + cfg = {'hostname_bounce': {'interface': interface, 'policy': 'force'}} + data = self.get_ovf_env_with_dscfg(hostname, cfg) + dsrc = self._get_ds(data) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + self.assertEqual(1, self.subp.call_count) + bounce_env = self.subp.call_args[1]['env'] + self.assertEqual(interface, bounce_env['interface']) + self.assertEqual(hostname, bounce_env['hostname']) + self.assertEqual(old_hostname, bounce_env['old_hostname']) + + @mock.patch.object(dsaz, 'get_boot_telemetry') + def test_default_bounce_command_ifup_used_by_default( + self, mock_get_boot_telemetry): + cfg = {'hostname_bounce': {'policy': 'force'}} + data = self.get_ovf_env_with_dscfg('some-hostname', cfg) + dsrc = self._get_ds(data) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + self.assertEqual(1, self.subp.call_count) + bounce_args = self.subp.call_args[1]['args'] + self.assertEqual( + dsaz.BOUNCE_COMMAND_IFUP, bounce_args) + + @mock.patch(MOCKPATH + 'perform_hostname_bounce') + def test_set_hostname_option_can_disable_bounce( + self, perform_hostname_bounce): + cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}} + data = self.get_ovf_env_with_dscfg('some-hostname', cfg) + self._get_ds(data).get_data() + + self.assertEqual(0, perform_hostname_bounce.call_count) + + def test_set_hostname_option_can_disable_hostname_set(self): + cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}} + data = self.get_ovf_env_with_dscfg('some-hostname', cfg) + self._get_ds(data).get_data() + + self.assertEqual(0, self.set_hostname.call_count) + + @mock.patch(MOCKPATH + 'perform_hostname_bounce') + def test_set_hostname_failed_disable_bounce( + self, perform_hostname_bounce): + cfg = {'set_hostname': True, 'hostname_bounce': {'policy': 'force'}} + self.get_hostname.return_value = "old-hostname" + 
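+        # Simulate set_hostname failing: the bounce must then be skipped.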
+        self.set_hostname.side_effect = Exception
+        data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
+        self._get_ds(data).get_data()
+
+        self.assertEqual(0, perform_hostname_bounce.call_count)
+
+
+class TestLoadAzureDsDir(CiTestCase):
+    """Tests for load_azure_ds_dir."""
+
+    def setUp(self):
+        self.source_dir = self.tmp_dir()
+        super(TestLoadAzureDsDir, self).setUp()
+
+    def test_missing_ovf_env_xml_raises_non_azure_datasource_error(self):
+        """load_azure_ds_dir raises an error when ovf-env.xml doesn't exist."""
+        with self.assertRaises(dsaz.NonAzureDataSource) as context_manager:
+            dsaz.load_azure_ds_dir(self.source_dir)
+        self.assertEqual(
+            'No ovf-env file found',
+            str(context_manager.exception))
+
+    def test_wb_invalid_ovf_env_xml_calls_read_azure_ovf(self):
+        """load_azure_ds_dir calls read_azure_ovf to parse the xml."""
+        ovf_path = os.path.join(self.source_dir, 'ovf-env.xml')
+        with open(ovf_path, 'wb') as stream:
+            stream.write(b'invalid xml')
+        with self.assertRaises(dsaz.BrokenAzureDataSource) as context_manager:
+            dsaz.load_azure_ds_dir(self.source_dir)
+        self.assertEqual(
+            'Invalid ovf-env.xml: syntax error: line 1, column 0',
+            str(context_manager.exception))
+
+
+class TestReadAzureOvf(CiTestCase):
+
+    def test_invalid_xml_raises_non_azure_ds(self):
+        invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
+        self.assertRaises(dsaz.BrokenAzureDataSource,
+                          dsaz.read_azure_ovf, invalid_xml)
+
+    def test_load_with_pubkeys(self):
+        mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
+        pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
+        content = construct_valid_ovf_env(pubkeys=pubkeys)
+        (_md, _ud, cfg) = dsaz.read_azure_ovf(content)
+        for mypk in mypklist:
+            self.assertIn(mypk, cfg['_pubkeys'])
+
+
+class TestCanDevBeReformatted(CiTestCase):
+    warning_file = 'dataloss_warning_readme.txt'
+
+    def _domock(self, mockpath, sattr=None):
+        patcher = mock.patch(mockpath)
+        setattr(self, sattr, patcher.start())
+        self.addCleanup(patcher.stop)
+
+    def patchup(self, devs):
+        bypath = {}
+        for path, data in devs.items():
+            bypath[path] = data
+            if 'realpath' in data:
+                bypath[data['realpath']] = data
+            for ppath, pdata in data.get('partitions', {}).items():
+                bypath[ppath] = pdata
+                if 'realpath' in data:
+                    bypath[pdata['realpath']] = pdata
+
+        def realpath(d):
+            return bypath[d].get('realpath', d)
+
+        def partitions_on_device(devpath):
+            parts = bypath.get(devpath, {}).get('partitions', {})
+            ret = []
+            for path, data in parts.items():
+                ret.append((data.get('num'), realpath(path)))
+            # return sorted by partition number
+            return sorted(ret, key=lambda d: d[0])
+
+        def mount_cb(device, callback, mtype, update_env_for_mount):
+            self.assertEqual('ntfs', mtype)
+            self.assertEqual('C', update_env_for_mount.get('LANG'))
+            p = self.tmp_dir()
+            for f in bypath.get(device).get('files', []):
+                write_file(os.path.join(p, f), content=f)
+            return callback(p)
+
+        def has_ntfs_fs(device):
+            return bypath.get(device, {}).get('fs') == 'ntfs'
+
+        p = MOCKPATH
+        self._domock(p + "_partitions_on_device", 'm_partitions_on_device')
+        self._domock(p + "_has_ntfs_filesystem", 'm_has_ntfs_filesystem')
+        self._domock(p + "util.mount_cb", 'm_mount_cb')
+        self._domock(p + "os.path.realpath", 'm_realpath')
+        self._domock(p + "os.path.exists", 'm_exists')
+        self._domock(p + "util.SeLinuxGuard", 'm_selguard')
+
+        self.m_exists.side_effect = lambda p: p in bypath
+        self.m_realpath.side_effect = realpath
+        self.m_has_ntfs_filesystem.side_effect = has_ntfs_fs
+        self.m_mount_cb.side_effect = mount_cb
+        self.m_partitions_on_device.side_effect = partitions_on_device
+        self.m_selguard.__enter__ = mock.Mock(return_value=False)
+        self.m_selguard.__exit__ = mock.Mock()
+
+    def test_three_partitions_is_false(self):
+        """A disk with 3 partitions can not be formatted."""
+        self.patchup({
+            '/dev/sda': {
+                'partitions': {
+                    '/dev/sda1': {'num': 1},
+                    '/dev/sda2': {'num': 2},
+                    '/dev/sda3': {'num': 3},
+                }}})
+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+                                                 preserve_ntfs=False)
+        self.assertFalse(value)
+        self.assertIn("3 or more", msg.lower())
+
+    def test_no_partitions_is_false(self):
+        """A disk with no partitions can not be formatted."""
+        self.patchup({'/dev/sda': {}})
+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+                                                 preserve_ntfs=False)
+        self.assertFalse(value)
+        self.assertIn("not partitioned", msg.lower())
+
+    def test_two_partitions_not_ntfs_false(self):
+        """2 partitions and 2nd not ntfs can not be formatted."""
+        self.patchup({
+            '/dev/sda': {
+                'partitions': {
+                    '/dev/sda1': {'num': 1},
+                    '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []},
+                }}})
+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+                                                 preserve_ntfs=False)
+        self.assertFalse(value)
+        self.assertIn("not ntfs", msg.lower())
+
+    def test_two_partitions_ntfs_populated_false(self):
+        """2 partitions and populated ntfs fs on 2nd can not be formatted."""
+        self.patchup({
+            '/dev/sda': {
+                'partitions': {
+                    '/dev/sda1': {'num': 1},
+                    '/dev/sda2': {'num': 2, 'fs': 'ntfs',
+                                  'files': ['secret.txt']},
+                }}})
+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+                                                 preserve_ntfs=False)
+        self.assertFalse(value)
+        self.assertIn("files on it", msg.lower())
+
+    def test_two_partitions_ntfs_empty_is_true(self):
+        """2 partitions and empty ntfs fs on 2nd can be formatted."""
+        self.patchup({
+            '/dev/sda': {
+                'partitions': {
+                    '/dev/sda1': {'num': 1},
+                    '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': []},
+                }}})
+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+                                                 preserve_ntfs=False)
+        self.assertTrue(value)
+        self.assertIn("safe for", msg.lower())
+
+    def test_one_partition_not_ntfs_false(self):
+        """1 partition with fs other than ntfs can not be formatted."""
+        self.patchup({
+            '/dev/sda': {
+                'partitions': {
+                    '/dev/sda1': {'num': 1, 'fs': 'zfs'},
+                }}})
+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+                                                 preserve_ntfs=False)
+        self.assertFalse(value)
+        self.assertIn("not ntfs", msg.lower())
+
+    def test_one_partition_ntfs_populated_false(self):
+        """1 mountable ntfs partition with many files can not be formatted."""
+        self.patchup({
+            '/dev/sda': {
+                'partitions': {
+                    '/dev/sda1': {'num': 1, 'fs': 'ntfs',
+                                  'files': ['file1.txt', 'file2.exe']},
+                }}})
+        with mock.patch.object(dsaz.LOG, 'warning') as warning:
+            value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+                                                     preserve_ntfs=False)
+            wmsg = warning.call_args[0][0]
+            self.assertIn("looks like you're using NTFS on the ephemeral disk",
+                          wmsg)
+            self.assertFalse(value)
+            self.assertIn("files on it", msg.lower())
+
+    def test_one_partition_ntfs_empty_is_true(self):
+        """1 mountable ntfs partition and no files can be formatted."""
+        self.patchup({
+            '/dev/sda': {
+                'partitions': {
+                    '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []}
+                }}})
+        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
+                                                 preserve_ntfs=False)
+        self.assertTrue(value)
+        self.assertIn("safe for", msg.lower())
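+
+    # The cases above and below pin down the reformat policy: only a lone
+    # NTFS candidate partition with no user files may be reformatted, and
+    # the stock 'dataloss_warning_readme.txt' marker (self.warning_file)
+    # alone does not count as user data.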
formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'ntfs', + 'files': ['dataloss_warning_readme.txt']} + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=False) + self.assertTrue(value) + self.assertIn("safe for", msg.lower()) + + def test_one_partition_through_realpath_is_true(self): + """A symlink to a device with 1 ntfs partition can be formatted.""" + epath = '/dev/disk/cloud/azure_resource' + self.patchup({ + epath: { + 'realpath': '/dev/sdb', + 'partitions': { + epath + '-part1': { + 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], + 'realpath': '/dev/sdb1'} + }}}) + value, msg = dsaz.can_dev_be_reformatted(epath, + preserve_ntfs=False) + self.assertTrue(value) + self.assertIn("safe for", msg.lower()) + + def test_three_partition_through_realpath_is_false(self): + """A symlink to a device with 3 partitions can not be formatted.""" + epath = '/dev/disk/cloud/azure_resource' + self.patchup({ + epath: { + 'realpath': '/dev/sdb', + 'partitions': { + epath + '-part1': { + 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], + 'realpath': '/dev/sdb1'}, + epath + '-part2': {'num': 2, 'fs': 'ext3', + 'realpath': '/dev/sdb2'}, + epath + '-part3': {'num': 3, 'fs': 'ext', + 'realpath': '/dev/sdb3'} + }}}) + value, msg = dsaz.can_dev_be_reformatted(epath, + preserve_ntfs=False) + self.assertFalse(value) + self.assertIn("3 or more", msg.lower()) + + def test_ntfs_mount_errors_true(self): + """can_dev_be_reformatted does not fail if NTFS is unknown fstype.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []} + }}}) + + error_msgs = [ + "Stderr: mount: unknown filesystem type 'ntfs'", # RHEL + "Stderr: mount: /dev/sdb1: unknown filesystem type 'ntfs'" # SLES + ] + + for err_msg in error_msgs: + self.m_mount_cb.side_effect = MountFailedError( + "Failed mounting %s to %s due to: \nUnexpected.\n%s" % + ('/dev/sda', '/fake-tmp/dir', err_msg)) + + value, msg = dsaz.can_dev_be_reformatted('/dev/sda', + preserve_ntfs=False) + self.assertTrue(value) + self.assertIn('cannot mount NTFS, assuming', msg) + + def test_never_destroy_ntfs_config_false(self): + """Normally formattable situation with never_destroy_ntfs set.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'ntfs', + 'files': ['dataloss_warning_readme.txt']} + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=True) + self.assertFalse(value) + self.assertIn("config says to never destroy NTFS " + "(datasource.Azure.never_destroy_ntfs)", msg) + + +class TestClearCachedData(CiTestCase): + + def test_clear_cached_attrs_clears_imds(self): + """All class attributes are reset to defaults, including imds data.""" + tmp = self.tmp_dir() + paths = helpers.Paths( + {'cloud_dir': tmp, 'run_dir': tmp}) + dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=paths) + clean_values = [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds] + dsrc.metadata = 'md' + dsrc.userdata = 'ud' + dsrc._metadata_imds = 'imds' + dsrc._dirty_cache = True + dsrc.clear_cached_attrs() + self.assertEqual( + [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds], + clean_values) + + +class TestAzureNetExists(CiTestCase): + + def test_azure_net_must_exist_for_legacy_objpkl(self): + """DataSourceAzureNet must exist for old obj.pkl files + that reference it.""" + self.assertTrue(hasattr(dsaz, "DataSourceAzureNet")) + + +class TestPreprovisioningReadAzureOvfFlag(CiTestCase): + + def 
+
+    def test_read_azure_ovf_with_true_flag(self):
+        """The read_azure_ovf method should set the PreprovisionedVM
+        cfg flag if the proper setting is present."""
+        content = construct_valid_ovf_env(
+            platform_settings={"PreprovisionedVm": "True"})
+        ret = dsaz.read_azure_ovf(content)
+        cfg = ret[2]
+        self.assertTrue(cfg['PreprovisionedVm'])
+
+    def test_read_azure_ovf_with_false_flag(self):
+        """The read_azure_ovf method should set the PreprovisionedVM
+        cfg flag to false if the proper setting is false."""
+        content = construct_valid_ovf_env(
+            platform_settings={"PreprovisionedVm": "False"})
+        ret = dsaz.read_azure_ovf(content)
+        cfg = ret[2]
+        self.assertFalse(cfg['PreprovisionedVm'])
+
+    def test_read_azure_ovf_without_flag(self):
+        """The read_azure_ovf method should not set the
+        PreprovisionedVM cfg flag."""
+        content = construct_valid_ovf_env()
+        ret = dsaz.read_azure_ovf(content)
+        cfg = ret[2]
+        self.assertFalse(cfg['PreprovisionedVm'])
+        self.assertEqual(None, cfg["PreprovisionedVMType"])
+
+    def test_read_azure_ovf_with_running_type(self):
+        """The read_azure_ovf method should set PreprovisionedVMType
+        cfg flag to Running."""
+        content = construct_valid_ovf_env(
+            platform_settings={"PreprovisionedVMType": "Running",
+                               "PreprovisionedVm": "True"})
+        ret = dsaz.read_azure_ovf(content)
+        cfg = ret[2]
+        self.assertTrue(cfg['PreprovisionedVm'])
+        self.assertEqual("Running", cfg['PreprovisionedVMType'])
+
+    def test_read_azure_ovf_with_savable_type(self):
+        """The read_azure_ovf method should set PreprovisionedVMType
+        cfg flag to Savable."""
+        content = construct_valid_ovf_env(
+            platform_settings={"PreprovisionedVMType": "Savable",
+                               "PreprovisionedVm": "True"})
+        ret = dsaz.read_azure_ovf(content)
+        cfg = ret[2]
+        self.assertTrue(cfg['PreprovisionedVm'])
+        self.assertEqual("Savable", cfg['PreprovisionedVMType'])
+
+
+@mock.patch('os.path.isfile')
+class TestPreprovisioningShouldReprovision(CiTestCase):
+
+    def setUp(self):
+        super(TestPreprovisioningShouldReprovision, self).setUp()
+        tmp = self.tmp_dir()
+        self.waagent_d = self.tmp_path('/var/lib/waagent', tmp)
+        self.paths = helpers.Paths({'cloud_dir': tmp})
+        dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
+
+    @mock.patch(MOCKPATH + 'util.write_file')
+    def test__should_reprovision_with_true_cfg(self, isfile, write_f):
+        """The _should_reprovision method should return True when the
+        config flag is present."""
+        isfile.return_value = False
+        dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+        self.assertTrue(dsa._should_reprovision(
+            (None, None, {'PreprovisionedVm': True}, None)))
+
+    def test__should_reprovision_with_file_existing(self, isfile):
+        """The _should_reprovision method should return True if the sentinel
+        exists."""
+        isfile.return_value = True
+        dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+        self.assertTrue(dsa._should_reprovision(
+            (None, None, {'preprovisionedvm': False}, None)))
+
+    def test__should_reprovision_returns_false(self, isfile):
+        """The _should_reprovision method should return False
+        if neither the config flag nor the sentinel is present."""
+        isfile.return_value = False
+        dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+        self.assertFalse(dsa._should_reprovision((None, None, {}, None)))
+
+    @mock.patch(MOCKPATH + 'util.write_file', autospec=True)
+    def test__should_reprovision_uses_imds_md(self, write_file, isfile):
+        """The _should_reprovision method should be able to
+        retrieve the preprovisioning VM type from imds metadata."""
+        isfile.return_value = False
+ dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) + self.assertTrue(dsa._should_reprovision( + (None, None, {}, None), + {'extended': {'compute': {'ppsType': 'Running'}}})) + self.assertFalse(dsa._should_reprovision( + (None, None, {}, None), + {})) + self.assertFalse(dsa._should_reprovision( + (None, None, {}, None), + {'extended': {'compute': {"hasCustomData": False}}})) + + @mock.patch(MOCKPATH + 'DataSourceAzure._poll_imds') + def test_reprovision_calls__poll_imds(self, _poll_imds, isfile): + """_reprovision will poll IMDS.""" + isfile.return_value = False + hostname = "myhost" + username = "myuser" + odata = {'HostName': hostname, 'UserName': username} + _poll_imds.return_value = construct_valid_ovf_env(data=odata) + dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) + dsa._reprovision() + _poll_imds.assert_called_with() + + +class TestPreprovisioningHotAttachNics(CiTestCase): + + def setUp(self): + super(TestPreprovisioningHotAttachNics, self).setUp() + self.tmp = self.tmp_dir() + self.waagent_d = self.tmp_path('/var/lib/waagent', self.tmp) + self.paths = helpers.Paths({'cloud_dir': self.tmp}) + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + self.paths = helpers.Paths({'cloud_dir': self.tmp}) + + @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_detach_event', + autospec=True) + @mock.patch(MOCKPATH + 'util.write_file', autospec=True) + def test_nic_detach_writes_marker(self, m_writefile, m_detach): + """When we detect that a nic gets detached, we write a marker for it""" + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + nl_sock = mock.MagicMock() + dsa._wait_for_nic_detach(nl_sock) + m_detach.assert_called_with(nl_sock) + self.assertEqual(1, m_detach.call_count) + m_writefile.assert_called_with( + dsaz.REPROVISION_NIC_DETACHED_MARKER_FILE, mock.ANY) + + @mock.patch(MOCKPATH + 'util.write_file', autospec=True) + @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting') + @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready') + @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach') + def test_detect_nic_attach_reports_ready_and_waits_for_detach( + self, m_detach, m_report_ready, m_dhcp, m_fallback_if, + m_writefile): + """Report ready first and then wait for nic detach""" + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + dsa._wait_for_all_nics_ready() + m_fallback_if.return_value = "Dummy interface" + self.assertEqual(1, m_report_ready.call_count) + self.assertEqual(1, m_detach.call_count) + self.assertEqual(1, m_writefile.call_count) + self.assertEqual(1, m_dhcp.call_count) + m_writefile.assert_called_with(dsaz.REPORTED_READY_MARKER_FILE, + mock.ANY) + + @mock.patch('os.path.isfile') + @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting') + @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready') + @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach') + def test_detect_nic_attach_skips_report_ready_when_marker_present( + self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile): + """Skip reporting ready if we already have a marker file.""" + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + + def isfile(key): + return key == dsaz.REPORTED_READY_MARKER_FILE + + m_isfile.side_effect = isfile + dsa._wait_for_all_nics_ready() + m_fallback_if.return_value = "Dummy interface" + self.assertEqual(0, m_report_ready.call_count) + 
self.assertEqual(0, m_dhcp.call_count) + self.assertEqual(1, m_detach.call_count) + + @mock.patch('os.path.isfile') + @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting') + @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready') + @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach') + def test_detect_nic_attach_skips_nic_detach_when_marker_present( + self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile): + """Skip wait for nic detach if it already happened.""" + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + + m_isfile.return_value = True + dsa._wait_for_all_nics_ready() + m_fallback_if.return_value = "Dummy interface" + self.assertEqual(0, m_report_ready.call_count) + self.assertEqual(0, m_dhcp.call_count) + self.assertEqual(0, m_detach.call_count) + + @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up', autospec=True) + @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event') + @mock.patch('cloudinit.sources.net.find_fallback_nic') + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach') + @mock.patch('os.path.isfile') + def test_wait_for_nic_attach_if_no_fallback_interface( + self, m_isfile, m_detach, m_dhcpv4, m_imds, m_fallback_if, + m_attach, m_link_up): + """Wait for nic attach if we do not have a fallback interface""" + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + lease = { + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'unknown-245': '624c3620'} + + m_isfile.return_value = True + m_attach.return_value = "eth0" + dhcp_ctx = mock.MagicMock(lease=lease) + dhcp_ctx.obtain_lease.return_value = lease + m_dhcpv4.return_value = dhcp_ctx + m_imds.return_value = IMDS_NETWORK_METADATA + m_fallback_if.return_value = None + + dsa._wait_for_all_nics_ready() + + self.assertEqual(0, m_detach.call_count) + self.assertEqual(1, m_attach.call_count) + self.assertEqual(1, m_dhcpv4.call_count) + self.assertEqual(1, m_imds.call_count) + self.assertEqual(1, m_link_up.call_count) + m_link_up.assert_called_with(mock.ANY, "eth0") + + @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up') + @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event') + @mock.patch('cloudinit.sources.net.find_fallback_nic') + @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach') + @mock.patch('os.path.isfile') + def test_wait_for_nic_attach_multinic_attach( + self, m_isfile, m_detach, m_dhcpv4, m_imds, m_fallback_if, + m_attach, m_link_up): + """Wait for nic attach if we do not have a fallback interface""" + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + lease = { + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'unknown-245': '624c3620'} + m_attach_call_count = 0 + + def nic_attach_ret(nl_sock, nics_found): + nonlocal m_attach_call_count + m_attach_call_count = m_attach_call_count + 1 + if m_attach_call_count == 1: + return "eth0" + elif m_attach_call_count == 2: + return "eth1" + raise RuntimeError("Must have found primary nic by now.") + + # Simulate two NICs by adding the same one twice. 
+ md = { + "interface": [ + IMDS_NETWORK_METADATA['interface'][0], + IMDS_NETWORK_METADATA['interface'][0] + ] + } + + def network_metadata_ret(ifname, retries, type, exc_cb, infinite): + if ifname == "eth0": + return md + raise requests.Timeout('Fake connection timeout') + + m_isfile.return_value = True + m_attach.side_effect = nic_attach_ret + dhcp_ctx = mock.MagicMock(lease=lease) + dhcp_ctx.obtain_lease.return_value = lease + m_dhcpv4.return_value = dhcp_ctx + m_imds.side_effect = network_metadata_ret + m_fallback_if.return_value = None + + dsa._wait_for_all_nics_ready() + + self.assertEqual(0, m_detach.call_count) + self.assertEqual(2, m_attach.call_count) + # DHCP and network metadata calls will only happen on the primary NIC. + self.assertEqual(1, m_dhcpv4.call_count) + self.assertEqual(1, m_imds.call_count) + self.assertEqual(2, m_link_up.call_count) + + @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + def test_check_if_nic_is_primary_retries_on_failures( + self, m_dhcpv4, m_imds): + """Retry polling for network metadata on all failures except timeout + and network unreachable errors""" + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + lease = { + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'unknown-245': '624c3620'} + + eth0Retries = [] + eth1Retries = [] + # Simulate two NICs by adding the same one twice. + md = { + "interface": [ + IMDS_NETWORK_METADATA['interface'][0], + IMDS_NETWORK_METADATA['interface'][0] + ] + } + + def network_metadata_ret(ifname, retries, type, exc_cb, infinite): + nonlocal eth0Retries, eth1Retries + + # Simulate readurl functionality with retries and + # exception callbacks so that the callback logic can be + # validated. + if ifname == "eth0": + cause = requests.HTTPError() + for _ in range(0, 15): + error = url_helper.UrlError(cause=cause, code=410) + eth0Retries.append(exc_cb("No goal state.", error)) + else: + for _ in range(0, 10): + # We are expected to retry for a certain period for both + # timeout errors and network unreachable errors. + if _ < 5: + cause = requests.Timeout('Fake connection timeout') + else: + cause = requests.ConnectionError('Network Unreachable') + error = url_helper.UrlError(cause=cause) + eth1Retries.append(exc_cb("Connection timeout", error)) + # Should stop retrying after 10 retries + eth1Retries.append(exc_cb("Connection timeout", error)) + raise cause + return md + + m_imds.side_effect = network_metadata_ret + + dhcp_ctx = mock.MagicMock(lease=lease) + dhcp_ctx.obtain_lease.return_value = lease + m_dhcpv4.return_value = dhcp_ctx + + is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth0") + self.assertEqual(True, is_primary) + self.assertEqual(2, expected_nic_count) + + # All Eth0 errors are non-timeout errors. So we should have been + # retrying indefinitely until success. + for i in eth0Retries: + self.assertTrue(i) + + is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth1") + self.assertEqual(False, is_primary) + + # All Eth1 errors are timeout errors. Retry happens for a max of 10 and + # then we should have moved on assuming it is not the primary nic. 
+        for i in range(0, 10):
+            self.assertTrue(eth1Retries[i])
+        self.assertFalse(eth1Retries[10])
+
+    @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
+    def test_wait_for_link_up_returns_if_already_up(
+            self, m_is_link_up):
+        """Waiting for link to be up should return immediately if the link is
+        already up."""
+
+        distro_cls = distros.fetch('ubuntu')
+        distro = distro_cls('ubuntu', {}, self.paths)
+        dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+        m_is_link_up.return_value = True
+
+        dsa.wait_for_link_up("eth0")
+        self.assertEqual(1, m_is_link_up.call_count)
+
+    @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
+    @mock.patch(MOCKPATH + 'util.write_file')
+    @mock.patch('cloudinit.net.read_sys_net')
+    @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
+    def test_wait_for_link_up_checks_link_after_sleep(
+            self, m_try_set_link_up, m_read_sys_net, m_writefile, m_is_up):
+        """Waiting for link to be up should check the link again after a
+        sleep when the first attempt to bring it up reports it down."""
+
+        distro_cls = distros.fetch('ubuntu')
+        distro = distro_cls('ubuntu', {}, self.paths)
+        dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+        m_try_set_link_up.return_value = False
+
+        callcount = 0
+
+        def is_up_mock(key):
+            nonlocal callcount
+            if callcount == 0:
+                callcount += 1
+                return False
+            return True
+
+        m_is_up.side_effect = is_up_mock
+
+        dsa.wait_for_link_up("eth0")
+        self.assertEqual(2, m_try_set_link_up.call_count)
+        self.assertEqual(2, m_is_up.call_count)
+
+    @mock.patch(MOCKPATH + 'util.write_file')
+    @mock.patch('cloudinit.net.read_sys_net')
+    @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
+    def test_wait_for_link_up_writes_to_device_file(
+            self, m_is_link_up, m_read_sys_net, m_writefile):
+        """Waiting for link to be up should fall back to writing to the
+        device file when the link does not come up on its own."""
+
+        distro_cls = distros.fetch('ubuntu')
+        distro = distro_cls('ubuntu', {}, self.paths)
+        dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+
+        callcount = 0
+
+        def linkup(key):
+            nonlocal callcount
+            if callcount == 0:
+                callcount += 1
+                return False
+            return True
+
+        m_is_link_up.side_effect = linkup
+
+        dsa.wait_for_link_up("eth0")
+        self.assertEqual(2, m_is_link_up.call_count)
+        self.assertEqual(1, m_read_sys_net.call_count)
+        self.assertEqual(2, m_writefile.call_count)
+
+    @mock.patch('cloudinit.sources.helpers.netlink.'
+                'create_bound_netlink_socket')
+    def test_wait_for_all_nics_ready_raises_if_socket_fails(self, m_socket):
+        """Waiting for all nics should raise exception if netlink socket
+        creation fails."""
+
+        m_socket.side_effect = netlink.NetlinkCreateSocketError
+        distro_cls = distros.fetch('ubuntu')
+        distro = distro_cls('ubuntu', {}, self.paths)
+        dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
+
+        self.assertRaises(netlink.NetlinkCreateSocketError,
+                          dsa._wait_for_all_nics_ready)
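+
+
+# During preprovisioning, _poll_imds polls the IMDS reprovisiondata URL over
+# an ephemeral DHCP connection, reports ready to the fabric exactly once
+# (tracked via REPORTED_READY_MARKER_FILE), and re-acquires a lease when the
+# connection drops; the tests below pin down that retry and marker-file
+# behavior.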
+@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
+@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+@mock.patch('cloudinit.sources.helpers.netlink.'
+            'wait_for_media_disconnect_connect')
+@mock.patch('requests.Session.request')
+@mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
+class TestPreprovisioningPollIMDS(CiTestCase):
+
+    def setUp(self):
+        super(TestPreprovisioningPollIMDS, self).setUp()
+        self.tmp = self.tmp_dir()
+        self.waagent_d = self.tmp_path('/var/lib/waagent', self.tmp)
+        self.paths = helpers.Paths({'cloud_dir': self.tmp})
+        dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
+
+    @mock.patch('time.sleep', mock.MagicMock())
+    @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
+    def test_poll_imds_re_dhcp_on_timeout(self, m_dhcpv4, m_report_ready,
+                                          m_request, m_media_switch, m_dhcp,
+                                          m_net):
+        """_poll_imds should retry DHCP on IMDS timeout."""
+        report_file = self.tmp_path('report_marker', self.tmp)
+        lease = {
+            'interface': 'eth9', 'fixed-address': '192.168.2.9',
+            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
+            'unknown-245': '624c3620'}
+        m_dhcp.return_value = [lease]
+        m_media_switch.return_value = None
+        dhcp_ctx = mock.MagicMock(lease=lease)
+        dhcp_ctx.obtain_lease.return_value = lease
+        m_dhcpv4.return_value = dhcp_ctx
+
+        self.tries = 0
+
+        def fake_timeout_once(**kwargs):
+            self.tries += 1
+            if self.tries == 1:
+                raise requests.Timeout('Fake connection timeout')
+            elif self.tries in (2, 3):
+                response = requests.Response()
+                response.status_code = 404 if self.tries == 2 else 410
+                raise requests.exceptions.HTTPError(
+                    "fake {}".format(response.status_code), response=response
+                )
+            # The fourth request (third retry) succeeds and stops further
+            # retries or re-dhcp.
+            return mock.MagicMock(status_code=200, text="good", content="good")
+
+        m_request.side_effect = fake_timeout_once
+
+        dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
+            dsa._poll_imds()
+        self.assertEqual(m_report_ready.call_count, 1)
+        m_report_ready.assert_called_with(lease=lease)
+        self.assertEqual(3, m_dhcpv4.call_count, 'Expected 3 DHCP calls')
+        self.assertEqual(4, self.tries, 'Expected 4 total reads from IMDS')
+
+    @mock.patch('os.path.isfile')
+    def test_poll_imds_skips_dhcp_if_ctx_present(
+            self, m_isfile, report_ready_func, fake_resp, m_media_switch,
+            m_dhcp, m_net):
+        """The poll_imds function should reuse the dhcp ctx if it is already
+        present. This happens when we wait for nic to be hot-attached before
+        polling for reprovisiondata. Note that if this ctx is set when
+        _poll_imds is called, then it is not expected to be waiting for
+        media_disconnect_connect either."""
+        report_file = self.tmp_path('report_marker', self.tmp)
+        m_isfile.return_value = True
+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+        dsa._ephemeral_dhcp_ctx = "Dummy dhcp ctx"
+        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
+            dsa._poll_imds()
+        self.assertEqual(0, m_dhcp.call_count)
+        self.assertEqual(0, m_media_switch.call_count)
+
+    @mock.patch('os.path.isfile')
+    @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
+    def test_poll_imds_does_dhcp_on_retries_if_ctx_present(
+            self, m_ephemeral_dhcpv4, m_isfile, report_ready_func, m_request,
+            m_media_switch, m_dhcp, m_net):
+        """The poll_imds function should reuse the dhcp ctx if it is already
+        present. This happens when we wait for nic to be hot-attached before
+        polling for reprovisiondata. Note that if this ctx is set when
+        _poll_imds is called, then it is not expected to be waiting for
+        media_disconnect_connect either."""
+
+        tries = 0
+
+        def fake_timeout_once(**kwargs):
+            nonlocal tries
+            tries += 1
+            if tries == 1:
+                raise requests.Timeout('Fake connection timeout')
+            return mock.MagicMock(status_code=200, text="good", content="good")
+
+        m_request.side_effect = fake_timeout_once
+        report_file = self.tmp_path('report_marker', self.tmp)
+        m_isfile.return_value = True
+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file),\
+                mock.patch.object(dsa, '_ephemeral_dhcp_ctx') as m_dhcp_ctx:
+            m_dhcp_ctx.obtain_lease.return_value = "Dummy lease"
+            dsa._ephemeral_dhcp_ctx = m_dhcp_ctx
+            dsa._poll_imds()
+        self.assertEqual(1, m_dhcp_ctx.clean_network.call_count)
+        self.assertEqual(1, m_ephemeral_dhcpv4.call_count)
+        self.assertEqual(0, m_media_switch.call_count)
+        self.assertEqual(2, m_request.call_count)
+
+    def test_does_not_poll_imds_report_ready_when_marker_file_exists(
+            self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
+        """poll_imds should not call report ready when the reported ready
+        marker file exists"""
+        report_file = self.tmp_path('report_marker', self.tmp)
+        write_file(report_file, content='dont run report_ready :)')
+        m_dhcp.return_value = [{
+            'interface': 'eth9', 'fixed-address': '192.168.2.9',
+            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
+            'unknown-245': '624c3620'}]
+        m_media_switch.return_value = None
+        dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
+            dsa._poll_imds()
+        self.assertEqual(m_report_ready.call_count, 0)
+
+    def test_poll_imds_report_ready_success_writes_marker_file(
+            self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
+        """poll_imds should write the report_ready marker file if
+        reporting ready succeeds"""
+        report_file = self.tmp_path('report_marker', self.tmp)
+        m_dhcp.return_value = [{
+            'interface': 'eth9', 'fixed-address': '192.168.2.9',
+            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
+            'unknown-245': '624c3620'}]
+        m_media_switch.return_value = None
+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+        self.assertFalse(os.path.exists(report_file))
+        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
+            dsa._poll_imds()
+        self.assertEqual(m_report_ready.call_count, 1)
+        self.assertTrue(os.path.exists(report_file))
+
+    def test_poll_imds_report_ready_failure_raises_exc_and_doesnt_write_marker(
+            self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
+        """poll_imds should raise InvalidMetaDataException and not write the
+        report_ready marker file when reporting ready fails"""
+        report_file = self.tmp_path('report_marker', self.tmp)
+        m_dhcp.return_value = [{
+            'interface': 'eth9', 'fixed-address': '192.168.2.9',
+            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
+            'unknown-245': '624c3620'}]
+        m_media_switch.return_value = None
+        m_report_ready.return_value = False
+        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
+        self.assertFalse(os.path.exists(report_file))
+        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
+            self.assertRaises(
+                InvalidMetaDataException,
+                dsa._poll_imds)
+        self.assertEqual(m_report_ready.call_count, 1)
+        self.assertFalse(os.path.exists(report_file))
+
+
+@mock.patch(MOCKPATH + 'DataSourceAzure._report_ready', mock.MagicMock())
+@mock.patch(MOCKPATH + 'subp.subp', mock.MagicMock()) +@mock.patch(MOCKPATH + 'util.write_file', mock.MagicMock()) +@mock.patch('cloudinit.sources.helpers.netlink.' + 'wait_for_media_disconnect_connect') +@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network', autospec=True) +@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') +@mock.patch('requests.Session.request') +class TestAzureDataSourcePreprovisioning(CiTestCase): + + def setUp(self): + super(TestAzureDataSourcePreprovisioning, self).setUp() + tmp = self.tmp_dir() + self.waagent_d = self.tmp_path('/var/lib/waagent', tmp) + self.paths = helpers.Paths({'cloud_dir': tmp}) + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + + def test_poll_imds_returns_ovf_env(self, m_request, + m_dhcp, m_net, + m_media_switch): + """The _poll_imds method should return the ovf_env.xml.""" + m_media_switch.return_value = None + m_dhcp.return_value = [{ + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0'}] + url = 'http://{0}/metadata/reprovisiondata?api-version=2019-06-01' + host = "169.254.169.254" + full_url = url.format(host) + m_request.return_value = mock.MagicMock(status_code=200, text="ovf", + content="ovf") + dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) + self.assertTrue(len(dsa._poll_imds()) > 0) + self.assertEqual(m_request.call_args_list, + [mock.call(allow_redirects=True, + headers={'Metadata': 'true', + 'User-Agent': + 'Cloud-Init/%s' % vs() + }, method='GET', + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, + url=full_url)]) + self.assertEqual(m_dhcp.call_count, 2) + m_net.assert_any_call( + broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', + prefix_or_mask='255.255.255.0', router='192.168.2.1', + static_routes=None) + self.assertEqual(m_net.call_count, 2) + + def test__reprovision_calls__poll_imds(self, m_request, + m_dhcp, m_net, + m_media_switch): + """The _reprovision method should call poll IMDS.""" + m_media_switch.return_value = None + m_dhcp.return_value = [{ + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'unknown-245': '624c3620'}] + url = 'http://{0}/metadata/reprovisiondata?api-version=2019-06-01' + host = "169.254.169.254" + full_url = url.format(host) + hostname = "myhost" + username = "myuser" + odata = {'HostName': hostname, 'UserName': username} + content = construct_valid_ovf_env(data=odata) + m_request.return_value = mock.MagicMock(status_code=200, text=content, + content=content) + dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) + md, _ud, cfg, _d = dsa._reprovision() + self.assertEqual(md['local-hostname'], hostname) + self.assertEqual(cfg['system_info']['default_user']['name'], username) + self.assertIn( + mock.call( + allow_redirects=True, + headers={ + 'Metadata': 'true', + 'User-Agent': 'Cloud-Init/%s' % vs() + }, + method='GET', + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, + url=full_url + ), + m_request.call_args_list) + self.assertEqual(m_dhcp.call_count, 2) + m_net.assert_any_call( + broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', + prefix_or_mask='255.255.255.0', router='192.168.2.1', + static_routes=None) + self.assertEqual(m_net.call_count, 2) + + +class TestRemoveUbuntuNetworkConfigScripts(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestRemoveUbuntuNetworkConfigScripts, self).setUp() + self.tmp = self.tmp_dir() + + def test_remove_network_scripts_removes_both_files_and_directories(self): 
+ """Any files or directories in paths are removed when present.""" + file1 = self.tmp_path('file1', dir=self.tmp) + subdir = self.tmp_path('sub1', dir=self.tmp) + subfile = self.tmp_path('leaf1', dir=subdir) + write_file(file1, 'file1content') + write_file(subfile, 'leafcontent') + dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[subdir, file1]) + + for path in (file1, subdir, subfile): + self.assertFalse(os.path.exists(path), + 'Found unremoved: %s' % path) + + expected_logs = [ + 'INFO: Removing Ubuntu extended network scripts because cloud-init' + ' updates Azure network configuration on the following events:' + " ['boot', 'boot-legacy']", + 'Recursively deleting %s' % subdir, + 'Attempting to remove %s' % file1] + for log in expected_logs: + self.assertIn(log, self.logs.getvalue()) + + def test_remove_network_scripts_only_attempts_removal_if_path_exists(self): + """Any files or directories absent are skipped without error.""" + dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[ + self.tmp_path('nodirhere/', dir=self.tmp), + self.tmp_path('notfilehere', dir=self.tmp)]) + self.assertNotIn('/not/a', self.logs.getvalue()) # No delete logs + + @mock.patch(MOCKPATH + 'os.path.exists') + def test_remove_network_scripts_default_removes_stock_scripts(self, + m_exists): + """Azure's stock ubuntu image scripts and artifacts are removed.""" + # Report path absent on all to avoid delete operation + m_exists.return_value = False + dsaz.maybe_remove_ubuntu_network_config_scripts() + calls = m_exists.call_args_list + for path in dsaz.UBUNTU_EXTENDED_NETWORK_SCRIPTS: + self.assertIn(mock.call(path), calls) + + +class TestWBIsPlatformViable(CiTestCase): + """White box tests for _is_platform_viable.""" + with_logs = True + + @mock.patch(MOCKPATH + 'dmi.read_dmi_data') + def test_true_on_non_azure_chassis(self, m_read_dmi_data): + """Return True if DMI chassis-asset-tag is AZURE_CHASSIS_ASSET_TAG.""" + m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + self.assertTrue(dsaz._is_platform_viable('doesnotmatter')) + + @mock.patch(MOCKPATH + 'os.path.exists') + @mock.patch(MOCKPATH + 'dmi.read_dmi_data') + def test_true_on_azure_ovf_env_in_seed_dir(self, m_read_dmi_data, m_exist): + """Return True if ovf-env.xml exists in known seed dirs.""" + # Non-matching Azure chassis-asset-tag + m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + 'X' + + m_exist.return_value = True + self.assertTrue(dsaz._is_platform_viable('/some/seed/dir')) + m_exist.called_once_with('/other/seed/dir') + + def test_false_on_no_matching_azure_criteria(self): + """Report non-azure on unmatched asset tag, ovf-env absent and no dev. + + Return False when the asset tag doesn't match Azure's static + AZURE_CHASSIS_ASSET_TAG, no ovf-env.xml files exist in known seed dirs + and no devices have a label starting with prefix 'rd_rdfe_'. 
+ """ + self.assertFalse(wrap_and_call( + MOCKPATH, + {'os.path.exists': False, + # Non-matching Azure chassis-asset-tag + 'dmi.read_dmi_data': dsaz.AZURE_CHASSIS_ASSET_TAG + 'X', + 'subp.which': None}, + dsaz._is_platform_viable, 'doesnotmatter')) + self.assertIn( + "DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format( + dsaz.AZURE_CHASSIS_ASSET_TAG + 'X'), + self.logs.getvalue()) + + +class TestRandomSeed(CiTestCase): + """Test proper handling of random_seed""" + + def test_non_ascii_seed_is_serializable(self): + """Pass if a random string from the Azure infrastructure which + contains at least one non-Unicode character can be converted to/from + JSON without alteration and without throwing an exception. + """ + path = resourceLocation("azure/non_unicode_random_string") + result = dsaz._get_random_seed(path) + + obj = {'seed': result} + try: + serialized = json_dumps(obj) + deserialized = load_json(serialized) + except UnicodeDecodeError: + self.fail("Non-serializable random seed returned") + + self.assertEqual(deserialized['seed'], result) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_azure_helper.py b/tests/unittests/sources/test_azure_helper.py new file mode 100644 index 00000000..24c582c2 --- /dev/null +++ b/tests/unittests/sources/test_azure_helper.py @@ -0,0 +1,1441 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import copy +import os +import re +import unittest +from textwrap import dedent +from xml.etree import ElementTree +from xml.sax.saxutils import escape, unescape + +from cloudinit.sources.helpers import azure as azure_helper +from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir + +from cloudinit.util import load_file +from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim + +GOAL_STATE_TEMPLATE = """\ + + + 2012-11-30 + {incarnation} + + Started + 300000 + + 16001 + + FALSE + + + {container_id} + + + {instance_id} + Started + + + http://100.86.192.70:80/...hostingEnvironmentConfig... + + http://100.86.192.70:80/..SharedConfig.. + + http://100.86.192.70:80/...extensionsConfig... + + http://100.86.192.70:80/...fullConfig... + {certificates_url} + 68ce47.0.68ce47.0.utl-trusty--292258.1.xml + + + + + +""" + +HEALTH_REPORT_XML_TEMPLATE = '''\ + + + {incarnation} + + {container_id} + + + {instance_id} + + {health_status} + {health_detail_subsection} + + + + + +''' + +HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = dedent('''\ +
+
+HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = dedent('''\
+    <Details>
+      <SubStatus>{health_substatus}</SubStatus>
+      <Description>{health_description}</Description>
+    </Details>
+    ''')
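+
+# Descriptions reported in the Details subsection are expected to be trimmed
+# to this many characters before being posted to the fabric.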
+HEALTH_REPORT_DESCRIPTION_TRIM_LEN = 512
+
+
+class SentinelException(Exception):
+    pass
+
+
+class TestFindEndpoint(CiTestCase):
+
+    def setUp(self):
+        super(TestFindEndpoint, self).setUp()
+        patches = ExitStack()
+        self.addCleanup(patches.close)
+
+        self.load_file = patches.enter_context(
+            mock.patch.object(azure_helper.util, 'load_file'))
+
+        self.dhcp_options = patches.enter_context(
+            mock.patch.object(wa_shim, '_load_dhclient_json'))
+
+        self.networkd_leases = patches.enter_context(
+            mock.patch.object(wa_shim, '_networkd_get_value_from_leases'))
+        self.networkd_leases.return_value = None
+
+    def test_missing_file(self):
+        """wa_shim find_endpoint uses default endpoint if leasefile not found
+        """
+        self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16")
+
+    def test_missing_special_azure_line(self):
+        """wa_shim find_endpoint uses default endpoint if leasefile is found
+        but does not contain DHCP Option 245 (whose value is the endpoint)
+        """
+        self.load_file.return_value = ''
+        self.dhcp_options.return_value = {'eth0': {'key': 'value'}}
+        self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16")
+
+    @staticmethod
+    def _build_lease_content(encoded_address):
+        endpoint = azure_helper._get_dhcp_endpoint_option_name()
+        return '\n'.join([
+            'lease {',
+            '    interface "eth0";',
+            '    option {0} {1};'.format(endpoint, encoded_address),
+            '}'])
+
+    def test_from_dhcp_client(self):
+        self.dhcp_options.return_value = {"eth0": {"unknown_245": "5:4:3:2"}}
+        self.assertEqual('5.4.3.2', wa_shim.find_endpoint(None))
+
+    def test_latest_lease_used(self):
+        encoded_addresses = ['5:4:3:2', '4:3:2:1']
+        file_content = '\n'.join([self._build_lease_content(encoded_address)
+                                  for encoded_address in encoded_addresses])
+        self.load_file.return_value = file_content
+        self.assertEqual(encoded_addresses[-1].replace(':', '.'),
+                         wa_shim.find_endpoint("foobar"))
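+
+
+# DHCP option 245 may reach the lease file either as colon-separated hex
+# octets or as a packed 4-character string, e.g. both '62:4c:36:20' and
+# 'bL6 ' (0x62 = 'b', 0x4c = 'L', 0x36 = '6', 0x20 = ' ') decode to the
+# endpoint address 98.76.54.32.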
+class TestExtractIpAddressFromLeaseValue(CiTestCase):
+
+    def test_hex_string(self):
+        ip_address, encoded_address = '98.76.54.32', '62:4c:36:20'
+        self.assertEqual(
+            ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+
+    def test_hex_string_with_single_character_part(self):
+        ip_address, encoded_address = '4.3.2.1', '4:3:2:1'
+        self.assertEqual(
+            ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+
+    def test_packed_string(self):
+        ip_address, encoded_address = '98.76.54.32', 'bL6 '
+        self.assertEqual(
+            ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+
+    def test_packed_string_with_escaped_quote(self):
+        ip_address, encoded_address = '100.72.34.108', 'dH\\"l'
+        self.assertEqual(
+            ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+
+    def test_packed_string_containing_a_colon(self):
+        ip_address, encoded_address = '100.72.58.108', 'dH:l'
+        self.assertEqual(
+            ip_address, wa_shim.get_ip_from_lease_value(encoded_address))
+
+
+class TestGoalStateParsing(CiTestCase):
+
+    default_parameters = {
+        'incarnation': 1,
+        'container_id': 'MyContainerId',
+        'instance_id': 'MyInstanceId',
+        'certificates_url': 'MyCertificatesUrl',
+    }
+
+    def _get_formatted_goal_state_xml_string(self, **kwargs):
+        parameters = self.default_parameters.copy()
+        parameters.update(kwargs)
+        xml = GOAL_STATE_TEMPLATE.format(**parameters)
+        if parameters['certificates_url'] is None:
+            new_xml_lines = []
+            for line in xml.splitlines():
+                if 'Certificates' in line:
+                    continue
+                new_xml_lines.append(line)
+            xml = '\n'.join(new_xml_lines)
+        return xml
+
+    def _get_goal_state(self, m_azure_endpoint_client=None, **kwargs):
+        if m_azure_endpoint_client is None:
+            m_azure_endpoint_client = mock.MagicMock()
+        xml = self._get_formatted_goal_state_xml_string(**kwargs)
+        return azure_helper.GoalState(xml, m_azure_endpoint_client)
+
+    def test_incarnation_parsed_correctly(self):
+        incarnation = '123'
+        goal_state = self._get_goal_state(incarnation=incarnation)
+        self.assertEqual(incarnation, goal_state.incarnation)
+
+    def test_container_id_parsed_correctly(self):
+        container_id = 'TestContainerId'
+        goal_state = self._get_goal_state(container_id=container_id)
+        self.assertEqual(container_id, goal_state.container_id)
+
+    def test_instance_id_parsed_correctly(self):
+        instance_id = 'TestInstanceId'
+        goal_state = self._get_goal_state(instance_id=instance_id)
+        self.assertEqual(instance_id, goal_state.instance_id)
+
+    def test_instance_id_byte_swap(self):
+        """Return true when previous_iid is byteswapped current_iid"""
+        previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
+        current_iid = "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8"
+        self.assertTrue(
+            azure_helper.is_byte_swapped(previous_iid, current_iid))
+
+    def test_instance_id_no_byte_swap_same_instance_id(self):
+        previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
+        current_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
+        self.assertFalse(
+            azure_helper.is_byte_swapped(previous_iid, current_iid))
+
+    def test_instance_id_no_byte_swap_diff_instance_id(self):
+        previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
+        current_iid = "G0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
+        self.assertFalse(
+            azure_helper.is_byte_swapped(previous_iid, current_iid))
+
+    def test_certificates_xml_parsed_and_fetched_correctly(self):
+        m_azure_endpoint_client = mock.MagicMock()
+        certificates_url = 'TestCertificatesUrl'
+        goal_state = self._get_goal_state(
+            m_azure_endpoint_client=m_azure_endpoint_client,
+            certificates_url=certificates_url)
+        certificates_xml = goal_state.certificates_xml
+        self.assertEqual(1, m_azure_endpoint_client.get.call_count)
+        self.assertEqual(
+            certificates_url,
+            m_azure_endpoint_client.get.call_args[0][0])
+        self.assertTrue(
+            m_azure_endpoint_client.get.call_args[1].get(
+                'secure', False))
+        self.assertEqual(
+            m_azure_endpoint_client.get.return_value.contents,
+            certificates_xml)
+
+    def test_missing_certificates_skips_http_get(self):
+        m_azure_endpoint_client = mock.MagicMock()
+        goal_state = self._get_goal_state(
+            m_azure_endpoint_client=m_azure_endpoint_client,
+            certificates_url=None)
+        certificates_xml = goal_state.certificates_xml
+        self.assertEqual(0, m_azure_endpoint_client.get.call_count)
+        self.assertIsNone(certificates_xml)
+
+    def test_invalid_goal_state_xml_raises_parse_error(self):
+        xml = 'random non-xml data'
+        with self.assertRaises(ElementTree.ParseError):
+            azure_helper.GoalState(xml, mock.MagicMock())
+
+    def test_missing_container_id_in_goal_state_xml_raises_exc(self):
+        xml = self._get_formatted_goal_state_xml_string()
+        xml = re.sub('<ContainerId>.*</ContainerId>', '', xml)
+        with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
+            azure_helper.GoalState(xml, mock.MagicMock())
+
+    def test_missing_instance_id_in_goal_state_xml_raises_exc(self):
+        xml = self._get_formatted_goal_state_xml_string()
+        xml = re.sub('<InstanceId>.*</InstanceId>', '', xml)
+        with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
+            azure_helper.GoalState(xml, mock.MagicMock())
+
+    def test_missing_incarnation_in_goal_state_xml_raises_exc(self):
+        xml = self._get_formatted_goal_state_xml_string()
+        xml = re.sub('<Incarnation>.*</Incarnation>', '', xml)
+        with
self.assertRaises(azure_helper.InvalidGoalStateXMLException): + azure_helper.GoalState(xml, mock.MagicMock()) + + +class TestAzureEndpointHttpClient(CiTestCase): + + regular_headers = { + 'x-ms-agent-name': 'WALinuxAgent', + 'x-ms-version': '2012-11-30', + } + + def setUp(self): + super(TestAzureEndpointHttpClient, self).setUp() + patches = ExitStack() + self.addCleanup(patches.close) + self.m_http_with_retries = patches.enter_context( + mock.patch.object(azure_helper, 'http_with_retries')) + + def test_non_secure_get(self): + client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) + url = 'MyTestUrl' + response = client.get(url, secure=False) + self.assertEqual(1, self.m_http_with_retries.call_count) + self.assertEqual(self.m_http_with_retries.return_value, response) + self.assertEqual( + mock.call(url, headers=self.regular_headers), + self.m_http_with_retries.call_args) + + def test_non_secure_get_raises_exception(self): + client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) + url = 'MyTestUrl' + self.m_http_with_retries.side_effect = SentinelException + self.assertRaises(SentinelException, client.get, url, secure=False) + self.assertEqual(1, self.m_http_with_retries.call_count) + + def test_secure_get(self): + url = 'MyTestUrl' + m_certificate = mock.MagicMock() + expected_headers = self.regular_headers.copy() + expected_headers.update({ + "x-ms-cipher-name": "DES_EDE3_CBC", + "x-ms-guest-agent-public-x509-cert": m_certificate, + }) + client = azure_helper.AzureEndpointHttpClient(m_certificate) + response = client.get(url, secure=True) + self.assertEqual(1, self.m_http_with_retries.call_count) + self.assertEqual(self.m_http_with_retries.return_value, response) + self.assertEqual( + mock.call(url, headers=expected_headers), + self.m_http_with_retries.call_args) + + def test_secure_get_raises_exception(self): + url = 'MyTestUrl' + client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) + self.m_http_with_retries.side_effect = SentinelException + self.assertRaises(SentinelException, client.get, url, secure=True) + self.assertEqual(1, self.m_http_with_retries.call_count) + + def test_post(self): + m_data = mock.MagicMock() + url = 'MyTestUrl' + client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) + response = client.post(url, data=m_data) + self.assertEqual(1, self.m_http_with_retries.call_count) + self.assertEqual(self.m_http_with_retries.return_value, response) + self.assertEqual( + mock.call(url, data=m_data, headers=self.regular_headers), + self.m_http_with_retries.call_args) + + def test_post_raises_exception(self): + m_data = mock.MagicMock() + url = 'MyTestUrl' + client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) + self.m_http_with_retries.side_effect = SentinelException + self.assertRaises(SentinelException, client.post, url, data=m_data) + self.assertEqual(1, self.m_http_with_retries.call_count) + + def test_post_with_extra_headers(self): + url = 'MyTestUrl' + client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) + extra_headers = {'test': 'header'} + client.post(url, extra_headers=extra_headers) + expected_headers = self.regular_headers.copy() + expected_headers.update(extra_headers) + self.assertEqual(1, self.m_http_with_retries.call_count) + self.assertEqual( + mock.call(url, data=mock.ANY, headers=expected_headers), + self.m_http_with_retries.call_args) + + def test_post_with_sleep_with_extra_headers_raises_exception(self): + m_data = mock.MagicMock() + url = 'MyTestUrl' + extra_headers = {'test': 'header'} + client = 
azure_helper.AzureEndpointHttpClient(mock.MagicMock()) + self.m_http_with_retries.side_effect = SentinelException + self.assertRaises( + SentinelException, client.post, + url, data=m_data, extra_headers=extra_headers) + self.assertEqual(1, self.m_http_with_retries.call_count) + + +class TestAzureHelperHttpWithRetries(CiTestCase): + + with_logs = True + + max_readurl_attempts = 240 + default_readurl_timeout = 5 + sleep_duration_between_retries = 5 + periodic_logging_attempts = 12 + + def setUp(self): + super(TestAzureHelperHttpWithRetries, self).setUp() + patches = ExitStack() + self.addCleanup(patches.close) + + self.m_readurl = patches.enter_context( + mock.patch.object( + azure_helper.url_helper, 'readurl', mock.MagicMock())) + self.m_sleep = patches.enter_context( + mock.patch.object(azure_helper.time, 'sleep', autospec=True)) + + def test_http_with_retries(self): + self.m_readurl.return_value = 'TestResp' + self.assertEqual( + azure_helper.http_with_retries('testurl'), + self.m_readurl.return_value) + self.assertEqual(self.m_readurl.call_count, 1) + + def test_http_with_retries_propagates_readurl_exc_and_logs_exc( + self): + self.m_readurl.side_effect = SentinelException + + self.assertRaises( + SentinelException, azure_helper.http_with_retries, 'testurl') + self.assertEqual(self.m_readurl.call_count, self.max_readurl_attempts) + + self.assertIsNotNone( + re.search( + r'Failed HTTP request with Azure endpoint \S* during ' + r'attempt \d+ with exception: \S*', + self.logs.getvalue())) + self.assertIsNone( + re.search( + r'Successful HTTP request with Azure endpoint \S* after ' + r'\d+ attempts', + self.logs.getvalue())) + + def test_http_with_retries_delayed_success_due_to_temporary_readurl_exc( + self): + self.m_readurl.side_effect = \ + [SentinelException] * self.periodic_logging_attempts + \ + ['TestResp'] + self.m_readurl.return_value = 'TestResp' + + response = azure_helper.http_with_retries('testurl') + self.assertEqual( + response, + self.m_readurl.return_value) + self.assertEqual( + self.m_readurl.call_count, + self.periodic_logging_attempts + 1) + + # Ensure that cloud-init did sleep between each failed request + self.assertEqual( + self.m_sleep.call_count, + self.periodic_logging_attempts) + self.m_sleep.assert_called_with(self.sleep_duration_between_retries) + + def test_http_with_retries_long_delay_logs_periodic_failure_msg(self): + self.m_readurl.side_effect = \ + [SentinelException] * self.periodic_logging_attempts + \ + ['TestResp'] + self.m_readurl.return_value = 'TestResp' + + azure_helper.http_with_retries('testurl') + + self.assertEqual( + self.m_readurl.call_count, + self.periodic_logging_attempts + 1) + self.assertIsNotNone( + re.search( + r'Failed HTTP request with Azure endpoint \S* during ' + r'attempt \d+ with exception: \S*', + self.logs.getvalue())) + self.assertIsNotNone( + re.search( + r'Successful HTTP request with Azure endpoint \S* after ' + r'\d+ attempts', + self.logs.getvalue())) + + def test_http_with_retries_short_delay_does_not_log_periodic_failure_msg( + self): + self.m_readurl.side_effect = \ + [SentinelException] * \ + (self.periodic_logging_attempts - 1) + \ + ['TestResp'] + self.m_readurl.return_value = 'TestResp' + + azure_helper.http_with_retries('testurl') + self.assertEqual( + self.m_readurl.call_count, + self.periodic_logging_attempts) + + self.assertIsNone( + re.search( + r'Failed HTTP request with Azure endpoint \S* during ' + r'attempt \d+ with exception: \S*', + self.logs.getvalue())) + self.assertIsNotNone( + re.search( + r'Successful 
HTTP request with Azure endpoint \S* after ' + r'\d+ attempts', + self.logs.getvalue())) + + def test_http_with_retries_calls_url_helper_readurl_with_args_kwargs(self): + testurl = mock.MagicMock() + kwargs = { + 'headers': mock.MagicMock(), + 'data': mock.MagicMock(), + # timeout kwarg should not be modified or deleted if present + 'timeout': mock.MagicMock() + } + azure_helper.http_with_retries(testurl, **kwargs) + self.m_readurl.assert_called_once_with(testurl, **kwargs) + + def test_http_with_retries_adds_timeout_kwarg_if_not_present(self): + testurl = mock.MagicMock() + kwargs = { + 'headers': mock.MagicMock(), + 'data': mock.MagicMock() + } + expected_kwargs = copy.deepcopy(kwargs) + expected_kwargs['timeout'] = self.default_readurl_timeout + + azure_helper.http_with_retries(testurl, **kwargs) + self.m_readurl.assert_called_once_with(testurl, **expected_kwargs) + + def test_http_with_retries_deletes_retries_kwargs_passed_in( + self): + """http_with_retries already implements retry logic, + so url_helper.readurl should not have retries. + http_with_retries should delete kwargs that + cause url_helper.readurl to retry. + """ + testurl = mock.MagicMock() + kwargs = { + 'headers': mock.MagicMock(), + 'data': mock.MagicMock(), + 'timeout': mock.MagicMock(), + 'retries': mock.MagicMock(), + 'infinite': mock.MagicMock() + } + expected_kwargs = copy.deepcopy(kwargs) + expected_kwargs.pop('retries', None) + expected_kwargs.pop('infinite', None) + + azure_helper.http_with_retries(testurl, **kwargs) + self.m_readurl.assert_called_once_with(testurl, **expected_kwargs) + self.assertIn( + 'retries kwarg passed in for communication with Azure endpoint.', + self.logs.getvalue()) + self.assertIn( + 'infinite kwarg passed in for communication with Azure endpoint.', + self.logs.getvalue()) + + +class TestOpenSSLManager(CiTestCase): + + def setUp(self): + super(TestOpenSSLManager, self).setUp() + patches = ExitStack() + self.addCleanup(patches.close) + + self.subp = patches.enter_context( + mock.patch.object(azure_helper.subp, 'subp')) + try: + self.open = patches.enter_context( + mock.patch('__builtin__.open')) + except ImportError: + self.open = patches.enter_context( + mock.patch('builtins.open')) + + @mock.patch.object(azure_helper, 'cd', mock.MagicMock()) + @mock.patch.object(azure_helper.temp_utils, 'mkdtemp') + def test_openssl_manager_creates_a_tmpdir(self, mkdtemp): + manager = azure_helper.OpenSSLManager() + self.assertEqual(mkdtemp.return_value, manager.tmpdir) + + def test_generate_certificate_uses_tmpdir(self): + subp_directory = {} + + def capture_directory(*args, **kwargs): + subp_directory['path'] = os.getcwd() + + self.subp.side_effect = capture_directory + manager = azure_helper.OpenSSLManager() + self.assertEqual(manager.tmpdir, subp_directory['path']) + manager.clean_up() + + @mock.patch.object(azure_helper, 'cd', mock.MagicMock()) + @mock.patch.object(azure_helper.temp_utils, 'mkdtemp', mock.MagicMock()) + @mock.patch.object(azure_helper.util, 'del_dir') + def test_clean_up(self, del_dir): + manager = azure_helper.OpenSSLManager() + manager.clean_up() + self.assertEqual([mock.call(manager.tmpdir)], del_dir.call_args_list) + + +class TestOpenSSLManagerActions(CiTestCase): + + def setUp(self): + super(TestOpenSSLManagerActions, self).setUp() + + self.allowed_subp = True + + def _data_file(self, name): + path = 'tests/data/azure' + return os.path.join(path, name) + + @unittest.skip("todo move to cloud_test") + def test_pubkey_extract(self): + cert = 
load_file(self._data_file('pubkey_extract_cert')) + good_key = load_file(self._data_file('pubkey_extract_ssh_key')) + sslmgr = azure_helper.OpenSSLManager() + key = sslmgr._get_ssh_key_from_cert(cert) + self.assertEqual(good_key, key) + + good_fingerprint = '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' + fingerprint = sslmgr._get_fingerprint_from_cert(cert) + self.assertEqual(good_fingerprint, fingerprint) + + @unittest.skip("todo move to cloud_test") + @mock.patch.object(azure_helper.OpenSSLManager, '_decrypt_certs_from_xml') + def test_parse_certificates(self, mock_decrypt_certs): + """Azure control plane puts private keys as well as certificates + into the Certificates XML object. Make sure only the public keys + from certs are extracted and that fingerprints are converted to + the form specified in the ovf-env.xml file. + """ + cert_contents = load_file(self._data_file('parse_certificates_pem')) + fingerprints = load_file(self._data_file( + 'parse_certificates_fingerprints') + ).splitlines() + mock_decrypt_certs.return_value = cert_contents + sslmgr = azure_helper.OpenSSLManager() + keys_by_fp = sslmgr.parse_certificates('') + for fp in keys_by_fp.keys(): + self.assertIn(fp, fingerprints) + for fp in fingerprints: + self.assertIn(fp, keys_by_fp) + + +class TestGoalStateHealthReporter(CiTestCase): + + maxDiff = None + + default_parameters = { + 'incarnation': 1634, + 'container_id': 'MyContainerId', + 'instance_id': 'MyInstanceId' + } + + test_azure_endpoint = 'TestEndpoint' + test_health_report_url = 'http://{0}/machine?comp=health'.format( + test_azure_endpoint) + test_default_headers = {'Content-Type': 'text/xml; charset=utf-8'} + + provisioning_success_status = 'Ready' + provisioning_not_ready_status = 'NotReady' + provisioning_failure_substatus = 'ProvisioningFailed' + provisioning_failure_err_description = ( + 'Test error message containing provisioning failure details') + + def setUp(self): + super(TestGoalStateHealthReporter, self).setUp() + patches = ExitStack() + self.addCleanup(patches.close) + + patches.enter_context( + mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock())) + self.read_file_or_url = patches.enter_context( + mock.patch.object(azure_helper.url_helper, 'read_file_or_url')) + + self.post = patches.enter_context( + mock.patch.object(azure_helper.AzureEndpointHttpClient, + 'post')) + + self.GoalState = patches.enter_context( + mock.patch.object(azure_helper, 'GoalState')) + self.GoalState.return_value.container_id = \ + self.default_parameters['container_id'] + self.GoalState.return_value.instance_id = \ + self.default_parameters['instance_id'] + self.GoalState.return_value.incarnation = \ + self.default_parameters['incarnation'] + + def _text_from_xpath_in_xroot(self, xroot, xpath): + element = xroot.find(xpath) + if element is not None: + return element.text + return None + + def _get_formatted_health_report_xml_string(self, **kwargs): + return HEALTH_REPORT_XML_TEMPLATE.format(**kwargs) + + def _get_formatted_health_detail_subsection_xml_string(self, **kwargs): + return HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(**kwargs) + + def _get_report_ready_health_document(self): + return self._get_formatted_health_report_xml_string( + incarnation=escape(str(self.default_parameters['incarnation'])), + container_id=escape(self.default_parameters['container_id']), + instance_id=escape(self.default_parameters['instance_id']), + health_status=escape(self.provisioning_success_status), + health_detail_subsection='') + + def _get_report_failure_health_document(self): + 
health_detail_subsection = \ + self._get_formatted_health_detail_subsection_xml_string( + health_substatus=escape(self.provisioning_failure_substatus), + health_description=escape( + self.provisioning_failure_err_description)) + + return self._get_formatted_health_report_xml_string( + incarnation=escape(str(self.default_parameters['incarnation'])), + container_id=escape(self.default_parameters['container_id']), + instance_id=escape(self.default_parameters['instance_id']), + health_status=escape(self.provisioning_not_ready_status), + health_detail_subsection=health_detail_subsection) + + def test_send_ready_signal_sends_post_request(self): + with mock.patch.object( + azure_helper.GoalStateHealthReporter, + 'build_report') as m_build_report: + client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) + reporter = azure_helper.GoalStateHealthReporter( + azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), + client, self.test_azure_endpoint) + reporter.send_ready_signal() + + self.assertEqual(1, self.post.call_count) + self.assertEqual( + mock.call( + self.test_health_report_url, + data=m_build_report.return_value, + extra_headers=self.test_default_headers), + self.post.call_args) + + def test_send_failure_signal_sends_post_request(self): + with mock.patch.object( + azure_helper.GoalStateHealthReporter, + 'build_report') as m_build_report: + client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) + reporter = azure_helper.GoalStateHealthReporter( + azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), + client, self.test_azure_endpoint) + reporter.send_failure_signal( + description=self.provisioning_failure_err_description) + + self.assertEqual(1, self.post.call_count) + self.assertEqual( + mock.call( + self.test_health_report_url, + data=m_build_report.return_value, + extra_headers=self.test_default_headers), + self.post.call_args) + + def test_build_report_for_ready_signal_health_document(self): + health_document = self._get_report_ready_health_document() + reporter = azure_helper.GoalStateHealthReporter( + azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), + azure_helper.AzureEndpointHttpClient(mock.MagicMock()), + self.test_azure_endpoint) + generated_health_document = reporter.build_report( + incarnation=self.default_parameters['incarnation'], + container_id=self.default_parameters['container_id'], + instance_id=self.default_parameters['instance_id'], + status=self.provisioning_success_status) + + self.assertEqual(health_document, generated_health_document) + + generated_xroot = ElementTree.fromstring(generated_health_document) + self.assertEqual( + self._text_from_xpath_in_xroot( + generated_xroot, './GoalStateIncarnation'), + str(self.default_parameters['incarnation'])) + self.assertEqual( + self._text_from_xpath_in_xroot( + generated_xroot, './Container/ContainerId'), + str(self.default_parameters['container_id'])) + self.assertEqual( + self._text_from_xpath_in_xroot( + generated_xroot, + './Container/RoleInstanceList/Role/InstanceId'), + str(self.default_parameters['instance_id'])) + self.assertEqual( + self._text_from_xpath_in_xroot( + generated_xroot, + './Container/RoleInstanceList/Role/Health/State'), + escape(self.provisioning_success_status)) + self.assertIsNone( + self._text_from_xpath_in_xroot( + generated_xroot, + './Container/RoleInstanceList/Role/Health/Details')) + self.assertIsNone( + self._text_from_xpath_in_xroot( + generated_xroot, + './Container/RoleInstanceList/Role/Health/Details/SubStatus')) + self.assertIsNone( + 
self._text_from_xpath_in_xroot( + generated_xroot, + './Container/RoleInstanceList/Role/Health/Details/Description') + ) + + def test_build_report_for_failure_signal_health_document(self): + health_document = self._get_report_failure_health_document() + reporter = azure_helper.GoalStateHealthReporter( + azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), + azure_helper.AzureEndpointHttpClient(mock.MagicMock()), + self.test_azure_endpoint) + generated_health_document = reporter.build_report( + incarnation=self.default_parameters['incarnation'], + container_id=self.default_parameters['container_id'], + instance_id=self.default_parameters['instance_id'], + status=self.provisioning_not_ready_status, + substatus=self.provisioning_failure_substatus, + description=self.provisioning_failure_err_description) + + self.assertEqual(health_document, generated_health_document) + + generated_xroot = ElementTree.fromstring(generated_health_document) + self.assertEqual( + self._text_from_xpath_in_xroot( + generated_xroot, './GoalStateIncarnation'), + str(self.default_parameters['incarnation'])) + self.assertEqual( + self._text_from_xpath_in_xroot( + generated_xroot, './Container/ContainerId'), + self.default_parameters['container_id']) + self.assertEqual( + self._text_from_xpath_in_xroot( + generated_xroot, + './Container/RoleInstanceList/Role/InstanceId'), + self.default_parameters['instance_id']) + self.assertEqual( + self._text_from_xpath_in_xroot( + generated_xroot, + './Container/RoleInstanceList/Role/Health/State'), + escape(self.provisioning_not_ready_status)) + self.assertEqual( + self._text_from_xpath_in_xroot( + generated_xroot, + './Container/RoleInstanceList/Role/Health/Details/' + 'SubStatus'), + escape(self.provisioning_failure_substatus)) + self.assertEqual( + self._text_from_xpath_in_xroot( + generated_xroot, + './Container/RoleInstanceList/Role/Health/Details/' + 'Description'), + escape(self.provisioning_failure_err_description)) + + def test_send_ready_signal_calls_build_report(self): + with mock.patch.object( + azure_helper.GoalStateHealthReporter, 'build_report' + ) as m_build_report: + reporter = azure_helper.GoalStateHealthReporter( + azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), + azure_helper.AzureEndpointHttpClient(mock.MagicMock()), + self.test_azure_endpoint) + reporter.send_ready_signal() + + self.assertEqual(1, m_build_report.call_count) + self.assertEqual( + mock.call( + incarnation=self.default_parameters['incarnation'], + container_id=self.default_parameters['container_id'], + instance_id=self.default_parameters['instance_id'], + status=self.provisioning_success_status), + m_build_report.call_args) + + def test_send_failure_signal_calls_build_report(self): + with mock.patch.object( + azure_helper.GoalStateHealthReporter, 'build_report' + ) as m_build_report: + reporter = azure_helper.GoalStateHealthReporter( + azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), + azure_helper.AzureEndpointHttpClient(mock.MagicMock()), + self.test_azure_endpoint) + reporter.send_failure_signal( + description=self.provisioning_failure_err_description) + + self.assertEqual(1, m_build_report.call_count) + self.assertEqual( + mock.call( + incarnation=self.default_parameters['incarnation'], + container_id=self.default_parameters['container_id'], + instance_id=self.default_parameters['instance_id'], + status=self.provisioning_not_ready_status, + substatus=self.provisioning_failure_substatus, + description=self.provisioning_failure_err_description), + 
m_build_report.call_args)
+
+    def test_build_report_escapes_chars(self):
+        # Every value below deliberately mixes XML-special characters
+        # (quotes, angle brackets, ampersands) that build_report() must
+        # escape; container_id and health_status use representative
+        # sample strings.
+        incarnation = 'jd8\'9*&^<\'A>'
+        container_id = '&&<\"a-b-c(d)>&\'<e>'
+        instance_id = 'Opo>>>jas\'&d;[p&fp\"a<&aa\'sd!@&!)((*<&>'
+        health_status = '&<\'NotReady\">&'
+        health_substatus = '&as\"d<d<\'^@!5&6<7'
+        health_description = '&&&>!#$\"&&><>&\"sd<67<]>>'
+
+        health_detail_subsection = \
+            self._get_formatted_health_detail_subsection_xml_string(
+                health_substatus=escape(health_substatus),
+                health_description=escape(health_description))
+        health_document = self._get_formatted_health_report_xml_string(
+            incarnation=escape(incarnation),
+            container_id=escape(container_id),
+            instance_id=escape(instance_id),
+            health_status=escape(health_status),
+            health_detail_subsection=health_detail_subsection)
+
+        reporter = azure_helper.GoalStateHealthReporter(
+            azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+            azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
+            self.test_azure_endpoint)
+        generated_health_document = reporter.build_report(
+            incarnation=incarnation,
+            container_id=container_id,
+            instance_id=instance_id,
+            status=health_status,
+            substatus=health_substatus,
+            description=health_description)
+
+        self.assertEqual(health_document, generated_health_document)
+
+    def test_build_report_conforms_to_length_limits(self):
+        reporter = azure_helper.GoalStateHealthReporter(
+            azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
+            azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
+            self.test_azure_endpoint)
+        long_err_msg = 'a9&ea8>>>e as1< d\"q2*&(^%\'a=5<' * 100
+        generated_health_document = reporter.build_report(
+            incarnation=self.default_parameters['incarnation'],
+            container_id=self.default_parameters['container_id'],
+            instance_id=self.default_parameters['instance_id'],
+            status=self.provisioning_not_ready_status,
+            substatus=self.provisioning_failure_substatus,
+            description=long_err_msg)
+
+        generated_xroot = ElementTree.fromstring(generated_health_document)
+        generated_health_report_description = self._text_from_xpath_in_xroot(
+            generated_xroot,
+            './Container/RoleInstanceList/Role/Health/Details/Description')
+        self.assertEqual(
+            len(unescape(generated_health_report_description)),
+            HEALTH_REPORT_DESCRIPTION_TRIM_LEN)
+
+    def test_trim_description_then_escape_conforms_to_len_limits_worst_case(
+            self):
+        """When unescaped characters are XML-escaped, the length increases.
+        Char    Escape String
+        <       &lt;
+        >       &gt;
+        "       &quot;
+        '       &apos;
+        &       &amp;
+
+        We (step 1) trim the health report XML's description field,
+        and then (step 2) XML-escape the health report XML's description
+        field.
+
+        The health report XML's description field limit within cloud-init
+        is HEALTH_REPORT_DESCRIPTION_TRIM_LEN.
+
+        The Azure platform's limit on the health report XML's description
+        field is 4096 chars.
+
+        For worst-case chars, XML-escaping blows up the length: ' and "
+        each expand from one char to the six-char entities &apos; and
+        &quot;.
+
+        Ensure that (1) trimming and then (2) XML-escaping does not blow
+        past the Azure platform's limit for the health report XML's
+        description field (4096 chars).
+ """ + reporter = azure_helper.GoalStateHealthReporter( + azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), + azure_helper.AzureEndpointHttpClient(mock.MagicMock()), + self.test_azure_endpoint) + long_err_msg = '\'\"' * 10000 + generated_health_document = reporter.build_report( + incarnation=self.default_parameters['incarnation'], + container_id=self.default_parameters['container_id'], + instance_id=self.default_parameters['instance_id'], + status=self.provisioning_not_ready_status, + substatus=self.provisioning_failure_substatus, + description=long_err_msg) + + generated_xroot = ElementTree.fromstring(generated_health_document) + generated_health_report_description = self._text_from_xpath_in_xroot( + generated_xroot, + './Container/RoleInstanceList/Role/Health/Details/Description') + # The escaped description string should be less than + # the Azure platform limit for the escaped description string. + self.assertLessEqual(len(generated_health_report_description), 4096) + + +class TestWALinuxAgentShim(CiTestCase): + + def setUp(self): + super(TestWALinuxAgentShim, self).setUp() + patches = ExitStack() + self.addCleanup(patches.close) + + self.AzureEndpointHttpClient = patches.enter_context( + mock.patch.object(azure_helper, 'AzureEndpointHttpClient')) + self.find_endpoint = patches.enter_context( + mock.patch.object(wa_shim, 'find_endpoint')) + self.GoalState = patches.enter_context( + mock.patch.object(azure_helper, 'GoalState')) + self.OpenSSLManager = patches.enter_context( + mock.patch.object(azure_helper, 'OpenSSLManager', autospec=True)) + patches.enter_context( + mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock())) + + self.test_incarnation = 'TestIncarnation' + self.test_container_id = 'TestContainerId' + self.test_instance_id = 'TestInstanceId' + self.GoalState.return_value.incarnation = self.test_incarnation + self.GoalState.return_value.container_id = self.test_container_id + self.GoalState.return_value.instance_id = self.test_instance_id + + def test_eject_iso_is_called(self): + shim = wa_shim() + with mock.patch.object( + shim, 'eject_iso', autospec=True + ) as m_eject_iso: + shim.register_with_azure_and_fetch_data(iso_dev="/dev/sr0") + m_eject_iso.assert_called_once_with("/dev/sr0") + + def test_http_client_does_not_use_certificate_for_report_ready(self): + shim = wa_shim() + shim.register_with_azure_and_fetch_data() + self.assertEqual( + [mock.call(None)], + self.AzureEndpointHttpClient.call_args_list) + + def test_http_client_does_not_use_certificate_for_report_failure(self): + shim = wa_shim() + shim.register_with_azure_and_report_failure(description='TestDesc') + self.assertEqual( + [mock.call(None)], + self.AzureEndpointHttpClient.call_args_list) + + def test_correct_url_used_for_goalstate_during_report_ready(self): + self.find_endpoint.return_value = 'test_endpoint' + shim = wa_shim() + shim.register_with_azure_and_fetch_data() + m_get = self.AzureEndpointHttpClient.return_value.get + self.assertEqual( + [mock.call('http://test_endpoint/machine/?comp=goalstate')], + m_get.call_args_list) + self.assertEqual( + [mock.call( + m_get.return_value.contents, + self.AzureEndpointHttpClient.return_value, + False + )], + self.GoalState.call_args_list) + + def test_correct_url_used_for_goalstate_during_report_failure(self): + self.find_endpoint.return_value = 'test_endpoint' + shim = wa_shim() + shim.register_with_azure_and_report_failure(description='TestDesc') + m_get = self.AzureEndpointHttpClient.return_value.get + self.assertEqual( + 
[mock.call('http://test_endpoint/machine/?comp=goalstate')], + m_get.call_args_list) + self.assertEqual( + [mock.call( + m_get.return_value.contents, + self.AzureEndpointHttpClient.return_value, + False + )], + self.GoalState.call_args_list) + + def test_certificates_used_to_determine_public_keys(self): + # if register_with_azure_and_fetch_data() isn't passed some info about + # the user's public keys, there's no point in even trying to parse the + # certificates + shim = wa_shim() + mypk = [{'fingerprint': 'fp1', 'path': 'path1'}, + {'fingerprint': 'fp3', 'path': 'path3', 'value': ''}] + certs = {'fp1': 'expected-key', + 'fp2': 'should-not-be-found', + 'fp3': 'expected-no-value-key', + } + sslmgr = self.OpenSSLManager.return_value + sslmgr.parse_certificates.return_value = certs + data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) + self.assertEqual( + [mock.call(self.GoalState.return_value.certificates_xml)], + sslmgr.parse_certificates.call_args_list) + self.assertIn('expected-key', data['public-keys']) + self.assertIn('expected-no-value-key', data['public-keys']) + self.assertNotIn('should-not-be-found', data['public-keys']) + + def test_absent_certificates_produces_empty_public_keys(self): + mypk = [{'fingerprint': 'fp1', 'path': 'path1'}] + self.GoalState.return_value.certificates_xml = None + shim = wa_shim() + data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) + self.assertEqual([], data['public-keys']) + + def test_correct_url_used_for_report_ready(self): + self.find_endpoint.return_value = 'test_endpoint' + shim = wa_shim() + shim.register_with_azure_and_fetch_data() + expected_url = 'http://test_endpoint/machine?comp=health' + self.assertEqual( + [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)], + self.AzureEndpointHttpClient.return_value.post + .call_args_list) + + def test_correct_url_used_for_report_failure(self): + self.find_endpoint.return_value = 'test_endpoint' + shim = wa_shim() + shim.register_with_azure_and_report_failure(description='TestDesc') + expected_url = 'http://test_endpoint/machine?comp=health' + self.assertEqual( + [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)], + self.AzureEndpointHttpClient.return_value.post + .call_args_list) + + def test_goal_state_values_used_for_report_ready(self): + shim = wa_shim() + shim.register_with_azure_and_fetch_data() + posted_document = ( + self.AzureEndpointHttpClient.return_value.post + .call_args[1]['data'] + ) + self.assertIn(self.test_incarnation, posted_document) + self.assertIn(self.test_container_id, posted_document) + self.assertIn(self.test_instance_id, posted_document) + + def test_goal_state_values_used_for_report_failure(self): + shim = wa_shim() + shim.register_with_azure_and_report_failure(description='TestDesc') + posted_document = ( + self.AzureEndpointHttpClient.return_value.post + .call_args[1]['data'] + ) + self.assertIn(self.test_incarnation, posted_document) + self.assertIn(self.test_container_id, posted_document) + self.assertIn(self.test_instance_id, posted_document) + + def test_xml_elems_in_report_ready_post(self): + shim = wa_shim() + shim.register_with_azure_and_fetch_data() + health_document = HEALTH_REPORT_XML_TEMPLATE.format( + incarnation=escape(self.test_incarnation), + container_id=escape(self.test_container_id), + instance_id=escape(self.test_instance_id), + health_status=escape('Ready'), + health_detail_subsection='') + posted_document = ( + self.AzureEndpointHttpClient.return_value.post + .call_args[1]['data']) + 
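+        # The shim should post the rendered health-report XML verbatim.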
self.assertEqual(health_document, posted_document) + + def test_xml_elems_in_report_failure_post(self): + shim = wa_shim() + shim.register_with_azure_and_report_failure(description='TestDesc') + health_document = HEALTH_REPORT_XML_TEMPLATE.format( + incarnation=escape(self.test_incarnation), + container_id=escape(self.test_container_id), + instance_id=escape(self.test_instance_id), + health_status=escape('NotReady'), + health_detail_subsection=HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE + .format( + health_substatus=escape('ProvisioningFailed'), + health_description=escape('TestDesc'))) + posted_document = ( + self.AzureEndpointHttpClient.return_value.post + .call_args[1]['data']) + self.assertEqual(health_document, posted_document) + + @mock.patch.object(azure_helper, 'GoalStateHealthReporter', autospec=True) + def test_register_with_azure_and_fetch_data_calls_send_ready_signal( + self, m_goal_state_health_reporter): + shim = wa_shim() + shim.register_with_azure_and_fetch_data() + self.assertEqual( + 1, + m_goal_state_health_reporter.return_value.send_ready_signal + .call_count) + + @mock.patch.object(azure_helper, 'GoalStateHealthReporter', autospec=True) + def test_register_with_azure_and_report_failure_calls_send_failure_signal( + self, m_goal_state_health_reporter): + shim = wa_shim() + shim.register_with_azure_and_report_failure(description='TestDesc') + m_goal_state_health_reporter.return_value.send_failure_signal \ + .assert_called_once_with(description='TestDesc') + + def test_register_with_azure_and_report_failure_does_not_need_certificates( + self): + shim = wa_shim() + with mock.patch.object( + shim, '_fetch_goal_state_from_azure', autospec=True + ) as m_fetch_goal_state_from_azure: + shim.register_with_azure_and_report_failure(description='TestDesc') + m_fetch_goal_state_from_azure.assert_called_once_with( + need_certificate=False) + + def test_clean_up_can_be_called_at_any_time(self): + shim = wa_shim() + shim.clean_up() + + def test_openssl_manager_not_instantiated_by_shim_report_status(self): + shim = wa_shim() + shim.register_with_azure_and_fetch_data() + shim.register_with_azure_and_report_failure(description='TestDesc') + shim.clean_up() + self.OpenSSLManager.assert_not_called() + + def test_clean_up_after_report_ready(self): + shim = wa_shim() + shim.register_with_azure_and_fetch_data() + shim.clean_up() + self.OpenSSLManager.return_value.clean_up.assert_not_called() + + def test_clean_up_after_report_failure(self): + shim = wa_shim() + shim.register_with_azure_and_report_failure(description='TestDesc') + shim.clean_up() + self.OpenSSLManager.return_value.clean_up.assert_not_called() + + def test_fetch_goalstate_during_report_ready_raises_exc_on_get_exc(self): + self.AzureEndpointHttpClient.return_value.get \ + .side_effect = SentinelException + shim = wa_shim() + self.assertRaises(SentinelException, + shim.register_with_azure_and_fetch_data) + + def test_fetch_goalstate_during_report_failure_raises_exc_on_get_exc(self): + self.AzureEndpointHttpClient.return_value.get \ + .side_effect = SentinelException + shim = wa_shim() + self.assertRaises(SentinelException, + shim.register_with_azure_and_report_failure, + description='TestDesc') + + def test_fetch_goalstate_during_report_ready_raises_exc_on_parse_exc(self): + self.GoalState.side_effect = SentinelException + shim = wa_shim() + self.assertRaises(SentinelException, + shim.register_with_azure_and_fetch_data) + + def test_fetch_goalstate_during_report_failure_raises_exc_on_parse_exc( + self): + self.GoalState.side_effect = 
SentinelException + shim = wa_shim() + self.assertRaises(SentinelException, + shim.register_with_azure_and_report_failure, + description='TestDesc') + + def test_failure_to_send_report_ready_health_doc_bubbles_up(self): + self.AzureEndpointHttpClient.return_value.post \ + .side_effect = SentinelException + shim = wa_shim() + self.assertRaises(SentinelException, + shim.register_with_azure_and_fetch_data) + + def test_failure_to_send_report_failure_health_doc_bubbles_up(self): + self.AzureEndpointHttpClient.return_value.post \ + .side_effect = SentinelException + shim = wa_shim() + self.assertRaises(SentinelException, + shim.register_with_azure_and_report_failure, + description='TestDesc') + + +class TestGetMetadataGoalStateXMLAndReportReadyToFabric(CiTestCase): + + def setUp(self): + super(TestGetMetadataGoalStateXMLAndReportReadyToFabric, self).setUp() + patches = ExitStack() + self.addCleanup(patches.close) + + self.m_shim = patches.enter_context( + mock.patch.object(azure_helper, 'WALinuxAgentShim')) + + def test_data_from_shim_returned(self): + ret = azure_helper.get_metadata_from_fabric() + self.assertEqual( + self.m_shim.return_value.register_with_azure_and_fetch_data + .return_value, + ret) + + def test_success_calls_clean_up(self): + azure_helper.get_metadata_from_fabric() + self.assertEqual(1, self.m_shim.return_value.clean_up.call_count) + + def test_failure_in_registration_propagates_exc_and_calls_clean_up( + self): + self.m_shim.return_value.register_with_azure_and_fetch_data \ + .side_effect = SentinelException + self.assertRaises(SentinelException, + azure_helper.get_metadata_from_fabric) + self.assertEqual(1, self.m_shim.return_value.clean_up.call_count) + + def test_calls_shim_register_with_azure_and_fetch_data(self): + m_pubkey_info = mock.MagicMock() + azure_helper.get_metadata_from_fabric( + pubkey_info=m_pubkey_info, iso_dev="/dev/sr0") + self.assertEqual( + 1, + self.m_shim.return_value + .register_with_azure_and_fetch_data.call_count) + self.assertEqual( + mock.call(iso_dev="/dev/sr0", pubkey_info=m_pubkey_info), + self.m_shim.return_value + .register_with_azure_and_fetch_data.call_args) + + def test_instantiates_shim_with_kwargs(self): + m_fallback_lease_file = mock.MagicMock() + m_dhcp_options = mock.MagicMock() + azure_helper.get_metadata_from_fabric( + fallback_lease_file=m_fallback_lease_file, + dhcp_opts=m_dhcp_options) + self.assertEqual(1, self.m_shim.call_count) + self.assertEqual( + mock.call( + fallback_lease_file=m_fallback_lease_file, + dhcp_options=m_dhcp_options), + self.m_shim.call_args) + + +class TestGetMetadataGoalStateXMLAndReportFailureToFabric(CiTestCase): + + def setUp(self): + super( + TestGetMetadataGoalStateXMLAndReportFailureToFabric, self).setUp() + patches = ExitStack() + self.addCleanup(patches.close) + + self.m_shim = patches.enter_context( + mock.patch.object(azure_helper, 'WALinuxAgentShim')) + + def test_success_calls_clean_up(self): + azure_helper.report_failure_to_fabric() + self.assertEqual( + 1, + self.m_shim.return_value.clean_up.call_count) + + def test_failure_in_shim_report_failure_propagates_exc_and_calls_clean_up( + self): + self.m_shim.return_value.register_with_azure_and_report_failure \ + .side_effect = SentinelException + self.assertRaises(SentinelException, + azure_helper.report_failure_to_fabric) + self.assertEqual( + 1, + self.m_shim.return_value.clean_up.call_count) + + def test_report_failure_to_fabric_with_desc_calls_shim_report_failure( + self): + azure_helper.report_failure_to_fabric(description='TestDesc') + 
self.m_shim.return_value.register_with_azure_and_report_failure \ + .assert_called_once_with(description='TestDesc') + + def test_report_failure_to_fabric_with_no_desc_calls_shim_report_failure( + self): + azure_helper.report_failure_to_fabric() + # default err message description should be shown to the user + # if no description is passed in + self.m_shim.return_value.register_with_azure_and_report_failure \ + .assert_called_once_with( + description=azure_helper + .DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE) + + def test_report_failure_to_fabric_empty_desc_calls_shim_report_failure( + self): + azure_helper.report_failure_to_fabric(description='') + # default err message description should be shown to the user + # if an empty description is passed in + self.m_shim.return_value.register_with_azure_and_report_failure \ + .assert_called_once_with( + description=azure_helper + .DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE) + + def test_instantiates_shim_with_kwargs(self): + m_fallback_lease_file = mock.MagicMock() + m_dhcp_options = mock.MagicMock() + azure_helper.report_failure_to_fabric( + fallback_lease_file=m_fallback_lease_file, + dhcp_opts=m_dhcp_options) + self.m_shim.assert_called_once_with( + fallback_lease_file=m_fallback_lease_file, + dhcp_options=m_dhcp_options) + + +class TestExtractIpAddressFromNetworkd(CiTestCase): + + azure_lease = dedent("""\ + # This is private data. Do not parse. + ADDRESS=10.132.0.5 + NETMASK=255.255.255.255 + ROUTER=10.132.0.1 + SERVER_ADDRESS=169.254.169.254 + NEXT_SERVER=10.132.0.1 + MTU=1460 + T1=43200 + T2=75600 + LIFETIME=86400 + DNS=169.254.169.254 + NTP=169.254.169.254 + DOMAINNAME=c.ubuntu-foundations.internal + DOMAIN_SEARCH_LIST=c.ubuntu-foundations.internal google.internal + HOSTNAME=tribaal-test-171002-1349.c.ubuntu-foundations.internal + ROUTES=10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1 + CLIENTID=ff405663a200020000ab11332859494d7a8b4c + OPTION_245=624c3620 + """) + + def setUp(self): + super(TestExtractIpAddressFromNetworkd, self).setUp() + self.lease_d = self.tmp_dir() + + def test_no_valid_leases_is_none(self): + """No valid leases should return None.""" + self.assertIsNone( + wa_shim._networkd_get_value_from_leases(self.lease_d)) + + def test_option_245_is_found_in_single(self): + """A single valid lease with 245 option should return it.""" + populate_dir(self.lease_d, {'9': self.azure_lease}) + self.assertEqual( + '624c3620', wa_shim._networkd_get_value_from_leases(self.lease_d)) + + def test_option_245_not_found_returns_None(self): + """A valid lease, but no option 245 should return None.""" + populate_dir( + self.lease_d, + {'9': self.azure_lease.replace("OPTION_245", "OPTION_999")}) + self.assertIsNone( + wa_shim._networkd_get_value_from_leases(self.lease_d)) + + def test_multiple_returns_first(self): + """Somewhat arbitrarily return the first address when multiple. + + Most important at the moment is that this is consistent behavior + rather than changing randomly as in order of a dictionary.""" + myval = "624c3601" + populate_dir( + self.lease_d, + {'9': self.azure_lease, + '2': self.azure_lease.replace("624c3620", myval)}) + self.assertEqual( + myval, wa_shim._networkd_get_value_from_leases(self.lease_d)) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_cloudsigma.py b/tests/unittests/sources/test_cloudsigma.py new file mode 100644 index 00000000..2eae16ee --- /dev/null +++ b/tests/unittests/sources/test_cloudsigma.py @@ -0,0 +1,137 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
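
(An aside on the TestExtractIpAddressFromNetworkd cases that close the Azure helper tests above: systemd-networkd lease files are plain KEY=value lines, and the tests pin down two behaviors: lease files are visited in sorted name order with the first OPTION_245 value winning, and None is returned when no lease carries the option. A minimal sketch of such a lookup, with hypothetical names; the behavior cloud-init actually ships is wa_shim._networkd_get_value_from_leases:)

    import os


    def networkd_get_option_245(leases_dir):
        # Visit lease files in sorted name order so the result stays
        # stable when multiple leases are present.
        for fname in sorted(os.listdir(leases_dir)):
            with open(os.path.join(leases_dir, fname)) as fh:
                for line in fh:
                    key, sep, value = line.strip().partition('=')
                    if sep and key == 'OPTION_245':
                        return value
        return None
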
+ +import copy + +from cloudinit.cs_utils import Cepko +from cloudinit import distros +from cloudinit import helpers +from cloudinit import sources +from cloudinit.sources import DataSourceCloudSigma + +from tests.unittests import helpers as test_helpers + +SERVER_CONTEXT = { + "cpu": 1000, + "cpus_instead_of_cores": False, + "global_context": {"some_global_key": "some_global_val"}, + "mem": 1073741824, + "meta": { + "ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe", + "cloudinit-user-data": "#cloud-config\n\n...", + }, + "name": "test_server", + "requirements": [], + "smp": 1, + "tags": ["much server", "very performance"], + "uuid": "65b2fb23-8c03-4187-a3ba-8b7c919e8890", + "vnc_password": "9e84d6cb49e46379", + "vendor_data": { + "location": "zrh", + "cloudinit": "#cloud-config\n\n...", + } +} + +DS_PATH = 'cloudinit.sources.DataSourceCloudSigma.DataSourceCloudSigma' + + +class CepkoMock(Cepko): + def __init__(self, mocked_context): + self.result = mocked_context + + def all(self): + return self + + +class DataSourceCloudSigmaTest(test_helpers.CiTestCase): + def setUp(self): + super(DataSourceCloudSigmaTest, self).setUp() + self.paths = helpers.Paths({'run_dir': self.tmp_dir()}) + self.add_patch(DS_PATH + '.is_running_in_cloudsigma', + "m_is_container", return_value=True) + + distro_cls = distros.fetch("ubuntu") + distro = distro_cls("ubuntu", cfg={}, paths=self.paths) + self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( + sys_cfg={}, distro=distro, paths=self.paths) + self.datasource.cepko = CepkoMock(SERVER_CONTEXT) + + def test_get_hostname(self): + self.datasource.get_data() + self.assertEqual("test_server", self.datasource.get_hostname()) + self.datasource.metadata['name'] = '' + self.assertEqual("65b2fb23", self.datasource.get_hostname()) + utf8_hostname = b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82'.decode('utf-8') + self.datasource.metadata['name'] = utf8_hostname + self.assertEqual("65b2fb23", self.datasource.get_hostname()) + + def test_get_public_ssh_keys(self): + self.datasource.get_data() + self.assertEqual([SERVER_CONTEXT['meta']['ssh_public_key']], + self.datasource.get_public_ssh_keys()) + + def test_get_instance_id(self): + self.datasource.get_data() + self.assertEqual(SERVER_CONTEXT['uuid'], + self.datasource.get_instance_id()) + + def test_platform(self): + """All platform-related attributes are set.""" + self.datasource.get_data() + self.assertEqual(self.datasource.cloud_name, 'cloudsigma') + self.assertEqual(self.datasource.platform_type, 'cloudsigma') + self.assertEqual(self.datasource.subplatform, 'cepko (/dev/ttyS1)') + + def test_metadata(self): + self.datasource.get_data() + self.assertEqual(self.datasource.metadata, SERVER_CONTEXT) + + def test_user_data(self): + self.datasource.get_data() + self.assertEqual(self.datasource.userdata_raw, + SERVER_CONTEXT['meta']['cloudinit-user-data']) + + def test_encoded_user_data(self): + encoded_context = copy.deepcopy(SERVER_CONTEXT) + encoded_context['meta']['base64_fields'] = 'cloudinit-user-data' + encoded_context['meta']['cloudinit-user-data'] = 'aGkgd29ybGQK' + self.datasource.cepko = CepkoMock(encoded_context) + self.datasource.get_data() + + self.assertEqual(self.datasource.userdata_raw, b'hi world\n') + + def test_vendor_data(self): + self.datasource.get_data() + self.assertEqual(self.datasource.vendordata_raw, + SERVER_CONTEXT['vendor_data']['cloudinit']) + + def test_lack_of_vendor_data(self): + stripped_context = copy.deepcopy(SERVER_CONTEXT) + del stripped_context["vendor_data"] + 
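+        # With "vendor_data" absent from the server context, the
+        # datasource should leave vendordata_raw unset (None).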
self.datasource.cepko = CepkoMock(stripped_context) + self.datasource.get_data() + + self.assertIsNone(self.datasource.vendordata_raw) + + def test_lack_of_cloudinit_key_in_vendor_data(self): + stripped_context = copy.deepcopy(SERVER_CONTEXT) + del stripped_context["vendor_data"]["cloudinit"] + self.datasource.cepko = CepkoMock(stripped_context) + self.datasource.get_data() + + self.assertIsNone(self.datasource.vendordata_raw) + + +class DsLoads(test_helpers.TestCase): + def test_get_datasource_list_returns_in_local(self): + deps = (sources.DEP_FILESYSTEM,) + ds_list = DataSourceCloudSigma.get_datasource_list(deps) + self.assertEqual(ds_list, + [DataSourceCloudSigma.DataSourceCloudSigma]) + + def test_list_sources_finds_ds(self): + found = sources.list_sources( + ['CloudSigma'], (sources.DEP_FILESYSTEM,), ['cloudinit.sources']) + self.assertEqual([DataSourceCloudSigma.DataSourceCloudSigma], + found) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_cloudstack.py b/tests/unittests/sources/test_cloudstack.py new file mode 100644 index 00000000..2b1a1b70 --- /dev/null +++ b/tests/unittests/sources/test_cloudstack.py @@ -0,0 +1,186 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import helpers +from cloudinit import util +from cloudinit.sources.DataSourceCloudStack import ( + DataSourceCloudStack, get_latest_lease) + +from tests.unittests.helpers import CiTestCase, ExitStack, mock + +import os +import time + +MOD_PATH = 'cloudinit.sources.DataSourceCloudStack' +DS_PATH = MOD_PATH + '.DataSourceCloudStack' + + +class TestCloudStackPasswordFetching(CiTestCase): + + def setUp(self): + super(TestCloudStackPasswordFetching, self).setUp() + self.patches = ExitStack() + self.addCleanup(self.patches.close) + mod_name = MOD_PATH + self.patches.enter_context(mock.patch('{0}.ec2'.format(mod_name))) + self.patches.enter_context(mock.patch('{0}.uhelp'.format(mod_name))) + default_gw = "192.201.20.0" + get_latest_lease = mock.MagicMock(return_value=None) + self.patches.enter_context(mock.patch( + mod_name + '.get_latest_lease', get_latest_lease)) + + get_default_gw = mock.MagicMock(return_value=default_gw) + self.patches.enter_context(mock.patch( + mod_name + '.get_default_gateway', get_default_gw)) + + get_networkd_server_address = mock.MagicMock(return_value=None) + self.patches.enter_context(mock.patch( + mod_name + '.dhcp.networkd_get_option_from_leases', + get_networkd_server_address)) + self.tmp = self.tmp_dir() + + def _set_password_server_response(self, response_string): + subp = mock.MagicMock(return_value=(response_string, '')) + self.patches.enter_context( + mock.patch('cloudinit.sources.DataSourceCloudStack.subp.subp', + subp)) + return subp + + def test_empty_password_doesnt_create_config(self): + self._set_password_server_response('') + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) + ds.get_data() + self.assertEqual({}, ds.get_config_obj()) + + def test_saved_password_doesnt_create_config(self): + self._set_password_server_response('saved_password') + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) + ds.get_data() + self.assertEqual({}, ds.get_config_obj()) + + @mock.patch(DS_PATH + '.wait_for_metadata_service') + def test_password_sets_password(self, m_wait): + m_wait.return_value = True + password = 'SekritSquirrel' + self._set_password_server_response(password) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) + ds.get_data() + 
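+        # The password fetched from the virtual router should surface in
+        # the datasource's config object under the 'password' key.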
self.assertEqual(password, ds.get_config_obj()['password']) + + @mock.patch(DS_PATH + '.wait_for_metadata_service') + def test_bad_request_doesnt_stop_ds_from_working(self, m_wait): + m_wait.return_value = True + self._set_password_server_response('bad_request') + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) + self.assertTrue(ds.get_data()) + + def assertRequestTypesSent(self, subp, expected_request_types): + request_types = [] + for call in subp.call_args_list: + args = call[0][0] + for arg in args: + if arg.startswith('DomU_Request'): + request_types.append(arg.split()[1]) + self.assertEqual(expected_request_types, request_types) + + @mock.patch(DS_PATH + '.wait_for_metadata_service') + def test_valid_response_means_password_marked_as_saved(self, m_wait): + m_wait.return_value = True + password = 'SekritSquirrel' + subp = self._set_password_server_response(password) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) + ds.get_data() + self.assertRequestTypesSent(subp, + ['send_my_password', 'saved_password']) + + def _check_password_not_saved_for(self, response_string): + subp = self._set_password_server_response(response_string) + ds = DataSourceCloudStack( + {}, None, helpers.Paths({'run_dir': self.tmp})) + with mock.patch(DS_PATH + '.wait_for_metadata_service') as m_wait: + m_wait.return_value = True + ds.get_data() + self.assertRequestTypesSent(subp, ['send_my_password']) + + def test_password_not_saved_if_empty(self): + self._check_password_not_saved_for('') + + def test_password_not_saved_if_already_saved(self): + self._check_password_not_saved_for('saved_password') + + def test_password_not_saved_if_bad_request(self): + self._check_password_not_saved_for('bad_request') + + +class TestGetLatestLease(CiTestCase): + + def _populate_dir_list(self, bdir, files): + """populate_dir_list([(name, data), (name, data)]) + + writes files to bdir, and updates timestamps to ensure + that their mtime increases with each file.""" + + start = int(time.time()) + for num, fname in enumerate(reversed(files)): + fpath = os.path.sep.join((bdir, fname)) + util.write_file(fpath, fname.encode()) + os.utime(fpath, (start - num, start - num)) + + def _pop_and_test(self, files, expected): + lease_d = self.tmp_dir() + self._populate_dir_list(lease_d, files) + self.assertEqual(self.tmp_path(expected, lease_d), + get_latest_lease(lease_d)) + + def test_skips_dhcpv6_files(self): + """files started with dhclient6 should be skipped.""" + expected = "dhclient.lease" + self._pop_and_test([expected, "dhclient6.lease"], expected) + + def test_selects_dhclient_dot_files(self): + """files named dhclient.lease or dhclient.leases should be used. + + Ubuntu names files dhclient.eth0.leases dhclient6.leases and + sometimes dhclient.leases.""" + self._pop_and_test(["dhclient.lease"], "dhclient.lease") + self._pop_and_test(["dhclient.leases"], "dhclient.leases") + + def test_selects_dhclient_dash_files(self): + """files named dhclient-lease or dhclient-leases should be used. + + Redhat/Centos names files with dhclient--eth0.lease (centos 7) or + dhclient-eth0.leases (centos 6). 
+ """ + self._pop_and_test(["dhclient-eth0.lease"], "dhclient-eth0.lease") + self._pop_and_test(["dhclient--eth0.lease"], "dhclient--eth0.lease") + + def test_ignores_by_extension(self): + """only .lease or .leases file should be considered.""" + + self._pop_and_test(["dhclient.lease", "dhclient.lease.bk", + "dhclient.lease-old", "dhclient.leaselease"], + "dhclient.lease") + + def test_selects_newest_matching(self): + """If multiple files match, the newest written should be used.""" + lease_d = self.tmp_dir() + valid_1 = "dhclient.leases" + valid_2 = "dhclient.lease" + valid_1_path = self.tmp_path(valid_1, lease_d) + valid_2_path = self.tmp_path(valid_2, lease_d) + + self._populate_dir_list(lease_d, [valid_1, valid_2]) + self.assertEqual(valid_2_path, get_latest_lease(lease_d)) + + # now update mtime on valid_2 to be older than valid_1 and re-check. + mtime = int(os.path.getmtime(valid_1_path)) - 1 + os.utime(valid_2_path, (mtime, mtime)) + + self.assertEqual(valid_1_path, get_latest_lease(lease_d)) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_common.py b/tests/unittests/sources/test_common.py new file mode 100644 index 00000000..bb8fa530 --- /dev/null +++ b/tests/unittests/sources/test_common.py @@ -0,0 +1,121 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import settings +from cloudinit import sources +from cloudinit import type_utils +from cloudinit.sources import ( + DataSource, + DataSourceAliYun as AliYun, + DataSourceAltCloud as AltCloud, + DataSourceAzure as Azure, + DataSourceBigstep as Bigstep, + DataSourceCloudSigma as CloudSigma, + DataSourceCloudStack as CloudStack, + DataSourceConfigDrive as ConfigDrive, + DataSourceDigitalOcean as DigitalOcean, + DataSourceEc2 as Ec2, + DataSourceExoscale as Exoscale, + DataSourceGCE as GCE, + DataSourceHetzner as Hetzner, + DataSourceIBMCloud as IBMCloud, + DataSourceLXD as LXD, + DataSourceMAAS as MAAS, + DataSourceNoCloud as NoCloud, + DataSourceOpenNebula as OpenNebula, + DataSourceOpenStack as OpenStack, + DataSourceOracle as Oracle, + DataSourceOVF as OVF, + DataSourceRbxCloud as RbxCloud, + DataSourceScaleway as Scaleway, + DataSourceSmartOS as SmartOS, + DataSourceUpCloud as UpCloud, + DataSourceVultr as Vultr, + DataSourceVMware as VMware, +) +from cloudinit.sources import DataSourceNone as DSNone + +from tests.unittests import helpers as test_helpers + +DEFAULT_LOCAL = [ + Azure.DataSourceAzure, + CloudSigma.DataSourceCloudSigma, + ConfigDrive.DataSourceConfigDrive, + DigitalOcean.DataSourceDigitalOcean, + GCE.DataSourceGCELocal, + Hetzner.DataSourceHetzner, + IBMCloud.DataSourceIBMCloud, + LXD.DataSourceLXD, + NoCloud.DataSourceNoCloud, + OpenNebula.DataSourceOpenNebula, + Oracle.DataSourceOracle, + OVF.DataSourceOVF, + SmartOS.DataSourceSmartOS, + Vultr.DataSourceVultr, + Ec2.DataSourceEc2Local, + OpenStack.DataSourceOpenStackLocal, + RbxCloud.DataSourceRbxCloud, + Scaleway.DataSourceScaleway, + UpCloud.DataSourceUpCloudLocal, + VMware.DataSourceVMware, +] + +DEFAULT_NETWORK = [ + AliYun.DataSourceAliYun, + AltCloud.DataSourceAltCloud, + Bigstep.DataSourceBigstep, + CloudStack.DataSourceCloudStack, + DSNone.DataSourceNone, + Ec2.DataSourceEc2, + Exoscale.DataSourceExoscale, + GCE.DataSourceGCE, + MAAS.DataSourceMAAS, + NoCloud.DataSourceNoCloudNet, + OpenStack.DataSourceOpenStack, + OVF.DataSourceOVFNet, + UpCloud.DataSourceUpCloud, + VMware.DataSourceVMware, +] + + +class ExpectedDataSources(test_helpers.TestCase): + builtin_list = 
settings.CFG_BUILTIN['datasource_list'] + deps_local = [sources.DEP_FILESYSTEM] + deps_network = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK] + pkg_list = [type_utils.obj_name(sources)] + + def test_expected_default_local_sources_found(self): + found = sources.list_sources( + self.builtin_list, self.deps_local, self.pkg_list) + self.assertEqual(set(DEFAULT_LOCAL), set(found)) + + def test_expected_default_network_sources_found(self): + found = sources.list_sources( + self.builtin_list, self.deps_network, self.pkg_list) + self.assertEqual(set(DEFAULT_NETWORK), set(found)) + + def test_expected_nondefault_network_sources_found(self): + found = sources.list_sources( + ['AliYun'], self.deps_network, self.pkg_list) + self.assertEqual(set([AliYun.DataSourceAliYun]), set(found)) + + +class TestDataSourceInvariants(test_helpers.TestCase): + def test_data_sources_have_valid_network_config_sources(self): + for ds in DEFAULT_LOCAL + DEFAULT_NETWORK: + for cfg_src in ds.network_config_sources: + fail_msg = ('{} has an invalid network_config_sources entry:' + ' {}'.format(str(ds), cfg_src)) + self.assertTrue(hasattr(sources.NetworkConfigSource, cfg_src), + fail_msg) + + def test_expected_dsname_defined(self): + for ds in DEFAULT_LOCAL + DEFAULT_NETWORK: + fail_msg = ( + '{} has an invalid / missing dsname property: {}'.format( + str(ds), str(ds.dsname) + ) + ) + self.assertNotEqual(ds.dsname, DataSource.dsname, fail_msg) + self.assertIsNotNone(ds.dsname) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_configdrive.py b/tests/unittests/sources/test_configdrive.py new file mode 100644 index 00000000..775d0622 --- /dev/null +++ b/tests/unittests/sources/test_configdrive.py @@ -0,0 +1,844 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
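
(A note on what the invariant tests above rest on: each datasource module registers its class together with the dependency tuple it supports, and discovery keeps only the classes whose registered tuple matches the dependencies available at the current boot stage. A minimal sketch of that exact-match idea, using illustrative stand-in names; the real logic lives in cloudinit.sources.list_sources and each module's get_datasource_list:)

    DEP_FILESYSTEM = 'FILESYSTEM'
    DEP_NETWORK = 'NETWORK'

    # Stand-in for the per-module (class, required-deps) registrations.
    DATASOURCES = [
        ('DataSourceNoCloud', (DEP_FILESYSTEM,)),
        ('DataSourceNoCloudNet', (DEP_FILESYSTEM, DEP_NETWORK)),
    ]


    def list_from_depends(depends, datasources):
        # Keep only entries whose registered deps match this stage
        # exactly, so a network datasource never runs in the local stage.
        return [cls for (cls, deps) in datasources
                if set(deps) == set(depends)]


    assert list_from_depends((DEP_FILESYSTEM,), DATASOURCES) == [
        'DataSourceNoCloud']
    assert list_from_depends((DEP_FILESYSTEM, DEP_NETWORK), DATASOURCES) == [
        'DataSourceNoCloudNet']
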
+ +from copy import copy, deepcopy +import json +import os + +from cloudinit import helpers +from cloudinit.net import eni +from cloudinit.net import network_state +from cloudinit import settings +from cloudinit.sources import DataSourceConfigDrive as ds +from cloudinit.sources.helpers import openstack +from cloudinit import util + +from tests.unittests.helpers import CiTestCase, ExitStack, mock, populate_dir + + +PUBKEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' +EC2_META = { + 'ami-id': 'ami-00000001', + 'ami-launch-index': 0, + 'ami-manifest-path': 'FIXME', + 'block-device-mapping': { + 'ami': 'sda1', + 'ephemeral0': 'sda2', + 'root': '/dev/sda1', + 'swap': 'sda3'}, + 'hostname': 'sm-foo-test.novalocal', + 'instance-action': 'none', + 'instance-id': 'i-00000001', + 'instance-type': 'm1.tiny', + 'local-hostname': 'sm-foo-test.novalocal', + 'local-ipv4': None, + 'placement': {'availability-zone': 'nova'}, + 'public-hostname': 'sm-foo-test.novalocal', + 'public-ipv4': '', + 'public-keys': {'0': {'openssh-key': PUBKEY}}, + 'reservation-id': 'r-iru5qm4m', + 'security-groups': ['default'] +} +USER_DATA = b'#!/bin/sh\necho This is user data\n' +OSTACK_META = { + 'availability_zone': 'nova', + 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'}, + {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}], + 'hostname': 'sm-foo-test.novalocal', + 'meta': {'dsmode': 'local', 'my-meta': 'my-value'}, + 'name': 'sm-foo-test', + 'public_keys': {'mykey': PUBKEY}, + 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'} + +CONTENT_0 = b'This is contents of /etc/foo.cfg\n' +CONTENT_1 = b'# this is /etc/bar/bar.cfg\n' +NETWORK_DATA = { + 'services': [ + {'type': 'dns', 'address': '199.204.44.24'}, + {'type': 'dns', 'address': '199.204.47.54'} + ], + 'links': [ + {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd', + 'ethernet_mac_address': 'fa:16:3e:69:b0:58', + 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'}, + {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33', + 'ethernet_mac_address': 'fa:16:3e:d4:57:ad', + 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'}, + {'vif_id': '1a5382f8-04c5-4d75-ab98-d666c1ef52cc', + 'ethernet_mac_address': 'fa:16:3e:05:30:fe', + 'type': 'ovs', 'mtu': None, 'id': 'tap1a5382f8-04', 'name': 'nic0'} + ], + 'networks': [ + {'link': 'tap2ecc7709-b3', 'type': 'ipv4_dhcp', + 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235', + 'id': 'network0'}, + {'link': 'tap2f88d109-5b', 'type': 'ipv4_dhcp', + 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54', + 'id': 'network1'}, + {'link': 'tap1a5382f8-04', 'type': 'ipv4_dhcp', + 'network_id': 'dab2ba57-cae2-4311-a5ed-010b263891f5', + 'id': 'network2'} + ] +} + +NETWORK_DATA_2 = { + "services": [ + {"type": "dns", "address": "1.1.1.191"}, + {"type": "dns", "address": "1.1.1.4"}], + "networks": [ + {"network_id": "d94bbe94-7abc-48d4-9c82-4628ea26164a", "type": "ipv4", + "netmask": "255.255.255.248", "link": "eth0", + "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", + "gateway": "2.2.2.9"}], + "ip_address": "2.2.2.10", "id": "network0-ipv4"}, + {"network_id": "ca447c83-6409-499b-aaef-6ad1ae995348", "type": "ipv4", + "netmask": "255.255.255.224", "link": "eth1", + "routes": [], "ip_address": "3.3.3.24", "id": "network1-ipv4"}], + "links": [ + {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": 1500, + "type": "vif", "id": "eth0", "vif_id": "vif-foo1"}, + {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": 1500, + "type": "vif", "id": "eth1", "vif_id": "vif-foo2"}] +} + +# This network 
data ha 'tap' or null type for a link. +NETWORK_DATA_3 = { + "services": [{"type": "dns", "address": "172.16.36.11"}, + {"type": "dns", "address": "172.16.36.12"}], + "networks": [ + {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e", + "type": "ipv4", "netmask": "255.255.255.128", + "link": "tap77a0dc5b-72", "ip_address": "172.17.48.18", + "id": "network0", + "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", + "gateway": "172.17.48.1"}]}, + {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e", + "type": "ipv6", "netmask": "ffff:ffff:ffff:ffff::", + "link": "tap77a0dc5b-72", + "ip_address": "fdb8:52d0:9d14:0:f816:3eff:fe9f:70d", + "id": "network1", + "routes": [{"netmask": "::", "network": "::", + "gateway": "fdb8:52d0:9d14::1"}]}, + {"network_id": "1f53cb0e-72d3-47c7-94b9-ff4397c5fe54", + "type": "ipv4", "netmask": "255.255.255.128", + "link": "tap7d6b7bec-93", "ip_address": "172.16.48.13", + "id": "network2", + "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", + "gateway": "172.16.48.1"}, + {"netmask": "255.255.0.0", "network": "172.16.0.0", + "gateway": "172.16.48.1"}]}], + "links": [ + {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": None, + "type": "tap", "id": "tap77a0dc5b-72", + "vif_id": "77a0dc5b-720e-41b7-bfa7-1b2ff62e0d48"}, + {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": None, + "type": None, "id": "tap7d6b7bec-93", + "vif_id": "7d6b7bec-93e6-4c03-869a-ddc5014892d5"} + ] +} + +BOND_MAC = "fa:16:3e:b3:72:36" +NETWORK_DATA_BOND = { + "services": [ + {"type": "dns", "address": "1.1.1.191"}, + {"type": "dns", "address": "1.1.1.4"}, + ], + "networks": [ + {"id": "network2-ipv4", "ip_address": "2.2.2.13", + "link": "vlan2", "netmask": "255.255.255.248", + "network_id": "4daf5ce8-38cf-4240-9f1a-04e86d7c6117", + "type": "ipv4", + "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", + "gateway": "2.2.2.9"}]}, + {"id": "network3-ipv4", "ip_address": "10.0.1.5", + "link": "vlan3", "netmask": "255.255.255.248", + "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d", + "type": "ipv4", + "routes": [{"netmask": "255.255.255.255", + "network": "192.168.1.0", "gateway": "10.0.1.1"}]} + ], + "links": [ + {"ethernet_mac_address": "0c:c4:7a:34:6e:3c", + "id": "eth0", "mtu": 1500, "type": "phy"}, + {"ethernet_mac_address": "0c:c4:7a:34:6e:3d", + "id": "eth1", "mtu": 1500, "type": "phy"}, + {"bond_links": ["eth0", "eth1"], + "bond_miimon": 100, "bond_mode": "4", + "bond_xmit_hash_policy": "layer3+4", + "ethernet_mac_address": BOND_MAC, + "id": "bond0", "type": "bond"}, + {"ethernet_mac_address": "fa:16:3e:b3:72:30", + "id": "vlan2", "type": "vlan", "vlan_id": 602, + "vlan_link": "bond0", "vlan_mac_address": "fa:16:3e:b3:72:30"}, + {"ethernet_mac_address": "fa:16:3e:66:ab:a6", + "id": "vlan3", "type": "vlan", "vlan_id": 612, "vlan_link": "bond0", + "vlan_mac_address": "fa:16:3e:66:ab:a6"} + ] +} + +NETWORK_DATA_VLAN = { + "services": [{"type": "dns", "address": "1.1.1.191"}], + "networks": [ + {"id": "network1-ipv4", "ip_address": "10.0.1.5", + "link": "vlan1", "netmask": "255.255.255.248", + "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d", + "type": "ipv4", + "routes": [{"netmask": "255.255.255.255", + "network": "192.168.1.0", "gateway": "10.0.1.1"}]} + ], + "links": [ + {"ethernet_mac_address": "fa:16:3e:69:b0:58", + "id": "eth0", "mtu": 1500, "type": "phy"}, + {"ethernet_mac_address": "fa:16:3e:b3:72:30", + "id": "vlan1", "type": "vlan", "vlan_id": 602, + "vlan_link": "eth0", "vlan_mac_address": "fa:16:3e:b3:72:30"}, + ] +} + +KNOWN_MACS = { + 
'fa:16:3e:69:b0:58': 'enp0s1', + 'fa:16:3e:d4:57:ad': 'enp0s2', + 'fa:16:3e:dd:50:9a': 'foo1', + 'fa:16:3e:a8:14:69': 'foo2', + 'fa:16:3e:ed:9a:59': 'foo3', + '0c:c4:7a:34:6e:3d': 'oeth1', + '0c:c4:7a:34:6e:3c': 'oeth0', +} + +CFG_DRIVE_FILES_V2 = { + 'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META), + 'ec2/2009-04-04/user-data': USER_DATA, + 'ec2/latest/meta-data.json': json.dumps(EC2_META), + 'ec2/latest/user-data': USER_DATA, + 'openstack/2012-08-10/meta_data.json': json.dumps(OSTACK_META), + 'openstack/2012-08-10/user_data': USER_DATA, + 'openstack/content/0000': CONTENT_0, + 'openstack/content/0001': CONTENT_1, + 'openstack/latest/meta_data.json': json.dumps(OSTACK_META), + 'openstack/latest/user_data': USER_DATA, + 'openstack/latest/network_data.json': json.dumps(NETWORK_DATA), + 'openstack/2015-10-15/meta_data.json': json.dumps(OSTACK_META), + 'openstack/2015-10-15/user_data': USER_DATA, + 'openstack/2015-10-15/network_data.json': json.dumps(NETWORK_DATA)} + +M_PATH = "cloudinit.sources.DataSourceConfigDrive." + + +class TestConfigDriveDataSource(CiTestCase): + + def setUp(self): + super(TestConfigDriveDataSource, self).setUp() + self.add_patch( + M_PATH + "util.find_devs_with", + "m_find_devs_with", return_value=[]) + self.tmp = self.tmp_dir() + + def test_ec2_metadata(self): + populate_dir(self.tmp, CFG_DRIVE_FILES_V2) + found = ds.read_config_drive(self.tmp) + self.assertTrue('ec2-metadata' in found) + ec2_md = found['ec2-metadata'] + self.assertEqual(EC2_META, ec2_md) + + def test_dev_os_remap(self): + populate_dir(self.tmp, CFG_DRIVE_FILES_V2) + cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, + None, + helpers.Paths({})) + found = ds.read_config_drive(self.tmp) + cfg_ds.metadata = found['metadata'] + name_tests = { + 'ami': '/dev/vda1', + 'root': '/dev/vda1', + 'ephemeral0': '/dev/vda2', + 'swap': '/dev/vda3', + } + for name, dev_name in name_tests.items(): + with ExitStack() as mocks: + provided_name = dev_name[len('/dev/'):] + provided_name = "s" + provided_name[1:] + find_mock = mocks.enter_context( + mock.patch.object(util, 'find_devs_with', + return_value=[provided_name])) + # We want os.path.exists() to return False on its first call, + # and True on its second call. We use a handy generator as + # the mock side effect for this. The mocked function returns + # what the side effect returns. 
+
+                def exists_side_effect():
+                    yield False
+                    yield True
+                exists_mock = mocks.enter_context(
+                    mock.patch.object(os.path, 'exists',
+                                      side_effect=exists_side_effect()))
+                self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
+
+            find_mock.assert_called_once_with(mock.ANY)
+            self.assertEqual(exists_mock.call_count, 2)
+
+    def test_dev_os_map(self):
+        populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+        cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
+                                          None,
+                                          helpers.Paths({}))
+        found = ds.read_config_drive(self.tmp)
+        os_md = found['metadata']
+        cfg_ds.metadata = os_md
+        name_tests = {
+            'ami': '/dev/vda1',
+            'root': '/dev/vda1',
+            'ephemeral0': '/dev/vda2',
+            'swap': '/dev/vda3',
+        }
+        for name, dev_name in name_tests.items():
+            with ExitStack() as mocks:
+                find_mock = mocks.enter_context(
+                    mock.patch.object(util, 'find_devs_with',
+                                      return_value=[dev_name]))
+                exists_mock = mocks.enter_context(
+                    mock.patch.object(os.path, 'exists',
+                                      return_value=True))
+                self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
+
+            find_mock.assert_called_once_with(mock.ANY)
+            exists_mock.assert_called_once_with(mock.ANY)
+
+    def test_dev_ec2_remap(self):
+        populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
+        cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
+                                          None,
+                                          helpers.Paths({}))
+        found = ds.read_config_drive(self.tmp)
+        ec2_md = found['ec2-metadata']
+        os_md = found['metadata']
+        cfg_ds.ec2_metadata = ec2_md
+        cfg_ds.metadata = os_md
+        name_tests = {
+            'ami': '/dev/vda1',
+            'root': '/dev/vda1',
+            'ephemeral0': '/dev/vda2',
+            'swap': '/dev/vda3',
+            None: None,
+            'bob': None,
+            'root2k': None,
+        }
+        for name, dev_name in name_tests.items():
+            # We want os.path.exists() to return False on its first call,
+            # and True on its second call. We use a handy generator as
+            # the mock side effect for this. The mocked function returns
+            # what the side effect returns.
+            def exists_side_effect():
+                yield False
+                yield True
+            with mock.patch.object(os.path, 'exists',
+                                   side_effect=exists_side_effect()):
+                self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
+            # We don't assert the call count for os.path.exists() because
+            # not all of the entries in name_tests result in two calls to
+            # that function. Specifically, 'root2k' does not call it at all.
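
The generator-based side_effect used in these device-remap tests is standard
unittest.mock behaviour: when side_effect is an iterable, each call to the
patched function returns the next value from it. A minimal, self-contained
sketch of the pattern (illustrative names only, not part of this patch):

    import os
    from unittest import mock

    def exists_side_effect():
        # First probe misses, second probe succeeds.
        yield False
        yield True

    with mock.patch.object(os.path, 'exists',
                           side_effect=exists_side_effect()):
        assert os.path.exists('/dev/vda1') is False  # first call
        assert os.path.exists('/dev/vda1') is True   # second call

A third call inside the context would raise StopIteration, which is why the
tests above construct a fresh generator for every device name they probe.
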
+ + def test_dev_ec2_map(self): + populate_dir(self.tmp, CFG_DRIVE_FILES_V2) + cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, + None, + helpers.Paths({})) + found = ds.read_config_drive(self.tmp) + ec2_md = found['ec2-metadata'] + os_md = found['metadata'] + cfg_ds.ec2_metadata = ec2_md + cfg_ds.metadata = os_md + name_tests = { + 'ami': '/dev/sda1', + 'root': '/dev/sda1', + 'ephemeral0': '/dev/sda2', + 'swap': '/dev/sda3', + None: None, + 'bob': None, + 'root2k': None, + } + for name, dev_name in name_tests.items(): + with mock.patch.object(os.path, 'exists', return_value=True): + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) + + def test_dir_valid(self): + """Verify a dir is read as such.""" + + populate_dir(self.tmp, CFG_DRIVE_FILES_V2) + + found = ds.read_config_drive(self.tmp) + + expected_md = copy(OSTACK_META) + expected_md['instance-id'] = expected_md['uuid'] + expected_md['local-hostname'] = expected_md['hostname'] + + self.assertEqual(USER_DATA, found['userdata']) + self.assertEqual(expected_md, found['metadata']) + self.assertEqual(NETWORK_DATA, found['networkdata']) + self.assertEqual(found['files']['/etc/foo.cfg'], CONTENT_0) + self.assertEqual(found['files']['/etc/bar/bar.cfg'], CONTENT_1) + + def test_seed_dir_valid_extra(self): + """Verify extra files do not affect datasource validity.""" + + data = copy(CFG_DRIVE_FILES_V2) + data["myfoofile.txt"] = "myfoocontent" + data["openstack/latest/random-file.txt"] = "random-content" + + populate_dir(self.tmp, data) + + found = ds.read_config_drive(self.tmp) + + expected_md = copy(OSTACK_META) + expected_md['instance-id'] = expected_md['uuid'] + expected_md['local-hostname'] = expected_md['hostname'] + + self.assertEqual(expected_md, found['metadata']) + + def test_seed_dir_bad_json_metadata(self): + """Verify that bad json in metadata raises BrokenConfigDriveDir.""" + data = copy(CFG_DRIVE_FILES_V2) + + data["openstack/2012-08-10/meta_data.json"] = "non-json garbage {}" + data["openstack/2015-10-15/meta_data.json"] = "non-json garbage {}" + data["openstack/latest/meta_data.json"] = "non-json garbage {}" + + populate_dir(self.tmp, data) + + self.assertRaises(openstack.BrokenMetadata, + ds.read_config_drive, self.tmp) + + def test_seed_dir_no_configdrive(self): + """Verify that no metadata raises NonConfigDriveDir.""" + + my_d = os.path.join(self.tmp, "non-configdrive") + data = copy(CFG_DRIVE_FILES_V2) + data["myfoofile.txt"] = "myfoocontent" + data["openstack/latest/random-file.txt"] = "random-content" + data["content/foo"] = "foocontent" + + self.assertRaises(openstack.NonReadable, + ds.read_config_drive, my_d) + + def test_seed_dir_missing(self): + """Verify that missing seed_dir raises NonConfigDriveDir.""" + my_d = os.path.join(self.tmp, "nonexistantdirectory") + self.assertRaises(openstack.NonReadable, + ds.read_config_drive, my_d) + + def test_find_candidates(self): + devs_with_answers = {} + + def my_devs_with(*args, **kwargs): + criteria = args[0] if len(args) else kwargs.pop('criteria', None) + return devs_with_answers.get(criteria, []) + + def my_is_partition(dev): + return dev[-1] in "0123456789" and not dev.startswith("sr") + + try: + orig_find_devs_with = util.find_devs_with + util.find_devs_with = my_devs_with + + orig_is_partition = util.is_partition + util.is_partition = my_is_partition + + devs_with_answers = {"TYPE=vfat": [], + "TYPE=iso9660": ["/dev/vdb"], + "LABEL=config-2": ["/dev/vdb"]} + self.assertEqual(["/dev/vdb"], ds.find_candidate_devs()) + + # add a vfat item + # zdd reverse 
sorts after vdb, but the config-2 label is preferred
+            devs_with_answers['TYPE=vfat'] = ["/dev/zdd"]
+            self.assertEqual(["/dev/vdb", "/dev/zdd"],
+                             ds.find_candidate_devs())
+
+            # verify that partitions with the correct label are considered.
+            devs_with_answers = {"TYPE=vfat": ["/dev/sda1"],
+                                 "TYPE=iso9660": [],
+                                 "LABEL=config-2": ["/dev/vdb3"]}
+            self.assertEqual(["/dev/vdb3"],
+                             ds.find_candidate_devs())
+
+            # Verify that uppercase labels are also found.
+            devs_with_answers = {"TYPE=vfat": [],
+                                 "TYPE=iso9660": ["/dev/vdb"],
+                                 "LABEL=CONFIG-2": ["/dev/vdb"]}
+            self.assertEqual(["/dev/vdb"], ds.find_candidate_devs())
+
+        finally:
+            util.find_devs_with = orig_find_devs_with
+            util.is_partition = orig_is_partition
+
+    @mock.patch(M_PATH + 'on_first_boot')
+    def test_pubkeys_v2(self, on_first_boot):
+        """Verify that public-keys work in config-drive-v2."""
+        myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2)
+        self.assertEqual(myds.get_public_ssh_keys(),
+                         [OSTACK_META['public_keys']['mykey']])
+        self.assertEqual('configdrive', myds.cloud_name)
+        self.assertEqual('openstack', myds.platform)
+        self.assertEqual('seed-dir (%s/seed)' % self.tmp, myds.subplatform)
+
+    def test_subplatform_config_drive_when_starts_with_dev(self):
+        """subplatform reports config-drive when source starts with /dev/."""
+        cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
+                                          None,
+                                          helpers.Paths({}))
+        with mock.patch(M_PATH + 'find_candidate_devs') as m_find_devs:
+            with mock.patch(M_PATH + 'util.mount_cb'):
+                with mock.patch(M_PATH + 'on_first_boot'):
+                    m_find_devs.return_value = ['/dev/anything']
+                    self.assertEqual(True, cfg_ds.get_data())
+        self.assertEqual('config-disk (/dev/anything)', cfg_ds.subplatform)
+
+
+@mock.patch(
+    "cloudinit.net.is_openvswitch_internal_interface",
+    mock.Mock(return_value=False)
+)
+class TestNetJson(CiTestCase):
+    def setUp(self):
+        super(TestNetJson, self).setUp()
+        self.tmp = self.tmp_dir()
+        self.maxDiff = None
+
+    @mock.patch(M_PATH + 'on_first_boot')
+    def test_network_data_is_found(self, on_first_boot):
+        """Verify that network_data is present in ds in config-drive-v2."""
+        myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2)
+        self.assertIsNotNone(myds.network_json)
+
+    @mock.patch(M_PATH + 'on_first_boot')
+    def test_network_config_is_converted(self, on_first_boot):
+        """Verify that network_data is converted and present on ds object."""
+        myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2)
+        network_config = openstack.convert_net_json(NETWORK_DATA,
+                                                    known_macs=KNOWN_MACS)
+        self.assertEqual(myds.network_config, network_config)
+
+    def test_network_config_conversion_dhcp6(self):
+        """Test some ipv6 input network json and check the expected
+        conversions."""
+        in_data = {
+            'links': [
+                {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd',
+                 'ethernet_mac_address': 'fa:16:3e:69:b0:58',
+                 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'},
+                {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33',
+                 'ethernet_mac_address': 'fa:16:3e:d4:57:ad',
+                 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'},
+            ],
+            'networks': [
+                {'link': 'tap2ecc7709-b3', 'type': 'ipv6_dhcpv6-stateless',
+                 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235',
+                 'id': 'network0'},
+                {'link': 'tap2f88d109-5b', 'type': 'ipv6_dhcpv6-stateful',
+                 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54',
+                 'id': 'network1'},
+            ]
+        }
+        out_data = {
+            'version': 1,
+            'config': [
+                {'mac_address': 'fa:16:3e:69:b0:58',
+                 'mtu': None,
+                 'name': 'enp0s1',
+                 'subnets': [{'type': 
'ipv6_dhcpv6-stateless'}], + 'type': 'physical'}, + {'mac_address': 'fa:16:3e:d4:57:ad', + 'mtu': None, + 'name': 'enp0s2', + 'subnets': [{'type': 'ipv6_dhcpv6-stateful'}], + 'type': 'physical', + 'accept-ra': True} + ], + } + conv_data = openstack.convert_net_json(in_data, known_macs=KNOWN_MACS) + self.assertEqual(out_data, conv_data) + + def test_network_config_conversions(self): + """Tests a bunch of input network json and checks the + expected conversions.""" + in_datas = [ + NETWORK_DATA, + { + 'services': [{'type': 'dns', 'address': '172.19.0.12'}], + 'networks': [{ + 'network_id': 'dacd568d-5be6-4786-91fe-750c374b78b4', + 'type': 'ipv4', + 'netmask': '255.255.252.0', + 'link': 'tap1a81968a-79', + 'routes': [{ + 'netmask': '0.0.0.0', + 'network': '0.0.0.0', + 'gateway': '172.19.3.254', + }], + 'ip_address': '172.19.1.34', + 'id': 'network0', + }], + 'links': [{ + 'type': 'bridge', + 'vif_id': '1a81968a-797a-400f-8a80-567f997eb93f', + 'ethernet_mac_address': 'fa:16:3e:ed:9a:59', + 'id': 'tap1a81968a-79', + 'mtu': None, + }], + }, + ] + out_datas = [ + { + 'version': 1, + 'config': [ + { + 'subnets': [{'type': 'dhcp4'}], + 'type': 'physical', + 'mac_address': 'fa:16:3e:69:b0:58', + 'name': 'enp0s1', + 'mtu': None, + }, + { + 'subnets': [{'type': 'dhcp4'}], + 'type': 'physical', + 'mac_address': 'fa:16:3e:d4:57:ad', + 'name': 'enp0s2', + 'mtu': None, + }, + { + 'subnets': [{'type': 'dhcp4'}], + 'type': 'physical', + 'mac_address': 'fa:16:3e:05:30:fe', + 'name': 'nic0', + 'mtu': None, + }, + { + 'type': 'nameserver', + 'address': '199.204.44.24', + }, + { + 'type': 'nameserver', + 'address': '199.204.47.54', + } + ], + + }, + { + 'version': 1, + 'config': [ + { + 'name': 'foo3', + 'mac_address': 'fa:16:3e:ed:9a:59', + 'mtu': None, + 'type': 'physical', + 'subnets': [ + { + 'address': '172.19.1.34', + 'netmask': '255.255.252.0', + 'type': 'static', + 'ipv4': True, + 'routes': [{ + 'gateway': '172.19.3.254', + 'netmask': '0.0.0.0', + 'network': '0.0.0.0', + }], + } + ] + }, + { + 'type': 'nameserver', + 'address': '172.19.0.12', + } + ], + }, + ] + for in_data, out_data in zip(in_datas, out_datas): + conv_data = openstack.convert_net_json(in_data, + known_macs=KNOWN_MACS) + self.assertEqual(out_data, conv_data) + + +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) +class TestConvertNetworkData(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestConvertNetworkData, self).setUp() + self.tmp = self.tmp_dir() + + def _getnames_in_config(self, ncfg): + return set([n['name'] for n in ncfg['config'] + if n['type'] == 'physical']) + + def test_conversion_fills_names(self): + ncfg = openstack.convert_net_json(NETWORK_DATA, known_macs=KNOWN_MACS) + expected = set(['nic0', 'enp0s1', 'enp0s2']) + found = self._getnames_in_config(ncfg) + self.assertEqual(found, expected) + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + def test_convert_reads_system_prefers_name(self, get_interfaces_by_mac): + macs = KNOWN_MACS.copy() + macs.update({'fa:16:3e:05:30:fe': 'foonic1', + 'fa:16:3e:69:b0:58': 'ens1'}) + get_interfaces_by_mac.return_value = macs + + ncfg = openstack.convert_net_json(NETWORK_DATA) + expected = set(['nic0', 'ens1', 'enp0s2']) + found = self._getnames_in_config(ncfg) + self.assertEqual(found, expected) + + def test_convert_raises_value_error_on_missing_name(self): + macs = {'aa:aa:aa:aa:aa:00': 'ens1'} + self.assertRaises(ValueError, openstack.convert_net_json, + NETWORK_DATA, known_macs=macs) + + def 
test_conversion_with_route(self):
+        ncfg = openstack.convert_net_json(NETWORK_DATA_2,
+                                          known_macs=KNOWN_MACS)
+        # not the best test, but see that we get a route in the
+        # network config and that it gets rendered to an ENI file
+        routes = []
+        for n in ncfg['config']:
+            for s in n.get('subnets', []):
+                routes.extend(s.get('routes', []))
+        self.assertIn(
+            {'network': '0.0.0.0', 'netmask': '0.0.0.0', 'gateway': '2.2.2.9'},
+            routes)
+        eni_renderer = eni.Renderer()
+        eni_renderer.render_network_state(
+            network_state.parse_net_config_data(ncfg), target=self.tmp)
+        with open(os.path.join(self.tmp, "etc",
+                               "network", "interfaces"), 'r') as f:
+            eni_rendering = f.read()
+        self.assertIn("route add default gw 2.2.2.9", eni_rendering)
+
+    def test_conversion_with_tap(self):
+        ncfg = openstack.convert_net_json(NETWORK_DATA_3,
+                                          known_macs=KNOWN_MACS)
+        physicals = set()
+        for i in ncfg['config']:
+            if i.get('type') == "physical":
+                physicals.add(i['name'])
+        self.assertEqual(physicals, set(('foo1', 'foo2')))
+
+    def test_bond_conversion(self):
+        # light testing of bond conversion and eni rendering of bond
+        ncfg = openstack.convert_net_json(NETWORK_DATA_BOND,
+                                          known_macs=KNOWN_MACS)
+        eni_renderer = eni.Renderer()
+
+        eni_renderer.render_network_state(
+            network_state.parse_net_config_data(ncfg), target=self.tmp)
+        with open(os.path.join(self.tmp, "etc",
+                               "network", "interfaces"), 'r') as f:
+            eni_rendering = f.read()
+
+        # Verify there are expected interfaces in the net config.
+        interfaces = sorted(
+            [i['name'] for i in ncfg['config']
+             if i['type'] in ('vlan', 'bond', 'physical')])
+        self.assertEqual(
+            sorted(["oeth0", "oeth1", "bond0", "bond0.602", "bond0.612"]),
+            interfaces)
+
+        words = eni_rendering.split()
+        # 'eth0' and 'eth1' are the ids. Because their MAC addresses
+        # map to other names, we should not see them in the ENI.
+        self.assertNotIn('eth0', words)
+        self.assertNotIn('eth1', words)
+
+        # oeth0 and oeth1 are the interface names for eni.
+        # bond0 will be generated for the bond. Each should be auto.
+ self.assertIn("auto oeth0", eni_rendering) + self.assertIn("auto oeth1", eni_rendering) + self.assertIn("auto bond0", eni_rendering) + # The bond should have the given mac address + pos = eni_rendering.find("auto bond0") + self.assertIn(BOND_MAC, eni_rendering[pos:]) + + def test_vlan(self): + # light testing of vlan config conversion and eni rendering + ncfg = openstack.convert_net_json(NETWORK_DATA_VLAN, + known_macs=KNOWN_MACS) + eni_renderer = eni.Renderer() + eni_renderer.render_network_state( + network_state.parse_net_config_data(ncfg), target=self.tmp) + with open(os.path.join(self.tmp, "etc", + "network", "interfaces"), 'r') as f: + eni_rendering = f.read() + + self.assertIn("iface enp0s1", eni_rendering) + self.assertIn("address 10.0.1.5", eni_rendering) + self.assertIn("auto enp0s1.602", eni_rendering) + + def test_mac_addrs_can_be_upper_case(self): + # input mac addresses on rackspace may be upper case + my_netdata = deepcopy(NETWORK_DATA) + for link in my_netdata['links']: + link['ethernet_mac_address'] = link['ethernet_mac_address'].upper() + + ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS) + config_name2mac = {} + for n in ncfg['config']: + if n['type'] == 'physical': + config_name2mac[n['name']] = n['mac_address'] + + expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58', + 'enp0s2': 'fa:16:3e:d4:57:ad'} + self.assertEqual(expected, config_name2mac) + + def test_unknown_device_types_accepted(self): + # If we don't recognise a link, we should treat it as physical for a + # best-effort boot + my_netdata = deepcopy(NETWORK_DATA) + my_netdata['links'][0]['type'] = 'my-special-link-type' + + ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS) + config_name2mac = {} + for n in ncfg['config']: + if n['type'] == 'physical': + config_name2mac[n['name']] = n['mac_address'] + + expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58', + 'enp0s2': 'fa:16:3e:d4:57:ad'} + self.assertEqual(expected, config_name2mac) + + # We should, however, warn the user that we don't recognise the type + self.assertIn('Unknown network_data link type (my-special-link-type)', + self.logs.getvalue()) + + +def cfg_ds_from_dir(base_d, files=None): + run = os.path.join(base_d, "run") + os.mkdir(run) + cfg_ds = ds.DataSourceConfigDrive( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': run})) + cfg_ds.seed_dir = os.path.join(base_d, "seed") + if files: + populate_dir(cfg_ds.seed_dir, files) + cfg_ds.known_macs = KNOWN_MACS.copy() + if not cfg_ds.get_data(): + raise RuntimeError("Data source did not extract itself from" + " seed directory %s" % cfg_ds.seed_dir) + return cfg_ds + + +def populate_ds_from_read_config(cfg_ds, source, results): + """Patch the DataSourceConfigDrive from the results of + read_config_drive_dir hopefully in line with what it would have + if cfg_ds.get_data had been successfully called""" + cfg_ds.source = source + cfg_ds.metadata = results.get('metadata') + cfg_ds.ec2_metadata = results.get('ec2-metadata') + cfg_ds.userdata_raw = results.get('userdata') + cfg_ds.version = results.get('version') + cfg_ds.network_json = results.get('networkdata') + cfg_ds._network_config = openstack.convert_net_json( + cfg_ds.network_json, known_macs=KNOWN_MACS) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_digitalocean.py b/tests/unittests/sources/test_digitalocean.py new file mode 100644 index 00000000..351bf7ba --- /dev/null +++ b/tests/unittests/sources/test_digitalocean.py @@ -0,0 +1,372 @@ +# Copyright 
(C) 2014 Neal Shrader
+#
+# Author: Neal Shrader
+# Author: Ben Howard
+# Author: Scott Moser
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+from cloudinit import helpers
+from cloudinit import settings
+from cloudinit.sources import DataSourceDigitalOcean
+from cloudinit.sources.helpers import digitalocean
+
+from tests.unittests.helpers import mock, CiTestCase
+
+DO_MULTIPLE_KEYS = ["ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co",
+                    "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co"]
+DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@do.co"
+
+# the following JSON was taken from a droplet (that's why it's a string)
+DO_META = json.loads("""
+{
+  "droplet_id": "22532410",
+  "hostname": "utl-96268",
+  "vendor_data": "vendordata goes here",
+  "user_data": "userdata goes here",
+  "public_keys": "",
+  "auth_key": "authorization_key",
+  "region": "nyc3",
+  "interfaces": {
+    "private": [
+      {
+        "ipv4": {
+          "ip_address": "10.132.6.205",
+          "netmask": "255.255.0.0",
+          "gateway": "10.132.0.1"
+        },
+        "mac": "04:01:57:d1:9e:02",
+        "type": "private"
+      }
+    ],
+    "public": [
+      {
+        "ipv4": {
+          "ip_address": "192.0.0.20",
+          "netmask": "255.255.255.0",
+          "gateway": "104.236.0.1"
+        },
+        "ipv6": {
+          "ip_address": "2604:A880:0800:0000:1000:0000:0000:0000",
+          "cidr": 64,
+          "gateway": "2604:A880:0800:0000:0000:0000:0000:0001"
+        },
+        "anchor_ipv4": {
+          "ip_address": "10.0.0.5",
+          "netmask": "255.255.0.0",
+          "gateway": "10.0.0.1"
+        },
+        "mac": "04:01:57:d1:9e:01",
+        "type": "public"
+      }
+    ]
+  },
+  "floating_ip": {
+    "ipv4": {
+      "active": false
+    }
+  },
+  "dns": {
+    "nameservers": [
+      "2001:4860:4860::8844",
+      "2001:4860:4860::8888",
+      "8.8.8.8"
+    ]
+  }
+}
+""")
+
+# This has no private interface
+DO_META_2 = {
+    "droplet_id": 27223699,
+    "hostname": "smtest1",
+    "vendor_data": "\n".join([
+        ('"Content-Type: multipart/mixed; '
+         'boundary="===============8645434374073493512=="'),
+        'MIME-Version: 1.0',
+        '',
+        '--===============8645434374073493512==',
+        'MIME-Version: 1.0'
+        'Content-Type: text/cloud-config; charset="us-ascii"'
+        'Content-Transfer-Encoding: 7bit'
+        'Content-Disposition: attachment; filename="cloud-config"'
+        '',
+        '#cloud-config',
+        'disable_root: false',
+        'manage_etc_hosts: true',
+        '',
+        '',
+        '--===============8645434374073493512=='
+    ]),
+    "public_keys": [
+        "ssh-rsa AAAAB3NzaN...N3NtHw== smoser@brickies"
+    ],
+    "auth_key": "88888888888888888888888888888888",
+    "region": "nyc3",
+    "interfaces": {
+        "public": [{
+            "ipv4": {
+                "ip_address": "45.55.249.133",
+                "netmask": "255.255.192.0",
+                "gateway": "45.55.192.1"
+            },
+            "anchor_ipv4": {
+                "ip_address": "10.17.0.5",
+                "netmask": "255.255.0.0",
+                "gateway": "10.17.0.1"
+            },
+            "mac": "ae:cc:08:7c:88:00",
+            "type": "public"
+        }]
+    },
+    "floating_ip": {"ipv4": {"active": True, "ip_address": "138.197.59.92"}},
+    "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]},
+    "tags": None,
+}
+
+DO_META['public_keys'] = DO_SINGLE_KEY
+
+MD_URL = 'http://169.254.169.254/metadata/v1.json'
+
+
+def _mock_dmi():
+    return (True, DO_META.get('id'))
+
+
+class TestDataSourceDigitalOcean(CiTestCase):
+    """
+    Test reading the meta-data
+    """
+    def setUp(self):
+        super(TestDataSourceDigitalOcean, self).setUp()
+        self.tmp = self.tmp_dir()
+
+    def get_ds(self, get_sysinfo=_mock_dmi):
+        ds = DataSourceDigitalOcean.DataSourceDigitalOcean(
+            settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+        ds.use_ip4LL = False
+        if get_sysinfo is not None:
+            ds._get_sysinfo = get_sysinfo
+        return ds
+
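
The get_ds() helper above swaps a stub in for ds._get_sysinfo, so each test
decides whether the datasource should believe it is running on a droplet
without touching real DMI data. The (found, instance_id) tuple convention
that read_sysinfo and _mock_dmi share can be shown standalone; the helper
below is hypothetical and only mirrors the tests' early-exit logic:

    def fake_sysinfo_on_do():
        # Pretend DMI identified DigitalOcean and returned a droplet id.
        return (True, '22532410')

    def fake_sysinfo_elsewhere():
        # Pretend DMI did not identify the platform.
        return (False, None)

    def should_run_datasource(get_sysinfo):
        # Hypothetical stand-in for the datasource's platform check.
        found, droplet_id = get_sysinfo()
        return found

    assert should_run_datasource(fake_sysinfo_on_do) is True
    assert should_run_datasource(fake_sysinfo_elsewhere) is False

Injecting the probe as a callable keeps each test hermetic, which is exactly
what get_ds(get_sysinfo=...) provides.

+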
@mock.patch('cloudinit.sources.helpers.digitalocean.read_sysinfo') + def test_returns_false_not_on_docean(self, m_read_sysinfo): + m_read_sysinfo.return_value = (False, None) + ds = self.get_ds(get_sysinfo=None) + self.assertEqual(False, ds.get_data()) + self.assertTrue(m_read_sysinfo.called) + + @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata') + def test_metadata(self, mock_readmd): + mock_readmd.return_value = DO_META.copy() + + ds = self.get_ds() + ret = ds.get_data() + self.assertTrue(ret) + + self.assertTrue(mock_readmd.called) + + self.assertEqual(DO_META.get('user_data'), ds.get_userdata_raw()) + self.assertEqual(DO_META.get('vendor_data'), ds.get_vendordata_raw()) + self.assertEqual(DO_META.get('region'), ds.availability_zone) + self.assertEqual(DO_META.get('droplet_id'), ds.get_instance_id()) + self.assertEqual(DO_META.get('hostname'), ds.get_hostname()) + + # Single key + self.assertEqual([DO_META.get('public_keys')], + ds.get_public_ssh_keys()) + + self.assertIsInstance(ds.get_public_ssh_keys(), list) + + @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata') + def test_multiple_ssh_keys(self, mock_readmd): + metadata = DO_META.copy() + metadata['public_keys'] = DO_MULTIPLE_KEYS + mock_readmd.return_value = metadata.copy() + + ds = self.get_ds() + ret = ds.get_data() + self.assertTrue(ret) + + self.assertTrue(mock_readmd.called) + + # Multiple keys + self.assertEqual(metadata['public_keys'], ds.get_public_ssh_keys()) + self.assertIsInstance(ds.get_public_ssh_keys(), list) + + +class TestNetworkConvert(CiTestCase): + + def _get_networking(self): + self.m_get_by_mac.return_value = { + '04:01:57:d1:9e:01': 'ens1', + '04:01:57:d1:9e:02': 'ens2', + 'b8:ae:ed:75:5f:9a': 'enp0s25', + 'ae:cc:08:7c:88:00': 'meta2p1'} + netcfg = digitalocean.convert_network_configuration( + DO_META['interfaces'], DO_META['dns']['nameservers']) + self.assertIn('config', netcfg) + return netcfg + + def setUp(self): + super(TestNetworkConvert, self).setUp() + self.add_patch('cloudinit.net.get_interfaces_by_mac', 'm_get_by_mac') + + def test_networking_defined(self): + netcfg = self._get_networking() + self.assertIsNotNone(netcfg) + dns_defined = False + + for part in netcfg.get('config'): + n_type = part.get('type') + print("testing part ", n_type, "\n", json.dumps(part, indent=3)) + + if n_type == 'nameserver': + n_address = part.get('address') + self.assertIsNotNone(n_address) + self.assertEqual(len(n_address), 3) + + dns_resolvers = DO_META["dns"]["nameservers"] + for x in n_address: + self.assertIn(x, dns_resolvers) + dns_defined = True + + else: + n_subnets = part.get('type') + n_name = part.get('name') + n_mac = part.get('mac_address') + + self.assertIsNotNone(n_type) + self.assertIsNotNone(n_subnets) + self.assertIsNotNone(n_name) + self.assertIsNotNone(n_mac) + + self.assertTrue(dns_defined) + + def _get_nic_definition(self, int_type, expected_name): + """helper function to return if_type (i.e. 
public) and the expected
+        name used by cloud-init (i.e. eth0)"""
+        netcfg = self._get_networking()
+        meta_def = (DO_META.get('interfaces')).get(int_type)[0]
+
+        self.assertEqual(int_type, meta_def.get('type'))
+
+        for nic_def in netcfg.get('config'):
+            print(nic_def)
+            if nic_def.get('name') == expected_name:
+                return nic_def, meta_def
+
+    def _get_match_subn(self, subnets, ip_addr):
+        """get the matching subnet definition based on ip address"""
+        for subn in subnets:
+            address = subn.get('address')
+            self.assertIsNotNone(address)
+
+            # equals won't work because of ipv6 addressing being in
+            # cidr notation, i.e. fe00::1/64
+            if ip_addr in address:
+                print(json.dumps(subn, indent=3))
+                return subn
+
+    def test_correct_gateways_defined(self):
+        """test to make sure the eth0 ipv4 and ipv6 gateways are defined"""
+        netcfg = self._get_networking()
+        gateways = []
+        for nic_def in netcfg.get('config'):
+            if nic_def.get('type') != 'physical':
+                continue
+            for subn in nic_def.get('subnets'):
+                if 'gateway' in subn:
+                    gateways.append(subn.get('gateway'))
+
+        # we should have two gateways, one ipv4 and one ipv6
+        self.assertEqual(len(gateways), 2)
+
+        # make sure the ipv4 gateway is there
+        (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
+        ipv4_def = meta_def.get('ipv4')
+        self.assertIn(ipv4_def.get('gateway'), gateways)
+
+        # make sure the ipv6 gateway is there
+        ipv6_def = meta_def.get('ipv6')
+        self.assertIn(ipv6_def.get('gateway'), gateways)
+
+    def test_public_interface_defined(self):
+        """test that the public interface is defined as eth0"""
+        (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
+        self.assertEqual('eth0', nic_def.get('name'))
+        self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address'))
+        self.assertEqual('physical', nic_def.get('type'))
+
+    def test_private_interface_defined(self):
+        """test that the private interface is defined as eth1"""
+        (nic_def, meta_def) = self._get_nic_definition('private', 'eth1')
+        self.assertEqual('eth1', nic_def.get('name'))
+        self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address'))
+        self.assertEqual('physical', nic_def.get('type'))
+
+    def test_public_interface_ipv6(self):
+        """test public ipv6 addressing"""
+        (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
+        ipv6_def = meta_def.get('ipv6')
+        self.assertIsNotNone(ipv6_def)
+
+        subn_def = self._get_match_subn(nic_def.get('subnets'),
+                                        ipv6_def.get('ip_address'))
+
+        cidr_notated_address = "{0}/{1}".format(ipv6_def.get('ip_address'),
+                                                ipv6_def.get('cidr'))
+
+        self.assertEqual(cidr_notated_address, subn_def.get('address'))
+        self.assertEqual(ipv6_def.get('gateway'), subn_def.get('gateway'))
+
+    def test_public_interface_ipv4(self):
+        """test public ipv4 addressing"""
+        (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
+        ipv4_def = meta_def.get('ipv4')
+        self.assertIsNotNone(ipv4_def)
+
+        subn_def = self._get_match_subn(nic_def.get('subnets'),
+                                        ipv4_def.get('ip_address'))
+
+        self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask'))
+        self.assertEqual(ipv4_def.get('gateway'), subn_def.get('gateway'))
+
+    def test_public_interface_anchor_ipv4(self):
+        """test public anchor ipv4 addressing"""
+        (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
+        ipv4_def = meta_def.get('anchor_ipv4')
+        self.assertIsNotNone(ipv4_def)
+
+        subn_def = self._get_match_subn(nic_def.get('subnets'),
+                                        ipv4_def.get('ip_address'))
+
+        self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask'))
+        self.assertNotIn('gateway', 
subn_def) + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + def test_convert_without_private(self, m_get_by_mac): + m_get_by_mac.return_value = { + 'b8:ae:ed:75:5f:9a': 'enp0s25', + 'ae:cc:08:7c:88:00': 'meta2p1'} + netcfg = digitalocean.convert_network_configuration( + DO_META_2['interfaces'], DO_META_2['dns']['nameservers']) + + # print(netcfg) + byname = {} + for i in netcfg['config']: + if 'name' in i: + if i['name'] in byname: + raise ValueError("name '%s' in config twice: %s" % + (i['name'], netcfg)) + byname[i['name']] = i + self.assertTrue('eth0' in byname) + self.assertTrue('subnets' in byname['eth0']) + eth0 = byname['eth0'] + self.assertEqual( + sorted(['45.55.249.133', '10.17.0.5']), + sorted([i['address'] for i in eth0['subnets']])) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_ec2.py b/tests/unittests/sources/test_ec2.py new file mode 100644 index 00000000..19c2bbcd --- /dev/null +++ b/tests/unittests/sources/test_ec2.py @@ -0,0 +1,978 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import copy +import httpretty +import json +import requests +from unittest import mock + +from cloudinit import helpers +from cloudinit.sources import DataSourceEc2 as ec2 +from tests.unittests import helpers as test_helpers + + +DYNAMIC_METADATA = { + "instance-identity": { + "document": json.dumps({ + "devpayProductCodes": None, + "marketplaceProductCodes": ["1abc2defghijklm3nopqrs4tu"], + "availabilityZone": "us-west-2b", + "privateIp": "10.158.112.84", + "version": "2017-09-30", + "instanceId": "my-identity-id", + "billingProducts": None, + "instanceType": "t2.micro", + "accountId": "123456789012", + "imageId": "ami-5fb8c835", + "pendingTime": "2016-11-19T16:32:11Z", + "architecture": "x86_64", + "kernelId": None, + "ramdiskId": None, + "region": "us-west-2" + }) + } +} + + +# collected from api version 2016-09-02/ with +# python3 -c 'import json +# from cloudinit.ec2_utils import get_instance_metadata as gm +# print(json.dumps(gm("2016-09-02"), indent=1, sort_keys=True))' +# Note that the MAC addresses have been modified to sort in the opposite order +# to the device-number attribute, to test LP: #1876312 +DEFAULT_METADATA = { + "ami-id": "ami-8b92b4ee", + "ami-launch-index": "0", + "ami-manifest-path": "(unknown)", + "block-device-mapping": {"ami": "/dev/sda1", "root": "/dev/sda1"}, + "hostname": "ip-172-31-31-158.us-east-2.compute.internal", + "instance-action": "none", + "instance-id": "i-0a33f80f09c96477f", + "instance-type": "t2.small", + "local-hostname": "ip-172-3-3-15.us-east-2.compute.internal", + "local-ipv4": "172.3.3.15", + "mac": "06:17:04:d7:26:09", + "metrics": {"vhostmd": ""}, + "network": { + "interfaces": { + "macs": { + "06:17:04:d7:26:09": { + "device-number": "0", + "interface-id": "eni-e44ef49e", + "ipv4-associations": {"13.59.77.202": "172.3.3.15"}, + "ipv6s": "2600:1f16:aeb:b20b:9d87:a4af:5cc9:73dc", + "local-hostname": ("ip-172-3-3-15.us-east-2." + "compute.internal"), + "local-ipv4s": "172.3.3.15", + "mac": "06:17:04:d7:26:09", + "owner-id": "950047163771", + "public-hostname": ("ec2-13-59-77-202.us-east-2." 
+ "compute.amazonaws.com"), + "public-ipv4s": "13.59.77.202", + "security-group-ids": "sg-5a61d333", + "security-groups": "wide-open", + "subnet-id": "subnet-20b8565b", + "subnet-ipv4-cidr-block": "172.31.16.0/20", + "subnet-ipv6-cidr-blocks": "2600:1f16:aeb:b20b::/64", + "vpc-id": "vpc-87e72bee", + "vpc-ipv4-cidr-block": "172.31.0.0/16", + "vpc-ipv4-cidr-blocks": "172.31.0.0/16", + "vpc-ipv6-cidr-blocks": "2600:1f16:aeb:b200::/56" + }, + "06:17:04:d7:26:08": { + "device-number": "1", # Only IPv4 local config + "interface-id": "eni-e44ef49f", + "ipv4-associations": {"": "172.3.3.16"}, + "ipv6s": "", # No IPv6 config + "local-hostname": ("ip-172-3-3-16.us-east-2." + "compute.internal"), + "local-ipv4s": "172.3.3.16", + "mac": "06:17:04:d7:26:08", + "owner-id": "950047163771", + "public-hostname": ("ec2-172-3-3-16.us-east-2." + "compute.amazonaws.com"), + "public-ipv4s": "", # No public ipv4 config + "security-group-ids": "sg-5a61d333", + "security-groups": "wide-open", + "subnet-id": "subnet-20b8565b", + "subnet-ipv4-cidr-block": "172.31.16.0/20", + "subnet-ipv6-cidr-blocks": "", + "vpc-id": "vpc-87e72bee", + "vpc-ipv4-cidr-block": "172.31.0.0/16", + "vpc-ipv4-cidr-blocks": "172.31.0.0/16", + "vpc-ipv6-cidr-blocks": "" + } + } + } + }, + "placement": {"availability-zone": "us-east-2b"}, + "profile": "default-hvm", + "public-hostname": "ec2-13-59-77-202.us-east-2.compute.amazonaws.com", + "public-ipv4": "13.59.77.202", + "public-keys": {"brickies": ["ssh-rsa AAAAB3Nz....w== brickies"]}, + "reservation-id": "r-01efbc9996bac1bd6", + "security-groups": "my-wide-open", + "services": {"domain": "amazonaws.com", "partition": "aws"}, +} + +# collected from api version 2018-09-24/ with +# python3 -c 'import json +# from cloudinit.ec2_utils import get_instance_metadata as gm +# print(json.dumps(gm("2018-09-24"), indent=1, sort_keys=True))' + +NIC1_MD_IPV4_IPV6_MULTI_IP = { + "device-number": "0", + "interface-id": "eni-0d6335689899ce9cc", + "ipv4-associations": { + "18.218.219.181": "172.31.44.13" + }, + "ipv6s": [ + "2600:1f16:292:100:c187:593c:4349:136", + "2600:1f16:292:100:f153:12a3:c37c:11f9", + "2600:1f16:292:100:f152:2222:3333:4444" + ], + "local-hostname": ("ip-172-31-44-13.us-east-2." + "compute.internal"), + "local-ipv4s": [ + "172.31.44.13", + "172.31.45.70" + ], + "mac": "0a:07:84:3d:6e:38", + "owner-id": "329910648901", + "public-hostname": ("ec2-18-218-219-181.us-east-2." 
+                        "compute.amazonaws.com"),
+    "public-ipv4s": "18.218.219.181",
+    "security-group-ids": "sg-0c387755222ba8d2e",
+    "security-groups": "launch-wizard-4",
+    "subnet-id": "subnet-9d7ba0d1",
+    "subnet-ipv4-cidr-block": "172.31.32.0/20",
+    "subnet_ipv6_cidr_blocks": "2600:1f16:292:100::/64",
+    "vpc-id": "vpc-a07f62c8",
+    "vpc-ipv4-cidr-block": "172.31.0.0/16",
+    "vpc-ipv4-cidr-blocks": "172.31.0.0/16",
+    "vpc_ipv6_cidr_blocks": "2600:1f16:292:100::/56"
+}
+
+NIC2_MD = {
+    "device-number": "1",
+    "interface-id": "eni-043cdce36ded5e79f",
+    "local-hostname": "ip-172-31-47-221.us-east-2.compute.internal",
+    "local-ipv4s": "172.31.47.221",
+    "mac": "0a:75:69:92:e2:16",
+    "owner-id": "329910648901",
+    "security-group-ids": "sg-0d68fef37d8cc9b77",
+    "security-groups": "launch-wizard-17",
+    "subnet-id": "subnet-9d7ba0d1",
+    "subnet-ipv4-cidr-block": "172.31.32.0/20",
+    "vpc-id": "vpc-a07f62c8",
+    "vpc-ipv4-cidr-block": "172.31.0.0/16",
+    "vpc-ipv4-cidr-blocks": "172.31.0.0/16"
+}
+
+SECONDARY_IP_METADATA_2018_09_24 = {
+    "ami-id": "ami-0986c2ac728528ac2",
+    "ami-launch-index": "0",
+    "ami-manifest-path": "(unknown)",
+    "block-device-mapping": {
+        "ami": "/dev/sda1",
+        "root": "/dev/sda1"
+    },
+    "events": {
+        "maintenance": {
+            "history": "[]",
+            "scheduled": "[]"
+        }
+    },
+    "hostname": "ip-172-31-44-13.us-east-2.compute.internal",
+    "identity-credentials": {
+        "ec2": {
+            "info": {
+                "AccountId": "329910648901",
+                "Code": "Success",
+                "LastUpdated": "2019-07-06T14:22:56Z"
+            }
+        }
+    },
+    "instance-action": "none",
+    "instance-id": "i-069e01e8cc43732f8",
+    "instance-type": "t2.micro",
+    "local-hostname": "ip-172-31-44-13.us-east-2.compute.internal",
+    "local-ipv4": "172.31.44.13",
+    "mac": "0a:07:84:3d:6e:38",
+    "metrics": {
+        "vhostmd": ""
+    },
+    "network": {
+        "interfaces": {
+            "macs": {
+                "0a:07:84:3d:6e:38": NIC1_MD_IPV4_IPV6_MULTI_IP,
+            }
+        }
+    },
+    "placement": {
+        "availability-zone": "us-east-2c"
+    },
+    "profile": "default-hvm",
+    "public-hostname": (
+        "ec2-18-218-219-181.us-east-2.compute.amazonaws.com"),
+    "public-ipv4": "18.218.219.181",
+    "public-keys": {
+        "yourkeyname,e": [
+            "ssh-rsa AAAAW...DZ yourkeyname"
+        ]
+    },
+    "reservation-id": "r-09b4917135cdd33be",
+    "security-groups": "launch-wizard-4",
+    "services": {
+        "domain": "amazonaws.com",
+        "partition": "aws"
+    }
+}
+
+M_PATH_NET = 'cloudinit.sources.DataSourceEc2.net.'
+
+
+def _register_ssh_keys(rfunc, base_url, keys_data):
+    """handle ssh key inconsistencies.
+
+    public-keys in the ec2 metadata is inconsistently formatted compared
+    to other entries.
+    Given keys_data of {name1: pubkey1, name2: pubkey2}
+
+    This registers the following urls:
+       base_url                 0={name1}\n1={name2} # (for each name)
+       base_url/                0={name1}\n1={name2} # (for each name)
+       base_url/0               openssh-key
+       base_url/0/              openssh-key
+       base_url/0/openssh-key   {pubkey1}
+       base_url/0/openssh-key/  {pubkey1}
+       ...
+    """
+
+    base_url = base_url.rstrip("/")
+    odd_index = '\n'.join(
+        ["{0}={1}".format(n, name)
+         for n, name in enumerate(sorted(keys_data))])
+
+    rfunc(base_url, odd_index)
+    rfunc(base_url + "/", odd_index)
+
+    for n, name in enumerate(sorted(keys_data)):
+        val = keys_data[name]
+        if isinstance(val, list):
+            val = '\n'.join(val)
+        burl = base_url + "/%s" % n
+        rfunc(burl, "openssh-key")
+        rfunc(burl + "/", "openssh-key")
+        rfunc(burl + "/%s/openssh-key" % name, val)
+        rfunc(burl + "/%s/openssh-key/" % name, val)
+
+
+def register_mock_metaserver(base_url, data):
+    """Register with httpretty an ec2 metadata-like service serving 'data'.
+ + If given a dictionary, it will populate urls under base_url for + that dictionary. For example, input of + {"instance-id": "i-abc", "mac": "00:16:3e:00:00:00"} + populates + base_url with 'instance-id\nmac' + base_url/ with 'instance-id\nmac' + base_url/instance-id with i-abc + base_url/mac with 00:16:3e:00:00:00 + In the index, references to lists or dictionaries have a trailing /. + """ + def register_helper(register, base_url, body): + if not isinstance(base_url, str): + register(base_url, body) + return + base_url = base_url.rstrip("/") + if isinstance(body, str): + register(base_url, body) + elif isinstance(body, list): + register(base_url, '\n'.join(body) + '\n') + register(base_url + '/', '\n'.join(body) + '\n') + elif isinstance(body, dict): + vals = [] + for k, v in body.items(): + if k == 'public-keys': + _register_ssh_keys( + register, base_url + '/public-keys/', v) + continue + suffix = k.rstrip("/") + if not isinstance(v, (str, list)): + suffix += "/" + vals.append(suffix) + url = base_url + '/' + suffix + register_helper(register, url, v) + register(base_url, '\n'.join(vals) + '\n') + register(base_url + '/', '\n'.join(vals) + '\n') + elif body is None: + register(base_url, 'not found', status=404) + + def myreg(*argc, **kwargs): + url = argc[0] + method = httpretty.PUT if ec2.API_TOKEN_ROUTE in url else httpretty.GET + return httpretty.register_uri(method, *argc, **kwargs) + + register_helper(myreg, base_url, data) + + +class TestEc2(test_helpers.HttprettyTestCase): + with_logs = True + maxDiff = None + + valid_platform_data = { + 'uuid': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412', + 'uuid_source': 'dmi', + 'serial': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412', + } + + def setUp(self): + super(TestEc2, self).setUp() + self.datasource = ec2.DataSourceEc2 + self.metadata_addr = self.datasource.metadata_urls[0] + self.tmp = self.tmp_dir() + + def data_url(self, version, data_item='meta-data'): + """Return a metadata url based on the version provided.""" + return '/'.join([self.metadata_addr, version, data_item]) + + def _patch_add_cleanup(self, mpath, *args, **kwargs): + p = mock.patch(mpath, *args, **kwargs) + p.start() + self.addCleanup(p.stop) + + def _setup_ds(self, sys_cfg, platform_data, md, md_version=None): + self.uris = [] + distro = {} + paths = helpers.Paths({'run_dir': self.tmp}) + if sys_cfg is None: + sys_cfg = {} + ds = self.datasource(sys_cfg=sys_cfg, distro=distro, paths=paths) + if not md_version: + md_version = ds.min_metadata_version + if platform_data is not None: + self._patch_add_cleanup( + "cloudinit.sources.DataSourceEc2._collect_platform_data", + return_value=platform_data) + + if md: + all_versions = ( + [ds.min_metadata_version] + ds.extended_metadata_versions) + token_url = self.data_url('latest', data_item='api/token') + register_mock_metaserver(token_url, 'API-TOKEN') + for version in all_versions: + metadata_url = self.data_url(version) + '/' + if version == md_version: + # Register all metadata for desired version + register_mock_metaserver( + metadata_url, md.get('md', DEFAULT_METADATA)) + userdata_url = self.data_url( + version, data_item='user-data') + register_mock_metaserver(userdata_url, md.get('ud', '')) + identity_url = self.data_url( + version, data_item='dynamic/instance-identity') + register_mock_metaserver( + identity_url, md.get('id', DYNAMIC_METADATA)) + else: + instance_id_url = metadata_url + 'instance-id' + if version == ds.min_metadata_version: + # Add min_metadata_version service availability check + register_mock_metaserver( + 
instance_id_url, DEFAULT_METADATA['instance-id']) + else: + # Register 404s for all unrequested extended versions + register_mock_metaserver(instance_id_url, None) + return ds + + def test_network_config_property_returns_version_2_network_data(self): + """network_config property returns network version 2 for metadata""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, + md={'md': DEFAULT_METADATA}) + find_fallback_path = M_PATH_NET + 'find_fallback_nic' + with mock.patch(find_fallback_path) as m_find_fallback: + m_find_fallback.return_value = 'eth9' + ds.get_data() + + mac1 = '06:17:04:d7:26:09' # Defined in DEFAULT_METADATA + expected = {'version': 2, 'ethernets': {'eth9': { + 'match': {'macaddress': '06:17:04:d7:26:09'}, 'set-name': 'eth9', + 'dhcp4': True, 'dhcp6': True}}} + patch_path = M_PATH_NET + 'get_interfaces_by_mac' + get_interface_mac_path = M_PATH_NET + 'get_interface_mac' + with mock.patch(patch_path) as m_get_interfaces_by_mac: + with mock.patch(find_fallback_path) as m_find_fallback: + with mock.patch(get_interface_mac_path) as m_get_mac: + m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} + m_find_fallback.return_value = 'eth9' + m_get_mac.return_value = mac1 + self.assertEqual(expected, ds.network_config) + + def test_network_config_property_set_dhcp4(self): + """network_config property configures dhcp4 on nics with local-ipv4s. + + Only one device is configured based on get_interfaces_by_mac even when + multiple MACs exist in metadata. + """ + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, + md={'md': DEFAULT_METADATA}) + find_fallback_path = M_PATH_NET + 'find_fallback_nic' + with mock.patch(find_fallback_path) as m_find_fallback: + m_find_fallback.return_value = 'eth9' + ds.get_data() + + mac1 = '06:17:04:d7:26:08' # IPv4 only in DEFAULT_METADATA + expected = {'version': 2, 'ethernets': {'eth9': { + 'match': {'macaddress': mac1.lower()}, 'set-name': 'eth9', + 'dhcp4': True, 'dhcp6': False}}} + patch_path = M_PATH_NET + 'get_interfaces_by_mac' + get_interface_mac_path = M_PATH_NET + 'get_interface_mac' + with mock.patch(patch_path) as m_get_interfaces_by_mac: + with mock.patch(find_fallback_path) as m_find_fallback: + with mock.patch(get_interface_mac_path) as m_get_mac: + m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} + m_find_fallback.return_value = 'eth9' + m_get_mac.return_value = mac1 + self.assertEqual(expected, ds.network_config) + + def test_network_config_property_secondary_private_ips(self): + """network_config property configures any secondary ipv4 addresses. + + Only one device is configured based on get_interfaces_by_mac even when + multiple MACs exist in metadata. 
+ """ + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, + md={'md': SECONDARY_IP_METADATA_2018_09_24}) + find_fallback_path = M_PATH_NET + 'find_fallback_nic' + with mock.patch(find_fallback_path) as m_find_fallback: + m_find_fallback.return_value = 'eth9' + ds.get_data() + + mac1 = '0a:07:84:3d:6e:38' # 1 secondary IPv4 and 2 secondary IPv6 + expected = {'version': 2, 'ethernets': {'eth9': { + 'match': {'macaddress': mac1}, 'set-name': 'eth9', + 'addresses': ['172.31.45.70/20', + '2600:1f16:292:100:f152:2222:3333:4444/128', + '2600:1f16:292:100:f153:12a3:c37c:11f9/128'], + 'dhcp4': True, 'dhcp6': True}}} + patch_path = M_PATH_NET + 'get_interfaces_by_mac' + get_interface_mac_path = M_PATH_NET + 'get_interface_mac' + with mock.patch(patch_path) as m_get_interfaces_by_mac: + with mock.patch(find_fallback_path) as m_find_fallback: + with mock.patch(get_interface_mac_path) as m_get_mac: + m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} + m_find_fallback.return_value = 'eth9' + m_get_mac.return_value = mac1 + self.assertEqual(expected, ds.network_config) + + def test_network_config_property_is_cached_in_datasource(self): + """network_config property is cached in DataSourceEc2.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, + md={'md': DEFAULT_METADATA}) + ds._network_config = {'cached': 'data'} + self.assertEqual({'cached': 'data'}, ds.network_config) + + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp): + """Refresh the network_config Ec2 cache if network key is absent. + + This catches an upgrade issue where obj.pkl contained stale metadata + which lacked newly required network key. + """ + old_metadata = copy.deepcopy(DEFAULT_METADATA) + old_metadata.pop('network') + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, + md={'md': old_metadata}) + self.assertTrue(ds.get_data()) + # Provide new revision of metadata that contains network data + register_mock_metaserver( + 'http://169.254.169.254/2009-04-04/meta-data/', DEFAULT_METADATA) + mac1 = '06:17:04:d7:26:09' # Defined in DEFAULT_METADATA + get_interface_mac_path = M_PATH_NET + 'get_interfaces_by_mac' + ds.fallback_nic = 'eth9' + with mock.patch(get_interface_mac_path) as m_get_interfaces_by_mac: + m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} + nc = ds.network_config # Will re-crawl network metadata + self.assertIsNotNone(nc) + self.assertIn( + 'Refreshing stale metadata from prior to upgrade', + self.logs.getvalue()) + expected = {'version': 2, 'ethernets': {'eth9': { + 'match': {'macaddress': mac1}, 'set-name': 'eth9', + 'dhcp4': True, 'dhcp6': True}}} + self.assertEqual(expected, ds.network_config) + + def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self): + """get_instance-id gets DataSourceEc2Local.identity if not present. + + This handles an upgrade case where the old pickled datasource didn't + set up self.identity, but 'systemctl cloud-init init' runs + get_instance_id which traces on missing self.identity. lp:1748354. 
+ """ + self.datasource = ec2.DataSourceEc2Local + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + # Mock 404s on all versions except latest + all_versions = ( + [ds.min_metadata_version] + ds.extended_metadata_versions) + for ver in all_versions[:-1]: + register_mock_metaserver( + 'http://169.254.169.254/{0}/meta-data/instance-id'.format(ver), + None) + ds.metadata_address = 'http://169.254.169.254' + register_mock_metaserver( + '{0}/{1}/meta-data/'.format(ds.metadata_address, all_versions[-1]), + DEFAULT_METADATA) + # Register dynamic/instance-identity document which we now read. + register_mock_metaserver( + '{0}/{1}/dynamic/'.format(ds.metadata_address, all_versions[-1]), + DYNAMIC_METADATA) + ds._cloud_name = ec2.CloudNames.AWS + # Setup cached metadata on the Datasource + ds.metadata = DEFAULT_METADATA + self.assertEqual('my-identity-id', ds.get_instance_id()) + + def test_classic_instance_true(self): + """If no vpc-id in metadata, is_classic_instance must return true.""" + md_copy = copy.deepcopy(DEFAULT_METADATA) + ifaces_md = md_copy.get('network', {}).get('interfaces', {}) + for _mac, mac_data in ifaces_md.get('macs', {}).items(): + if 'vpc-id' in mac_data: + del mac_data['vpc-id'] + + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': md_copy}) + self.assertTrue(ds.get_data()) + self.assertTrue(ds.is_classic_instance()) + + def test_classic_instance_false(self): + """If vpc-id in metadata, is_classic_instance must return false.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + self.assertTrue(ds.get_data()) + self.assertFalse(ds.is_classic_instance()) + + def test_aws_inaccessible_imds_service_fails_with_retries(self): + """Inaccessibility of http://169.254.169.254 are retried.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md=None) + + conn_error = requests.exceptions.ConnectionError( + '[Errno 113] no route to host' + ) + + mock_success = mock.MagicMock(contents=b'fakesuccess') + mock_success.ok.return_value = True + + with mock.patch('cloudinit.url_helper.readurl') as m_readurl: + m_readurl.side_effect = (conn_error, conn_error, mock_success) + with mock.patch('cloudinit.url_helper.time.sleep'): + self.assertTrue(ds.wait_for_metadata_service()) + + # Just one /latest/api/token request + self.assertEqual(3, len(m_readurl.call_args_list)) + for readurl_call in m_readurl.call_args_list: + self.assertIn('latest/api/token', readurl_call[0][0]) + + def test_aws_token_403_fails_without_retries(self): + """Verify that 403s fetching AWS tokens are not retried.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md=None) + token_url = self.data_url('latest', data_item='api/token') + httpretty.register_uri(httpretty.PUT, token_url, body={}, status=403) + self.assertFalse(ds.get_data()) + # Just one /latest/api/token request + logs = self.logs.getvalue() + failed_put_log = '"PUT /latest/api/token HTTP/1.1" 403 0' + expected_logs = [ + 'WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is' + ' disabled. 
Aborting.', + "WARNING: IMDS's HTTP endpoint is probably disabled", + failed_put_log + ] + for log in expected_logs: + self.assertIn(log, logs) + self.assertEqual( + 1, + len([line for line in logs.splitlines() if failed_put_log in line]) + ) + + def test_aws_token_redacted(self): + """Verify that aws tokens are redacted when logged.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + self.assertTrue(ds.get_data()) + all_logs = self.logs.getvalue().splitlines() + REDACT_TTL = "'X-aws-ec2-metadata-token-ttl-seconds': 'REDACTED'" + REDACT_TOK = "'X-aws-ec2-metadata-token': 'REDACTED'" + logs_with_redacted_ttl = [log for log in all_logs if REDACT_TTL in log] + logs_with_redacted = [log for log in all_logs if REDACT_TOK in log] + logs_with_token = [log for log in all_logs if 'API-TOKEN' in log] + self.assertEqual(1, len(logs_with_redacted_ttl)) + self.assertEqual(81, len(logs_with_redacted)) + self.assertEqual(0, len(logs_with_token)) + + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + def test_valid_platform_with_strict_true(self, m_dhcp): + """Valid platform data should return true with strict_id true.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, + md={'md': DEFAULT_METADATA}) + ret = ds.get_data() + self.assertTrue(ret) + self.assertEqual(0, m_dhcp.call_count) + self.assertEqual('aws', ds.cloud_name) + self.assertEqual('ec2', ds.platform_type) + self.assertEqual('metadata (%s)' % ds.metadata_address, ds.subplatform) + + def test_valid_platform_with_strict_false(self): + """Valid platform data should return true with strict_id false.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + ret = ds.get_data() + self.assertTrue(ret) + + def test_unknown_platform_with_strict_true(self): + """Unknown platform data with strict_id true should return False.""" + uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a' + ds = self._setup_ds( + platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''}, + sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, + md={'md': DEFAULT_METADATA}) + ret = ds.get_data() + self.assertFalse(ret) + + def test_unknown_platform_with_strict_false(self): + """Unknown platform data with strict_id false should return True.""" + uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a' + ds = self._setup_ds( + platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''}, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + ret = ds.get_data() + self.assertTrue(ret) + + def test_ec2_local_returns_false_on_non_aws(self): + """DataSourceEc2Local returns False when platform is not AWS.""" + self.datasource = ec2.DataSourceEc2Local + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + platform_attrs = [ + attr for attr in ec2.CloudNames.__dict__.keys() + if not attr.startswith('__')] + for attr_name in platform_attrs: + platform_name = getattr(ec2.CloudNames, attr_name) + if platform_name != 'aws': + ds._cloud_name = platform_name + ret = ds.get_data() + self.assertEqual('ec2', ds.platform_type) + self.assertFalse(ret) + message = ( + "Local Ec2 mode only supported on ('aws',)," + ' not {0}'.format(platform_name)) + self.assertIn(message, 
self.logs.getvalue()) + + @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD') + def test_ec2_local_returns_false_on_bsd(self, m_is_freebsd): + """DataSourceEc2Local returns False on BSD. + + FreeBSD dhclient doesn't support dhclient -sf to run in a sandbox. + """ + m_is_freebsd.return_value = True + self.datasource = ec2.DataSourceEc2Local + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + ret = ds.get_data() + self.assertFalse(ret) + self.assertIn( + "FreeBSD doesn't support running dhclient with -sf", + self.logs.getvalue()) + + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + @mock.patch('cloudinit.net.find_fallback_nic') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD') + def test_ec2_local_performs_dhcp_on_non_bsd(self, m_is_bsd, m_dhcp, + m_fallback_nic, m_net): + """Ec2Local returns True for valid platform data on non-BSD with dhcp. + + DataSourceEc2Local will setup initial IPv4 network via dhcp discovery. + Then the metadata services is crawled for more network config info. + When the platform data is valid, return True. + """ + + m_fallback_nic.return_value = 'eth9' + m_is_bsd.return_value = False + m_dhcp.return_value = [{ + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'broadcast-address': '192.168.2.255'}] + self.datasource = ec2.DataSourceEc2Local + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + + ret = ds.get_data() + self.assertTrue(ret) + m_dhcp.assert_called_once_with('eth9', None) + m_net.assert_called_once_with( + broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', + prefix_or_mask='255.255.255.0', router='192.168.2.1', + static_routes=None) + self.assertIn('Crawl of metadata service took', self.logs.getvalue()) + + +class TestGetSecondaryAddresses(test_helpers.CiTestCase): + + mac = '06:17:04:d7:26:ff' + with_logs = True + + def test_md_with_no_secondary_addresses(self): + """Empty list is returned when nic metadata contains no secondary ip""" + self.assertEqual([], ec2.get_secondary_addresses(NIC2_MD, self.mac)) + + def test_md_with_secondary_v4_and_v6_addresses(self): + """All secondary addresses are returned from nic metadata""" + self.assertEqual( + ['172.31.45.70/20', '2600:1f16:292:100:f152:2222:3333:4444/128', + '2600:1f16:292:100:f153:12a3:c37c:11f9/128'], + ec2.get_secondary_addresses(NIC1_MD_IPV4_IPV6_MULTI_IP, self.mac)) + + def test_invalid_ipv4_ipv6_cidr_metadata_logged_with_defaults(self): + """Any invalid subnet-ipv(4|6)-cidr-block values use defaults""" + invalid_cidr_md = copy.deepcopy(NIC1_MD_IPV4_IPV6_MULTI_IP) + invalid_cidr_md['subnet-ipv4-cidr-block'] = "something-unexpected" + invalid_cidr_md['subnet-ipv6-cidr-block'] = "not/sure/what/this/is" + self.assertEqual( + ['172.31.45.70/24', '2600:1f16:292:100:f152:2222:3333:4444/128', + '2600:1f16:292:100:f153:12a3:c37c:11f9/128'], + ec2.get_secondary_addresses(invalid_cidr_md, self.mac)) + expected_logs = [ + "WARNING: Could not parse subnet-ipv4-cidr-block" + " something-unexpected for mac 06:17:04:d7:26:ff." + " ipv4 network config prefix defaults to /24", + "WARNING: Could not parse subnet-ipv6-cidr-block" + " not/sure/what/this/is for mac 06:17:04:d7:26:ff." 
+ " ipv6 network config prefix defaults to /128" + ] + logs = self.logs.getvalue() + for log in expected_logs: + self.assertIn(log, logs) + + +class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase): + + def setUp(self): + super(TestConvertEc2MetadataNetworkConfig, self).setUp() + self.mac1 = '06:17:04:d7:26:09' + interface_dict = copy.deepcopy( + DEFAULT_METADATA['network']['interfaces']['macs'][self.mac1]) + # These tests are written assuming the base interface doesn't have IPv6 + interface_dict.pop('ipv6s') + self.network_metadata = { + 'interfaces': {'macs': {self.mac1: interface_dict}}} + + def test_convert_ec2_metadata_network_config_skips_absent_macs(self): + """Any mac absent from metadata is skipped by network config.""" + macs_to_nics = {self.mac1: 'eth9', 'DE:AD:BE:EF:FF:FF': 'vitualnic2'} + + # DE:AD:BE:EF:FF:FF represented by OS but not in metadata + expected = {'version': 2, 'ethernets': {'eth9': { + 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', + 'dhcp4': True, 'dhcp6': False}}} + self.assertEqual( + expected, + ec2.convert_ec2_metadata_network_config( + self.network_metadata, macs_to_nics)) + + def test_convert_ec2_metadata_network_config_handles_only_dhcp6(self): + """Config dhcp6 when ipv6s is in metadata for a mac.""" + macs_to_nics = {self.mac1: 'eth9'} + network_metadata_ipv6 = copy.deepcopy(self.network_metadata) + nic1_metadata = ( + network_metadata_ipv6['interfaces']['macs'][self.mac1]) + nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64' + nic1_metadata.pop('public-ipv4s') + expected = {'version': 2, 'ethernets': {'eth9': { + 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', + 'dhcp4': True, 'dhcp6': True}}} + self.assertEqual( + expected, + ec2.convert_ec2_metadata_network_config( + network_metadata_ipv6, macs_to_nics)) + + def test_convert_ec2_metadata_network_config_local_only_dhcp4(self): + """Config dhcp4 when there are no public addresses in public-ipv4s.""" + macs_to_nics = {self.mac1: 'eth9'} + network_metadata_ipv6 = copy.deepcopy(self.network_metadata) + nic1_metadata = ( + network_metadata_ipv6['interfaces']['macs'][self.mac1]) + nic1_metadata['local-ipv4s'] = '172.3.3.15' + nic1_metadata.pop('public-ipv4s') + expected = {'version': 2, 'ethernets': {'eth9': { + 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', + 'dhcp4': True, 'dhcp6': False}}} + self.assertEqual( + expected, + ec2.convert_ec2_metadata_network_config( + network_metadata_ipv6, macs_to_nics)) + + def test_convert_ec2_metadata_network_config_handles_absent_dhcp4(self): + """Config dhcp4 on fallback_nic when there are no ipv4 addresses.""" + macs_to_nics = {self.mac1: 'eth9'} + network_metadata_ipv6 = copy.deepcopy(self.network_metadata) + nic1_metadata = ( + network_metadata_ipv6['interfaces']['macs'][self.mac1]) + nic1_metadata['public-ipv4s'] = '' + + # When no ipv4 or ipv6 content but fallback_nic set, set dhcp4 config. 
+        expected = {'version': 2, 'ethernets': {'eth9': {
+            'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
+            'dhcp4': True, 'dhcp6': False}}}
+        self.assertEqual(
+            expected,
+            ec2.convert_ec2_metadata_network_config(
+                network_metadata_ipv6, macs_to_nics, fallback_nic='eth9'))
+
+    def test_convert_ec2_metadata_network_config_handles_local_v4_and_v6(self):
+        """When ipv6s and local-ipv4s are non-empty, enable dhcp6 and dhcp4."""
+        macs_to_nics = {self.mac1: 'eth9'}
+        network_metadata_both = copy.deepcopy(self.network_metadata)
+        nic1_metadata = (
+            network_metadata_both['interfaces']['macs'][self.mac1])
+        nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
+        nic1_metadata.pop('public-ipv4s')
+        nic1_metadata['local-ipv4s'] = '10.0.0.42'  # Local ipv4 only on vpc
+        expected = {'version': 2, 'ethernets': {'eth9': {
+            'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
+            'dhcp4': True, 'dhcp6': True}}}
+        self.assertEqual(
+            expected,
+            ec2.convert_ec2_metadata_network_config(
+                network_metadata_both, macs_to_nics))
+
+    def test_convert_ec2_metadata_network_config_handles_multiple_nics(self):
+        """DHCP route-metric increases on secondary NICs for IPv4 and IPv6."""
+        mac2 = '06:17:04:d7:26:08'
+        macs_to_nics = {self.mac1: 'eth9', mac2: 'eth10'}
+        network_metadata_both = copy.deepcopy(self.network_metadata)
+        # Add 2nd nic info
+        network_metadata_both['interfaces']['macs'][mac2] = NIC2_MD
+        nic1_metadata = (
+            network_metadata_both['interfaces']['macs'][self.mac1])
+        nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
+        nic1_metadata.pop('public-ipv4s')  # No public-ipv4 IPs in cfg
+        nic1_metadata['local-ipv4s'] = '10.0.0.42'  # Local ipv4 only on vpc
+        expected = {'version': 2, 'ethernets': {
+            'eth9': {
+                'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
+                'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100},
+                'dhcp6': True, 'dhcp6-overrides': {'route-metric': 100}},
+            'eth10': {
+                'match': {'macaddress': mac2}, 'set-name': 'eth10',
+                'dhcp4': True, 'dhcp4-overrides': {'route-metric': 200},
+                'dhcp6': False}}}
+        self.assertEqual(
+            expected,
+            ec2.convert_ec2_metadata_network_config(
+                network_metadata_both, macs_to_nics))
+
+    def test_convert_ec2_metadata_network_config_handles_dhcp4_and_dhcp6(self):
+        """Config both dhcp4 and dhcp6 when both vpc-ipv6 and ipv4 exist."""
+        macs_to_nics = {self.mac1: 'eth9'}
+        network_metadata_both = copy.deepcopy(self.network_metadata)
+        nic1_metadata = (
+            network_metadata_both['interfaces']['macs'][self.mac1])
+        nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
+        expected = {'version': 2, 'ethernets': {'eth9': {
+            'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
+            'dhcp4': True, 'dhcp6': True}}}
+        self.assertEqual(
+            expected,
+            ec2.convert_ec2_metadata_network_config(
+                network_metadata_both, macs_to_nics))
+
+    def test_convert_ec2_metadata_gets_macs_from_get_interfaces_by_mac(self):
+        """Convert Ec2 Metadata calls get_interfaces_by_mac by default."""
+        expected = {'version': 2, 'ethernets': {'eth9': {
+            'match': {'macaddress': self.mac1},
+            'set-name': 'eth9', 'dhcp4': True, 'dhcp6': False}}}
+        patch_path = M_PATH_NET + 'get_interfaces_by_mac'
+        with mock.patch(patch_path) as m_get_interfaces_by_mac:
+            m_get_interfaces_by_mac.return_value = {self.mac1: 'eth9'}
+            self.assertEqual(
+                expected,
+                ec2.convert_ec2_metadata_network_config(self.network_metadata))
+
+
+class TestIdentifyPlatform(test_helpers.CiTestCase):
+
+    def collmock(self, **kwargs):
+        """Return a generic _collect_platform_data result, updated with
+        any keyword overrides."""
+        unspecial = {
+            'asset_tag': '3857-0037-2746-7462-1818-3997-77',
+            'serial': 'H23-C4J3JV-R6',
+            'uuid': '81c7e555-6471-4833-9551-1ab366c4cfd2',
+            'uuid_source': 'dmi',
+            'vendor': 'tothecloud',
+        }
+        unspecial.update(**kwargs)
+        return unspecial
+
+    @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+    def test_identify_zstack(self, m_collect):
+        """zstack should be identified if chassis-asset-tag ends in .zstack.io
+        """
+        m_collect.return_value = self.collmock(asset_tag='123456.zstack.io')
+        self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform())
+
+    @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+    def test_identify_zstack_full_domain_only(self, m_collect):
+        """zstack asset-tag matching should match only on full domain boundary.
+        """
+        m_collect.return_value = self.collmock(asset_tag='123456.buzzstack.io')
+        self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
+
+    @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+    def test_identify_e24cloud(self, m_collect):
+        """e24cloud identified if vendor is e24cloud"""
+        m_collect.return_value = self.collmock(vendor='e24cloud')
+        self.assertEqual(ec2.CloudNames.E24CLOUD, ec2.identify_platform())
+
+    @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
+    def test_identify_e24cloud_negative(self, m_collect):
+        """e24cloud not identified if vendor is not exactly 'e24cloud'."""
+        m_collect.return_value = self.collmock(vendor='e24cloudyday')
+        self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_exoscale.py b/tests/unittests/sources/test_exoscale.py
new file mode 100644
index 00000000..b0ffb7a5
--- /dev/null
+++ b/tests/unittests/sources/test_exoscale.py
@@ -0,0 +1,211 @@
+# Author: Mathieu Corbin
+# Author: Christopher Glass
+#
+# This file is part of cloud-init. See LICENSE file for license information.
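+
+# For orientation: the test URLs below expand roughly as follows, assuming
+# the upstream constants are METADATA_URL="http://169.254.169.254",
+# API_VERSION="1.0" and PASSWORD_SERVER_PORT=8080 (illustrative values,
+# not asserted by these tests):
+#   TEST_PASSWORD_URL -> http://169.254.169.254:8080/1.0/
+#   TEST_METADATA_URL -> http://169.254.169.254/1.0/meta-data/
+#   TEST_USERDATA_URL -> http://169.254.169.254/1.0/user-data
+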
+from cloudinit import helpers
+from cloudinit.sources.DataSourceExoscale import (
+    API_VERSION,
+    DataSourceExoscale,
+    METADATA_URL,
+    get_password,
+    PASSWORD_SERVER_PORT,
+    read_metadata)
+from tests.unittests.helpers import HttprettyTestCase, mock
+from cloudinit import util
+
+import httpretty
+import os
+import requests
+
+
+TEST_PASSWORD_URL = "{}:{}/{}/".format(METADATA_URL,
+                                       PASSWORD_SERVER_PORT,
+                                       API_VERSION)
+
+TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL,
+                                              API_VERSION)
+
+TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL,
+                                             API_VERSION)
+
+
+@httpretty.activate
+class TestDatasourceExoscale(HttprettyTestCase):
+
+    def setUp(self):
+        super(TestDatasourceExoscale, self).setUp()
+        self.tmp = self.tmp_dir()
+        self.password_url = TEST_PASSWORD_URL
+        self.metadata_url = TEST_METADATA_URL
+        self.userdata_url = TEST_USERDATA_URL
+
+    def test_password_saved(self):
+        """The password is not set when the metadata service returns
+        the 'saved_password' sentinel."""
+        httpretty.register_uri(httpretty.GET,
+                               self.password_url,
+                               body="saved_password")
+        self.assertFalse(get_password())
+
+    def test_password_empty(self):
+        """No password is set if the metadata service returns
+        an empty string."""
+        httpretty.register_uri(httpretty.GET,
+                               self.password_url,
+                               body="")
+        self.assertFalse(get_password())
+
+    def test_password(self):
+        """The password is set to what is found in the metadata
+        service."""
+        expected_password = "p@ssw0rd"
+        httpretty.register_uri(httpretty.GET,
+                               self.password_url,
+                               body=expected_password)
+        password = get_password()
+        self.assertEqual(expected_password, password)
+
+    def test_activate_removes_set_passwords_semaphore(self):
+        """Allow set_passwords to run every boot by removing the semaphore."""
+        path = helpers.Paths({'cloud_dir': self.tmp})
+        sem_dir = self.tmp_path('instance/sem', dir=self.tmp)
+        util.ensure_dir(sem_dir)
+        sem_file = os.path.join(sem_dir, 'config_set_passwords')
+        with open(sem_file, 'w') as stream:
+            stream.write('')
+        ds = DataSourceExoscale({}, None, path)
+        ds.activate(None, None)
+        self.assertFalse(os.path.exists(sem_file))
+
+    def test_get_data(self):
+        """The datasource conforms to expected behavior when supplied
+        full test data."""
+        path = helpers.Paths({'run_dir': self.tmp})
+        ds = DataSourceExoscale({}, None, path)
+        ds._is_platform_viable = lambda: True
+        expected_password = "p@ssw0rd"
+        expected_id = "12345"
+        expected_hostname = "myname"
+        expected_userdata = "#cloud-config"
+        httpretty.register_uri(httpretty.GET,
+                               self.userdata_url,
+                               body=expected_userdata)
+        httpretty.register_uri(httpretty.GET,
+                               self.password_url,
+                               body=expected_password)
+        httpretty.register_uri(httpretty.GET,
+                               self.metadata_url,
+                               body="instance-id\nlocal-hostname")
+        httpretty.register_uri(httpretty.GET,
+                               "{}local-hostname".format(self.metadata_url),
+                               body=expected_hostname)
+        httpretty.register_uri(httpretty.GET,
+                               "{}instance-id".format(self.metadata_url),
+                               body=expected_id)
+        self.assertTrue(ds._get_data())
+        self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
+        self.assertEqual(ds.metadata, {"instance-id": expected_id,
+                                       "local-hostname": expected_hostname})
+        self.assertEqual(ds.get_config_obj(),
+                         {'ssh_pwauth': True,
+                          'password': expected_password,
+                          'chpasswd': {
+                              'expire': False,
+                          }})
+
+    def test_get_data_saved_password(self):
+        """The datasource conforms to expected behavior when saved_password is
+        returned by the password server."""
+        path = helpers.Paths({'run_dir': self.tmp})
+        ds = DataSourceExoscale({},
None, path) + ds._is_platform_viable = lambda: True + expected_answer = "saved_password" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_answer) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + self.assertTrue(ds._get_data()) + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) + self.assertEqual(ds.get_config_obj(), {}) + + def test_get_data_no_password(self): + """The datasource conforms to expected behavior when no password is + returned by the password server.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: True + expected_answer = "" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_answer) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + self.assertTrue(ds._get_data()) + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) + self.assertEqual(ds.get_config_obj(), {}) + + @mock.patch('cloudinit.sources.DataSourceExoscale.get_password') + def test_read_metadata_when_password_server_unreachable(self, m_password): + """The read_metadata function returns partial results in case the + password server (only) is unreachable.""" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + + m_password.side_effect = requests.Timeout('Fake Connection Timeout') + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + + result = read_metadata() + + self.assertIsNone(result.get("password")) + self.assertEqual(result.get("user-data").decode("utf-8"), + expected_userdata) + + def test_non_viable_platform(self): + """The datasource fails fast when the platform is not viable.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: False + self.assertFalse(ds._get_data()) diff --git a/tests/unittests/sources/test_gce.py b/tests/unittests/sources/test_gce.py new file mode 100644 index 00000000..dc768e99 --- /dev/null +++ b/tests/unittests/sources/test_gce.py @@ -0,0 +1,388 @@ +# Copyright (C) 2014 Vaidas Jablonskis 
+# +# Author: Vaidas Jablonskis +# +# This file is part of cloud-init. See LICENSE file for license information. + +import datetime +import httpretty +import json +import re +from unittest import mock +from urllib.parse import urlparse + +from base64 import b64encode, b64decode + +from cloudinit import distros +from cloudinit import helpers +from cloudinit import settings +from cloudinit.sources import DataSourceGCE + +from tests.unittests import helpers as test_helpers + + +GCE_META = { + 'instance/id': '123', + 'instance/zone': 'foo/bar', + 'instance/hostname': 'server.project-foo.local', +} + +GCE_META_PARTIAL = { + 'instance/id': '1234', + 'instance/hostname': 'server.project-bar.local', + 'instance/zone': 'bar/baz', +} + +GCE_META_ENCODING = { + 'instance/id': '12345', + 'instance/hostname': 'server.project-baz.local', + 'instance/zone': 'baz/bang', + 'instance/attributes': { + 'user-data': b64encode(b'#!/bin/echo baz\n').decode('utf-8'), + 'user-data-encoding': 'base64', + } +} + +GCE_USER_DATA_TEXT = { + 'instance/id': '12345', + 'instance/hostname': 'server.project-baz.local', + 'instance/zone': 'baz/bang', + 'instance/attributes': { + 'user-data': '#!/bin/sh\necho hi mom\ntouch /run/up-now\n', + } +} + +HEADERS = {'Metadata-Flavor': 'Google'} +MD_URL_RE = re.compile( + r'http://metadata.google.internal/computeMetadata/v1/.*') +GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/' + 'v1/instance/guest-attributes/hostkeys/') + + +def _set_mock_metadata(gce_meta=None): + if gce_meta is None: + gce_meta = GCE_META + + def _request_callback(method, uri, headers): + url_path = urlparse(uri).path + if url_path.startswith('/computeMetadata/v1/'): + path = url_path.split('/computeMetadata/v1/')[1:][0] + recursive = path.endswith('/') + path = path.rstrip('/') + else: + path = None + if path in gce_meta: + response = gce_meta.get(path) + if recursive: + response = json.dumps(response) + return (200, headers, response) + else: + return (404, headers, '') + + # reset is needed. 
https://github.com/gabrielfalcao/HTTPretty/issues/316 + httpretty.register_uri(httpretty.GET, MD_URL_RE, body=_request_callback) + + +@httpretty.activate +class TestDataSourceGCE(test_helpers.HttprettyTestCase): + + def _make_distro(self, dtype, def_user=None): + cfg = dict(settings.CFG_BUILTIN) + cfg['system_info']['distro'] = dtype + paths = helpers.Paths(cfg['system_info']['paths']) + distro_cls = distros.fetch(dtype) + if def_user: + cfg['system_info']['default_user'] = def_user.copy() + distro = distro_cls(dtype, cfg['system_info'], paths) + return distro + + def setUp(self): + tmp = self.tmp_dir() + self.ds = DataSourceGCE.DataSourceGCE( + settings.CFG_BUILTIN, None, + helpers.Paths({'run_dir': tmp})) + ppatch = self.m_platform_reports_gce = mock.patch( + 'cloudinit.sources.DataSourceGCE.platform_reports_gce') + self.m_platform_reports_gce = ppatch.start() + self.m_platform_reports_gce.return_value = True + self.addCleanup(ppatch.stop) + self.add_patch('time.sleep', 'm_sleep') # just to speed up tests + super(TestDataSourceGCE, self).setUp() + + def test_connection(self): + _set_mock_metadata() + success = self.ds.get_data() + self.assertTrue(success) + + req_header = httpretty.last_request().headers + for header_name, expected_value in HEADERS.items(): + self.assertEqual(expected_value, req_header.get(header_name)) + + def test_metadata(self): + # UnicodeDecodeError if set to ds.userdata instead of userdata_raw + meta = GCE_META.copy() + meta['instance/attributes/user-data'] = b'/bin/echo \xff\n' + + _set_mock_metadata() + self.ds.get_data() + + shostname = GCE_META.get('instance/hostname').split('.')[0] + self.assertEqual(shostname, + self.ds.get_hostname()) + + self.assertEqual(GCE_META.get('instance/id'), + self.ds.get_instance_id()) + + self.assertEqual(GCE_META.get('instance/attributes/user-data'), + self.ds.get_userdata_raw()) + + # test partial metadata (missing user-data in particular) + def test_metadata_partial(self): + _set_mock_metadata(GCE_META_PARTIAL) + self.ds.get_data() + + self.assertEqual(GCE_META_PARTIAL.get('instance/id'), + self.ds.get_instance_id()) + + shostname = GCE_META_PARTIAL.get('instance/hostname').split('.')[0] + self.assertEqual(shostname, self.ds.get_hostname()) + + def test_userdata_no_encoding(self): + """check that user-data is read.""" + _set_mock_metadata(GCE_USER_DATA_TEXT) + self.ds.get_data() + self.assertEqual( + GCE_USER_DATA_TEXT['instance/attributes']['user-data'].encode(), + self.ds.get_userdata_raw()) + + def test_metadata_encoding(self): + """user-data is base64 encoded if user-data-encoding is 'base64'.""" + _set_mock_metadata(GCE_META_ENCODING) + self.ds.get_data() + + instance_data = GCE_META_ENCODING.get('instance/attributes') + decoded = b64decode(instance_data.get('user-data')) + self.assertEqual(decoded, self.ds.get_userdata_raw()) + + def test_missing_required_keys_return_false(self): + for required_key in ['instance/id', 'instance/zone', + 'instance/hostname']: + meta = GCE_META_PARTIAL.copy() + del meta[required_key] + _set_mock_metadata(meta) + self.assertEqual(False, self.ds.get_data()) + httpretty.reset() + + def test_no_ssh_keys_metadata(self): + _set_mock_metadata() + self.ds.get_data() + self.assertEqual([], self.ds.get_public_ssh_keys()) + + def test_cloudinit_ssh_keys(self): + valid_key = 'ssh-rsa VALID {0}' + invalid_key = 'ssh-rsa INVALID {0}' + project_attributes = { + 'sshKeys': '\n'.join([ + 'cloudinit:{0}'.format(valid_key.format(0)), + 'user:{0}'.format(invalid_key.format(0)), + ]), + 'ssh-keys': '\n'.join([ + 
'cloudinit:{0}'.format(valid_key.format(1)), + 'user:{0}'.format(invalid_key.format(1)), + ]), + } + instance_attributes = { + 'ssh-keys': '\n'.join([ + 'cloudinit:{0}'.format(valid_key.format(2)), + 'user:{0}'.format(invalid_key.format(2)), + ]), + 'block-project-ssh-keys': 'False', + } + + meta = GCE_META.copy() + meta['project/attributes'] = project_attributes + meta['instance/attributes'] = instance_attributes + + _set_mock_metadata(meta) + self.ds.get_data() + + expected = [valid_key.format(key) for key in range(3)] + self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys())) + + @mock.patch("cloudinit.sources.DataSourceGCE.ug_util") + def test_default_user_ssh_keys(self, mock_ug_util): + mock_ug_util.normalize_users_groups.return_value = None, None + mock_ug_util.extract_default.return_value = 'ubuntu', None + ubuntu_ds = DataSourceGCE.DataSourceGCE( + settings.CFG_BUILTIN, self._make_distro('ubuntu'), + helpers.Paths({'run_dir': self.tmp_dir()})) + + valid_key = 'ssh-rsa VALID {0}' + invalid_key = 'ssh-rsa INVALID {0}' + project_attributes = { + 'sshKeys': '\n'.join([ + 'ubuntu:{0}'.format(valid_key.format(0)), + 'user:{0}'.format(invalid_key.format(0)), + ]), + 'ssh-keys': '\n'.join([ + 'ubuntu:{0}'.format(valid_key.format(1)), + 'user:{0}'.format(invalid_key.format(1)), + ]), + } + instance_attributes = { + 'ssh-keys': '\n'.join([ + 'ubuntu:{0}'.format(valid_key.format(2)), + 'user:{0}'.format(invalid_key.format(2)), + ]), + 'block-project-ssh-keys': 'False', + } + + meta = GCE_META.copy() + meta['project/attributes'] = project_attributes + meta['instance/attributes'] = instance_attributes + + _set_mock_metadata(meta) + ubuntu_ds.get_data() + + expected = [valid_key.format(key) for key in range(3)] + self.assertEqual(set(expected), set(ubuntu_ds.get_public_ssh_keys())) + + def test_instance_ssh_keys_override(self): + valid_key = 'ssh-rsa VALID {0}' + invalid_key = 'ssh-rsa INVALID {0}' + project_attributes = { + 'sshKeys': 'cloudinit:{0}'.format(invalid_key.format(0)), + 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)), + } + instance_attributes = { + 'sshKeys': 'cloudinit:{0}'.format(valid_key.format(0)), + 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(1)), + 'block-project-ssh-keys': 'False', + } + + meta = GCE_META.copy() + meta['project/attributes'] = project_attributes + meta['instance/attributes'] = instance_attributes + + _set_mock_metadata(meta) + self.ds.get_data() + + expected = [valid_key.format(key) for key in range(2)] + self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys())) + + def test_block_project_ssh_keys_override(self): + valid_key = 'ssh-rsa VALID {0}' + invalid_key = 'ssh-rsa INVALID {0}' + project_attributes = { + 'sshKeys': 'cloudinit:{0}'.format(invalid_key.format(0)), + 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)), + } + instance_attributes = { + 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(0)), + 'block-project-ssh-keys': 'True', + } + + meta = GCE_META.copy() + meta['project/attributes'] = project_attributes + meta['instance/attributes'] = instance_attributes + + _set_mock_metadata(meta) + self.ds.get_data() + + expected = [valid_key.format(0)] + self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys())) + + def test_only_last_part_of_zone_used_for_availability_zone(self): + _set_mock_metadata() + r = self.ds.get_data() + self.assertEqual(True, r) + self.assertEqual('bar', self.ds.availability_zone) + + @mock.patch("cloudinit.sources.DataSourceGCE.GoogleMetadataFetcher") + def 
test_get_data_returns_false_if_not_on_gce(self, m_fetcher): + self.m_platform_reports_gce.return_value = False + ret = self.ds.get_data() + self.assertEqual(False, ret) + m_fetcher.assert_not_called() + + def test_has_expired(self): + + def _get_timestamp(days): + format_str = '%Y-%m-%dT%H:%M:%S+0000' + today = datetime.datetime.now() + timestamp = today + datetime.timedelta(days=days) + return timestamp.strftime(format_str) + + past = _get_timestamp(-1) + future = _get_timestamp(1) + ssh_keys = { + None: False, + '': False, + 'Invalid': False, + 'user:ssh-rsa key user@domain.com': False, + 'user:ssh-rsa key google {"expireOn":"%s"}' % past: False, + 'user:ssh-rsa key google-ssh': False, + 'user:ssh-rsa key google-ssh {invalid:json}': False, + 'user:ssh-rsa key google-ssh {"userName":"user"}': False, + 'user:ssh-rsa key google-ssh {"expireOn":"invalid"}': False, + 'user:xyz key google-ssh {"expireOn":"%s"}' % future: False, + 'user:xyz key google-ssh {"expireOn":"%s"}' % past: True, + } + + for key, expired in ssh_keys.items(): + self.assertEqual(DataSourceGCE._has_expired(key), expired) + + def test_parse_public_keys_non_ascii(self): + public_key_data = [ + 'cloudinit:rsa ssh-ke%s invalid' % chr(165), + 'use%sname:rsa ssh-key' % chr(174), + 'cloudinit:test 1', + 'default:test 2', + 'user:test 3', + ] + expected = ['test 1', 'test 2'] + found = DataSourceGCE._parse_public_keys( + public_key_data, default_user='default') + self.assertEqual(sorted(found), sorted(expected)) + + @mock.patch("cloudinit.url_helper.readurl") + def test_publish_host_keys(self, m_readurl): + hostkeys = [('ssh-rsa', 'asdfasdf'), + ('ssh-ed25519', 'qwerqwer')] + readurl_expected_calls = [ + mock.call(check_status=False, data=b'asdfasdf', headers=HEADERS, + request_method='PUT', + url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-rsa')), + mock.call(check_status=False, data=b'qwerqwer', headers=HEADERS, + request_method='PUT', + url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-ed25519')), + ] + self.ds.publish_host_keys(hostkeys) + m_readurl.assert_has_calls(readurl_expected_calls, any_order=True) + + @mock.patch( + "cloudinit.sources.DataSourceGCE.EphemeralDHCPv4", + autospec=True, + ) + @mock.patch( + "cloudinit.sources.DataSourceGCE.DataSourceGCELocal.fallback_interface" + ) + def test_local_datasource_uses_ephemeral_dhcp(self, _m_fallback, m_dhcp): + _set_mock_metadata() + ds = DataSourceGCE.DataSourceGCELocal( + sys_cfg={}, distro=None, paths=None + ) + ds._get_data() + assert m_dhcp.call_count == 1 + + @mock.patch( + "cloudinit.sources.DataSourceGCE.EphemeralDHCPv4", + autospec=True, + ) + def test_datasource_doesnt_use_ephemeral_dhcp(self, m_dhcp): + _set_mock_metadata() + ds = DataSourceGCE.DataSourceGCE(sys_cfg={}, distro=None, paths=None) + ds._get_data() + assert m_dhcp.call_count == 0 + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_hetzner.py b/tests/unittests/sources/test_hetzner.py new file mode 100644 index 00000000..5af0f3db --- /dev/null +++ b/tests/unittests/sources/test_hetzner.py @@ -0,0 +1,142 @@ +# Copyright (C) 2018 Jonas Keidel +# +# Author: Jonas Keidel +# +# This file is part of cloud-init. See LICENSE file for license information. 
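+
+# A note on the network assertions in this file: the datasource is expected
+# to bring up a temporary link-local address to reach Hetzner's metadata
+# service, so test_read_data checks that EphemeralIPv4Network is invoked
+# with ('eth0', '169.254.0.1', 16, '169.254.255.255'). The argument order
+# here simply mirrors the mocked call below; it is a sketch, not the
+# canonical signature.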
+ +from cloudinit.sources import DataSourceHetzner +import cloudinit.sources.helpers.hetzner as hc_helper +from cloudinit import util, settings, helpers + +from tests.unittests.helpers import mock, CiTestCase + +import base64 +import pytest + +METADATA = util.load_yaml(""" +hostname: cloudinit-test +instance-id: 123456 +local-ipv4: '' +network-config: + config: + - mac_address: 96:00:00:08:19:da + name: eth0 + subnets: + - dns_nameservers: + - 213.133.99.99 + - 213.133.100.100 + - 213.133.98.98 + ipv4: true + type: dhcp + type: physical + - name: eth0:0 + subnets: + - address: 2a01:4f8:beef:beef::1/64 + gateway: fe80::1 + ipv6: true + routes: + - gateway: fe80::1%eth0 + netmask: 0 + network: '::' + type: static + type: physical + version: 1 +network-sysconfig: "DEVICE='eth0'\nTYPE=Ethernet\nBOOTPROTO=dhcp\n\ + ONBOOT='yes'\nHWADDR=96:00:00:08:19:da\n\ + IPV6INIT=yes\nIPV6ADDR=2a01:4f8:beef:beef::1/64\n\ + IPV6_DEFAULTGW=fe80::1%eth0\nIPV6_AUTOCONF=no\n\ + DNS1=213.133.99.99\nDNS2=213.133.100.100\n" +public-ipv4: 192.168.0.1 +public-keys: +- ssh-ed25519 \ + AAAAC3Nzac1lZdI1NTE5AaaAIaFrcac0yVITsmRrmueq6MD0qYNKlEvW8O1Ib4nkhmWh \ + test-key@workstation +vendor_data: "test" +""") + +USERDATA = b"""#cloud-config +runcmd: +- [touch, /root/cloud-init-worked ] +""" + + +class TestDataSourceHetzner(CiTestCase): + """ + Test reading the meta-data + """ + def setUp(self): + super(TestDataSourceHetzner, self).setUp() + self.tmp = self.tmp_dir() + + def get_ds(self): + ds = DataSourceHetzner.DataSourceHetzner( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + return ds + + @mock.patch('cloudinit.net.EphemeralIPv4Network') + @mock.patch('cloudinit.net.find_fallback_nic') + @mock.patch('cloudinit.sources.helpers.hetzner.read_metadata') + @mock.patch('cloudinit.sources.helpers.hetzner.read_userdata') + @mock.patch('cloudinit.sources.DataSourceHetzner.get_hcloud_data') + def test_read_data(self, m_get_hcloud_data, m_usermd, m_readmd, + m_fallback_nic, m_net): + m_get_hcloud_data.return_value = (True, + str(METADATA.get('instance-id'))) + m_readmd.return_value = METADATA.copy() + m_usermd.return_value = USERDATA + m_fallback_nic.return_value = 'eth0' + + ds = self.get_ds() + ret = ds.get_data() + self.assertTrue(ret) + + m_net.assert_called_once_with( + 'eth0', '169.254.0.1', + 16, '169.254.255.255' + ) + + self.assertTrue(m_readmd.called) + + self.assertEqual(METADATA.get('hostname'), ds.get_hostname()) + + self.assertEqual(METADATA.get('public-keys'), + ds.get_public_ssh_keys()) + + self.assertIsInstance(ds.get_public_ssh_keys(), list) + self.assertEqual(ds.get_userdata_raw(), USERDATA) + self.assertEqual(ds.get_vendordata_raw(), METADATA.get('vendor_data')) + + @mock.patch('cloudinit.sources.helpers.hetzner.read_metadata') + @mock.patch('cloudinit.net.find_fallback_nic') + @mock.patch('cloudinit.sources.DataSourceHetzner.get_hcloud_data') + def test_not_on_hetzner_returns_false(self, m_get_hcloud_data, + m_find_fallback, m_read_md): + """If helper 'get_hcloud_data' returns False, + return False from get_data.""" + m_get_hcloud_data.return_value = (False, None) + ds = self.get_ds() + ret = ds.get_data() + + self.assertFalse(ret) + # These are a white box attempt to ensure it did not search. 
+ m_find_fallback.assert_not_called() + m_read_md.assert_not_called() + + +class TestMaybeB64Decode: + """Test the maybe_b64decode helper function.""" + + @pytest.mark.parametrize("invalid_input", (str("not bytes"), int(4))) + def test_raises_error_on_non_bytes(self, invalid_input): + """maybe_b64decode should raise error if data is not bytes.""" + with pytest.raises(TypeError): + hc_helper.maybe_b64decode(invalid_input) + + @pytest.mark.parametrize("in_data,expected", [ + # If data is not b64 encoded, then return value should be the same. + (b"this is my data", b"this is my data"), + # If data is b64 encoded, then return value should be decoded. + (base64.b64encode(b"data"), b"data"), + ]) + def test_happy_path(self, in_data, expected): + assert expected == hc_helper.maybe_b64decode(in_data) diff --git a/tests/unittests/sources/test_ibmcloud.py b/tests/unittests/sources/test_ibmcloud.py new file mode 100644 index 00000000..38e8e892 --- /dev/null +++ b/tests/unittests/sources/test_ibmcloud.py @@ -0,0 +1,343 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit.helpers import Paths +from cloudinit.sources import DataSourceIBMCloud as ibm +from tests.unittests import helpers as test_helpers +from cloudinit import util + +import base64 +import copy +import json +from textwrap import dedent + +mock = test_helpers.mock + +D_PATH = "cloudinit.sources.DataSourceIBMCloud." + + +@mock.patch(D_PATH + "_is_xen", return_value=True) +@mock.patch(D_PATH + "_is_ibm_provisioning") +@mock.patch(D_PATH + "util.blkid") +class TestGetIBMPlatform(test_helpers.CiTestCase): + """Test the get_ibm_platform helper.""" + + blkid_base = { + "/dev/xvda1": { + "DEVNAME": "/dev/xvda1", "LABEL": "cloudimg-bootfs", + "TYPE": "ext3"}, + "/dev/xvda2": { + "DEVNAME": "/dev/xvda2", "LABEL": "cloudimg-rootfs", + "TYPE": "ext4"}, + } + + blkid_metadata_disk = { + "/dev/xvdh1": { + "DEVNAME": "/dev/xvdh1", "LABEL": "METADATA", "TYPE": "vfat", + "SEC_TYPE": "msdos", "UUID": "681B-8C5D", + "PARTUUID": "3d631e09-01"}, + } + + blkid_oscode_disk = { + "/dev/xvdh": { + "DEVNAME": "/dev/xvdh", "LABEL": "config-2", "TYPE": "vfat", + "SEC_TYPE": "msdos", "UUID": ibm.IBM_CONFIG_UUID} + } + + def setUp(self): + self.blkid_metadata = copy.deepcopy(self.blkid_base) + self.blkid_metadata.update(copy.deepcopy(self.blkid_metadata_disk)) + + self.blkid_oscode = copy.deepcopy(self.blkid_base) + self.blkid_oscode.update(copy.deepcopy(self.blkid_oscode_disk)) + + def test_id_template_live_metadata(self, m_blkid, m_is_prov, _m_xen): + """identify TEMPLATE_LIVE_METADATA.""" + m_blkid.return_value = self.blkid_metadata + m_is_prov.return_value = False + self.assertEqual( + (ibm.Platforms.TEMPLATE_LIVE_METADATA, "/dev/xvdh1"), + ibm.get_ibm_platform()) + + def test_id_template_prov_metadata(self, m_blkid, m_is_prov, _m_xen): + """identify TEMPLATE_PROVISIONING_METADATA.""" + m_blkid.return_value = self.blkid_metadata + m_is_prov.return_value = True + self.assertEqual( + (ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh1"), + ibm.get_ibm_platform()) + + def test_id_template_prov_nodata(self, m_blkid, m_is_prov, _m_xen): + """identify TEMPLATE_PROVISIONING_NODATA.""" + m_blkid.return_value = self.blkid_base + m_is_prov.return_value = True + self.assertEqual( + (ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None), + ibm.get_ibm_platform()) + + def test_id_os_code(self, m_blkid, m_is_prov, _m_xen): + """Identify OS_CODE.""" + m_blkid.return_value = self.blkid_oscode + m_is_prov.return_value = False + 
self.assertEqual((ibm.Platforms.OS_CODE, "/dev/xvdh"), + ibm.get_ibm_platform()) + + def test_id_os_code_must_match_uuid(self, m_blkid, m_is_prov, _m_xen): + """Test against false positive on openstack with non-ibm UUID.""" + blkid = self.blkid_oscode + blkid["/dev/xvdh"]["UUID"] = "9999-9999" + m_blkid.return_value = blkid + m_is_prov.return_value = False + self.assertEqual((None, None), ibm.get_ibm_platform()) + + +@mock.patch(D_PATH + "_read_system_uuid", return_value=None) +@mock.patch(D_PATH + "get_ibm_platform") +class TestReadMD(test_helpers.CiTestCase): + """Test the read_datasource helper.""" + + template_md = { + "files": [], + "network_config": {"content_path": "/content/interfaces"}, + "hostname": "ci-fond-ram", + "name": "ci-fond-ram", + "domain": "testing.ci.cloud-init.org", + "meta": {"dsmode": "net"}, + "uuid": "8e636730-9f5d-c4a5-327c-d7123c46e82f", + "public_keys": {"1091307": "ssh-rsa AAAAB3NzaC1...Hw== ci-pubkey"}, + } + + oscode_md = { + "hostname": "ci-grand-gannet.testing.ci.cloud-init.org", + "name": "ci-grand-gannet", + "uuid": "2f266908-8e6c-4818-9b5c-42e9cc66a785", + "random_seed": "bm90LXJhbmRvbQo=", + "crypt_key": "ssh-rsa AAAAB3NzaC1yc2..n6z/", + "configuration_token": "eyJhbGciOi..M3ZA", + "public_keys": {"1091307": "ssh-rsa AAAAB3N..Hw== ci-pubkey"}, + } + + content_interfaces = dedent("""\ + auto lo + iface lo inet loopback + + auto eth0 + allow-hotplug eth0 + iface eth0 inet static + address 10.82.43.5 + netmask 255.255.255.192 + """) + + userdata = b"#!/bin/sh\necho hi mom\n" + # meta.js file gets json encoded userdata as a list. + meta_js = '["#!/bin/sh\necho hi mom\n"]' + vendor_data = { + "cloud-init": "#!/bin/bash\necho 'root:$6$5ab01p1m1' | chpasswd -e"} + + network_data = { + "links": [ + {"id": "interface_29402281", "name": "eth0", "mtu": None, + "type": "phy", "ethernet_mac_address": "06:00:f1:bd:da:25"}, + {"id": "interface_29402279", "name": "eth1", "mtu": None, + "type": "phy", "ethernet_mac_address": "06:98:5e:d0:7f:86"} + ], + "networks": [ + {"id": "network_109887563", "link": "interface_29402281", + "type": "ipv4", "ip_address": "10.82.43.2", + "netmask": "255.255.255.192", + "routes": [ + {"network": "10.0.0.0", "netmask": "255.0.0.0", + "gateway": "10.82.43.1"}, + {"network": "161.26.0.0", "netmask": "255.255.0.0", + "gateway": "10.82.43.1"}]}, + {"id": "network_109887551", "link": "interface_29402279", + "type": "ipv4", "ip_address": "108.168.194.252", + "netmask": "255.255.255.248", + "routes": [ + {"network": "0.0.0.0", "netmask": "0.0.0.0", + "gateway": "108.168.194.249"}]} + ], + "services": [ + {"type": "dns", "address": "10.0.80.11"}, + {"type": "dns", "address": "10.0.80.12"} + ], + } + + sysuuid = '7f79ebf5-d791-43c3-a723-854e8389d59f' + + def _get_expected_metadata(self, os_md): + """return expected 'metadata' for data loaded from meta_data.json.""" + os_md = copy.deepcopy(os_md) + renames = ( + ('hostname', 'local-hostname'), + ('uuid', 'instance-id'), + ('public_keys', 'public-keys')) + ret = {} + for osname, mdname in renames: + if osname in os_md: + ret[mdname] = os_md[osname] + if 'random_seed' in os_md: + ret['random_seed'] = base64.b64decode(os_md['random_seed']) + + return ret + + def test_provisioning_md(self, m_platform, m_sysuuid): + """Provisioning env with a metadata disk should return None.""" + m_platform.return_value = ( + ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh") + self.assertIsNone(ibm.read_md()) + + def test_provisioning_no_metadata(self, m_platform, m_sysuuid): + """Provisioning env with no 
metadata disk should return None.""" + m_platform.return_value = ( + ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None) + self.assertIsNone(ibm.read_md()) + + def test_provisioning_not_ibm(self, m_platform, m_sysuuid): + """Provisioning env but not identified as IBM should return None.""" + m_platform.return_value = (None, None) + self.assertIsNone(ibm.read_md()) + + def test_template_live(self, m_platform, m_sysuuid): + """Template live environment should be identified.""" + tmpdir = self.tmp_dir() + m_platform.return_value = ( + ibm.Platforms.TEMPLATE_LIVE_METADATA, tmpdir) + m_sysuuid.return_value = self.sysuuid + + test_helpers.populate_dir(tmpdir, { + 'openstack/latest/meta_data.json': json.dumps(self.template_md), + 'openstack/latest/user_data': self.userdata, + 'openstack/content/interfaces': self.content_interfaces, + 'meta.js': self.meta_js}) + + ret = ibm.read_md() + self.assertEqual(ibm.Platforms.TEMPLATE_LIVE_METADATA, + ret['platform']) + self.assertEqual(tmpdir, ret['source']) + self.assertEqual(self.userdata, ret['userdata']) + self.assertEqual(self._get_expected_metadata(self.template_md), + ret['metadata']) + self.assertEqual(self.sysuuid, ret['system-uuid']) + + def test_os_code_live(self, m_platform, m_sysuuid): + """Verify an os_code metadata path.""" + tmpdir = self.tmp_dir() + m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir) + netdata = json.dumps(self.network_data) + test_helpers.populate_dir(tmpdir, { + 'openstack/latest/meta_data.json': json.dumps(self.oscode_md), + 'openstack/latest/user_data': self.userdata, + 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data), + 'openstack/latest/network_data.json': netdata, + }) + + ret = ibm.read_md() + self.assertEqual(ibm.Platforms.OS_CODE, ret['platform']) + self.assertEqual(tmpdir, ret['source']) + self.assertEqual(self.userdata, ret['userdata']) + self.assertEqual(self._get_expected_metadata(self.oscode_md), + ret['metadata']) + + def test_os_code_live_no_userdata(self, m_platform, m_sysuuid): + """Verify os_code without user-data.""" + tmpdir = self.tmp_dir() + m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir) + test_helpers.populate_dir(tmpdir, { + 'openstack/latest/meta_data.json': json.dumps(self.oscode_md), + 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data), + }) + + ret = ibm.read_md() + self.assertEqual(ibm.Platforms.OS_CODE, ret['platform']) + self.assertEqual(tmpdir, ret['source']) + self.assertIsNone(ret['userdata']) + self.assertEqual(self._get_expected_metadata(self.oscode_md), + ret['metadata']) + + +class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase): + """Test the _is_ibm_provisioning method.""" + inst_log = "/root/swinstall.log" + prov_cfg = "/root/provisioningConfiguration.cfg" + boot_ref = "/proc/1/environ" + with_logs = True + + def _call_with_root(self, rootd): + self.reRoot(rootd) + return ibm._is_ibm_provisioning() + + def test_no_config(self): + """No provisioning config means not provisioning.""" + self.assertFalse(self._call_with_root(self.tmp_dir())) + + def test_config_only(self): + """A provisioning config without a log means provisioning.""" + rootd = self.tmp_dir() + test_helpers.populate_dir(rootd, {self.prov_cfg: "key=value"}) + self.assertTrue(self._call_with_root(rootd)) + + def test_config_with_old_log(self): + """A config with a log from previous boot is not provisioning.""" + rootd = self.tmp_dir() + data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10), + self.inst_log: ("log data\n", -30), + self.boot_ref: ("PWD=/", 0)} 
+ test_helpers.populate_dir_with_ts(rootd, data) + self.assertFalse(self._call_with_root(rootd=rootd)) + self.assertIn("from previous boot", self.logs.getvalue()) + + def test_config_with_new_log(self): + """A config with a log from this boot is provisioning.""" + rootd = self.tmp_dir() + data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10), + self.inst_log: ("log data\n", 30), + self.boot_ref: ("PWD=/", 0)} + test_helpers.populate_dir_with_ts(rootd, data) + self.assertTrue(self._call_with_root(rootd=rootd)) + self.assertIn("from current boot", self.logs.getvalue()) + + def test_config_and_log_no_reference(self): + """If the config and log existed, but no reference, assume not.""" + rootd = self.tmp_dir() + test_helpers.populate_dir( + rootd, {self.prov_cfg: "key=value", self.inst_log: "log data\n"}) + self.assertFalse(self._call_with_root(rootd=rootd)) + self.assertIn("no reference file", self.logs.getvalue()) + + +class TestDataSourceIBMCloud(test_helpers.CiTestCase): + + def setUp(self): + super(TestDataSourceIBMCloud, self).setUp() + self.tmp = self.tmp_dir() + self.cloud_dir = self.tmp_path('cloud', dir=self.tmp) + util.ensure_dir(self.cloud_dir) + paths = Paths({'run_dir': self.tmp, 'cloud_dir': self.cloud_dir}) + self.ds = ibm.DataSourceIBMCloud( + sys_cfg={}, distro=None, paths=paths) + + def test_get_data_false(self): + """When read_md returns None, get_data returns False.""" + with mock.patch(D_PATH + 'read_md', return_value=None): + self.assertFalse(self.ds.get_data()) + + def test_get_data_processes_read_md(self): + """get_data processes and caches content returned by read_md.""" + md = { + 'metadata': {}, 'networkdata': 'net', 'platform': 'plat', + 'source': 'src', 'system-uuid': 'uuid', 'userdata': 'ud', + 'vendordata': 'vd'} + with mock.patch(D_PATH + 'read_md', return_value=md): + self.assertTrue(self.ds.get_data()) + self.assertEqual('src', self.ds.source) + self.assertEqual('plat', self.ds.platform) + self.assertEqual({}, self.ds.metadata) + self.assertEqual('ud', self.ds.userdata_raw) + self.assertEqual('net', self.ds.network_json) + self.assertEqual('vd', self.ds.vendordata_pure) + self.assertEqual('uuid', self.ds.system_uuid) + self.assertEqual('ibmcloud', self.ds.cloud_name) + self.assertEqual('ibmcloud', self.ds.platform_type) + self.assertEqual('plat (src)', self.ds.subplatform) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_init.py b/tests/unittests/sources/test_init.py new file mode 100644 index 00000000..a1d19518 --- /dev/null +++ b/tests/unittests/sources/test_init.py @@ -0,0 +1,771 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
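+
+# The tests below exercise the DataSource subclassing contract: a concrete
+# source implements _get_data(), populates self.metadata and
+# self.userdata_raw, and returns a bool; get_data() wraps that and persists
+# instance data to INSTANCE_JSON_FILE. A minimal sketch (names are
+# illustrative, mirroring the DataSourceTestSubclassNet helper defined
+# below):
+#
+#     class DataSourceMinimal(DataSource):
+#         dsname = 'Minimal'
+#
+#         def _get_data(self):
+#             self.metadata = {'local-hostname': 'minimal-host'}
+#             self.userdata_raw = '#cloud-config\n'
+#             return True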
+ +import copy +import inspect +import os +import stat + +from cloudinit.event import EventScope, EventType +from cloudinit.helpers import Paths +from cloudinit import importer +from cloudinit.sources import ( + EXPERIMENTAL_TEXT, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, + METADATA_UNKNOWN, REDACT_SENSITIVE_VALUE, UNSET, DataSource, + canonical_cloud_id, redact_sensitive_keys) +from tests.unittests.helpers import CiTestCase, mock +from cloudinit.user_data import UserDataProcessor +from cloudinit import util + + +class DataSourceTestSubclassNet(DataSource): + + dsname = 'MyTestSubclass' + url_max_wait = 55 + + def __init__(self, sys_cfg, distro, paths, custom_metadata=None, + custom_userdata=None, get_data_retval=True): + super(DataSourceTestSubclassNet, self).__init__( + sys_cfg, distro, paths) + self._custom_userdata = custom_userdata + self._custom_metadata = custom_metadata + self._get_data_retval = get_data_retval + + def _get_cloud_name(self): + return 'SubclassCloudName' + + def _get_data(self): + if self._custom_metadata: + self.metadata = self._custom_metadata + else: + self.metadata = {'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion'} + if self._custom_userdata: + self.userdata_raw = self._custom_userdata + else: + self.userdata_raw = 'userdata_raw' + self.vendordata_raw = 'vendordata_raw' + return self._get_data_retval + + +class InvalidDataSourceTestSubclassNet(DataSource): + pass + + +class TestDataSource(CiTestCase): + + with_logs = True + maxDiff = None + + def setUp(self): + super(TestDataSource, self).setUp() + self.sys_cfg = {'datasource': {'_undef': {'key1': False}}} + self.distro = 'distrotest' # generally should be a Distro object + self.paths = Paths({}) + self.datasource = DataSource(self.sys_cfg, self.distro, self.paths) + + def test_datasource_init(self): + """DataSource initializes metadata attributes, ds_cfg and ud_proc.""" + self.assertEqual(self.paths, self.datasource.paths) + self.assertEqual(self.sys_cfg, self.datasource.sys_cfg) + self.assertEqual(self.distro, self.datasource.distro) + self.assertIsNone(self.datasource.userdata) + self.assertEqual({}, self.datasource.metadata) + self.assertIsNone(self.datasource.userdata_raw) + self.assertIsNone(self.datasource.vendordata) + self.assertIsNone(self.datasource.vendordata_raw) + self.assertEqual({'key1': False}, self.datasource.ds_cfg) + self.assertIsInstance(self.datasource.ud_proc, UserDataProcessor) + + def test_datasource_init_gets_ds_cfg_using_dsname(self): + """Init uses DataSource.dsname for sourcing ds_cfg.""" + sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} + distro = 'distrotest' # generally should be a Distro object + datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths) + self.assertEqual({'key2': False}, datasource.ds_cfg) + + def test_str_is_classname(self): + """The string representation of the datasource is the classname.""" + self.assertEqual('DataSource', str(self.datasource)) + self.assertEqual( + 'DataSourceTestSubclassNet', + str(DataSourceTestSubclassNet('', '', self.paths))) + + def test_datasource_get_url_params_defaults(self): + """get_url_params default url config settings for the datasource.""" + params = self.datasource.get_url_params() + self.assertEqual(params.max_wait_seconds, self.datasource.url_max_wait) + self.assertEqual(params.timeout_seconds, self.datasource.url_timeout) + self.assertEqual(params.num_retries, self.datasource.url_retries) + self.assertEqual(params.sec_between_retries, + 
self.datasource.url_sec_between_retries) + + def test_datasource_get_url_params_subclassed(self): + """Subclasses can override get_url_params defaults.""" + sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} + distro = 'distrotest' # generally should be a Distro object + datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths) + expected = (datasource.url_max_wait, datasource.url_timeout, + datasource.url_retries, datasource.url_sec_between_retries) + url_params = datasource.get_url_params() + self.assertNotEqual(self.datasource.get_url_params(), url_params) + self.assertEqual(expected, url_params) + + def test_datasource_get_url_params_ds_config_override(self): + """Datasource configuration options can override url param defaults.""" + sys_cfg = { + 'datasource': { + 'MyTestSubclass': { + 'max_wait': '1', 'timeout': '2', + 'retries': '3', 'sec_between_retries': 4 + }}} + datasource = DataSourceTestSubclassNet( + sys_cfg, self.distro, self.paths) + expected = (1, 2, 3, 4) + url_params = datasource.get_url_params() + self.assertNotEqual( + (datasource.url_max_wait, datasource.url_timeout, + datasource.url_retries, datasource.url_sec_between_retries), + url_params) + self.assertEqual(expected, url_params) + + def test_datasource_get_url_params_is_zero_or_greater(self): + """get_url_params ignores timeouts with a value below 0.""" + # Set an override that is below 0 which gets ignored. + sys_cfg = {'datasource': {'_undef': {'timeout': '-1'}}} + datasource = DataSource(sys_cfg, self.distro, self.paths) + (_max_wait, timeout, _retries, + _sec_between_retries) = datasource.get_url_params() + self.assertEqual(0, timeout) + + def test_datasource_get_url_uses_defaults_on_errors(self): + """On invalid system config values for url_params defaults are used.""" + # All invalid values should be logged + sys_cfg = {'datasource': { + '_undef': { + 'max_wait': 'nope', 'timeout': 'bug', 'retries': 'nonint'}}} + datasource = DataSource(sys_cfg, self.distro, self.paths) + url_params = datasource.get_url_params() + expected = (datasource.url_max_wait, datasource.url_timeout, + datasource.url_retries, datasource.url_sec_between_retries) + self.assertEqual(expected, url_params) + logs = self.logs.getvalue() + expected_logs = [ + "Config max_wait 'nope' is not an int, using default '-1'", + "Config timeout 'bug' is not an int, using default '10'", + "Config retries 'nonint' is not an int, using default '5'", + ] + for log in expected_logs: + self.assertIn(log, logs) + + @mock.patch('cloudinit.sources.net.find_fallback_nic') + def test_fallback_interface_is_discovered(self, m_get_fallback_nic): + """The fallback_interface is discovered via find_fallback_nic.""" + m_get_fallback_nic.return_value = 'nic9' + self.assertEqual('nic9', self.datasource.fallback_interface) + + @mock.patch('cloudinit.sources.net.find_fallback_nic') + def test_fallback_interface_logs_undiscovered(self, m_get_fallback_nic): + """Log a warning when fallback_interface can not discover the nic.""" + self.datasource._cloud_name = 'MySupahCloud' + m_get_fallback_nic.return_value = None # Couldn't discover nic + self.assertIsNone(self.datasource.fallback_interface) + self.assertEqual( + 'WARNING: Did not find a fallback interface on MySupahCloud.\n', + self.logs.getvalue()) + + @mock.patch('cloudinit.sources.net.find_fallback_nic') + def test_wb_fallback_interface_is_cached(self, m_get_fallback_nic): + """The fallback_interface is cached and won't be rediscovered.""" + self.datasource._fallback_interface = 'nic10' + 
        self.assertEqual('nic10', self.datasource.fallback_interface)
+        m_get_fallback_nic.assert_not_called()
+
+    def test__get_data_unimplemented(self):
+        """Raise an error when _get_data is not implemented."""
+        with self.assertRaises(NotImplementedError) as context_manager:
+            self.datasource.get_data()
+        self.assertIn(
+            'Subclasses of DataSource must implement _get_data',
+            str(context_manager.exception))
+        datasource2 = InvalidDataSourceTestSubclassNet(
+            self.sys_cfg, self.distro, self.paths)
+        with self.assertRaises(NotImplementedError) as context_manager:
+            datasource2.get_data()
+        self.assertIn(
+            'Subclasses of DataSource must implement _get_data',
+            str(context_manager.exception))
+
+    def test_get_data_calls_subclass__get_data(self):
+        """Datasource.get_data uses the subclass' version of _get_data."""
+        tmp = self.tmp_dir()
+        datasource = DataSourceTestSubclassNet(
+            self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+        self.assertTrue(datasource.get_data())
+        self.assertEqual(
+            {'availability_zone': 'myaz',
+             'local-hostname': 'test-subclass-hostname',
+             'region': 'myregion'},
+            datasource.metadata)
+        self.assertEqual('userdata_raw', datasource.userdata_raw)
+        self.assertEqual('vendordata_raw', datasource.vendordata_raw)
+
+    def test_get_hostname_strips_local_hostname_without_domain(self):
+        """Datasource.get_hostname strips metadata local-hostname of domain."""
+        tmp = self.tmp_dir()
+        datasource = DataSourceTestSubclassNet(
+            self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+        self.assertTrue(datasource.get_data())
+        self.assertEqual(
+            'test-subclass-hostname', datasource.metadata['local-hostname'])
+        self.assertEqual('test-subclass-hostname', datasource.get_hostname())
+        datasource.metadata['local-hostname'] = 'hostname.my.domain.com'
+        self.assertEqual('hostname', datasource.get_hostname())
+
+    def test_get_hostname_with_fqdn_returns_local_hostname_with_domain(self):
+        """Datasource.get_hostname with fqdn set gets qualified hostname."""
+        tmp = self.tmp_dir()
+        datasource = DataSourceTestSubclassNet(
+            self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+        self.assertTrue(datasource.get_data())
+        datasource.metadata['local-hostname'] = 'hostname.my.domain.com'
+        self.assertEqual(
+            'hostname.my.domain.com', datasource.get_hostname(fqdn=True))
+
+    def test_get_hostname_without_metadata_uses_system_hostname(self):
+        """Datasource.get_hostname runs util.get_hostname when no metadata."""
+        tmp = self.tmp_dir()
+        datasource = DataSourceTestSubclassNet(
+            self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+        self.assertEqual({}, datasource.metadata)
+        mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts'
+        with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost:
+            with mock.patch(mock_fqdn) as m_fqdn:
+                m_gethost.return_value = 'systemhostname.domain.com'
+                m_fqdn.return_value = None  # No matching fqdn in /etc/hosts
+                self.assertEqual('systemhostname', datasource.get_hostname())
+                self.assertEqual(
+                    'systemhostname.domain.com',
+                    datasource.get_hostname(fqdn=True))
+
+    def test_get_hostname_without_metadata_returns_none(self):
+        """Datasource.get_hostname returns None when metadata_only, no MD."""
+        tmp = self.tmp_dir()
+        datasource = DataSourceTestSubclassNet(
+            self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+        self.assertEqual({}, datasource.metadata)
+        mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts'
+        with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost:
+            with mock.patch(mock_fqdn) as m_fqdn:
+                self.assertIsNone(datasource.get_hostname(metadata_only=True))
+                self.assertIsNone(
+                    datasource.get_hostname(fqdn=True, metadata_only=True))
+        self.assertEqual([], m_gethost.call_args_list)
+        self.assertEqual([], m_fqdn.call_args_list)
+
+    def test_get_hostname_without_metadata_prefers_etc_hosts(self):
+        """Datasource.get_hostname prefers /etc/hosts to util.get_hostname."""
+        tmp = self.tmp_dir()
+        datasource = DataSourceTestSubclassNet(
+            self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+        self.assertEqual({}, datasource.metadata)
+        mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts'
+        with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost:
+            with mock.patch(mock_fqdn) as m_fqdn:
+                m_gethost.return_value = 'systemhostname.domain.com'
+                m_fqdn.return_value = 'fqdnhostname.domain.com'
+                self.assertEqual('fqdnhostname', datasource.get_hostname())
+                self.assertEqual('fqdnhostname.domain.com',
+                                 datasource.get_hostname(fqdn=True))
+
+    def test_get_data_does_not_write_instance_data_on_failure(self):
+        """get_data does not write INSTANCE_JSON_FILE on get_data False."""
+        tmp = self.tmp_dir()
+        datasource = DataSourceTestSubclassNet(
+            self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
+            get_data_retval=False)
+        self.assertFalse(datasource.get_data())
+        json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+        self.assertFalse(
+            os.path.exists(json_file), 'Found unexpected file %s' % json_file)
+
+    def test_get_data_writes_json_instance_data_on_success(self):
+        """get_data writes INSTANCE_JSON_FILE to run_dir as world readable."""
+        tmp = self.tmp_dir()
+        datasource = DataSourceTestSubclassNet(
+            self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
+        sys_info = {
+            "python": "3.7",
+            "platform":
+                "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
+            "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
+                      "x86_64"],
+            "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
+        with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+            datasource.get_data()
+        json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
+        content = util.load_file(json_file)
+        expected = {
+            'base64_encoded_keys': [],
+            'merged_cfg': REDACT_SENSITIVE_VALUE,
+            'sensitive_keys': ['merged_cfg'],
+            'sys_info': sys_info,
+            'v1': {
+                '_beta_keys': ['subplatform'],
+                'availability-zone': 'myaz',
+                'availability_zone': 'myaz',
+                'cloud-name': 'subclasscloudname',
+                'cloud_name': 'subclasscloudname',
+                'distro': 'ubuntu',
+                'distro_release': 'focal',
+                'distro_version': '20.04',
+                'instance-id': 'iid-datasource',
+                'instance_id': 'iid-datasource',
+                'local-hostname': 'test-subclass-hostname',
+                'local_hostname': 'test-subclass-hostname',
+                'kernel_release': '5.4.0-24-generic',
+                'machine': 'x86_64',
+                'platform': 'mytestsubclass',
+                'public_ssh_keys': [],
+                'python_version': '3.7',
+                'region': 'myregion',
+                'system_platform':
+                    'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
+                'subplatform': 'unknown',
+                'variant': 'ubuntu'},
+            'ds': {
+                '_doc': EXPERIMENTAL_TEXT,
+                'meta_data': {'availability_zone': 'myaz',
+                              'local-hostname': 'test-subclass-hostname',
+                              'region': 'myregion'}}}
+        self.assertEqual(expected, util.load_json(content))
+        file_stat = os.stat(json_file)
+        self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
+        self.assertEqual(expected, util.load_json(content))
+
+    def test_get_data_writes_redacted_public_json_instance_data(self):
+        """get_data writes redacted content to public INSTANCE_JSON_FILE."""
+        tmp = self.tmp_dir()
+        datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + custom_metadata={ + 'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion', + 'some': {'security-credentials': { + 'cred1': 'sekret', 'cred2': 'othersekret'}}}) + self.assertCountEqual( + ('merged_cfg', 'security-credentials',), + datasource.sensitive_metadata_keys) + sys_info = { + "python": "3.7", + "platform": + "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal", + "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah", + "x86_64"], + "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]} + with mock.patch("cloudinit.util.system_info", return_value=sys_info): + datasource.get_data() + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + redacted = util.load_json(util.load_file(json_file)) + expected = { + 'base64_encoded_keys': [], + 'merged_cfg': REDACT_SENSITIVE_VALUE, + 'sensitive_keys': [ + 'ds/meta_data/some/security-credentials', 'merged_cfg'], + 'sys_info': sys_info, + 'v1': { + '_beta_keys': ['subplatform'], + 'availability-zone': 'myaz', + 'availability_zone': 'myaz', + 'cloud-name': 'subclasscloudname', + 'cloud_name': 'subclasscloudname', + 'distro': 'ubuntu', + 'distro_release': 'focal', + 'distro_version': '20.04', + 'instance-id': 'iid-datasource', + 'instance_id': 'iid-datasource', + 'local-hostname': 'test-subclass-hostname', + 'local_hostname': 'test-subclass-hostname', + 'kernel_release': '5.4.0-24-generic', + 'machine': 'x86_64', + 'platform': 'mytestsubclass', + 'public_ssh_keys': [], + 'python_version': '3.7', + 'region': 'myregion', + 'system_platform': + 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal', + 'subplatform': 'unknown', + 'variant': 'ubuntu'}, + 'ds': { + '_doc': EXPERIMENTAL_TEXT, + 'meta_data': { + 'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion', + 'some': {'security-credentials': REDACT_SENSITIVE_VALUE}}} + } + self.assertCountEqual(expected, redacted) + file_stat = os.stat(json_file) + self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode)) + + def test_get_data_writes_json_instance_data_sensitive(self): + """ + get_data writes unmodified data to sensitive file as root-readonly. 
+ """ + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + custom_metadata={ + 'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion', + 'some': {'security-credentials': { + 'cred1': 'sekret', 'cred2': 'othersekret'}}}) + sys_info = { + "python": "3.7", + "platform": + "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal", + "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah", + "x86_64"], + "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]} + + self.assertCountEqual( + ('merged_cfg', 'security-credentials',), + datasource.sensitive_metadata_keys) + with mock.patch("cloudinit.util.system_info", return_value=sys_info): + datasource.get_data() + sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp) + content = util.load_file(sensitive_json_file) + expected = { + 'base64_encoded_keys': [], + 'merged_cfg': { + '_doc': ( + 'Merged cloud-init system config from ' + '/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/' + ), + 'datasource': {'_undef': {'key1': False}}}, + 'sensitive_keys': [ + 'ds/meta_data/some/security-credentials', 'merged_cfg'], + 'sys_info': sys_info, + 'v1': { + '_beta_keys': ['subplatform'], + 'availability-zone': 'myaz', + 'availability_zone': 'myaz', + 'cloud-name': 'subclasscloudname', + 'cloud_name': 'subclasscloudname', + 'distro': 'ubuntu', + 'distro_release': 'focal', + 'distro_version': '20.04', + 'instance-id': 'iid-datasource', + 'instance_id': 'iid-datasource', + 'kernel_release': '5.4.0-24-generic', + 'local-hostname': 'test-subclass-hostname', + 'local_hostname': 'test-subclass-hostname', + 'machine': 'x86_64', + 'platform': 'mytestsubclass', + 'public_ssh_keys': [], + 'python_version': '3.7', + 'region': 'myregion', + 'subplatform': 'unknown', + 'system_platform': + 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal', + 'variant': 'ubuntu'}, + 'ds': { + '_doc': EXPERIMENTAL_TEXT, + 'meta_data': { + 'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion', + 'some': { + 'security-credentials': + {'cred1': 'sekret', 'cred2': 'othersekret'}}}} + } + self.assertCountEqual(expected, util.load_json(content)) + file_stat = os.stat(sensitive_json_file) + self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode)) + self.assertEqual(expected, util.load_json(content)) + + def test_get_data_handles_redacted_unserializable_content(self): + """get_data warns unserializable content in INSTANCE_JSON_FILE.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + custom_metadata={'key1': 'val1', 'key2': {'key2.1': self.paths}}) + datasource.get_data() + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + content = util.load_file(json_file) + expected_metadata = { + 'key1': 'val1', + 'key2': { + 'key2.1': "Warning: redacted unserializable type "}} + instance_json = util.load_json(content) + self.assertEqual( + expected_metadata, instance_json['ds']['meta_data']) + + def test_persist_instance_data_writes_ec2_metadata_when_set(self): + """When ec2_metadata class attribute is set, persist to json.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + datasource.ec2_metadata = UNSET + datasource.get_data() + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + instance_data = util.load_json(util.load_file(json_file)) + self.assertNotIn('ec2_metadata', 
instance_data['ds']) + datasource.ec2_metadata = {'ec2stuff': 'is good'} + datasource.persist_instance_data() + instance_data = util.load_json(util.load_file(json_file)) + self.assertEqual( + {'ec2stuff': 'is good'}, + instance_data['ds']['ec2_metadata']) + + def test_persist_instance_data_writes_network_json_when_set(self): + """When network_data.json class attribute is set, persist to json.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + datasource.get_data() + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + instance_data = util.load_json(util.load_file(json_file)) + self.assertNotIn('network_json', instance_data['ds']) + datasource.network_json = {'network_json': 'is good'} + datasource.persist_instance_data() + instance_data = util.load_json(util.load_file(json_file)) + self.assertEqual( + {'network_json': 'is good'}, + instance_data['ds']['network_json']) + + def test_get_data_base64encodes_unserializable_bytes(self): + """On py3, get_data base64encodes any unserializable content.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}}) + self.assertTrue(datasource.get_data()) + json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) + content = util.load_file(json_file) + instance_json = util.load_json(content) + self.assertCountEqual( + ['ds/meta_data/key2/key2.1'], + instance_json['base64_encoded_keys']) + self.assertEqual( + {'key1': 'val1', 'key2': {'key2.1': 'EjM='}}, + instance_json['ds']['meta_data']) + + def test_get_hostname_subclass_support(self): + """Validate get_hostname signature on all subclasses of DataSource.""" + base_args = inspect.getfullargspec(DataSource.get_hostname) + # Import all DataSource subclasses so we can inspect them. 
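+ # find_modules/importer below import every module under cloudinit.sources + # so that DataSource.__subclasses__() is fully populated before each + # subclass signature is inspected.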
+ modules = util.find_modules(os.path.dirname(os.path.dirname(__file__))) + for _loc, name in modules.items(): + mod_locs, _ = importer.find_module(name, ['cloudinit.sources'], []) + if mod_locs: + importer.import_module(mod_locs[0]) + for child in DataSource.__subclasses__(): + if 'Test' in child.dsname: + continue + self.assertEqual( + base_args, + inspect.getfullargspec(child.get_hostname), + '%s does not implement DataSource.get_hostname params' + % child) + for grandchild in child.__subclasses__(): + self.assertEqual( + base_args, + inspect.getfullargspec(grandchild.get_hostname), + '%s does not implement DataSource.get_hostname params' + % grandchild) + + def test_clear_cached_attrs_resets_cached_attr_class_attributes(self): + """Class attributes listed in cached_attr_defaults are reset.""" + count = 0 + # Set up values for all cached class attributes + for attr, value in self.datasource.cached_attr_defaults: + setattr(self.datasource, attr, count) + count += 1 + self.datasource._dirty_cache = True + self.datasource.clear_cached_attrs() + for attr, value in self.datasource.cached_attr_defaults: + self.assertEqual(value, getattr(self.datasource, attr)) + + def test_clear_cached_attrs_noops_on_clean_cache(self): + """Cached class attributes are not reset when the cache is clean.""" + count = 0 + # Set up values for all cached class attributes + for attr, _ in self.datasource.cached_attr_defaults: + setattr(self.datasource, attr, count) + count += 1 + self.datasource._dirty_cache = False # Fake clean cache + self.datasource.clear_cached_attrs() + count = 0 + for attr, _ in self.datasource.cached_attr_defaults: + self.assertEqual(count, getattr(self.datasource, attr)) + count += 1 + + def test_clear_cached_attrs_skips_non_attr_class_attributes(self): + """Skip any cached_attr_defaults which aren't class attributes.""" + self.datasource._dirty_cache = True + self.datasource.clear_cached_attrs() + for attr in ('ec2_metadata', 'network_json'): + self.assertFalse(hasattr(self.datasource, attr)) + + def test_clear_cached_attrs_of_custom_attrs(self): + """Custom attr_values can be passed to clear_cached_attrs.""" + self.datasource._dirty_cache = True + cached_attr_name = self.datasource.cached_attr_defaults[0][0] + setattr(self.datasource, cached_attr_name, 'himom') + self.datasource.myattr = 'orig' + self.datasource.clear_cached_attrs( + attr_defaults=(('myattr', 'updated'),)) + self.assertEqual('himom', getattr(self.datasource, cached_attr_name)) + self.assertEqual('updated', self.datasource.myattr) + + @mock.patch.dict(DataSource.default_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + @mock.patch.dict(DataSource.supported_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + def test_update_metadata_only_acts_on_supported_update_events(self): + """update_metadata_if_supported won't call get_data on unsupported events.""" + self.assertEqual( + {EventScope.NETWORK: set([EventType.BOOT_NEW_INSTANCE])}, + self.datasource.default_update_events + ) + + def fake_get_data(): + raise Exception('get_data should not be called') + + self.datasource.get_data = fake_get_data + self.assertFalse( + self.datasource.update_metadata_if_supported( + source_event_types=[EventType.BOOT])) + + @mock.patch.dict(DataSource.supported_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + def test_update_metadata_returns_true_on_supported_update_event(self): + """update_metadata_if_supported returns get_data result on supported events.""" + def fake_get_data(): + return True
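+ # fake_get_data stands in for the real metadata crawl; a supported event + # type should route through it and its True result should be returned.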
+ + self.datasource.get_data = fake_get_data + self.datasource._network_config = 'something' + self.datasource._dirty_cache = True + self.assertTrue( + self.datasource.update_metadata_if_supported( + source_event_types=[ + EventType.BOOT, EventType.BOOT_NEW_INSTANCE])) + self.assertEqual(UNSET, self.datasource._network_config) + + self.assertIn( + "DEBUG: Update datasource metadata and network config due to" + " events: boot-new-instance", + self.logs.getvalue() + ) + + +class TestRedactSensitiveData(CiTestCase): + + def test_redact_sensitive_data_noop_when_no_sensitive_keys_present(self): + """When sensitive_keys is absent or empty in metadata, do nothing.""" + md = {'my': 'data'} + self.assertEqual( + md, redact_sensitive_keys(md, redact_value='redacted')) + md['sensitive_keys'] = [] + self.assertEqual( + md, redact_sensitive_keys(md, redact_value='redacted')) + + def test_redact_sensitive_data_redacts_exact_match_name(self): + """Only exact matched sensitive_keys are redacted from metadata.""" + md = {'sensitive_keys': ['md/secure'], + 'md': {'secure': 's3kr1t', 'insecure': 'publik'}} + secure_md = copy.deepcopy(md) + secure_md['md']['secure'] = 'redacted' + self.assertEqual( + secure_md, + redact_sensitive_keys(md, redact_value='redacted')) + + def test_redact_sensitive_data_does_redacts_with_default_string(self): + """When redact_value is absent, REDACT_SENSITIVE_VALUE is used.""" + md = {'sensitive_keys': ['md/secure'], + 'md': {'secure': 's3kr1t', 'insecure': 'publik'}} + secure_md = copy.deepcopy(md) + secure_md['md']['secure'] = 'redacted for non-root user' + self.assertEqual( + secure_md, + redact_sensitive_keys(md)) + + +class TestCanonicalCloudID(CiTestCase): + + def test_cloud_id_returns_platform_on_unknowns(self): + """When region and cloud_name are unknown, return platform.""" + self.assertEqual( + 'platform', + canonical_cloud_id(cloud_name=METADATA_UNKNOWN, + region=METADATA_UNKNOWN, + platform='platform')) + + def test_cloud_id_returns_platform_on_none(self): + """When region and cloud_name are unknown, return platform.""" + self.assertEqual( + 'platform', + canonical_cloud_id(cloud_name=None, + region=None, + platform='platform')) + + def test_cloud_id_returns_cloud_name_on_unknown_region(self): + """When region is unknown, return cloud_name.""" + for region in (None, METADATA_UNKNOWN): + self.assertEqual( + 'cloudname', + canonical_cloud_id(cloud_name='cloudname', + region=region, + platform='platform')) + + def test_cloud_id_returns_platform_on_unknown_cloud_name(self): + """When region is set but cloud_name is unknown, return platform.""" + self.assertEqual( + 'platform', + canonical_cloud_id(cloud_name=METADATA_UNKNOWN, + region='region', + platform='platform')) + + def test_cloud_id_aws_based_on_region_and_cloud_name(self): + """When cloud_name is aws, return proper cloud-id based on region.""" + self.assertEqual( + 'aws-china', + canonical_cloud_id(cloud_name='aws', + region='cn-north-1', + platform='platform')) + self.assertEqual( + 'aws', + canonical_cloud_id(cloud_name='aws', + region='us-east-1', + platform='platform')) + self.assertEqual( + 'aws-gov', + canonical_cloud_id(cloud_name='aws', + region='us-gov-1', + platform='platform')) + self.assertEqual( # Overridden non-aws cloud_name is returned + '!aws', + canonical_cloud_id(cloud_name='!aws', + region='us-gov-1', + platform='platform')) + + def test_cloud_id_azure_based_on_region_and_cloud_name(self): + """Report cloud-id when cloud_name is azure and region is in china.""" + self.assertEqual( +
'azure-china', + canonical_cloud_id(cloud_name='azure', + region='chinaeast', + platform='platform')) + self.assertEqual( + 'azure', + canonical_cloud_id(cloud_name='azure', + region='!chinaeast', + platform='platform')) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_lxd.py b/tests/unittests/sources/test_lxd.py new file mode 100644 index 00000000..a6e51f3b --- /dev/null +++ b/tests/unittests/sources/test_lxd.py @@ -0,0 +1,376 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from collections import namedtuple +from copy import deepcopy +import json +import re +import stat +from unittest import mock +import yaml + +import pytest + +from cloudinit.sources import ( + DataSourceLXD as lxd, InvalidMetaDataException, UNSET +) +DS_PATH = "cloudinit.sources.DataSourceLXD." + + +LStatResponse = namedtuple("lstatresponse", "st_mode") + + +NETWORK_V1 = { + "version": 1, + "config": [ + { + "type": "physical", "name": "eth0", + "subnets": [{"type": "dhcp", "control": "auto"}] + } + ] +} + + +def _add_network_v1_device(devname) -> dict: + """Helper to inject device name into default network v1 config.""" + network_cfg = deepcopy(NETWORK_V1) + network_cfg["config"][0]["name"] = devname + return network_cfg + + +LXD_V1_METADATA = { + "meta-data": "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n", + "network-config": NETWORK_V1, + "user-data": "#cloud-config\npackages: [sl]\n", + "vendor-data": "#cloud-config\nruncmd: ['echo vendor-data']\n", + "config": { + "user.user-data": + "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n", + "user.vendor-data": + "#cloud-config\nruncmd: ['echo vendor-data']\n", + "user.network-config": yaml.safe_dump(NETWORK_V1), + } +} + + +@pytest.fixture +def lxd_metadata(): + return LXD_V1_METADATA + + +@pytest.yield_fixture +def lxd_ds(request, paths, lxd_metadata): + """ + Return an instantiated DataSourceLXD. 
+ + This also performs the mocking required for the default test case: + * ``is_platform_viable`` returns True, + * ``read_metadata`` returns ``LXD_V1_METADATA`` + + (This uses the paths fixture for the required helpers.Paths object) + """ + with mock.patch(DS_PATH + "is_platform_viable", return_value=True): + with mock.patch(DS_PATH + "read_metadata", return_value=lxd_metadata): + yield lxd.DataSourceLXD( + sys_cfg={}, distro=mock.Mock(), paths=paths + ) + + +class TestGenerateFallbackNetworkConfig: + + @pytest.mark.parametrize( + "uname_machine,systemd_detect_virt,expected", ( + # systemd_detect_virt of None: which finds no systemd-detect-virt + # binary, so the default config is used + ({}, None, NETWORK_V1), + ("anything", "lxc\n", NETWORK_V1), + # `uname -m` on kvm determines devname + ("x86_64", "kvm\n", _add_network_v1_device("enp5s0")), + ("ppc64le", "kvm\n", _add_network_v1_device("enp0s5")), + ("s390x", "kvm\n", _add_network_v1_device("enc9")) + ) + ) + @mock.patch(DS_PATH + "util.system_info") + @mock.patch(DS_PATH + "subp.subp") + @mock.patch(DS_PATH + "subp.which") + def test_net_v2_based_on_network_mode_virt_type_and_uname_machine( + self, + m_which, + m_subp, + m_system_info, + uname_machine, + systemd_detect_virt, + expected, + ): + """Return fallback network config based on uname -m and systemd-detect-virt.""" + if systemd_detect_virt is None: + m_which.return_value = None + m_system_info.return_value = {"uname": ["", "", "", "", uname_machine]} + m_subp.return_value = (systemd_detect_virt, "") + assert expected == lxd.generate_fallback_network_config() + if systemd_detect_virt is None: + assert 0 == m_subp.call_count + assert 0 == m_system_info.call_count + else: + assert [ + mock.call(["systemd-detect-virt"]) + ] == m_subp.call_args_list + if systemd_detect_virt != "kvm\n": + assert 0 == m_system_info.call_count + else: + assert 1 == m_system_info.call_count + + +class TestDataSourceLXD: + def test_platform_info(self, lxd_ds): + assert "LXD" == lxd_ds.dsname + assert "lxd" == lxd_ds.cloud_name + assert "lxd" == lxd_ds.platform_type + + def test_subplatform(self, lxd_ds): + assert "LXD socket API v.
1.0 (/dev/lxd/sock)" == lxd_ds.subplatform + + def test__get_data(self, lxd_ds): + """get_data calls read_metadata, setting appropiate instance attrs.""" + assert UNSET == lxd_ds._crawled_metadata + assert UNSET == lxd_ds._network_config + assert None is lxd_ds.userdata_raw + assert True is lxd_ds._get_data() + assert LXD_V1_METADATA == lxd_ds._crawled_metadata + # network-config is dumped from YAML + assert NETWORK_V1 == lxd_ds._network_config + # Any user-data and vendor-data are saved as raw + assert LXD_V1_METADATA["user-data"] == lxd_ds.userdata_raw + assert LXD_V1_METADATA["vendor-data"] == lxd_ds.vendordata_raw + + +class TestIsPlatformViable: + @pytest.mark.parametrize( + "exists,lstat_mode,expected", ( + (False, None, False), + (True, stat.S_IFREG, False), + (True, stat.S_IFSOCK, True), + ) + ) + @mock.patch(DS_PATH + "os.lstat") + @mock.patch(DS_PATH + "os.path.exists") + def test_expected_viable( + self, m_exists, m_lstat, exists, lstat_mode, expected + ): + """Return True only when LXD_SOCKET_PATH exists and is a socket.""" + m_exists.return_value = exists + m_lstat.return_value = LStatResponse(lstat_mode) + assert expected is lxd.is_platform_viable() + m_exists.assert_has_calls([mock.call(lxd.LXD_SOCKET_PATH)]) + if exists: + m_lstat.assert_has_calls([mock.call(lxd.LXD_SOCKET_PATH)]) + else: + assert 0 == m_lstat.call_count + + +class TestReadMetadata: + @pytest.mark.parametrize( + "url_responses,expected,logs", ( + ( # Assert non-JSON format from config route + { + "http://lxd/1.0/meta-data": "local-hostname: md\n", + "http://lxd/1.0/config": "[NOT_JSON", + }, + InvalidMetaDataException( + "Unable to determine cloud-init config from" + " http://lxd/1.0/config. Expected JSON but found:" + " [NOT_JSON"), + ["[GET] [HTTP:200] http://lxd/1.0/meta-data", + "[GET] [HTTP:200] http://lxd/1.0/config"], + ), + ( # Assert success on just meta-data + { + "http://lxd/1.0/meta-data": "local-hostname: md\n", + "http://lxd/1.0/config": "[]", + }, + { + "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, + "config": {}, "meta-data": "local-hostname: md\n" + }, + ["[GET] [HTTP:200] http://lxd/1.0/meta-data", + "[GET] [HTTP:200] http://lxd/1.0/config"], + ), + ( # Assert 404s for config routes log skipping + { + "http://lxd/1.0/meta-data": "local-hostname: md\n", + "http://lxd/1.0/config": + '["/1.0/config/user.custom1",' + ' "/1.0/config/user.meta-data",' + ' "/1.0/config/user.network-config",' + ' "/1.0/config/user.user-data",' + ' "/1.0/config/user.vendor-data"]', + "http://lxd/1.0/config/user.custom1": "custom1", + "http://lxd/1.0/config/user.meta-data": "", # 404 + "http://lxd/1.0/config/user.network-config": "net-config", + "http://lxd/1.0/config/user.user-data": "", # 404 + "http://lxd/1.0/config/user.vendor-data": "", # 404 + }, + { + "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, + "config": { + "user.custom1": "custom1", # Not promoted + "user.network-config": "net-config", + }, + "meta-data": "local-hostname: md\n", + "network-config": "net-config", + }, + [ + "Skipping http://lxd/1.0/config/user.vendor-data on" + " [HTTP:404]", + "Skipping http://lxd/1.0/config/user.meta-data on" + " [HTTP:404]", + "Skipping http://lxd/1.0/config/user.user-data on" + " [HTTP:404]", + "[GET] [HTTP:200] http://lxd/1.0/config", + "[GET] [HTTP:200] http://lxd/1.0/config/user.custom1", + "[GET] [HTTP:200]" + " http://lxd/1.0/config/user.network-config", + ], + ), + ( # Assert all CONFIG_KEY_ALIASES promoted to top-level keys + { + "http://lxd/1.0/meta-data": "local-hostname: md\n", + 
"http://lxd/1.0/config": + '["/1.0/config/user.custom1",' + ' "/1.0/config/user.meta-data",' + ' "/1.0/config/user.network-config",' + ' "/1.0/config/user.user-data",' + ' "/1.0/config/user.vendor-data"]', + "http://lxd/1.0/config/user.custom1": "custom1", + "http://lxd/1.0/config/user.meta-data": "meta-data", + "http://lxd/1.0/config/user.network-config": "net-config", + "http://lxd/1.0/config/user.user-data": "user-data", + "http://lxd/1.0/config/user.vendor-data": "vendor-data", + }, + { + "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, + "config": { + "user.custom1": "custom1", # Not promoted + "user.meta-data": "meta-data", + "user.network-config": "net-config", + "user.user-data": "user-data", + "user.vendor-data": "vendor-data", + }, + "meta-data": "local-hostname: md\n", + "network-config": "net-config", + "user-data": "user-data", + "vendor-data": "vendor-data", + }, + [ + "[GET] [HTTP:200] http://lxd/1.0/meta-data", + "[GET] [HTTP:200] http://lxd/1.0/config", + "[GET] [HTTP:200] http://lxd/1.0/config/user.custom1", + "[GET] [HTTP:200] http://lxd/1.0/config/user.meta-data", + "[GET] [HTTP:200]" + " http://lxd/1.0/config/user.network-config", + "[GET] [HTTP:200] http://lxd/1.0/config/user.user-data", + "[GET] [HTTP:200] http://lxd/1.0/config/user.vendor-data", + ], + ), + ( # Assert cloud-init.* config key values prefered over user.* + { + "http://lxd/1.0/meta-data": "local-hostname: md\n", + "http://lxd/1.0/config": + '["/1.0/config/user.meta-data",' + ' "/1.0/config/user.network-config",' + ' "/1.0/config/user.user-data",' + ' "/1.0/config/user.vendor-data",' + ' "/1.0/config/cloud-init.network-config",' + ' "/1.0/config/cloud-init.user-data",' + ' "/1.0/config/cloud-init.vendor-data"]', + "http://lxd/1.0/config/user.meta-data": "user.meta-data", + "http://lxd/1.0/config/user.network-config": + "user.network-config", + "http://lxd/1.0/config/user.user-data": "user.user-data", + "http://lxd/1.0/config/user.vendor-data": + "user.vendor-data", + "http://lxd/1.0/config/cloud-init.meta-data": + "cloud-init.meta-data", + "http://lxd/1.0/config/cloud-init.network-config": + "cloud-init.network-config", + "http://lxd/1.0/config/cloud-init.user-data": + "cloud-init.user-data", + "http://lxd/1.0/config/cloud-init.vendor-data": + "cloud-init.vendor-data", + }, + { + "_metadata_api_version": lxd.LXD_SOCKET_API_VERSION, + "config": { + "user.meta-data": "user.meta-data", + "user.network-config": "user.network-config", + "user.user-data": "user.user-data", + "user.vendor-data": "user.vendor-data", + "cloud-init.network-config": + "cloud-init.network-config", + "cloud-init.user-data": "cloud-init.user-data", + "cloud-init.vendor-data": + "cloud-init.vendor-data", + }, + "meta-data": "local-hostname: md\n", + "network-config": "cloud-init.network-config", + "user-data": "cloud-init.user-data", + "vendor-data": "cloud-init.vendor-data", + }, + [ + "[GET] [HTTP:200] http://lxd/1.0/meta-data", + "[GET] [HTTP:200] http://lxd/1.0/config", + "[GET] [HTTP:200] http://lxd/1.0/config/user.meta-data", + "[GET] [HTTP:200]" + " http://lxd/1.0/config/user.network-config", + "[GET] [HTTP:200] http://lxd/1.0/config/user.user-data", + "[GET] [HTTP:200] http://lxd/1.0/config/user.vendor-data", + "[GET] [HTTP:200]" + " http://lxd/1.0/config/cloud-init.network-config", + "[GET] [HTTP:200]" + " http://lxd/1.0/config/cloud-init.user-data", + "[GET] [HTTP:200]" + " http://lxd/1.0/config/cloud-init.vendor-data", + "Ignoring LXD config user.user-data in favor of" + " cloud-init.user-data value.", + "Ignoring LXD 
config user.network-config in favor of" + " cloud-init.network-config value.", + "Ignoring LXD config user.vendor-data in favor of" + " cloud-init.vendor-data value.", + ], + ), + ) + ) + @mock.patch.object(lxd.requests.Session, 'get') + def test_read_metadata_handles_unexpected_content_or_http_status( + self, session_get, url_responses, expected, logs, caplog + ): + """read_metadata handles valid and invalid content and status codes.""" + + def fake_get(url): + """Mock Response json, ok, status_code, text from url_responses.""" + m_resp = mock.MagicMock() + content = url_responses.get(url, '') + m_resp.json.side_effect = lambda: json.loads(content) + if content: + mock_ok = mock.PropertyMock(return_value=True) + mock_status_code = mock.PropertyMock(return_value=200) + else: + mock_ok = mock.PropertyMock(return_value=False) + mock_status_code = mock.PropertyMock(return_value=404) + type(m_resp).ok = mock_ok + type(m_resp).status_code = mock_status_code + mock_text = mock.PropertyMock(return_value=content) + type(m_resp).text = mock_text + return m_resp + + session_get.side_effect = fake_get + + if isinstance(expected, Exception): + with pytest.raises(type(expected), match=re.escape(str(expected))): + lxd.read_metadata() + else: + assert expected == lxd.read_metadata() + caplogs = caplog.text + for log in logs: + assert log in caplogs + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_maas.py b/tests/unittests/sources/test_maas.py new file mode 100644 index 00000000..34b79587 --- /dev/null +++ b/tests/unittests/sources/test_maas.py @@ -0,0 +1,200 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from copy import copy +import os +import shutil +import tempfile +import yaml +from unittest import mock + +from cloudinit.sources import DataSourceMAAS +from cloudinit import url_helper +from tests.unittests.helpers import CiTestCase, populate_dir + + +class TestMAASDataSource(CiTestCase): + + def setUp(self): + super(TestMAASDataSource, self).setUp() + # Make a temp directory for tests to use.
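+ # mkdtemp keeps each test's seed files isolated; addCleanup removes the + # tree even when an assertion fails.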
+ self.tmp = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, self.tmp) + + def test_seed_dir_valid(self): + """Verify a valid seed dir is read as such.""" + + userdata = b'valid01-userdata' + data = {'meta-data/instance-id': 'i-valid01', + 'meta-data/local-hostname': 'valid01-hostname', + 'user-data': userdata, + 'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'} + + my_d = os.path.join(self.tmp, "valid") + populate_dir(my_d, data) + + ud, md, vd = DataSourceMAAS.read_maas_seed_dir(my_d) + + self.assertEqual(userdata, ud) + for key in ('instance-id', 'local-hostname'): + self.assertEqual(data["meta-data/" + key], md[key]) + + # verify that 'user-data' is not returned as part of the metadata + self.assertFalse(('user-data' in md)) + self.assertIsNone(vd) + + def test_seed_dir_valid_extra(self): + """Verify extra files do not affect seed_dir validity.""" + + userdata = b'valid-extra-userdata' + data = {'meta-data/instance-id': 'i-valid-extra', + 'meta-data/local-hostname': 'valid-extra-hostname', + 'user-data': userdata, 'foo': 'bar'} + + my_d = os.path.join(self.tmp, "valid_extra") + populate_dir(my_d, data) + + ud, md, _vd = DataSourceMAAS.read_maas_seed_dir(my_d) + + self.assertEqual(userdata, ud) + for key in ('instance-id', 'local-hostname'): + self.assertEqual(data['meta-data/' + key], md[key]) + + # additional files should not just appear as keys in metadata atm + self.assertFalse(('foo' in md)) + + def test_seed_dir_invalid(self): + """Verify that invalid seed_dir raises MAASSeedDirMalformed.""" + + valid = {'instance-id': 'i-instanceid', + 'local-hostname': 'test-hostname', 'user-data': ''} + + my_based = os.path.join(self.tmp, "valid_extra") + + # missing 'local-hostname' + my_d = "%s-01" % my_based + invalid_data = copy(valid) + del invalid_data['local-hostname'] + populate_dir(my_d, invalid_data) + self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed, + DataSourceMAAS.read_maas_seed_dir, my_d) + + # missing 'instance-id' + my_d = "%s-02" % my_based + invalid_data = copy(valid) + del invalid_data['instance-id'] + populate_dir(my_d, invalid_data) + self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed, + DataSourceMAAS.read_maas_seed_dir, my_d) + + def test_seed_dir_none(self): + """Verify that empty seed_dir raises MAASSeedDirNone.""" + + my_d = os.path.join(self.tmp, "valid_empty") + self.assertRaises(DataSourceMAAS.MAASSeedDirNone, + DataSourceMAAS.read_maas_seed_dir, my_d) + + def test_seed_dir_missing(self): + """Verify that missing seed_dir raises MAASSeedDirNone.""" + self.assertRaises(DataSourceMAAS.MAASSeedDirNone, + DataSourceMAAS.read_maas_seed_dir, + os.path.join(self.tmp, "nonexistantdirectory")) + + def mock_read_maas_seed_url(self, data, seed, version="19991231"): + """Mock up readurl to appear as if a web server at seed provided data; + return what read_maas_seed_url returns.""" + def my_readurl(*args, **kwargs): + if len(args): + url = args[0] + else: + url = kwargs['url'] + prefix = "%s/%s/" % (seed, version) + if not url.startswith(prefix): + raise ValueError("unexpected call %s" % url) + + short = url[len(prefix):] + if short not in data: + raise url_helper.UrlError("not found", code=404, url=url) + return url_helper.StringResponse(data[short]) + + # Now do the actual call of the code under test.
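+ # readurl is patched out, so read_maas_seed_url exercises its URL and + # error handling without touching the network.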
+ with mock.patch("cloudinit.url_helper.readurl") as mock_readurl: + mock_readurl.side_effect = my_readurl + return DataSourceMAAS.read_maas_seed_url(seed, version=version) + + def test_seed_url_valid(self): + """Verify that valid seed_url is read as such.""" + valid = { + 'meta-data/instance-id': 'i-instanceid', + 'meta-data/local-hostname': 'test-hostname', + 'meta-data/public-keys': 'test-hostname', + 'meta-data/vendor-data': b'my-vendordata', + 'user-data': b'foodata', + } + my_seed = "http://example.com/xmeta" + my_ver = "1999-99-99" + ud, md, vd = self.mock_read_maas_seed_url(valid, my_seed, my_ver) + + self.assertEqual(valid['meta-data/instance-id'], md['instance-id']) + self.assertEqual( + valid['meta-data/local-hostname'], md['local-hostname']) + self.assertEqual(valid['meta-data/public-keys'], md['public-keys']) + self.assertEqual(valid['user-data'], ud) + # vendor-data is yaml, which decodes a string + self.assertEqual(valid['meta-data/vendor-data'].decode(), vd) + + def test_seed_url_vendor_data_dict(self): + expected_vd = {'key1': 'value1'} + valid = { + 'meta-data/instance-id': 'i-instanceid', + 'meta-data/local-hostname': 'test-hostname', + 'meta-data/vendor-data': yaml.safe_dump(expected_vd).encode(), + } + _ud, md, vd = self.mock_read_maas_seed_url( + valid, "http://example.com/foo") + + self.assertEqual(valid['meta-data/instance-id'], md['instance-id']) + self.assertEqual(expected_vd, vd) + + +@mock.patch("cloudinit.sources.DataSourceMAAS.url_helper.OauthUrlHelper") +class TestGetOauthHelper(CiTestCase): + base_cfg = {'consumer_key': 'FAKE_CONSUMER_KEY', + 'token_key': 'FAKE_TOKEN_KEY', + 'token_secret': 'FAKE_TOKEN_SECRET', + 'consumer_secret': None} + + def test_all_required(self, m_helper): + """Valid config as expected.""" + DataSourceMAAS.get_oauth_helper(self.base_cfg.copy()) + m_helper.assert_has_calls([mock.call(**self.base_cfg)]) + + def test_other_fields_not_passed_through(self, m_helper): + """Only relevant fields are passed through.""" + mycfg = self.base_cfg.copy() + mycfg['unrelated_field'] = 'unrelated' + DataSourceMAAS.get_oauth_helper(mycfg) + m_helper.assert_has_calls([mock.call(**self.base_cfg)]) + + +class TestGetIdHash(CiTestCase): + v1_cfg = {'consumer_key': 'CKEY', 'token_key': 'TKEY', + 'token_secret': 'TSEC'} + v1_id = ( + 'v1:' + '403ee5f19c956507f1d0e50814119c405902137ea4f8838bde167c5da8110392') + + def test_v1_expected(self): + """Test v1 id generated as expected working behavior from config.""" + result = DataSourceMAAS.get_id_from_ds_cfg(self.v1_cfg.copy()) + self.assertEqual(self.v1_id, result) + + def test_v1_extra_fields_are_ignored(self): + """Test v1 id ignores unused entries in config.""" + cfg = self.v1_cfg.copy() + cfg['consumer_secret'] = "BOO" + cfg['unrelated'] = "HI MOM" + result = DataSourceMAAS.get_id_from_ds_cfg(cfg) + self.assertEqual(self.v1_id, result) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_nocloud.py b/tests/unittests/sources/test_nocloud.py new file mode 100644 index 00000000..26f91054 --- /dev/null +++ b/tests/unittests/sources/test_nocloud.py @@ -0,0 +1,393 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit import dmi +from cloudinit import helpers +from cloudinit.sources.DataSourceNoCloud import ( + DataSourceNoCloud as dsNoCloud, + _maybe_remove_top_network, + parse_cmdline_data) +from cloudinit import util +from tests.unittests.helpers import CiTestCase, populate_dir, mock, ExitStack + +import os +import textwrap +import yaml + + +@mock.patch('cloudinit.sources.DataSourceNoCloud.util.is_lxd') +class TestNoCloudDataSource(CiTestCase): + + def setUp(self): + super(TestNoCloudDataSource, self).setUp() + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) + + self.cmdline = "root=TESTCMDLINE" + + self.mocks = ExitStack() + self.addCleanup(self.mocks.close) + + self.mocks.enter_context( + mock.patch.object(util, 'get_cmdline', return_value=self.cmdline)) + self.mocks.enter_context( + mock.patch.object(dmi, 'read_dmi_data', return_value=None)) + + def _test_fs_config_is_read(self, fs_label, fs_label_to_search): + vfat_device = 'device-1' + + def m_mount_cb(device, callback, mtype): + if (device == vfat_device): + return {'meta-data': yaml.dump({'instance-id': 'IID'})} + else: + return {} + + def m_find_devs_with(query='', path=''): + if 'TYPE=vfat' == query: + return [vfat_device] + elif 'LABEL={}'.format(fs_label) == query: + return [vfat_device] + else: + return [] + + self.mocks.enter_context( + mock.patch.object(util, 'find_devs_with', + side_effect=m_find_devs_with)) + self.mocks.enter_context( + mock.patch.object(util, 'mount_cb', + side_effect=m_mount_cb)) + sys_cfg = {'datasource': {'NoCloud': {'fs_label': fs_label_to_search}}} + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + + self.assertEqual(dsrc.metadata.get('instance-id'), 'IID') + self.assertTrue(ret) + + def test_nocloud_seed_dir_on_lxd(self, m_is_lxd): + md = {'instance-id': 'IID', 'dsmode': 'local'} + ud = b"USER_DATA_HERE" + seed_dir = os.path.join(self.paths.seed_dir, "nocloud") + populate_dir(seed_dir, + {'user-data': ud, 'meta-data': yaml.safe_dump(md)}) + + sys_cfg = { + 'datasource': {'NoCloud': {'fs_label': None}} + } + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertEqual(dsrc.userdata_raw, ud) + self.assertEqual(dsrc.metadata, md) + self.assertEqual(dsrc.platform_type, 'lxd') + self.assertEqual( + dsrc.subplatform, 'seed-dir (%s)' % seed_dir) + self.assertTrue(ret) + + def test_nocloud_seed_dir_non_lxd_platform_is_nocloud(self, m_is_lxd): + """Non-lxd environments will list nocloud as the platform.""" + m_is_lxd.return_value = False + md = {'instance-id': 'IID', 'dsmode': 'local'} + seed_dir = os.path.join(self.paths.seed_dir, "nocloud") + populate_dir(seed_dir, + {'user-data': '', 'meta-data': yaml.safe_dump(md)}) + + sys_cfg = { + 'datasource': {'NoCloud': {'fs_label': None}} + } + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + self.assertTrue(dsrc.get_data()) + self.assertEqual(dsrc.platform_type, 'nocloud') + self.assertEqual( + dsrc.subplatform, 'seed-dir (%s)' % seed_dir) + + def test_fs_label(self, m_is_lxd): + # find_devs_with should not be called if fs_label is None + class PseudoException(Exception): + pass + + self.mocks.enter_context( + mock.patch.object(util, 'find_devs_with', + side_effect=PseudoException)) + + # by default, NoCloud should search for filesystems by label + sys_cfg = {'datasource': {'NoCloud': {}}} + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + self.assertRaises(PseudoException,
dsrc.get_data) + + # but disabling searching should just end up with None found + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertFalse(ret) + + def test_fs_config_lowercase_label(self, m_is_lxd): + self._test_fs_config_is_read('cidata', 'cidata') + + def test_fs_config_uppercase_label(self, m_is_lxd): + self._test_fs_config_is_read('CIDATA', 'cidata') + + def test_fs_config_lowercase_label_search_uppercase(self, m_is_lxd): + self._test_fs_config_is_read('cidata', 'CIDATA') + + def test_fs_config_uppercase_label_search_uppercase(self, m_is_lxd): + self._test_fs_config_is_read('CIDATA', 'CIDATA') + + def test_no_datasource_expected(self, m_is_lxd): + # no source should be found if no cmdline, config, and fs_label=None + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + self.assertFalse(dsrc.get_data()) + + def test_seed_in_config(self, m_is_lxd): + data = { + 'fs_label': None, + 'meta-data': yaml.safe_dump({'instance-id': 'IID'}), + 'user-data': b"USER_DATA_RAW", + } + + sys_cfg = {'datasource': {'NoCloud': data}} + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertEqual(dsrc.userdata_raw, b"USER_DATA_RAW") + self.assertEqual(dsrc.metadata.get('instance-id'), 'IID') + self.assertTrue(ret) + + def test_nocloud_seed_with_vendordata(self, m_is_lxd): + md = {'instance-id': 'IID', 'dsmode': 'local'} + ud = b"USER_DATA_HERE" + vd = b"THIS IS MY VENDOR_DATA" + + populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': ud, 'meta-data': yaml.safe_dump(md), + 'vendor-data': vd}) + + sys_cfg = { + 'datasource': {'NoCloud': {'fs_label': None}} + } + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertEqual(dsrc.userdata_raw, ud) + self.assertEqual(dsrc.metadata, md) + self.assertEqual(dsrc.vendordata_raw, vd) + self.assertTrue(ret) + + def test_nocloud_no_vendordata(self, m_is_lxd): + populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': b"ud", 'meta-data': "instance-id: IID\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertEqual(dsrc.userdata_raw, b"ud") + self.assertFalse(dsrc.vendordata) + self.assertTrue(ret) + + def test_metadata_network_interfaces(self, m_is_lxd): + gateway = "103.225.10.1" + md = { + 'instance-id': 'i-abcd', + 'local-hostname': 'hostname1', + 'network-interfaces': textwrap.dedent("""\ + auto eth0 + iface eth0 inet static + hwaddr 00:16:3e:70:e1:04 + address 103.225.10.12 + netmask 255.255.255.0 + gateway """ + gateway + """ + dns-servers 8.8.8.8""")} + + populate_dir( + os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': b"ud", + 'meta-data': yaml.dump(md) + "\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertTrue(ret) + # very simple check just for the strings above + self.assertIn(gateway, str(dsrc.network_config)) + + def test_metadata_network_config(self, m_is_lxd): + # network-config needs to get into network_config + netconf = {'version': 1, + 'config': [{'type': 'physical', 'name': 'interface0', + 'subnets': [{'type': 'dhcp'}]}]} + populate_dir( + os.path.join(self.paths.seed_dir, 
"nocloud"), + {'user-data': b"ud", + 'meta-data': "instance-id: IID\n", + 'network-config': yaml.dump(netconf) + "\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(netconf, dsrc.network_config) + + def test_metadata_network_config_with_toplevel_network(self, m_is_lxd): + """network-config may have 'network' top level key.""" + netconf = {'config': 'disabled'} + populate_dir( + os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': b"ud", + 'meta-data': "instance-id: IID\n", + 'network-config': yaml.dump({'network': netconf}) + "\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(netconf, dsrc.network_config) + + def test_metadata_network_config_over_interfaces(self, m_is_lxd): + # network-config should override meta-data/network-interfaces + gateway = "103.225.10.1" + md = { + 'instance-id': 'i-abcd', + 'local-hostname': 'hostname1', + 'network-interfaces': textwrap.dedent("""\ + auto eth0 + iface eth0 inet static + hwaddr 00:16:3e:70:e1:04 + address 103.225.10.12 + netmask 255.255.255.0 + gateway """ + gateway + """ + dns-servers 8.8.8.8""")} + + netconf = {'version': 1, + 'config': [{'type': 'physical', 'name': 'interface0', + 'subnets': [{'type': 'dhcp'}]}]} + populate_dir( + os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': b"ud", + 'meta-data': yaml.dump(md) + "\n", + 'network-config': yaml.dump(netconf) + "\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(netconf, dsrc.network_config) + self.assertNotIn(gateway, str(dsrc.network_config)) + + @mock.patch("cloudinit.util.blkid") + def test_nocloud_get_devices_freebsd(self, m_is_lxd, fake_blkid): + populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': b"ud", 'meta-data': "instance-id: IID\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + self.mocks.enter_context( + mock.patch.object(util, 'is_FreeBSD', return_value=True)) + + def _mfind_devs_with_freebsd( + criteria=None, oformat='device', + tag=None, no_cache=False, path=None): + if not criteria: + return ["/dev/msdosfs/foo", "/dev/iso9660/foo"] + if criteria.startswith("LABEL="): + return ["/dev/msdosfs/foo", "/dev/iso9660/foo"] + elif criteria == "TYPE=vfat": + return ["/dev/msdosfs/foo"] + elif criteria == "TYPE=iso9660": + return ["/dev/iso9660/foo"] + return [] + + self.mocks.enter_context( + mock.patch.object( + util, 'find_devs_with_freebsd', + side_effect=_mfind_devs_with_freebsd)) + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc._get_devices('foo') + self.assertEqual(['/dev/msdosfs/foo', '/dev/iso9660/foo'], ret) + fake_blkid.assert_not_called() + + +class TestParseCommandLineData(CiTestCase): + + def test_parse_cmdline_data_valid(self): + ds_id = "ds=nocloud" + pairs = ( + ("root=/dev/sda1 %(ds_id)s", {}), + ("%(ds_id)s; root=/dev/foo", {}), + ("%(ds_id)s", {}), + ("%(ds_id)s;", {}), + ("%(ds_id)s;s=SEED", {'seedfrom': 'SEED'}), + ("%(ds_id)s;seedfrom=SEED;local-hostname=xhost", + {'seedfrom': 'SEED', 'local-hostname': 'xhost'}), + ("%(ds_id)s;h=xhost", + {'local-hostname': 'xhost'}), + ("%(ds_id)s;h=xhost;i=IID", + {'local-hostname': 
'xhost', 'instance-id': 'IID'}), + ) + + for (fmt, expected) in pairs: + fill = {} + cmdline = fmt % {'ds_id': ds_id} + ret = parse_cmdline_data(ds_id=ds_id, fill=fill, cmdline=cmdline) + self.assertEqual(expected, fill) + self.assertTrue(ret) + + def test_parse_cmdline_data_none(self): + ds_id = "ds=foo" + cmdlines = ( + "root=/dev/sda1 ro", + "console=/dev/ttyS0 root=/dev/foo", + "", + "ds=foocloud", + "ds=foo-net", + "ds=nocloud;s=SEED", + ) + + for cmdline in cmdlines: + fill = {} + ret = parse_cmdline_data(ds_id=ds_id, fill=fill, cmdline=cmdline) + self.assertEqual(fill, {}) + self.assertFalse(ret) + + +class TestMaybeRemoveToplevelNetwork(CiTestCase): + """test _maybe_remove_top_network function.""" + basecfg = [{'type': 'physical', 'name': 'interface0', + 'subnets': [{'type': 'dhcp'}]}] + + def test_should_remove_safely(self): + mcfg = {'config': self.basecfg, 'version': 1} + self.assertEqual(mcfg, _maybe_remove_top_network({'network': mcfg})) + + def test_no_remove_if_other_keys(self): + """should not shift if other keys at top level.""" + mcfg = {'network': {'config': self.basecfg, 'version': 1}, + 'unknown_keyname': 'keyval'} + self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) + + def test_no_remove_if_non_dict(self): + """should not shift if not a dict.""" + mcfg = {'network': '"content here'} + self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) + + def test_no_remove_if_missing_config_or_version(self): + """should not shift unless network entry has config and version.""" + mcfg = {'network': {'config': self.basecfg}} + self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) + + mcfg = {'network': {'version': 1}} + self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) + + def test_remove_with_config_disabled(self): + """network/config=disabled should be shifted.""" + mcfg = {'config': 'disabled'} + self.assertEqual(mcfg, _maybe_remove_top_network({'network': mcfg})) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_opennebula.py b/tests/unittests/sources/test_opennebula.py new file mode 100644 index 00000000..e5963f5a --- /dev/null +++ b/tests/unittests/sources/test_opennebula.py @@ -0,0 +1,977 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit import helpers +from cloudinit.sources import DataSourceOpenNebula as ds +from cloudinit import util +from tests.unittests.helpers import mock, populate_dir, CiTestCase + +import os +import pwd +import unittest + +import pytest + + +TEST_VARS = { + 'VAR1': 'single', + 'VAR2': 'double word', + 'VAR3': 'multi\nline\n', + 'VAR4': "'single'", + 'VAR5': "'double word'", + 'VAR6': "'multi\nline\n'", + 'VAR7': 'single\\t', + 'VAR8': 'double\\tword', + 'VAR9': 'multi\\t\nline\n', + 'VAR10': '\\', # expect '\' + 'VAR11': '\'', # expect ' + 'VAR12': '$', # expect $ +} + +INVALID_CONTEXT = ';' +USER_DATA = '#cloud-config\napt_upgrade: true' +SSH_KEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i' +HOSTNAME = 'foo.example.com' +PUBLIC_IP = '10.0.0.3' +MACADDR = '02:00:0a:12:01:01' +IP_BY_MACADDR = '10.18.1.1' +IP4_PREFIX = '24' +IP6_GLOBAL = '2001:db8:1:0:400:c0ff:fea8:1ba' +IP6_ULA = 'fd01:dead:beaf:0:400:c0ff:fea8:1ba' +IP6_GW = '2001:db8:1::ffff' +IP6_PREFIX = '48' + +DS_PATH = "cloudinit.sources.DataSourceOpenNebula" + + +class TestOpenNebulaDataSource(CiTestCase): + parsed_user = None + allowed_subp = ['bash'] + + def setUp(self): + super(TestOpenNebulaDataSource, self).setUp() + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp}) + + # defaults for a few tests + self.ds = ds.DataSourceOpenNebula + self.seed_dir = os.path.join(self.paths.seed_dir, "opennebula") + self.sys_cfg = {'datasource': {'OpenNebula': {'dsmode': 'local'}}} + + # we don't want 'sudo' called in tests, so we patch switch_user_cmd + def my_switch_user_cmd(user): + self.parsed_user = user + return [] + + self.switch_user_cmd_real = ds.switch_user_cmd + ds.switch_user_cmd = my_switch_user_cmd + + def tearDown(self): + ds.switch_user_cmd = self.switch_user_cmd_real + super(TestOpenNebulaDataSource, self).tearDown() + + def test_get_data_non_contextdisk(self): + orig_find_devs_with = util.find_devs_with + try: + # don't try to look up CDs + util.find_devs_with = lambda n: [] + dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertFalse(ret) + finally: + util.find_devs_with = orig_find_devs_with + + def test_get_data_broken_contextdisk(self): + orig_find_devs_with = util.find_devs_with + try: + # don't try to look up CDs + util.find_devs_with = lambda n: [] + populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT}) + dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths) + self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data) + finally: + util.find_devs_with = orig_find_devs_with + + def test_get_data_invalid_identity(self): + orig_find_devs_with = util.find_devs_with + try: + # generate non-existing system user name + sys_cfg = self.sys_cfg + invalid_user = 'invalid' + while not sys_cfg['datasource']['OpenNebula'].get('parseuser'): + try: + pwd.getpwnam(invalid_user) + invalid_user += 'X' + except KeyError: + sys_cfg['datasource']['OpenNebula']['parseuser'] = \ + invalid_user + + # don't try to look up CDs + util.find_devs_with = lambda n: [] + populate_context_dir(self.seed_dir, {'KEY1': 'val1'}) + dsrc = self.ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data) + finally: + util.find_devs_with = orig_find_devs_with + + def test_get_data(self): + orig_find_devs_with = util.find_devs_with + try: + # don't try to look up CDs + util.find_devs_with = lambda n: [] + populate_context_dir(self.seed_dir,
{'KEY1': 'val1'}) + dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertTrue(ret) + finally: + util.find_devs_with = orig_find_devs_with + self.assertEqual('opennebula', dsrc.cloud_name) + self.assertEqual('opennebula', dsrc.platform_type) + self.assertEqual( + 'seed-dir (%s/seed/opennebula)' % self.tmp, dsrc.subplatform) + + def test_seed_dir_non_contextdisk(self): + self.assertRaises(ds.NonContextDiskDir, ds.read_context_disk_dir, + self.seed_dir, mock.Mock()) + + def test_seed_dir_empty1_context(self): + populate_dir(self.seed_dir, {'context.sh': ''}) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertIsNone(results['userdata']) + self.assertEqual(results['metadata'], {}) + + def test_seed_dir_empty2_context(self): + populate_context_dir(self.seed_dir, {}) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertIsNone(results['userdata']) + self.assertEqual(results['metadata'], {}) + + def test_seed_dir_broken_context(self): + populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT}) + + self.assertRaises(ds.BrokenContextDiskDir, + ds.read_context_disk_dir, + self.seed_dir, mock.Mock()) + + def test_context_parser(self): + populate_context_dir(self.seed_dir, TEST_VARS) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertTrue('metadata' in results) + self.assertEqual(TEST_VARS, results['metadata']) + + def test_ssh_key(self): + public_keys = ['first key', 'second key'] + for c in range(4): + for k in ('SSH_KEY', 'SSH_PUBLIC_KEY'): + my_d = os.path.join(self.tmp, "%s-%i" % (k, c)) + populate_context_dir(my_d, {k: '\n'.join(public_keys)}) + results = ds.read_context_disk_dir(my_d, mock.Mock()) + + self.assertTrue('metadata' in results) + self.assertTrue('public-keys' in results['metadata']) + self.assertEqual(public_keys, + results['metadata']['public-keys']) + + public_keys.append(SSH_KEY % (c + 1,)) + + def test_user_data_plain(self): + for k in ('USER_DATA', 'USERDATA'): + my_d = os.path.join(self.tmp, k) + populate_context_dir(my_d, {k: USER_DATA, + 'USERDATA_ENCODING': ''}) + results = ds.read_context_disk_dir(my_d, mock.Mock()) + + self.assertTrue('userdata' in results) + self.assertEqual(USER_DATA, results['userdata']) + + def test_user_data_encoding_required_for_decode(self): + b64userdata = util.b64e(USER_DATA) + for k in ('USER_DATA', 'USERDATA'): + my_d = os.path.join(self.tmp, k) + populate_context_dir(my_d, {k: b64userdata}) + results = ds.read_context_disk_dir(my_d, mock.Mock()) + + self.assertTrue('userdata' in results) + self.assertEqual(b64userdata, results['userdata']) + + def test_user_data_base64_encoding(self): + for k in ('USER_DATA', 'USERDATA'): + my_d = os.path.join(self.tmp, k) + populate_context_dir(my_d, {k: util.b64e(USER_DATA), + 'USERDATA_ENCODING': 'base64'}) + results = ds.read_context_disk_dir(my_d, mock.Mock()) + + self.assertTrue('userdata' in results) + self.assertEqual(USER_DATA, results['userdata']) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_hostname(self, m_get_phys_by_mac): + for dev in ('eth0', 'ens3'): + m_get_phys_by_mac.return_value = {MACADDR: dev} + for k in ('SET_HOSTNAME', 'HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', + 'ETH0_IP'): + my_d = os.path.join(self.tmp, k) + populate_context_dir(my_d, {k: PUBLIC_IP}) + results = ds.read_context_disk_dir(my_d, mock.Mock()) + + self.assertTrue('metadata' in results) + self.assertTrue('local-hostname' in results['metadata']) + self.assertEqual( + 
PUBLIC_IP, results['metadata']['local-hostname']) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_network_interfaces(self, m_get_phys_by_mac): + for dev in ('eth0', 'ens3'): + m_get_phys_by_mac.return_value = {MACADDR: dev} + + # without ETH0_MAC + # for Older OpenNebula? + populate_context_dir(self.seed_dir, {'ETH0_IP': IP_BY_MACADDR}) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertTrue('network-interfaces' in results) + self.assertTrue( + IP_BY_MACADDR + '/' + IP4_PREFIX in + results['network-interfaces']['ethernets'][dev]['addresses']) + + # ETH0_IP and ETH0_MAC + populate_context_dir( + self.seed_dir, {'ETH0_IP': IP_BY_MACADDR, 'ETH0_MAC': MACADDR}) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertTrue('network-interfaces' in results) + self.assertTrue( + IP_BY_MACADDR + '/' + IP4_PREFIX in + results['network-interfaces']['ethernets'][dev]['addresses']) + + # ETH0_IP with empty string and ETH0_MAC + # in the case of using Virtual Network contains + # "AR = [ TYPE = ETHER ]" + populate_context_dir( + self.seed_dir, {'ETH0_IP': '', 'ETH0_MAC': MACADDR}) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertTrue('network-interfaces' in results) + self.assertTrue( + IP_BY_MACADDR + '/' + IP4_PREFIX in + results['network-interfaces']['ethernets'][dev]['addresses']) + + # ETH0_MASK + populate_context_dir( + self.seed_dir, { + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_MAC': MACADDR, + 'ETH0_MASK': '255.255.0.0' + }) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertTrue('network-interfaces' in results) + self.assertTrue( + IP_BY_MACADDR + '/16' in + results['network-interfaces']['ethernets'][dev]['addresses']) + + # ETH0_MASK with empty string + populate_context_dir( + self.seed_dir, { + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_MAC': MACADDR, + 'ETH0_MASK': '' + }) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertTrue('network-interfaces' in results) + self.assertTrue( + IP_BY_MACADDR + '/' + IP4_PREFIX in + results['network-interfaces']['ethernets'][dev]['addresses']) + + # ETH0_IP6 + populate_context_dir( + self.seed_dir, { + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_MAC': MACADDR, + }) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertTrue('network-interfaces' in results) + self.assertTrue( + IP6_GLOBAL + '/64' in + results['network-interfaces']['ethernets'][dev]['addresses']) + + # ETH0_IP6_ULA + populate_context_dir( + self.seed_dir, { + 'ETH0_IP6_ULA': IP6_ULA, + 'ETH0_MAC': MACADDR, + }) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertTrue('network-interfaces' in results) + self.assertTrue( + IP6_ULA + '/64' in + results['network-interfaces']['ethernets'][dev]['addresses']) + + # ETH0_IP6 and ETH0_IP6_PREFIX_LENGTH + populate_context_dir( + self.seed_dir, { + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX, + 'ETH0_MAC': MACADDR, + }) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertTrue('network-interfaces' in results) + self.assertTrue( + IP6_GLOBAL + '/' + IP6_PREFIX in + results['network-interfaces']['ethernets'][dev]['addresses']) + + # ETH0_IP6 and ETH0_IP6_PREFIX_LENGTH with empty string + populate_context_dir( + self.seed_dir, { + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_PREFIX_LENGTH': '', + 'ETH0_MAC': MACADDR, + }) + results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) + + self.assertTrue('network-interfaces' in results) + 
self.assertTrue( + IP6_GLOBAL + '/64' in + results['network-interfaces']['ethernets'][dev]['addresses']) + + def test_find_candidates(self): + def my_devs_with(criteria): + return { + "LABEL=CONTEXT": ["/dev/sdb"], + "LABEL=CDROM": ["/dev/sr0"], + "TYPE=iso9660": ["/dev/vdb"], + }.get(criteria, []) + + orig_find_devs_with = util.find_devs_with + try: + util.find_devs_with = my_devs_with + self.assertEqual(["/dev/sdb", "/dev/sr0", "/dev/vdb"], + ds.find_candidate_devs()) + finally: + util.find_devs_with = orig_find_devs_with + + +@mock.patch(DS_PATH + '.net.get_interfaces_by_mac', mock.Mock(return_value={})) +class TestOpenNebulaNetwork(unittest.TestCase): + + system_nics = ('eth0', 'ens3') + + def test_context_devname(self): + """Verify context_devname correctly returns mac and name.""" + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH1_MAC': '02:00:0a:12:0f:0f', } + expected = { + '02:00:0a:12:01:01': 'ETH0', + '02:00:0a:12:0f:0f': 'ETH1', } + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(expected, net.context_devname) + + def test_get_nameservers(self): + """ + Verify get_nameservers('device') correctly returns DNS server addresses + and search domains. + """ + context = { + 'DNS': '1.2.3.8', + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_SEARCH_DOMAIN': 'example.com example.org', } + expected = { + 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], + 'search': ['example.com', 'example.org']} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_nameservers('eth0') + self.assertEqual(expected, val) + + def test_get_mtu(self): + """Verify get_mtu('device') correctly returns MTU size.""" + context = {'ETH0_MTU': '1280'} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_mtu('eth0') + self.assertEqual('1280', val) + + def test_get_ip(self): + """Verify get_ip('device') correctly returns IPv4 address.""" + context = {'ETH0_IP': PUBLIC_IP} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_ip('eth0', MACADDR) + self.assertEqual(PUBLIC_IP, val) + + def test_get_ip_emptystring(self): + """ + Verify get_ip('device') correctly returns IPv4 address. + It returns the IP address derived from the MAC address if ETH0_IP is + an empty string. + """ + context = {'ETH0_IP': ''} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_ip('eth0', MACADDR) + self.assertEqual(IP_BY_MACADDR, val) + + def test_get_ip6(self): + """ + Verify get_ip6('device') correctly returns IPv6 address. + In this case, the IPv6 address is given by ETH0_IP6. + """ + context = { + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_ULA': '', } + expected = [IP6_GLOBAL] + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_ip6('eth0') + self.assertEqual(expected, val) + + def test_get_ip6_ula(self): + """ + Verify get_ip6('device') correctly returns IPv6 address. + In this case, the IPv6 address is given by ETH0_IP6_ULA. + """ + context = { + 'ETH0_IP6': '', + 'ETH0_IP6_ULA': IP6_ULA, } + expected = [IP6_ULA] + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_ip6('eth0') + self.assertEqual(expected, val) + + def test_get_ip6_dual(self): + """ + Verify get_ip6('device') correctly returns IPv6 address. + In this case, the IPv6 addresses are given by ETH0_IP6 and ETH0_IP6_ULA.
+ """ + context = { + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_ULA': IP6_ULA, } + expected = [IP6_GLOBAL, IP6_ULA] + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_ip6('eth0') + self.assertEqual(expected, val) + + def test_get_ip6_prefix(self): + """ + Verify get_ip6_prefix('device') correctly returns IPv6 prefix. + """ + context = {'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_ip6_prefix('eth0') + self.assertEqual(IP6_PREFIX, val) + + def test_get_ip6_prefix_emptystring(self): + """ + Verify get_ip6_prefix('device') correctly returns IPv6 prefix. + It returns default value '64' if ETH0_IP6_PREFIX_LENGTH has empty + string. + """ + context = {'ETH0_IP6_PREFIX_LENGTH': ''} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_ip6_prefix('eth0') + self.assertEqual('64', val) + + def test_get_gateway(self): + """ + Verify get_gateway('device') correctly returns IPv4 default gateway + address. + """ + context = {'ETH0_GATEWAY': '1.2.3.5'} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_gateway('eth0') + self.assertEqual('1.2.3.5', val) + + def test_get_gateway6(self): + """ + Verify get_gateway6('device') correctly returns IPv6 default gateway + address. + """ + for k in ('GATEWAY6', 'IP6_GATEWAY'): + context = {'ETH0_' + k: IP6_GW} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_gateway6('eth0') + self.assertEqual(IP6_GW, val) + + def test_get_mask(self): + """ + Verify get_mask('device') correctly returns IPv4 subnet mask. + """ + context = {'ETH0_MASK': '255.255.0.0'} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_mask('eth0') + self.assertEqual('255.255.0.0', val) + + def test_get_mask_emptystring(self): + """ + Verify get_mask('device') correctly returns IPv4 subnet mask. + It returns default value '255.255.255.0' if ETH0_MASK has empty string. + """ + context = {'ETH0_MASK': ''} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_mask('eth0') + self.assertEqual('255.255.255.0', val) + + def test_get_network(self): + """ + Verify get_network('device') correctly returns IPv4 network address. + """ + context = {'ETH0_NETWORK': '1.2.3.0'} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_network('eth0', MACADDR) + self.assertEqual('1.2.3.0', val) + + def test_get_network_emptystring(self): + """ + Verify get_network('device') correctly returns IPv4 network address. + It returns network address created by MAC address if ETH0_NETWORK has + empty string. + """ + context = {'ETH0_NETWORK': ''} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_network('eth0', MACADDR) + self.assertEqual('10.18.1.0', val) + + def test_get_field(self): + """ + Verify get_field('device', 'name') returns *context* value. + """ + context = {'ETH9_DUMMY': 'DUMMY_VALUE'} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_field('eth9', 'dummy') + self.assertEqual('DUMMY_VALUE', val) + + def test_get_field_withdefaultvalue(self): + """ + Verify get_field('device', 'name', 'default value') returns *context* + value. + """ + context = {'ETH9_DUMMY': 'DUMMY_VALUE'} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE') + self.assertEqual('DUMMY_VALUE', val) + + def test_get_field_withdefaultvalue_emptycontext(self): + """ + Verify get_field('device', 'name', 'default value') returns *default* + value if context value is empty string. 
+ """ + context = {'ETH9_DUMMY': ''} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE') + self.assertEqual('DEFAULT_VALUE', val) + + def test_get_field_emptycontext(self): + """ + Verify get_field('device', 'name') returns None if context value is + empty string. + """ + context = {'ETH9_DUMMY': ''} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_field('eth9', 'dummy') + self.assertEqual(None, val) + + def test_get_field_nonecontext(self): + """ + Verify get_field('device', 'name') returns None if context value is + None. + """ + context = {'ETH9_DUMMY': None} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_field('eth9', 'dummy') + self.assertEqual(None, val) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_gateway(self, m_get_phys_by_mac): + """Test rendering with/without IPv4 gateway""" + self.maxDiff = None + # empty ETH0_GATEWAY + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_GATEWAY': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(net.gen_conf(), expected) + + # set ETH0_GATEWAY + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_GATEWAY': '1.2.3.5', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'gateway4': '1.2.3.5', + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_gateway6(self, m_get_phys_by_mac): + """Test rendering with/without IPv6 gateway""" + self.maxDiff = None + # empty ETH0_GATEWAY6 + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_GATEWAY6': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(net.gen_conf(), expected) + + # set ETH0_GATEWAY6 + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_GATEWAY6': IP6_GW, } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'gateway6': IP6_GW, + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_ipv6address(self, m_get_phys_by_mac): + """Test rendering with/without IPv6 address""" + self.maxDiff = None + # empty ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_IP6': '', + 'ETH0_IP6_ULA': '', + 'ETH0_IP6_PREFIX_LENGTH': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(net.gen_conf(), expected) + + # 
set ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX, + 'ETH0_IP6_ULA': IP6_ULA, } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [ + IP_BY_MACADDR + '/' + IP4_PREFIX, + IP6_GLOBAL + '/' + IP6_PREFIX, + IP6_ULA + '/' + IP6_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_dns(self, m_get_phys_by_mac): + """Test rendering with/without DNS server, search domain""" + self.maxDiff = None + # empty DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'DNS': '', + 'ETH0_DNS': '', + 'ETH0_SEARCH_DOMAIN': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(net.gen_conf(), expected) + + # set DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'DNS': '1.2.3.8', + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_SEARCH_DOMAIN': 'example.com example.org', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'nameservers': { + 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], + 'search': ['example.com', 'example.org']}, + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_gen_conf_mtu(self, m_get_phys_by_mac): + """Test rendering with/without MTU""" + self.maxDiff = None + # empty ETH0_MTU + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_MTU': '', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(net.gen_conf(), expected) + + # set ETH0_MTU + context = { + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_MTU': '1280', } + for nic in self.system_nics: + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'mtu': '1280', + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_eth0(self, m_get_phys_by_mac): + for nic in self.system_nics: + m_get_phys_by_mac.return_value = {MACADDR: nic} + net = ds.OpenNebulaNetwork({}, mock.Mock()) + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + + self.assertEqual(net.gen_conf(), expected) + + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_distro_passed_through(self, m_get_physical_nics_by_mac): + ds.OpenNebulaNetwork({}, mock.sentinel.distro) + self.assertEqual( + [mock.call(mock.sentinel.distro)], + 
m_get_physical_nics_by_mac.call_args_list, + ) + + def test_eth0_override(self): + self.maxDiff = None + context = { + 'DNS': '1.2.3.8', + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_GATEWAY': '1.2.3.5', + 'ETH0_GATEWAY6': '', + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_IP6': '', + 'ETH0_IP6_PREFIX_LENGTH': '', + 'ETH0_IP6_ULA': '', + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_MASK': '255.255.0.0', + 'ETH0_MTU': '', + 'ETH0_NETWORK': '10.18.0.0', + 'ETH0_SEARCH_DOMAIN': '', + } + for nic in self.system_nics: + net = ds.OpenNebulaNetwork(context, mock.Mock(), + system_nics_by_mac={MACADDR: nic}) + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/16'], + 'gateway4': '1.2.3.5', + 'nameservers': { + 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8']}}}} + + self.assertEqual(expected, net.gen_conf()) + + def test_eth0_v4v6_override(self): + self.maxDiff = None + context = { + 'DNS': '1.2.3.8', + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_GATEWAY': '1.2.3.5', + 'ETH0_GATEWAY6': IP6_GW, + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX, + 'ETH0_IP6_ULA': IP6_ULA, + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_MASK': '255.255.0.0', + 'ETH0_MTU': '1280', + 'ETH0_NETWORK': '10.18.0.0', + 'ETH0_SEARCH_DOMAIN': 'example.com example.org', + } + for nic in self.system_nics: + net = ds.OpenNebulaNetwork(context, mock.Mock(), + system_nics_by_mac={MACADDR: nic}) + + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [ + IP_BY_MACADDR + '/16', + IP6_GLOBAL + '/' + IP6_PREFIX, + IP6_ULA + '/' + IP6_PREFIX], + 'gateway4': '1.2.3.5', + 'gateway6': IP6_GW, + 'nameservers': { + 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], + 'search': ['example.com', 'example.org']}, + 'mtu': '1280'}}} + + self.assertEqual(expected, net.gen_conf()) + + def test_multiple_nics(self): + """Test rendering multiple nics with names that differ from context.""" + self.maxDiff = None + MAC_1 = "02:00:0a:12:01:01" + MAC_2 = "02:00:0a:12:01:02" + context = { + 'DNS': '1.2.3.8', + 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_GATEWAY': '1.2.3.5', + 'ETH0_GATEWAY6': IP6_GW, + 'ETH0_IP': '10.18.1.1', + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_PREFIX_LENGTH': '', + 'ETH0_IP6_ULA': IP6_ULA, + 'ETH0_MAC': MAC_2, + 'ETH0_MASK': '255.255.0.0', + 'ETH0_MTU': '1280', + 'ETH0_NETWORK': '10.18.0.0', + 'ETH0_SEARCH_DOMAIN': 'example.com', + 'ETH3_DNS': '10.3.1.2', + 'ETH3_GATEWAY': '10.3.0.1', + 'ETH3_GATEWAY6': '', + 'ETH3_IP': '10.3.1.3', + 'ETH3_IP6': '', + 'ETH3_IP6_PREFIX_LENGTH': '', + 'ETH3_IP6_ULA': '', + 'ETH3_MAC': MAC_1, + 'ETH3_MASK': '255.255.0.0', + 'ETH3_MTU': '', + 'ETH3_NETWORK': '10.3.0.0', + 'ETH3_SEARCH_DOMAIN': 'third.example.com third.example.org', + } + net = ds.OpenNebulaNetwork( + context, + mock.Mock(), + system_nics_by_mac={MAC_1: 'enp0s25', MAC_2: 'enp1s2'} + ) + + expected = { + 'version': 2, + 'ethernets': { + 'enp1s2': { + 'match': {'macaddress': MAC_2}, + 'addresses': [ + '10.18.1.1/16', + IP6_GLOBAL + '/64', + IP6_ULA + '/64'], + 'gateway4': '1.2.3.5', + 'gateway6': IP6_GW, + 'nameservers': { + 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], + 'search': ['example.com']}, + 'mtu': '1280'}, + 'enp0s25': { + 'match': {'macaddress': MAC_1}, + 'addresses': ['10.3.1.3/16'], + 'gateway4': '10.3.0.1', + 'nameservers': { + 'addresses': ['10.3.1.2', '1.2.3.8'], + 'search': [ + 'third.example.com', + 'third.example.org']}}}} + + self.assertEqual(expected, net.gen_conf()) + + +class 
TestParseShellConfig: + @pytest.mark.allow_subp_for("bash") + def test_no_seconds(self): + cfg = '\n'.join(["foo=bar", "SECONDS=2", "xx=foo"]) + # we could test 'sleep 2', but that would make the test run slower. + ret = ds.parse_shell_config(cfg) + assert ret == {"foo": "bar", "xx": "foo"} + + +class TestGetPhysicalNicsByMac: + @pytest.mark.parametrize( + "interfaces_by_mac,physical_devs,expected_return", + [ + # No interfaces => empty return + ({}, [], {}), + # Only virtual interface => empty return + ({"mac1": "virtual0"}, [], {}), + # Only physical interface => it is returned + ({"mac2": "physical0"}, ["physical0"], {"mac2": "physical0"}), + # Combination of physical and virtual => only physical returned + ( + {"mac3": "physical1", "mac4": "virtual1"}, + ["physical1"], + {"mac3": "physical1"}, + ), + ], + ) + def test(self, interfaces_by_mac, physical_devs, expected_return): + distro = mock.Mock() + distro.networking.is_physical.side_effect = ( + lambda devname: devname in physical_devs + ) + with mock.patch( + DS_PATH + ".net.get_interfaces_by_mac", + return_value=interfaces_by_mac, + ): + assert expected_return == ds.get_physical_nics_by_mac(distro) + + +def populate_context_dir(path, variables): + data = "# Context variables generated by OpenNebula\n" + for k, v in variables.items(): + data += ("%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''"))) + populate_dir(path, {'context.sh': data}) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_openstack.py b/tests/unittests/sources/test_openstack.py new file mode 100644 index 00000000..0d6fb04a --- /dev/null +++ b/tests/unittests/sources/test_openstack.py @@ -0,0 +1,724 @@ +# Copyright (C) 2014 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This file is part of cloud-init. See LICENSE file for license information.
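An editorial aside, not part of the patch: populate_context_dir above writes a bash-sourceable context.sh, and ds.parse_shell_config evaluates it with bash (hence the allow_subp_for("bash") marker), which is why bash-managed variables such as SECONDS are absent from the result in test_no_seconds. A small sketch of the file the helper produces, using its quoting rule (an embedded ' becomes '\'' so values survive sourcing):

    variables = {'hostname': 'myhost', 'note': "it's quoted"}
    data = "# Context variables generated by OpenNebula\n"
    for k, v in variables.items():
        data += "%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''"))
    print(data, end='')
    # HOSTNAME='myhost'
    # NOTE='it'\''s quoted'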
+ +import copy +import httpretty as hp +import json +import re +from io import StringIO +from urllib.parse import urlparse + +from tests.unittests import helpers as test_helpers + +from cloudinit import helpers +from cloudinit import settings +from cloudinit.sources import BrokenMetadata, convert_vendordata, UNSET +from cloudinit.sources import DataSourceOpenStack as ds +from cloudinit.sources.helpers import openstack +from cloudinit import util + +BASE_URL = "http://169.254.169.254" +PUBKEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' +EC2_META = { + 'ami-id': 'ami-00000001', + 'ami-launch-index': '0', + 'ami-manifest-path': 'FIXME', + 'hostname': 'sm-foo-test.novalocal', + 'instance-action': 'none', + 'instance-id': 'i-00000001', + 'instance-type': 'm1.tiny', + 'local-hostname': 'sm-foo-test.novalocal', + 'local-ipv4': '0.0.0.0', + 'public-hostname': 'sm-foo-test.novalocal', + 'public-ipv4': '0.0.0.1', + 'reservation-id': 'r-iru5qm4m', +} +USER_DATA = b'#!/bin/sh\necho This is user data\n' +VENDOR_DATA = { + 'magic': '', +} +VENDOR_DATA2 = { + 'static': {} +} +OSTACK_META = { + 'availability_zone': 'nova', + 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'}, + {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}], + 'hostname': 'sm-foo-test.novalocal', + 'meta': {'dsmode': 'local', 'my-meta': 'my-value'}, + 'name': 'sm-foo-test', + 'public_keys': {'mykey': PUBKEY}, + 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c', +} +CONTENT_0 = b'This is contents of /etc/foo.cfg\n' +CONTENT_1 = b'# this is /etc/bar/bar.cfg\n' +OS_FILES = { + 'openstack/content/0000': CONTENT_0, + 'openstack/content/0001': CONTENT_1, + 'openstack/latest/meta_data.json': json.dumps(OSTACK_META), + 'openstack/latest/network_data.json': json.dumps( + {'links': [], 'networks': [], 'services': []}), + 'openstack/latest/user_data': USER_DATA, + 'openstack/latest/vendor_data.json': json.dumps(VENDOR_DATA), + 'openstack/latest/vendor_data2.json': json.dumps(VENDOR_DATA2), +} +EC2_FILES = { + 'latest/user-data': USER_DATA, +} +EC2_VERSIONS = [ + 'latest', +] + +MOCK_PATH = 'cloudinit.sources.DataSourceOpenStack.' + + +# TODO _register_uris should leverage test_ec2.register_mock_metaserver. 
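An editorial aside, not part of the patch: the _register_uris helper defined just below resolves EC2-style request paths against the nested ec2_meta dict via util.get_cfg_by_path; a directory request lists child keys (appending '/' when the child is a list or tuple), while a leaf request returns the value. A standalone sketch of that lookup rule, with get_by_path as a stand-in for util.get_cfg_by_path:

    def get_by_path(tree, pieces):
        # walk the nested dict one path component at a time
        for piece in pieces:
            if not isinstance(tree, dict) or piece not in tree:
                return None
            tree = tree[piece]
        return tree

    ec2_meta = {'instance-id': 'i-00000001',
                'block-device-mapping': {'ami': 'sda1'}}
    assert get_by_path(ec2_meta, ['instance-id']) == 'i-00000001'
    assert get_by_path(ec2_meta, ['block-device-mapping', 'ami']) == 'sda1'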
+def _register_uris(version, ec2_files, ec2_meta, os_files): + """Registers a set of url patterns into httpretty that will mimic the + same data returned by the openstack metadata service (and ec2 service).""" + + def match_ec2_url(uri, headers): + path = uri.path.strip("/") + if len(path) == 0: + return (200, headers, "\n".join(EC2_VERSIONS)) + path = uri.path.lstrip("/") + if path in ec2_files: + return (200, headers, ec2_files.get(path)) + if path == 'latest/meta-data/': + buf = StringIO() + for (k, v) in ec2_meta.items(): + if isinstance(v, (list, tuple)): + buf.write("%s/" % (k)) + else: + buf.write("%s" % (k)) + buf.write("\n") + return (200, headers, buf.getvalue()) + if path.startswith('latest/meta-data/'): + value = None + pieces = path.split("/") + if path.endswith("/"): + pieces = pieces[2:-1] + value = util.get_cfg_by_path(ec2_meta, pieces) + else: + pieces = pieces[2:] + value = util.get_cfg_by_path(ec2_meta, pieces) + if value is not None: + return (200, headers, str(value)) + return (404, headers, '') + + def match_os_uri(uri, headers): + path = uri.path.strip("/") + if path == 'openstack': + return (200, headers, "\n".join([openstack.OS_LATEST])) + path = uri.path.lstrip("/") + if path in os_files: + return (200, headers, os_files.get(path)) + return (404, headers, '') + + def get_request_callback(method, uri, headers): + uri = urlparse(uri) + path = uri.path.lstrip("/").split("/") + if path[0] == 'openstack': + return match_os_uri(uri, headers) + return match_ec2_url(uri, headers) + + hp.register_uri(hp.GET, re.compile(r'http://169.254.169.254/.*'), + body=get_request_callback) + + +def _read_metadata_service(): + return ds.read_metadata_service(BASE_URL, retries=0, timeout=0.1) + + +class TestOpenStackDataSource(test_helpers.HttprettyTestCase): + + with_logs = True + VERSION = 'latest' + + def setUp(self): + super(TestOpenStackDataSource, self).setUp() + self.tmp = self.tmp_dir() + + def test_successful(self): + _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) + f = _read_metadata_service() + self.assertEqual(VENDOR_DATA, f.get('vendordata')) + self.assertEqual(VENDOR_DATA2, f.get('vendordata2')) + self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) + self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) + self.assertEqual(2, len(f['files'])) + self.assertEqual(USER_DATA, f.get('userdata')) + self.assertEqual(EC2_META, f.get('ec2-metadata')) + self.assertEqual(2, f.get('version')) + metadata = f['metadata'] + self.assertEqual('nova', metadata.get('availability_zone')) + self.assertEqual('sm-foo-test.novalocal', metadata.get('hostname')) + self.assertEqual('sm-foo-test.novalocal', + metadata.get('local-hostname')) + self.assertEqual('sm-foo-test', metadata.get('name')) + self.assertEqual('b0fa911b-69d4-4476-bbe2-1c92bff6535c', + metadata.get('uuid')) + self.assertEqual('b0fa911b-69d4-4476-bbe2-1c92bff6535c', + metadata.get('instance-id')) + + def test_no_ec2(self): + _register_uris(self.VERSION, {}, {}, OS_FILES) + f = _read_metadata_service() + self.assertEqual(VENDOR_DATA, f.get('vendordata')) + self.assertEqual(VENDOR_DATA2, f.get('vendordata2')) + self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) + self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) + self.assertEqual(USER_DATA, f.get('userdata')) + self.assertEqual({}, f.get('ec2-metadata')) + self.assertEqual(2, f.get('version')) + + def test_bad_metadata(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('meta_data.json'): + 
os_files.pop(k, None) + _register_uris(self.VERSION, {}, {}, os_files) + self.assertRaises(openstack.NonReadable, _read_metadata_service) + + def test_bad_uuid(self): + os_files = copy.deepcopy(OS_FILES) + os_meta = copy.deepcopy(OSTACK_META) + os_meta.pop('uuid') + for k in list(os_files.keys()): + if k.endswith('meta_data.json'): + os_files[k] = json.dumps(os_meta) + _register_uris(self.VERSION, {}, {}, os_files) + self.assertRaises(BrokenMetadata, _read_metadata_service) + + def test_userdata_empty(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('user_data'): + os_files.pop(k, None) + _register_uris(self.VERSION, {}, {}, os_files) + f = _read_metadata_service() + self.assertEqual(VENDOR_DATA, f.get('vendordata')) + self.assertEqual(VENDOR_DATA2, f.get('vendordata2')) + self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) + self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) + self.assertFalse(f.get('userdata')) + + def test_vendordata_empty(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('vendor_data.json'): + os_files.pop(k, None) + _register_uris(self.VERSION, {}, {}, os_files) + f = _read_metadata_service() + self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) + self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) + self.assertFalse(f.get('vendordata')) + + def test_vendordata2_empty(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('vendor_data2.json'): + os_files.pop(k, None) + _register_uris(self.VERSION, {}, {}, os_files) + f = _read_metadata_service() + self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) + self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) + self.assertFalse(f.get('vendordata2')) + + def test_vendordata_invalid(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('vendor_data.json'): + os_files[k] = '{' # some invalid json + _register_uris(self.VERSION, {}, {}, os_files) + self.assertRaises(BrokenMetadata, _read_metadata_service) + + def test_vendordata2_invalid(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('vendor_data2.json'): + os_files[k] = '{' # some invalid json + _register_uris(self.VERSION, {}, {}, os_files) + self.assertRaises(BrokenMetadata, _read_metadata_service) + + def test_metadata_invalid(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('meta_data.json'): + os_files[k] = '{' # some invalid json + _register_uris(self.VERSION, {}, {}, os_files) + self.assertRaises(BrokenMetadata, _read_metadata_service) + + @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + def test_datasource(self, m_dhcp): + _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + self.assertIsNone(ds_os.version) + mock_path = MOCK_PATH + 'detect_openstack' + with test_helpers.mock.patch(mock_path) as m_detect_os: + m_detect_os.return_value = True + found = ds_os.get_data() + self.assertTrue(found) + self.assertEqual(2, ds_os.version) + md = dict(ds_os.metadata) + md.pop('instance-id', None) + md.pop('local-hostname', None) + self.assertEqual(OSTACK_META, md) + self.assertEqual(EC2_META, ds_os.ec2_metadata) + self.assertEqual(USER_DATA, ds_os.userdata_raw) + self.assertEqual(2, len(ds_os.files)) + self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure) 
+ self.assertEqual(VENDOR_DATA2, ds_os.vendordata2_pure) + self.assertIsNone(ds_os.vendordata_raw) + m_dhcp.assert_not_called() + + @hp.activate + @test_helpers.mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + def test_local_datasource(self, m_dhcp, m_net): + """OpenStackLocal calls EphemeralDHCPNetwork and gets instance data.""" + _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) + ds_os_local = ds.DataSourceOpenStackLocal( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + ds_os_local._fallback_interface = 'eth9' # Monkey patch for dhcp + m_dhcp.return_value = [{ + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'broadcast-address': '192.168.2.255'}] + + self.assertIsNone(ds_os_local.version) + mock_path = MOCK_PATH + 'detect_openstack' + with test_helpers.mock.patch(mock_path) as m_detect_os: + m_detect_os.return_value = True + found = ds_os_local.get_data() + self.assertTrue(found) + self.assertEqual(2, ds_os_local.version) + md = dict(ds_os_local.metadata) + md.pop('instance-id', None) + md.pop('local-hostname', None) + self.assertEqual(OSTACK_META, md) + self.assertEqual(EC2_META, ds_os_local.ec2_metadata) + self.assertEqual(USER_DATA, ds_os_local.userdata_raw) + self.assertEqual(2, len(ds_os_local.files)) + self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure) + self.assertEqual(VENDOR_DATA2, ds_os_local.vendordata2_pure) + self.assertIsNone(ds_os_local.vendordata_raw) + m_dhcp.assert_called_with('eth9', None) + + def test_bad_datasource_meta(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('meta_data.json'): + os_files[k] = '{' # some invalid json + _register_uris(self.VERSION, {}, {}, os_files) + ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, + None, + helpers.Paths({'run_dir': self.tmp})) + self.assertIsNone(ds_os.version) + mock_path = MOCK_PATH + 'detect_openstack' + with test_helpers.mock.patch(mock_path) as m_detect_os: + m_detect_os.return_value = True + found = ds_os.get_data() + self.assertFalse(found) + self.assertIsNone(ds_os.version) + self.assertIn( + 'InvalidMetaDataException: Broken metadata address' + ' http://169.254.169.25', + self.logs.getvalue()) + + def test_no_datasource(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('meta_data.json'): + os_files.pop(k) + _register_uris(self.VERSION, {}, {}, os_files) + ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, + None, + helpers.Paths({'run_dir': self.tmp})) + ds_os.ds_cfg = { + 'max_wait': 0, + 'timeout': 0, + } + self.assertIsNone(ds_os.version) + mock_path = MOCK_PATH + 'detect_openstack' + with test_helpers.mock.patch(mock_path) as m_detect_os: + m_detect_os.return_value = True + found = ds_os.get_data() + self.assertFalse(found) + self.assertIsNone(ds_os.version) + + def test_network_config_disabled_by_datasource_config(self): + """The network_config can be disabled from datasource config.""" + mock_path = MOCK_PATH + 'openstack.convert_net_json' + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + ds_os.ds_cfg = {'apply_network_config': False} + sample_json = {'links': [{'ethernet_mac_address': 'mymac'}], + 'networks': [], 'services': []} + ds_os.network_json = sample_json # Ignore this content from metadata + with test_helpers.mock.patch(mock_path) as m_convert_json: + 
self.assertIsNone(ds_os.network_config) + m_convert_json.assert_not_called() + + def test_network_config_from_network_json(self): + """The datasource gets network_config from network_data.json.""" + mock_path = MOCK_PATH + 'openstack.convert_net_json' + example_cfg = {'version': 1, 'config': []} + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + sample_json = {'links': [{'ethernet_mac_address': 'mymac'}], + 'networks': [], 'services': []} + ds_os.network_json = sample_json + with test_helpers.mock.patch(mock_path) as m_convert_json: + m_convert_json.return_value = example_cfg + self.assertEqual(example_cfg, ds_os.network_config) + self.assertIn( + 'network config provided via network_json', self.logs.getvalue()) + m_convert_json.assert_called_with(sample_json, known_macs=None) + + def test_network_config_cached(self): + """The datasource caches the network_config property.""" + mock_path = MOCK_PATH + 'openstack.convert_net_json' + example_cfg = {'version': 1, 'config': []} + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + ds_os._network_config = example_cfg + with test_helpers.mock.patch(mock_path) as m_convert_json: + self.assertEqual(example_cfg, ds_os.network_config) + m_convert_json.assert_not_called() + + def test_disabled_datasource(self): + os_files = copy.deepcopy(OS_FILES) + os_meta = copy.deepcopy(OSTACK_META) + os_meta['meta'] = { + 'dsmode': 'disabled', + } + for k in list(os_files.keys()): + if k.endswith('meta_data.json'): + os_files[k] = json.dumps(os_meta) + _register_uris(self.VERSION, {}, {}, os_files) + ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, + None, + helpers.Paths({'run_dir': self.tmp})) + ds_os.ds_cfg = { + 'max_wait': 0, + 'timeout': 0, + } + self.assertIsNone(ds_os.version) + mock_path = MOCK_PATH + 'detect_openstack' + with test_helpers.mock.patch(mock_path) as m_detect_os: + m_detect_os.return_value = True + found = ds_os.get_data() + self.assertFalse(found) + self.assertIsNone(ds_os.version) + + @hp.activate + def test_wb__crawl_metadata_does_not_persist(self): + """_crawl_metadata returns current metadata and does not cache.""" + _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + crawled_data = ds_os._crawl_metadata() + self.assertEqual(UNSET, ds_os.ec2_metadata) + self.assertIsNone(ds_os.userdata_raw) + self.assertEqual(0, len(ds_os.files)) + self.assertIsNone(ds_os.vendordata_raw) + self.assertEqual( + ['dsmode', 'ec2-metadata', 'files', 'metadata', 'networkdata', + 'userdata', 'vendordata', 'vendordata2', 'version'], + sorted(crawled_data.keys())) + self.assertEqual('local', crawled_data['dsmode']) + self.assertEqual(EC2_META, crawled_data['ec2-metadata']) + self.assertEqual(2, len(crawled_data['files'])) + md = copy.deepcopy(crawled_data['metadata']) + md.pop('instance-id') + md.pop('local-hostname') + self.assertEqual(OSTACK_META, md) + self.assertEqual( + json.loads(OS_FILES['openstack/latest/network_data.json']), + crawled_data['networkdata']) + self.assertEqual(USER_DATA, crawled_data['userdata']) + self.assertEqual(VENDOR_DATA, crawled_data['vendordata']) + self.assertEqual(VENDOR_DATA2, crawled_data['vendordata2']) + self.assertEqual(2, crawled_data['version']) + + +class TestVendorDataLoading(test_helpers.TestCase): + def cvj(self, data): + return convert_vendordata(data) + + def test_vd_load_none(self): + # non-existent
vendor-data should return none + self.assertIsNone(self.cvj(None)) + + def test_vd_load_string(self): + self.assertEqual(self.cvj("foobar"), "foobar") + + def test_vd_load_list(self): + data = [{'foo': 'bar'}, 'mystring', list(['another', 'list'])] + self.assertEqual(self.cvj(data), data) + + def test_vd_load_dict_no_ci(self): + self.assertIsNone(self.cvj({'foo': 'bar'})) + + def test_vd_load_dict_ci_dict(self): + self.assertRaises(ValueError, self.cvj, + {'foo': 'bar', 'cloud-init': {'x': 1}}) + + def test_vd_load_dict_ci_string(self): + data = {'foo': 'bar', 'cloud-init': 'VENDOR_DATA'} + self.assertEqual(self.cvj(data), data['cloud-init']) + + def test_vd_load_dict_ci_list(self): + data = {'foo': 'bar', 'cloud-init': ['VD_1', 'VD_2']} + self.assertEqual(self.cvj(data), data['cloud-init']) + + +@test_helpers.mock.patch(MOCK_PATH + 'util.is_x86') +class TestDetectOpenStack(test_helpers.CiTestCase): + + def test_detect_openstack_non_intel_x86(self, m_is_x86): + """Return True on non-intel platforms because dmi isn't conclusive.""" + m_is_x86.return_value = False + self.assertTrue( + ds.detect_openstack(), 'Expected detect_openstack == True') + + @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env') + @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') + def test_not_detect_openstack_intel_x86_ec2(self, m_dmi, m_proc_env, + m_is_x86): + """Return False on EC2 platforms.""" + m_is_x86.return_value = True + # No product_name in proc/1/environ + m_proc_env.return_value = {'HOME': '/'} + + def fake_dmi_read(dmi_key): + if dmi_key == 'system-product-name': + return 'HVM domU' # Nothing 'openstackish' on EC2 + if dmi_key == 'chassis-asset-tag': + return '' # Empty string on EC2 + assert False, 'Unexpected dmi read of %s' % dmi_key + + m_dmi.side_effect = fake_dmi_read + self.assertFalse( + ds.detect_openstack(), 'Expected detect_openstack == False on EC2') + m_proc_env.assert_called_with(1) + + @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') + def test_detect_openstack_intel_product_name_compute(self, m_dmi, + m_is_x86): + """Return True on OpenStack compute and nova instances.""" + m_is_x86.return_value = True + openstack_product_names = ['OpenStack Nova', 'OpenStack Compute'] + + for product_name in openstack_product_names: + m_dmi.return_value = product_name + self.assertTrue( + ds.detect_openstack(), 'Failed to detect_openstack') + + @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') + def test_detect_openstack_opentelekomcloud_chassis_asset_tag(self, m_dmi, + m_is_x86): + """Return True on OpenStack reporting OpenTelekomCloud asset-tag.""" + m_is_x86.return_value = True + + def fake_dmi_read(dmi_key): + if dmi_key == 'system-product-name': + return 'HVM domU' # Nothing 'openstackish' on OpenTelekomCloud + if dmi_key == 'chassis-asset-tag': + return 'OpenTelekomCloud' + assert False, 'Unexpected dmi read of %s' % dmi_key + + m_dmi.side_effect = fake_dmi_read + self.assertTrue( + ds.detect_openstack(), + 'Expected detect_openstack == True on OpenTelekomCloud') + + @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') + def test_detect_openstack_sapccloud_chassis_asset_tag(self, m_dmi, + m_is_x86): + """Return True on OpenStack reporting SAP CCloud VM asset-tag.""" + m_is_x86.return_value = True + + def fake_dmi_read(dmi_key): + if dmi_key == 'system-product-name': + return 'VMware Virtual Platform' # SAP CCloud uses VMware + if dmi_key == 'chassis-asset-tag': + return 'SAP CCloud VM' + assert False, 'Unexpected dmi read of %s' % dmi_key + + m_dmi.side_effect = 
fake_dmi_read + self.assertTrue( + ds.detect_openstack(), + 'Expected detect_openstack == True on SAP CCloud VM') + + @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') + def test_detect_openstack_oraclecloud_chassis_asset_tag(self, m_dmi, + m_is_x86): + """Return True on OpenStack reporting Oracle cloud asset-tag.""" + m_is_x86.return_value = True + + def fake_dmi_read(dmi_key): + if dmi_key == 'system-product-name': + return 'Standard PC (i440FX + PIIX, 1996)' # No match + if dmi_key == 'chassis-asset-tag': + return 'OracleCloud.com' + assert False, 'Unexpected dmi read of %s' % dmi_key + + m_dmi.side_effect = fake_dmi_read + self.assertTrue( + ds.detect_openstack(accept_oracle=True), + 'Expected detect_openstack == True on OracleCloud.com') + self.assertFalse( + ds.detect_openstack(accept_oracle=False), + 'Expected detect_openstack == False.') + + def _test_detect_openstack_nova_compute_chassis_asset_tag(self, m_dmi, + m_is_x86, + chassis_tag): + """Return True on OpenStack reporting generic asset-tag.""" + m_is_x86.return_value = True + + def fake_dmi_read(dmi_key): + if dmi_key == 'system-product-name': + return 'Generic OpenStack Platform' + if dmi_key == 'chassis-asset-tag': + return chassis_tag + assert False, 'Unexpected dmi read of %s' % dmi_key + + m_dmi.side_effect = fake_dmi_read + self.assertTrue( + ds.detect_openstack(), + 'Expected detect_openstack == True on Generic OpenStack Platform') + + @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') + def test_detect_openstack_nova_chassis_asset_tag(self, m_dmi, + m_is_x86): + self._test_detect_openstack_nova_compute_chassis_asset_tag( + m_dmi, m_is_x86, 'OpenStack Nova') + + @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') + def test_detect_openstack_compute_chassis_asset_tag(self, m_dmi, + m_is_x86): + self._test_detect_openstack_nova_compute_chassis_asset_tag( + m_dmi, m_is_x86, 'OpenStack Compute') + + @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env') + @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') + def test_detect_openstack_by_proc_1_environ(self, m_dmi, m_proc_env, + m_is_x86): + """Return True when nova product_name is specified in /proc/1/environ.""" + m_is_x86.return_value = True + # Nova product_name in proc/1/environ + m_proc_env.return_value = { + 'HOME': '/', 'product_name': 'OpenStack Nova'} + + def fake_dmi_read(dmi_key): + if dmi_key == 'system-product-name': + return 'HVM domU' # Nothing 'openstackish' + if dmi_key == 'chassis-asset-tag': + return '' # Nothing 'openstackish' + assert False, 'Unexpected dmi read of %s' % dmi_key + + m_dmi.side_effect = fake_dmi_read + self.assertTrue( + ds.detect_openstack(), + 'Expected detect_openstack == True when product_name is in /proc/1/environ') + m_proc_env.assert_called_with(1) + + +class TestMetadataReader(test_helpers.HttprettyTestCase): + """Test the MetadataReader.""" + burl = 'http://169.254.169.254/' + md_base = { + 'availability_zone': 'myaz1', + 'hostname': 'sm-foo-test.novalocal', + "keys": [{"data": PUBKEY, "name": "brickies", "type": "ssh"}], + 'launch_index': 0, + 'name': 'sm-foo-test', + 'public_keys': {'mykey': PUBKEY}, + 'project_id': '6a103f813b774b9fb15a4fcd36e1c056', + 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'} + + def register(self, path, body=None, status=200): + content = body if not isinstance(body, str) else body.encode('utf-8') + hp.register_uri( + hp.GET, self.burl + "openstack" + path, status=status, + body=content) + + def register_versions(self, versions): + self.register("", '\n'.join(versions)) + self.register("/",
'\n'.join(versions)) + + def register_version(self, version, data): + content = '\n'.join(sorted(data.keys())) + self.register(version, content) + self.register(version + "/", content) + for path, content in data.items(): + self.register("/%s/%s" % (version, path), content) + if 'user_data' not in data: + self.register("/%s/user_data" % version, "nodata", status=404) + + def test__find_working_version(self): + """Test a working version ignores unsupported.""" + unsup = "2016-11-09" + self.register_versions( + [openstack.OS_FOLSOM, openstack.OS_LIBERTY, unsup, + openstack.OS_LATEST]) + self.assertEqual( + openstack.OS_LIBERTY, + openstack.MetadataReader(self.burl)._find_working_version()) + + def test__find_working_version_uses_latest(self): + """'latest' should be used if no supported versions are found.""" + unsup1, unsup2 = ("2016-11-09", '2017-06-06') + self.register_versions([unsup1, unsup2, openstack.OS_LATEST]) + self.assertEqual( + openstack.OS_LATEST, + openstack.MetadataReader(self.burl)._find_working_version()) + + def test_read_v2_os_ocata(self): + """Validate return value of read_v2 for os_ocata data.""" + md = copy.deepcopy(self.md_base) + md['devices'] = [] + network_data = {'links': [], 'networks': [], 'services': []} + vendor_data = {} + vendor_data2 = {"static": {}} + + data = { + 'meta_data.json': json.dumps(md), + 'network_data.json': json.dumps(network_data), + 'vendor_data.json': json.dumps(vendor_data), + 'vendor_data2.json': json.dumps(vendor_data2), + } + + self.register_versions([openstack.OS_OCATA, openstack.OS_LATEST]) + self.register_version(openstack.OS_OCATA, data) + + mock_read_ec2 = test_helpers.mock.MagicMock( + return_value={'instance-id': 'unused-ec2'}) + expected_md = copy.deepcopy(md) + expected_md.update( + {'instance-id': md['uuid'], 'local-hostname': md['hostname']}) + expected = { + 'userdata': '', # Annoying, no user-data results in empty string. + 'version': 2, + 'metadata': expected_md, + 'vendordata': vendor_data, + 'vendordata2': vendor_data2, + 'networkdata': network_data, + 'ec2-metadata': mock_read_ec2.return_value, + 'files': {}, + } + reader = openstack.MetadataReader(self.burl) + reader._read_ec2_metadata = mock_read_ec2 + self.assertEqual(expected, reader.read_v2()) + self.assertEqual(1, mock_read_ec2.call_count) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_oracle.py b/tests/unittests/sources/test_oracle.py new file mode 100644 index 00000000..2aab097c --- /dev/null +++ b/tests/unittests/sources/test_oracle.py @@ -0,0 +1,797 @@ +# This file is part of cloud-init. See LICENSE file for license information.
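An editorial aside, not part of the patch: the tests below mock Oracle's metadata service, whose v2 endpoint requires an "Authorization: Bearer Oracle" header (asserted in _mock_v2_urls further down) and whose clients fall back to v1 when v2 keeps failing (exercised by TestReadOpcMetadata.test_retries). A rough standard-library sketch of that probe order, an assumption about the flow rather than the module's actual code:

    import json
    from urllib.request import Request, urlopen

    def fetch_instance(base='http://169.254.169.254/opc'):
        # try v2 first (needs the bearer header), then fall back to v1
        for version, headers in (
                (2, {'Authorization': 'Bearer Oracle'}), (1, {})):
            try:
                req = Request('%s/v%d/instance/' % (base, version),
                              headers=headers)
                with urlopen(req, timeout=5) as resp:
                    return version, json.load(resp)
            except OSError:
                continue
        raise RuntimeError('Oracle metadata service unreachable')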
+ +import base64 +import copy +import json +from contextlib import ExitStack +from unittest import mock + +import pytest + +from cloudinit.sources import DataSourceOracle as oracle +from cloudinit.sources import NetworkConfigSource +from cloudinit.sources.DataSourceOracle import OpcMetadata +from tests.unittests import helpers as test_helpers +from cloudinit.url_helper import UrlError + +DS_PATH = "cloudinit.sources.DataSourceOracle" + +# `curl -L http://169.254.169.254/opc/v1/vnics/` on an Oracle Bare Metal Machine +# with a secondary VNIC attached (vnicId truncated for Python line length) +OPC_BM_SECONDARY_VNIC_RESPONSE = """\ +[ { + "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtyvcucqkhdqmgjszebxe4hrb!!TRUNCATED||", + "privateIp" : "10.0.0.8", + "vlanTag" : 0, + "macAddr" : "90:e2:ba:d4:f1:68", + "virtualRouterIp" : "10.0.0.1", + "subnetCidrBlock" : "10.0.0.0/24", + "nicIndex" : 0 +}, { + "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtfmkxjdy2sqidndiwrsg63zf!!TRUNCATED||", + "privateIp" : "10.0.4.5", + "vlanTag" : 1, + "macAddr" : "02:00:17:05:CF:51", + "virtualRouterIp" : "10.0.4.1", + "subnetCidrBlock" : "10.0.4.0/24", + "nicIndex" : 0 +} ]""" + +# `curl -L http://169.254.169.254/opc/v1/vnics/` on an Oracle Virtual Machine +# with a secondary VNIC attached +OPC_VM_SECONDARY_VNIC_RESPONSE = """\ +[ { + "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtch72z5pd76cc2636qeqh7z_truncated", + "privateIp" : "10.0.0.230", + "vlanTag" : 1039, + "macAddr" : "02:00:17:05:D1:DB", + "virtualRouterIp" : "10.0.0.1", + "subnetCidrBlock" : "10.0.0.0/24" +}, { + "vnicId" : "ocid1.vnic.oc1.phx.abyhqljt4iew3gwmvrwrhhf3bp5drj_truncated", + "privateIp" : "10.0.0.231", + "vlanTag" : 1041, + "macAddr" : "00:00:17:02:2B:B1", + "virtualRouterIp" : "10.0.0.1", + "subnetCidrBlock" : "10.0.0.0/24" +} ]""" + + +# Fetched with `curl http://169.254.169.254/opc/v1/instance/` (and then +# truncated for line length) +OPC_V2_METADATA = """\ +{ + "availabilityDomain" : "qIZq:PHX-AD-1", + "faultDomain" : "FAULT-DOMAIN-2", + "compartmentId" : "ocid1.tenancy.oc1..aaaaaaaao7f7cccogqrg5emjxkxmTRUNCATED", + "displayName" : "instance-20200320-1400", + "hostname" : "instance-20200320-1400", + "id" : "ocid1.instance.oc1.phx.anyhqljtniwq6syc3nex55sep5w34qbwmw6TRUNCATED", + "image" : "ocid1.image.oc1.phx.aaaaaaaagmkn4gdhvvx24kiahh2b2qchsicTRUNCATED", + "metadata" : { + "ssh_authorized_keys" : "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ truncated", + "user_data" : "IyEvYmluL3NoCnRvdWNoIC90bXAvZm9v" + }, + "region" : "phx", + "canonicalRegionName" : "us-phoenix-1", + "ociAdName" : "phx-ad-3", + "shape" : "VM.Standard2.1", + "state" : "Running", + "timeCreated" : 1584727285318, + "agentConfig" : { + "monitoringDisabled" : true, + "managementDisabled" : true + } +}""" + +# Just a small meaningless change to differentiate the two metadata versions +OPC_V1_METADATA = OPC_V2_METADATA.replace("ocid1.instance", "ocid2.instance") + + +@pytest.fixture +def metadata_version(): + return 2 + + +@pytest.yield_fixture +def oracle_ds(request, fixture_utils, paths, metadata_version): + """ + Return an instantiated DataSourceOracle. + + This also performs the mocking required for the default test case: + * ``_read_system_uuid`` returns something, + * ``_is_platform_viable`` returns True, + * ``_is_iscsi_root`` returns True (the simpler code path), + * ``read_opc_metadata`` returns ``OPC_V2_METADATA`` + + (This uses the paths fixture for the required helpers.Paths object, and the + fixture_utils fixture for fetching markers.)
+ """ + sys_cfg = fixture_utils.closest_marker_first_arg_or( + request, "ds_sys_cfg", mock.MagicMock() + ) + metadata = OpcMetadata(metadata_version, json.loads(OPC_V2_METADATA), None) + with mock.patch(DS_PATH + "._read_system_uuid", return_value="someuuid"): + with mock.patch(DS_PATH + "._is_platform_viable", return_value=True): + with mock.patch(DS_PATH + "._is_iscsi_root", return_value=True): + with mock.patch( + DS_PATH + ".read_opc_metadata", + return_value=metadata, + ): + yield oracle.DataSourceOracle( + sys_cfg=sys_cfg, distro=mock.Mock(), paths=paths, + ) + + +class TestDataSourceOracle: + def test_platform_info(self, oracle_ds): + assert "oracle" == oracle_ds.cloud_name + assert "oracle" == oracle_ds.platform_type + + def test_subplatform_before_fetch(self, oracle_ds): + assert 'unknown' == oracle_ds.subplatform + + def test_platform_info_after_fetch(self, oracle_ds): + oracle_ds._get_data() + assert 'metadata (http://169.254.169.254/opc/v2/)' == \ + oracle_ds.subplatform + + @pytest.mark.parametrize('metadata_version', [1]) + def test_v1_platform_info_after_fetch(self, oracle_ds): + oracle_ds._get_data() + assert 'metadata (http://169.254.169.254/opc/v1/)' == \ + oracle_ds.subplatform + + def test_secondary_nics_disabled_by_default(self, oracle_ds): + assert not oracle_ds.ds_cfg["configure_secondary_nics"] + + @pytest.mark.ds_sys_cfg( + {"datasource": {"Oracle": {"configure_secondary_nics": True}}} + ) + def test_sys_cfg_can_enable_configure_secondary_nics(self, oracle_ds): + assert oracle_ds.ds_cfg["configure_secondary_nics"] + + +class TestIsPlatformViable(test_helpers.CiTestCase): + @mock.patch(DS_PATH + ".dmi.read_dmi_data", + return_value=oracle.CHASSIS_ASSET_TAG) + def test_expected_viable(self, m_read_dmi_data): + """System with known chassis tag is viable.""" + self.assertTrue(oracle._is_platform_viable()) + m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')]) + + @mock.patch(DS_PATH + ".dmi.read_dmi_data", return_value=None) + def test_expected_not_viable_dmi_data_none(self, m_read_dmi_data): + """System without known chassis tag is not viable.""" + self.assertFalse(oracle._is_platform_viable()) + m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')]) + + @mock.patch(DS_PATH + ".dmi.read_dmi_data", return_value="LetsGoCubs") + def test_expected_not_viable_other(self, m_read_dmi_data): + """System with unnown chassis tag is not viable.""" + self.assertFalse(oracle._is_platform_viable()) + m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')]) + + +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) +class TestNetworkConfigFromOpcImds: + def test_no_secondary_nics_does_not_mutate_input(self, oracle_ds): + oracle_ds._vnics_data = [{}] + # We test this by using in a non-dict to ensure that no dict + # operations are used; failure would be seen as exceptions + oracle_ds._network_config = object() + oracle_ds._add_network_config_from_opc_imds() + + def test_bare_metal_machine_skipped(self, oracle_ds, caplog): + # nicIndex in the first entry indicates a bare metal machine + oracle_ds._vnics_data = json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE) + # We test this by using a non-dict to ensure that no dict + # operations are used + oracle_ds._network_config = object() + oracle_ds._add_network_config_from_opc_imds() + assert 'bare metal machine' in caplog.text + + def test_missing_mac_skipped(self, oracle_ds, caplog): + oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE) + + 
oracle_ds._network_config = { + 'version': 1, 'config': [{'primary': 'nic'}] + } + with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}): + oracle_ds._add_network_config_from_opc_imds() + + assert 1 == len(oracle_ds.network_config['config']) + assert 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping' in \ + caplog.text + + def test_missing_mac_skipped_v2(self, oracle_ds, caplog): + oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE) + + oracle_ds._network_config = { + 'version': 2, 'ethernets': {'primary': {'nic': {}}} + } + with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}): + oracle_ds._add_network_config_from_opc_imds() + + assert 1 == len(oracle_ds.network_config['ethernets']) + assert 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping' in \ + caplog.text + + def test_secondary_nic(self, oracle_ds): + oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE) + oracle_ds._network_config = { + 'version': 1, 'config': [{'primary': 'nic'}] + } + mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' + with mock.patch(DS_PATH + ".get_interfaces_by_mac", + return_value={mac_addr: nic_name}): + oracle_ds._add_network_config_from_opc_imds() + + # The input is mutated + assert 2 == len(oracle_ds.network_config['config']) + + secondary_nic_cfg = oracle_ds.network_config['config'][1] + assert nic_name == secondary_nic_cfg['name'] + assert 'physical' == secondary_nic_cfg['type'] + assert mac_addr == secondary_nic_cfg['mac_address'] + assert 9000 == secondary_nic_cfg['mtu'] + + assert 1 == len(secondary_nic_cfg['subnets']) + subnet_cfg = secondary_nic_cfg['subnets'][0] + # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE + assert '10.0.0.231' == subnet_cfg['address'] + + def test_secondary_nic_v2(self, oracle_ds): + oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE) + oracle_ds._network_config = { + 'version': 2, 'ethernets': {'primary': {'nic': {}}} + } + mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' + with mock.patch(DS_PATH + ".get_interfaces_by_mac", + return_value={mac_addr: nic_name}): + oracle_ds._add_network_config_from_opc_imds() + + # The input is mutated + assert 2 == len(oracle_ds.network_config['ethernets']) + + secondary_nic_cfg = oracle_ds.network_config['ethernets']['ens3'] + assert secondary_nic_cfg['dhcp4'] is False + assert secondary_nic_cfg['dhcp6'] is False + assert mac_addr == secondary_nic_cfg['match']['macaddress'] + assert 9000 == secondary_nic_cfg['mtu'] + + assert 1 == len(secondary_nic_cfg['addresses']) + # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE + assert '10.0.0.231' == secondary_nic_cfg['addresses'][0] + + +class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase): + + def setUp(self): + super(TestNetworkConfigFiltersNetFailover, self).setUp() + self.add_patch(DS_PATH + '.get_interfaces_by_mac', + 'm_get_interfaces_by_mac') + self.add_patch(DS_PATH + '.is_netfail_master', 'm_netfail_master') + + def test_ignore_bogus_network_config(self): + netcfg = {'something': 'here'} + passed_netcfg = copy.copy(netcfg) + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + + def test_ignore_network_config_unknown_versions(self): + netcfg = {'something': 'here', 'version': 3} + passed_netcfg = copy.copy(netcfg) + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + + def test_checks_v1_type_physical_interfaces(self): + mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' + 
self.m_get_interfaces_by_mac.return_value = { + mac_addr: nic_name, + } + netcfg = {'version': 1, 'config': [ + {'type': 'physical', 'name': nic_name, 'mac_address': mac_addr, + 'subnets': [{'type': 'dhcp4'}]}]} + passed_netcfg = copy.copy(netcfg) + self.m_netfail_master.return_value = False + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + self.assertEqual([mock.call(nic_name)], + self.m_netfail_master.call_args_list) + + def test_checks_v1_skips_non_phys_interfaces(self): + mac_addr, nic_name = '00:00:17:02:2b:b1', 'bond0' + self.m_get_interfaces_by_mac.return_value = { + mac_addr: nic_name, + } + netcfg = {'version': 1, 'config': [ + {'type': 'bond', 'name': nic_name, 'mac_address': mac_addr, + 'subnets': [{'type': 'dhcp4'}]}]} + passed_netcfg = copy.copy(netcfg) + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + self.assertEqual(0, self.m_netfail_master.call_count) + + def test_removes_master_mac_property_v1(self): + nic_master, mac_master = 'ens3', self.random_string() + nic_other, mac_other = 'ens7', self.random_string() + nic_extra, mac_extra = 'enp0s1f2', self.random_string() + self.m_get_interfaces_by_mac.return_value = { + mac_master: nic_master, + mac_other: nic_other, + mac_extra: nic_extra, + } + netcfg = {'version': 1, 'config': [ + {'type': 'physical', 'name': nic_master, + 'mac_address': mac_master}, + {'type': 'physical', 'name': nic_other, 'mac_address': mac_other}, + {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra}, + ]} + + def _is_netfail_master(iface): + if iface == 'ens3': + return True + return False + self.m_netfail_master.side_effect = _is_netfail_master + expected_cfg = {'version': 1, 'config': [ + {'type': 'physical', 'name': nic_master}, + {'type': 'physical', 'name': nic_other, 'mac_address': mac_other}, + {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra}, + ]} + oracle._ensure_netfailover_safe(netcfg) + self.assertEqual(expected_cfg, netcfg) + + def test_checks_v2_type_ethernet_interfaces(self): + mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' + self.m_get_interfaces_by_mac.return_value = { + mac_addr: nic_name, + } + netcfg = {'version': 2, 'ethernets': { + nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name, + 'match': {'macaddress': mac_addr}}}} + passed_netcfg = copy.copy(netcfg) + self.m_netfail_master.return_value = False + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + self.assertEqual([mock.call(nic_name)], + self.m_netfail_master.call_args_list) + + def test_skips_v2_non_ethernet_interfaces(self): + mac_addr, nic_name = '00:00:17:02:2b:b1', 'wlps0' + self.m_get_interfaces_by_mac.return_value = { + mac_addr: nic_name, + } + netcfg = {'version': 2, 'wifis': { + nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name, + 'match': {'macaddress': mac_addr}}}} + passed_netcfg = copy.copy(netcfg) + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + self.assertEqual(0, self.m_netfail_master.call_count) + + def test_removes_master_mac_property_v2(self): + nic_master, mac_master = 'ens3', self.random_string() + nic_other, mac_other = 'ens7', self.random_string() + nic_extra, mac_extra = 'enp0s1f2', self.random_string() + self.m_get_interfaces_by_mac.return_value = { + mac_master: nic_master, + mac_other: nic_other, + mac_extra: nic_extra, + } + netcfg = {'version': 2, 'ethernets': { + nic_extra: {'dhcp4': True, 'set-name': nic_extra, + 
'match': {'macaddress': mac_extra}}, + nic_other: {'dhcp4': True, 'set-name': nic_other, + 'match': {'macaddress': mac_other}}, + nic_master: {'dhcp4': True, 'set-name': nic_master, + 'match': {'macaddress': mac_master}}, + }} + + def _is_netfail_master(iface): + if iface == 'ens3': + return True + return False + self.m_netfail_master.side_effect = _is_netfail_master + + expected_cfg = {'version': 2, 'ethernets': { + nic_master: {'dhcp4': True, 'match': {'name': nic_master}}, + nic_extra: {'dhcp4': True, 'set-name': nic_extra, + 'match': {'macaddress': mac_extra}}, + nic_other: {'dhcp4': True, 'set-name': nic_other, + 'match': {'macaddress': mac_other}}, + }} + oracle._ensure_netfailover_safe(netcfg) + self.assertEqual(expected_cfg, netcfg) + + +def _mock_v2_urls(httpretty): + def instance_callback(request, uri, response_headers): + assert request.headers.get("Authorization") == "Bearer Oracle" + return [200, response_headers, OPC_V2_METADATA] + + def vnics_callback(request, uri, response_headers): + assert request.headers.get("Authorization") == "Bearer Oracle" + return [200, response_headers, OPC_BM_SECONDARY_VNIC_RESPONSE] + + httpretty.register_uri( + httpretty.GET, + "http://169.254.169.254/opc/v2/instance/", + body=instance_callback + ) + httpretty.register_uri( + httpretty.GET, + "http://169.254.169.254/opc/v2/vnics/", + body=vnics_callback + ) + + +def _mock_no_v2_urls(httpretty): + httpretty.register_uri( + httpretty.GET, + "http://169.254.169.254/opc/v2/instance/", + status=404, + ) + httpretty.register_uri( + httpretty.GET, + "http://169.254.169.254/opc/v1/instance/", + body=OPC_V1_METADATA + ) + httpretty.register_uri( + httpretty.GET, + "http://169.254.169.254/opc/v1/vnics/", + body=OPC_BM_SECONDARY_VNIC_RESPONSE + ) + + +class TestReadOpcMetadata: + # See https://docs.pytest.org/en/stable/example + # /parametrize.html#parametrizing-conditional-raising + does_not_raise = ExitStack + + @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None) + @pytest.mark.parametrize( + 'version,setup_urls,instance_data,fetch_vnics,vnics_data', [ + (2, _mock_v2_urls, json.loads(OPC_V2_METADATA), True, + json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)), + (2, _mock_v2_urls, json.loads(OPC_V2_METADATA), False, None), + (1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), True, + json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)), + (1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), False, None), + ] + ) + def test_metadata_returned( + self, version, setup_urls, instance_data, + fetch_vnics, vnics_data, httpretty + ): + setup_urls(httpretty) + metadata = oracle.read_opc_metadata(fetch_vnics_data=fetch_vnics) + + assert version == metadata.version + assert instance_data == metadata.instance_data + assert vnics_data == metadata.vnics_data + + # No need to actually wait between retries in the tests + @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None) + @pytest.mark.parametrize( + "v2_failure_count,v1_failure_count,expected_body,expectation", + [ + (1, 0, json.loads(OPC_V2_METADATA), does_not_raise()), + (2, 0, json.loads(OPC_V2_METADATA), does_not_raise()), + (3, 0, json.loads(OPC_V1_METADATA), does_not_raise()), + (3, 1, json.loads(OPC_V1_METADATA), does_not_raise()), + (3, 2, json.loads(OPC_V1_METADATA), does_not_raise()), + (3, 3, None, pytest.raises(UrlError)), + ] + ) + def test_retries(self, v2_failure_count, v1_failure_count, 
expected_body, expectation, httpretty): + v2_responses = [httpretty.Response("", status=404)] * v2_failure_count + v2_responses.append(httpretty.Response(OPC_V2_METADATA)) + v1_responses = [httpretty.Response("", status=404)] * v1_failure_count + v1_responses.append(httpretty.Response(OPC_V1_METADATA)) + + httpretty.register_uri( + httpretty.GET, + "http://169.254.169.254/opc/v1/instance/", + responses=v1_responses, + ) + httpretty.register_uri( + httpretty.GET, + "http://169.254.169.254/opc/v2/instance/", + responses=v2_responses, + ) + with expectation: + assert expected_body == oracle.read_opc_metadata().instance_data + + +class TestCommon_GetDataBehaviour: + """This test class tests behaviour common to iSCSI and non-iSCSI root. + + It defines a fixture, parameterized_oracle_ds, which is used in all the + tests herein to test that the commonly expected behaviour is the same with + iSCSI root and without. + + (As non-iSCSI root behaviour is a superset of iSCSI root behaviour this + class is implicitly also testing all iSCSI root behaviour so there is no + separate class for that case.) + """ + + @pytest.yield_fixture(params=[True, False]) + def parameterized_oracle_ds(self, request, oracle_ds): + """oracle_ds parameterized for iSCSI and non-iSCSI root respectively""" + is_iscsi_root = request.param + with ExitStack() as stack: + stack.enter_context( + mock.patch( + DS_PATH + "._is_iscsi_root", return_value=is_iscsi_root + ) + ) + if not is_iscsi_root: + stack.enter_context( + mock.patch(DS_PATH + ".net.find_fallback_nic") + ) + stack.enter_context( + mock.patch(DS_PATH + ".dhcp.EphemeralDHCPv4") + ) + yield oracle_ds + + @mock.patch( + DS_PATH + "._is_platform_viable", mock.Mock(return_value=False) + ) + def test_false_if_platform_not_viable( + self, parameterized_oracle_ds, + ): + assert not parameterized_oracle_ds._get_data() + + @pytest.mark.parametrize( + "keyname,expected_value", + ( + ("availability-zone", "phx-ad-3"), + ("launch-index", 0), + ("local-hostname", "instance-20200320-1400"), + ( + "instance-id", + "ocid1.instance.oc1.phx" + ".anyhqljtniwq6syc3nex55sep5w34qbwmw6TRUNCATED", + ), + ("name", "instance-20200320-1400"), + ( + "public_keys", + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ truncated", + ), + ), + ) + def test_metadata_keys_set_correctly( + self, keyname, expected_value, parameterized_oracle_ds, + ): + assert parameterized_oracle_ds._get_data() + assert expected_value == parameterized_oracle_ds.metadata[keyname] + + @pytest.mark.parametrize( + "attribute_name,expected_value", + [ + ("_crawled_metadata", json.loads(OPC_V2_METADATA)), + ( + "userdata_raw", + base64.b64decode(b"IyEvYmluL3NoCnRvdWNoIC90bXAvZm9v"), + ), + ("system_uuid", "my-test-uuid"), + ], + ) + @mock.patch( + DS_PATH + "._read_system_uuid", mock.Mock(return_value="my-test-uuid") + ) + def test_attributes_set_correctly( + self, attribute_name, expected_value, parameterized_oracle_ds, + ): + assert parameterized_oracle_ds._get_data() + assert expected_value == getattr( + parameterized_oracle_ds, attribute_name + ) + + @pytest.mark.parametrize( + "ssh_keys,expected_value", + [ + # No SSH keys in metadata => no keys detected + (None, []), + # Empty SSH keys in metadata => no keys detected + ("", []), + # Single SSH key in metadata => single key detected + ("ssh-rsa ... test@test", ["ssh-rsa ... test@test"]), + # Multiple SSH keys in metadata => multiple keys detected + ( + "ssh-rsa ... test@test\nssh-rsa ... test2@test2", + ["ssh-rsa ... test@test", "ssh-rsa ... 
test2@test2"], + ), + ], + ) + def test_public_keys_handled_correctly( + self, ssh_keys, expected_value, parameterized_oracle_ds + ): + instance_data = json.loads(OPC_V1_METADATA) + if ssh_keys is None: + del instance_data["metadata"]["ssh_authorized_keys"] + else: + instance_data["metadata"]["ssh_authorized_keys"] = ssh_keys + metadata = OpcMetadata(None, instance_data, None) + with mock.patch( + DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata), + ): + assert parameterized_oracle_ds._get_data() + assert ( + expected_value == parameterized_oracle_ds.get_public_ssh_keys() + ) + + def test_missing_user_data_handled_gracefully( + self, parameterized_oracle_ds + ): + instance_data = json.loads(OPC_V1_METADATA) + del instance_data["metadata"]["user_data"] + metadata = OpcMetadata(None, instance_data, None) + with mock.patch( + DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata), + ): + assert parameterized_oracle_ds._get_data() + + assert parameterized_oracle_ds.userdata_raw is None + + def test_missing_metadata_handled_gracefully( + self, parameterized_oracle_ds + ): + instance_data = json.loads(OPC_V1_METADATA) + del instance_data["metadata"] + metadata = OpcMetadata(None, instance_data, None) + with mock.patch( + DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata), + ): + assert parameterized_oracle_ds._get_data() + + assert parameterized_oracle_ds.userdata_raw is None + assert [] == parameterized_oracle_ds.get_public_ssh_keys() + + +@mock.patch(DS_PATH + "._is_iscsi_root", lambda: False) +class TestNonIscsiRoot_GetDataBehaviour: + @mock.patch(DS_PATH + ".dhcp.EphemeralDHCPv4") + @mock.patch(DS_PATH + ".net.find_fallback_nic") + def test_read_opc_metadata_called_with_ephemeral_dhcp( + self, m_find_fallback_nic, m_EphemeralDHCPv4, oracle_ds + ): + in_context_manager = False + + def enter_context_manager(): + nonlocal in_context_manager + in_context_manager = True + + def exit_context_manager(*args): + nonlocal in_context_manager + in_context_manager = False + + m_EphemeralDHCPv4.return_value.__enter__.side_effect = ( + enter_context_manager + ) + m_EphemeralDHCPv4.return_value.__exit__.side_effect = ( + exit_context_manager + ) + + def assert_in_context_manager(**kwargs): + assert in_context_manager + return mock.MagicMock() + + with mock.patch( + DS_PATH + ".read_opc_metadata", + mock.Mock(side_effect=assert_in_context_manager), + ): + assert oracle_ds._get_data() + + assert [ + mock.call( + iface=m_find_fallback_nic.return_value, + connectivity_url_data={ + 'headers': { + 'Authorization': 'Bearer Oracle' + }, + 'url': 'http://169.254.169.254/opc/v2/instance/' + } + ) + ] == m_EphemeralDHCPv4.call_args_list + + +@mock.patch(DS_PATH + ".get_interfaces_by_mac", lambda: {}) +@mock.patch(DS_PATH + ".cmdline.read_initramfs_config") +class TestNetworkConfig: + def test_network_config_cached(self, m_read_initramfs_config, oracle_ds): + """.network_config should be cached""" + assert 0 == m_read_initramfs_config.call_count + oracle_ds.network_config # pylint: disable=pointless-statement + assert 1 == m_read_initramfs_config.call_count + oracle_ds.network_config # pylint: disable=pointless-statement + assert 1 == m_read_initramfs_config.call_count + + def test_network_cmdline(self, m_read_initramfs_config, oracle_ds): + """network_config should prefer initramfs config over fallback""" + ncfg = {"version": 1, "config": [{"a": "b"}]} + m_read_initramfs_config.return_value = copy.deepcopy(ncfg) + + assert ncfg == oracle_ds.network_config + assert 0 == 
oracle_ds.distro.generate_fallback_config.call_count + + def test_network_fallback(self, m_read_initramfs_config, oracle_ds): + """network_config should use fallback config when initramfs config is absent""" + ncfg = {"version": 1, "config": [{"a": "b"}]} + + m_read_initramfs_config.return_value = None + oracle_ds.distro.generate_fallback_config.return_value = copy.deepcopy( + ncfg + ) + + assert ncfg == oracle_ds.network_config + + @pytest.mark.parametrize( + "configure_secondary_nics,expect_secondary_nics", + [(True, True), (False, False), (None, False)], + ) + def test_secondary_nic_addition( + self, + m_read_initramfs_config, + configure_secondary_nics, + expect_secondary_nics, + oracle_ds, + ): + """Test that _add_network_config_from_opc_imds is called as expected + + (configure_secondary_nics=None is used to test the default behaviour.) + """ + m_read_initramfs_config.return_value = {"version": 1, "config": []} + + if configure_secondary_nics is not None: + oracle_ds.ds_cfg[ + "configure_secondary_nics" + ] = configure_secondary_nics + + def side_effect(self): + self._network_config["secondary_added"] = mock.sentinel.needle + + oracle_ds._vnics_data = 'DummyData' + with mock.patch.object( + oracle.DataSourceOracle, "_add_network_config_from_opc_imds", + new=side_effect, + ): + was_secondary_added = "secondary_added" in oracle_ds.network_config + assert expect_secondary_nics == was_secondary_added + + def test_secondary_nic_failure_isnt_blocking( + self, + m_read_initramfs_config, + caplog, + oracle_ds, + ): + oracle_ds.ds_cfg["configure_secondary_nics"] = True + oracle_ds._vnics_data = "DummyData" + + with mock.patch.object( + oracle.DataSourceOracle, "_add_network_config_from_opc_imds", + side_effect=Exception() + ): + network_config = oracle_ds.network_config + assert network_config == m_read_initramfs_config.return_value + assert "Failed to parse secondary network configuration" in caplog.text + + def test_ds_network_cfg_preferred_over_initramfs(self, _m): + """Ensure that DS net config is preferred over initramfs config""" + config_sources = oracle.DataSourceOracle.network_config_sources + ds_idx = config_sources.index(NetworkConfigSource.ds) + initramfs_idx = config_sources.index(NetworkConfigSource.initramfs) + assert ds_idx < initramfs_idx + + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_ovf.py b/tests/unittests/sources/test_ovf.py new file mode 100644 index 00000000..da516731 --- /dev/null +++ b/tests/unittests/sources/test_ovf.py @@ -0,0 +1,1046 @@ +# Copyright (C) 2016 Canonical Ltd. +# +# Author: Scott Moser <scott.moser@canonical.com> +# +# This file is part of cloud-init. See LICENSE file for license information. + +import base64 +import os + +from collections import OrderedDict +from textwrap import dedent + +from cloudinit import subp +from cloudinit import util +from tests.unittests.helpers import CiTestCase, mock, wrap_and_call +from cloudinit.helpers import Paths +from cloudinit.sources import DataSourceOVF as dsovf +from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( + CustomScriptNotFound) +from cloudinit.safeyaml import YAMLError + +MPATH = 'cloudinit.sources.DataSourceOVF.' + +NOT_FOUND = None + +OVF_ENV_CONTENT = """<?xml version="1.0" encoding="UTF-8"?> +<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1" + xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"> + <oe:PlatformSection> + <Kind>ESX Server</Kind> + <Version>3.0.1</Version> + <Vendor>VMware, Inc.</Vendor> + <Locale>en_US</Locale> + </oe:PlatformSection> + <PropertySection> +{properties} + </PropertySection> +</Environment> +""" + + +def fill_properties(props, template=OVF_ENV_CONTENT): + lines = [] + prop_tmpl = '<Property oe:key="{key}" oe:value="{val}"/>' + for key, val in props.items(): + lines.append(prop_tmpl.format(key=key, val=val)) + indent = "        " + properties = ''.join([indent + line + "\n" for line in lines]) + return template.format(properties=properties) + + +class TestReadOvfEnv(CiTestCase): + def test_with_b64_userdata(self): + user_data = "#!/bin/sh\necho hello world\n" + user_data_b64 = base64.b64encode(user_data.encode()).decode() + props = {"user-data": user_data_b64, "password": "passw0rd", + "instance-id": "inst-001"} + env = fill_properties(props) + md, ud, cfg = dsovf.read_ovf_environment(env) + self.assertEqual({"instance-id": "inst-001"}, md) + self.assertEqual(user_data.encode(), ud) + self.assertEqual({'password': "passw0rd"}, cfg) + + def test_with_non_b64_userdata(self): + user_data = "my-user-data" + props = {"user-data": user_data, "instance-id": "inst-001"} + env = fill_properties(props) + md, ud, cfg = dsovf.read_ovf_environment(env) + self.assertEqual({"instance-id": "inst-001"}, md) + self.assertEqual(user_data.encode(), ud) + self.assertEqual({}, cfg) + + def test_with_no_userdata(self): + props = {"password": "passw0rd", "instance-id": "inst-001"} + env = fill_properties(props) + md, ud, cfg = dsovf.read_ovf_environment(env) + self.assertEqual({"instance-id": "inst-001"}, md) + self.assertEqual({'password': "passw0rd"}, cfg) + self.assertIsNone(ud) + + def test_with_b64_network_config_enable_read_network(self): + network_config = dedent("""\ + network: + version: 2 + ethernets: + nics: + nameservers: + addresses: + - 127.0.0.53 + search: + - eng.vmware.com + - vmware.com + match: + name: eth* + gateway4: 10.10.10.253 + dhcp4: false + addresses: + - 10.10.10.1/24 + """) + network_config_b64 = base64.b64encode(network_config.encode()).decode() + props = {"network-config": network_config_b64, + "password": "passw0rd", + "instance-id": "inst-001"} + env = fill_properties(props) + md, ud, cfg = dsovf.read_ovf_environment(env, True) + self.assertEqual("inst-001", md["instance-id"]) + self.assertEqual({'password': "passw0rd"}, cfg) + self.assertEqual( + {'version': 2, 'ethernets': + {'nics': + {'nameservers': + {'addresses': ['127.0.0.53'], + 'search': ['eng.vmware.com', 'vmware.com']}, + 'match': {'name': 'eth*'}, + 'gateway4': '10.10.10.253', + 'dhcp4': False, + 'addresses': ['10.10.10.1/24']}}}, + md["network-config"]) + self.assertIsNone(ud) + + def test_with_non_b64_network_config_enable_read_network(self): + network_config = dedent("""\ + network: + version: 2 + ethernets: + nics: + nameservers: + addresses: + - 127.0.0.53 + search: + - eng.vmware.com + - vmware.com + match: + name: eth* + gateway4: 10.10.10.253 + dhcp4: false + addresses: + - 10.10.10.1/24 + """) + props = {"network-config": network_config, + "password": "passw0rd", + "instance-id": "inst-001"} + env = fill_properties(props) + md, ud, cfg = dsovf.read_ovf_environment(env, True) + self.assertEqual({"instance-id": "inst-001"}, md) + self.assertEqual({'password': "passw0rd"}, cfg) + self.assertIsNone(ud) + + def test_with_b64_network_config_disable_read_network(self): + network_config = dedent("""\ + network: + version: 2 + ethernets: + nics: + nameservers: + addresses: + - 127.0.0.53 + search: + - eng.vmware.com + - vmware.com + match: + name: eth* + gateway4: 10.10.10.253 + dhcp4: false + addresses: + - 10.10.10.1/24 + """) + network_config_b64 = base64.b64encode(network_config.encode()).decode() + props = 
{"network-config": network_config_b64, + "password": "passw0rd", + "instance-id": "inst-001"} + env = fill_properties(props) + md, ud, cfg = dsovf.read_ovf_environment(env) + self.assertEqual({"instance-id": "inst-001"}, md) + self.assertEqual({'password': "passw0rd"}, cfg) + self.assertIsNone(ud) + + +class TestMarkerFiles(CiTestCase): + + def setUp(self): + super(TestMarkerFiles, self).setUp() + self.tdir = self.tmp_dir() + + def test_false_when_markerid_none(self): + """Return False when markerid provided is None.""" + self.assertFalse( + dsovf.check_marker_exists(markerid=None, marker_dir=self.tdir)) + + def test_markerid_file_exist(self): + """Return False when markerid file path does not exist, + True otherwise.""" + self.assertFalse( + dsovf.check_marker_exists('123', self.tdir)) + + marker_file = self.tmp_path('.markerfile-123.txt', self.tdir) + util.write_file(marker_file, '') + self.assertTrue( + dsovf.check_marker_exists('123', self.tdir) + ) + + def test_marker_file_setup(self): + """Test creation of marker files.""" + markerfilepath = self.tmp_path('.markerfile-hi.txt', self.tdir) + self.assertFalse(os.path.exists(markerfilepath)) + dsovf.setup_marker_files(markerid='hi', marker_dir=self.tdir) + self.assertTrue(os.path.exists(markerfilepath)) + + +class TestDatasourceOVF(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestDatasourceOVF, self).setUp() + self.datasource = dsovf.DataSourceOVF + self.tdir = self.tmp_dir() + + def test_get_data_false_on_none_dmi_data(self): + """When dmi for system-product-name is None, get_data returns False.""" + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource(sys_cfg={}, distro={}, paths=paths) + retcode = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': None, + 'transport_iso9660': NOT_FOUND, + 'transport_vmware_guestinfo': NOT_FOUND}, + ds.get_data) + self.assertFalse(retcode, 'Expected False return from ds.get_data') + self.assertIn( + 'DEBUG: No system-product-name found', self.logs.getvalue()) + + def test_get_data_vmware_customization_disabled(self): + """When vmware customization is disabled via sys_cfg and + allow_raw_data is disabled via ds_cfg, log a message. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True, + 'datasource': {'OVF': {'allow_raw_data': False}}}, + distro={}, paths=paths) + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [MISC] + MARKER-ID = 12345345 + """) + util.write_file(conf_file, conf_content) + retcode = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'transport_iso9660': NOT_FOUND, + 'transport_vmware_guestinfo': NOT_FOUND, + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file}, + ds.get_data) + self.assertFalse(retcode, 'Expected False return from ds.get_data') + self.assertIn( + 'DEBUG: Customization for VMware platform is disabled.', + self.logs.getvalue()) + + def test_get_data_vmware_customization_sys_cfg_disabled(self): + """When vmware customization is disabled via sys_cfg and + no meta data is found, log a message. 
+ """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True, + 'datasource': {'OVF': {'allow_raw_data': True}}}, + distro={}, paths=paths) + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [MISC] + MARKER-ID = 12345345 + """) + util.write_file(conf_file, conf_content) + retcode = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'transport_iso9660': NOT_FOUND, + 'transport_vmware_guestinfo': NOT_FOUND, + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file}, + ds.get_data) + self.assertFalse(retcode, 'Expected False return from ds.get_data') + self.assertIn( + 'DEBUG: Customization using VMware config is disabled.', + self.logs.getvalue()) + + def test_get_data_allow_raw_data_disabled(self): + """When allow_raw_data is disabled via ds_cfg and + meta data is found, log a message. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False, + 'datasource': {'OVF': {'allow_raw_data': False}}}, + distro={}, paths=paths) + + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + """) + util.write_file(conf_file, conf_content) + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + util.write_file(metadata_file, "This is meta data") + retcode = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'transport_iso9660': NOT_FOUND, + 'transport_vmware_guestinfo': NOT_FOUND, + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'collect_imc_file_paths': [self.tdir + '/test-meta', '', '']}, + ds.get_data) + self.assertFalse(retcode, 'Expected False return from ds.get_data') + self.assertIn( + 'DEBUG: Customization using raw data is disabled.', + self.logs.getvalue()) + + def test_get_data_vmware_customization_enabled(self): + """When cloud-init workflow for vmware is enabled via sys_cfg log a + message. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False}, distro={}, + paths=paths) + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345345 + """) + util.write_file(conf_file, conf_content) + with mock.patch(MPATH + 'get_tools_config', return_value='true'): + with self.assertRaises(CustomScriptNotFound) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + customscript = self.tmp_path('test-script', self.tdir) + self.assertIn('Script %s not found!!' % customscript, + str(context.exception)) + + def test_get_data_cust_script_disabled(self): + """If custom script is disabled by VMware tools configuration, + raise a RuntimeError. 
+ """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345346 + """) + util.write_file(conf_file, conf_content) + # Prepare the custom sript + customscript = self.tmp_path('test-script', self.tdir) + util.write_file(customscript, "This is the post cust script") + + with mock.patch(MPATH + 'get_tools_config', return_value='invalid'): + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(RuntimeError) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + self.assertIn('Custom script is disabled by VM Administrator', + str(context.exception)) + + def test_get_data_cust_script_enabled(self): + """If custom script is enabled by VMware tools configuration, + execute the script. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345346 + """) + util.write_file(conf_file, conf_content) + + # Mock custom script is enabled by return true when calling + # get_tools_config + with mock.patch(MPATH + 'get_tools_config', return_value="true"): + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(CustomScriptNotFound) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + # Verify custom script is trying to be executed + customscript = self.tmp_path('test-script', self.tdir) + self.assertIn('Script %s not found!!' 
% customscript, + str(context.exception)) + + def test_get_data_force_run_post_script_is_yes(self): + """If DEFAULT-RUN-POST-CUST-SCRIPT is yes, custom script could run if + enable-custom-scripts is not defined in VM Tools configuration + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + # set DEFAULT-RUN-POST-CUST-SCRIPT = yes so that enable-custom-scripts + # default value is TRUE + conf_content = dedent("""\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345346 + DEFAULT-RUN-POST-CUST-SCRIPT = yes + """) + util.write_file(conf_file, conf_content) + + # Mock get_tools_config(section, key, defaultVal) to return + # defaultVal + def my_get_tools_config(*args, **kwargs): + return args[2] + + with mock.patch(MPATH + 'get_tools_config', + side_effect=my_get_tools_config): + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(CustomScriptNotFound) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + # Verify custom script still runs although it is + # disabled by VMware Tools + customscript = self.tmp_path('test-script', self.tdir) + self.assertIn('Script %s not found!!' % customscript, + str(context.exception)) + + def test_get_data_non_vmware_seed_platform_info(self): + """Platform info properly reports when on non-vmware platforms.""" + paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir}) + # Write ovf-env.xml seed file + seed_dir = self.tmp_path('seed', dir=self.tdir) + ovf_env = self.tmp_path('ovf-env.xml', dir=seed_dir) + util.write_file(ovf_env, OVF_ENV_CONTENT) + ds = self.datasource(sys_cfg={}, distro={}, paths=paths) + + self.assertEqual('ovf', ds.cloud_name) + self.assertEqual('ovf', ds.platform_type) + with mock.patch(MPATH + 'dmi.read_dmi_data', return_value='!VMware'): + with mock.patch(MPATH + 'transport_vmware_guestinfo') as m_guestd: + with mock.patch(MPATH + 'transport_iso9660') as m_iso9660: + m_iso9660.return_value = NOT_FOUND + m_guestd.return_value = NOT_FOUND + self.assertTrue(ds.get_data()) + self.assertEqual( + 'ovf (%s/seed/ovf-env.xml)' % self.tdir, + ds.subplatform) + + def test_get_data_vmware_seed_platform_info(self): + """Platform info properly reports when on VMware platform.""" + paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir}) + # Write ovf-env.xml seed file + seed_dir = self.tmp_path('seed', dir=self.tdir) + ovf_env = self.tmp_path('ovf-env.xml', dir=seed_dir) + util.write_file(ovf_env, OVF_ENV_CONTENT) + ds = self.datasource(sys_cfg={}, distro={}, paths=paths) + + self.assertEqual('ovf', ds.cloud_name) + self.assertEqual('ovf', ds.platform_type) + with mock.patch(MPATH + 'dmi.read_dmi_data', return_value='VMWare'): + with mock.patch(MPATH + 'transport_vmware_guestinfo') as m_guestd: + with mock.patch(MPATH + 'transport_iso9660') as m_iso9660: + m_iso9660.return_value = NOT_FOUND + m_guestd.return_value = NOT_FOUND + self.assertTrue(ds.get_data()) + self.assertEqual( + 'vmware (%s/seed/ovf-env.xml)' % self.tdir, + ds.subplatform) + + @mock.patch('cloudinit.subp.subp') + @mock.patch('cloudinit.sources.DataSource.persist_instance_data') + def test_get_data_vmware_guestinfo_with_network_config( + self, 
m_persist, m_subp + ): + self._test_get_data_with_network_config(guestinfo=True, iso=False) + + @mock.patch('cloudinit.subp.subp') + @mock.patch('cloudinit.sources.DataSource.persist_instance_data') + def test_get_data_iso9660_with_network_config(self, m_persist, m_subp): + self._test_get_data_with_network_config(guestinfo=False, iso=True) + + def _test_get_data_with_network_config(self, guestinfo, iso): + network_config = dedent("""\ + network: + version: 2 + ethernets: + nics: + nameservers: + addresses: + - 127.0.0.53 + search: + - vmware.com + match: + name: eth* + gateway4: 10.10.10.253 + dhcp4: false + addresses: + - 10.10.10.1/24 + """) + network_config_b64 = base64.b64encode(network_config.encode()).decode() + props = {"network-config": network_config_b64, + "password": "passw0rd", + "instance-id": "inst-001"} + env = fill_properties(props) + paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir}) + ds = self.datasource(sys_cfg={}, distro={}, paths=paths) + with mock.patch(MPATH + 'transport_vmware_guestinfo', + return_value=env if guestinfo else NOT_FOUND): + with mock.patch(MPATH + 'transport_iso9660', + return_value=env if iso else NOT_FOUND): + self.assertTrue(ds.get_data()) + self.assertEqual('inst-001', ds.metadata['instance-id']) + self.assertEqual( + {'version': 2, 'ethernets': + {'nics': + {'nameservers': + {'addresses': ['127.0.0.53'], + 'search': ['vmware.com']}, + 'match': {'name': 'eth*'}, + 'gateway4': '10.10.10.253', + 'dhcp4': False, + 'addresses': ['10.10.10.1/24']}}}, + ds.network_config) + + def test_get_data_cloudinit_metadata_json(self): + """Test metadata can be loaded to cloud-init metadata and network. + The metadata format is json. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + """) + util.write_file(conf_file, conf_content) + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_content = dedent("""\ + { + "instance-id": "cloud-vm", + "local-hostname": "my-host.domain.com", + "network": { + "version": 2, + "ethernets": { + "eths": { + "match": { + "name": "ens*" + }, + "dhcp4": true + } + } + } + } + """) + util.write_file(metadata_file, metadata_content) + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + result = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'collect_imc_file_paths': [self.tdir + '/test-meta', '', ''], + 'get_nics_to_enable': ''}, + ds._get_data) + + self.assertTrue(result) + self.assertEqual("cloud-vm", ds.metadata['instance-id']) + self.assertEqual("my-host.domain.com", ds.metadata['local-hostname']) + self.assertEqual(2, ds.network_config['version']) + self.assertTrue(ds.network_config['ethernets']['eths']['dhcp4']) + + def test_get_data_cloudinit_metadata_yaml(self): + """Test metadata can be loaded to cloud-init metadata and network. + The metadata format is yaml. 
+ """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + """) + util.write_file(conf_file, conf_content) + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_content = dedent("""\ + instance-id: cloud-vm + local-hostname: my-host.domain.com + network: + version: 2 + ethernets: + nics: + match: + name: ens* + dhcp4: yes + """) + util.write_file(metadata_file, metadata_content) + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + result = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'collect_imc_file_paths': [self.tdir + '/test-meta', '', ''], + 'get_nics_to_enable': ''}, + ds._get_data) + + self.assertTrue(result) + self.assertEqual("cloud-vm", ds.metadata['instance-id']) + self.assertEqual("my-host.domain.com", ds.metadata['local-hostname']) + self.assertEqual(2, ds.network_config['version']) + self.assertTrue(ds.network_config['ethernets']['nics']['dhcp4']) + + def test_get_data_cloudinit_metadata_not_valid(self): + """Test metadata is not JSON or YAML format. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True}, distro={}, + paths=paths) + + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + """) + util.write_file(conf_file, conf_content) + + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_content = "[This is not json or yaml format]a=b" + util.write_file(metadata_file, metadata_content) + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(YAMLError) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'collect_imc_file_paths': [ + self.tdir + '/test-meta', '', '' + ], + 'get_nics_to_enable': ''}, + ds.get_data) + + self.assertIn("expected '', but found ''", + str(context.exception)) + + def test_get_data_cloudinit_metadata_not_found(self): + """Test metadata file can't be found. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + """) + util.write_file(conf_file, conf_content) + # Don't prepare the meta data file + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(FileNotFoundError) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + + self.assertIn('is not found', str(context.exception)) + + def test_get_data_cloudinit_userdata(self): + """Test user data can be loaded to cloud-init user data. 
+ """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False}, distro={}, + paths=paths) + + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + USERDATA = test-user + """) + util.write_file(conf_file, conf_content) + + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_content = dedent("""\ + instance-id: cloud-vm + local-hostname: my-host.domain.com + network: + version: 2 + ethernets: + nics: + match: + name: ens* + dhcp4: yes + """) + util.write_file(metadata_file, metadata_content) + + # Prepare the user data file + userdata_file = self.tmp_path('test-user', self.tdir) + userdata_content = "This is the user data" + util.write_file(userdata_file, userdata_content) + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + result = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'collect_imc_file_paths': [self.tdir + '/test-meta', + self.tdir + '/test-user', ''], + 'get_nics_to_enable': ''}, + ds._get_data) + + self.assertTrue(result) + self.assertEqual("cloud-vm", ds.metadata['instance-id']) + self.assertEqual(userdata_content, ds.userdata_raw) + + def test_get_data_cloudinit_userdata_not_found(self): + """Test userdata file can't be found. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True}, distro={}, + paths=paths) + + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + USERDATA = test-user + """) + util.write_file(conf_file, conf_content) + + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_content = dedent("""\ + instance-id: cloud-vm + local-hostname: my-host.domain.com + network: + version: 2 + ethernets: + nics: + match: + name: ens* + dhcp4: yes + """) + util.write_file(metadata_file, metadata_content) + + # Don't prepare the user data file + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(FileNotFoundError) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + + self.assertIn('is not found', str(context.exception)) + + +class TestTransportIso9660(CiTestCase): + + def setUp(self): + super(TestTransportIso9660, self).setUp() + self.add_patch('cloudinit.util.find_devs_with', + 'm_find_devs_with') + self.add_patch('cloudinit.util.mounts', 'm_mounts') + self.add_patch('cloudinit.util.mount_cb', 'm_mount_cb') + self.add_patch('cloudinit.sources.DataSourceOVF.get_ovf_env', + 'm_get_ovf_env') + self.m_get_ovf_env.return_value = ('myfile', 'mycontent') + + def test_find_already_mounted(self): + """Check we call get_ovf_env from on matching mounted devices""" + mounts = { + '/dev/sr9': { + 'fstype': 'iso9660', + 'mountpoint': 'wark/media/sr9', + 'opts': 'ro', + } + } + self.m_mounts.return_value = mounts + + self.assertEqual("mycontent", dsovf.transport_iso9660()) + + def test_find_already_mounted_skips_non_iso9660(self): + """Check we call get_ovf_env ignoring non iso9660""" + mounts = { + 
'/dev/xvdb': { + 'fstype': 'vfat', + 'mountpoint': 'wark/foobar', + 'opts': 'defaults,noatime', + }, + '/dev/xvdc': { + 'fstype': 'iso9660', + 'mountpoint': 'wark/media/sr9', + 'opts': 'ro', + } + } + # We use an OrderedDict here to ensure we check xvdb before xvdc + # as we're not mocking the regex matching, however, if we place + # an entry in the results then we can be reasonably sure that + # we're skipping an entry which fails to match. + self.m_mounts.return_value = ( + OrderedDict(sorted(mounts.items(), key=lambda t: t[0]))) + + self.assertEqual("mycontent", dsovf.transport_iso9660()) + + def test_find_already_mounted_matches_kname(self): + """Check we don't regex match on the basename of the device""" + mounts = { + '/dev/foo/bar/xvdc': { + 'fstype': 'iso9660', + 'mountpoint': 'wark/media/sr9', + 'opts': 'ro', + } + } + self.m_mounts.return_value = mounts + + self.assertEqual(NOT_FOUND, dsovf.transport_iso9660()) + + def test_mount_cb_called_on_blkdevs_with_iso9660(self): + """Check we call mount_cb on blockdevs with iso9660 only""" + self.m_mounts.return_value = {} + self.m_find_devs_with.return_value = ['/dev/sr0'] + self.m_mount_cb.return_value = ("myfile", "mycontent") + + self.assertEqual("mycontent", dsovf.transport_iso9660()) + self.m_mount_cb.assert_called_with( + "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660") + + def test_mount_cb_called_on_blkdevs_with_iso9660_check_regex(self): + """Check we call mount_cb on blockdevs with iso9660 and match regex""" + self.m_mounts.return_value = {} + self.m_find_devs_with.return_value = [ + '/dev/abc', '/dev/my-cdrom', '/dev/sr0'] + self.m_mount_cb.return_value = ("myfile", "mycontent") + + self.assertEqual("mycontent", dsovf.transport_iso9660()) + self.m_mount_cb.assert_called_with( + "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660") + + def test_mount_cb_not_called_no_matches(self): + """Check we don't call mount_cb if nothing matches""" + self.m_mounts.return_value = {} + self.m_find_devs_with.return_value = ['/dev/vg/myovf'] + + self.assertEqual(NOT_FOUND, dsovf.transport_iso9660()) + self.assertEqual(0, self.m_mount_cb.call_count) + + def test_mount_cb_called_require_iso_false(self): + """Check we call mount_cb on blockdevs with require_iso=False""" + self.m_mounts.return_value = {} + self.m_find_devs_with.return_value = ['/dev/xvdz'] + self.m_mount_cb.return_value = ("myfile", "mycontent") + + self.assertEqual( + "mycontent", dsovf.transport_iso9660(require_iso=False)) + + self.m_mount_cb.assert_called_with( + "/dev/xvdz", dsovf.get_ovf_env, mtype=None) + + def test_maybe_cdrom_device_none(self): + """Test maybe_cdrom_device returns False for none/empty input""" + self.assertFalse(dsovf.maybe_cdrom_device(None)) + self.assertFalse(dsovf.maybe_cdrom_device('')) + + def test_maybe_cdrom_device_non_string_exception(self): + """Test maybe_cdrom_device raises ValueError on non-string types""" + with self.assertRaises(ValueError): + dsovf.maybe_cdrom_device({'a': 'eleven'}) + + def test_maybe_cdrom_device_false_on_multi_dir_paths(self): + """Test maybe_cdrom_device is false on /dev[/.*]/* paths""" + self.assertFalse(dsovf.maybe_cdrom_device('/dev/foo/sr0')) + self.assertFalse(dsovf.maybe_cdrom_device('foo/sr0')) + self.assertFalse(dsovf.maybe_cdrom_device('../foo/sr0')) + + def test_maybe_cdrom_device_true_on_hd_partitions(self): + """Test maybe_cdrom_device is true on /dev/hd[a-z][0-9]+ paths""" 
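+ # hd[a-z] device names and their partitions pass the device-name check.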
self.assertTrue(dsovf.maybe_cdrom_device('/dev/hda1')) + self.assertTrue(dsovf.maybe_cdrom_device('hdz9')) + + def test_maybe_cdrom_device_true_on_valid_relative_paths(self): + """Test maybe_cdrom_device normalizes paths""" + self.assertTrue(dsovf.maybe_cdrom_device('/dev/wark/../sr9')) + self.assertTrue(dsovf.maybe_cdrom_device('///sr0')) + self.assertTrue(dsovf.maybe_cdrom_device('/sr0')) + self.assertTrue(dsovf.maybe_cdrom_device('//dev//hda')) + + def test_maybe_cdrom_device_true_on_xvd_partitions(self): + """Test maybe_cdrom_device returns true on xvd*""" + self.assertTrue(dsovf.maybe_cdrom_device('/dev/xvda')) + self.assertTrue(dsovf.maybe_cdrom_device('/dev/xvda1')) + self.assertTrue(dsovf.maybe_cdrom_device('xvdza1')) + + +@mock.patch(MPATH + "subp.which") +@mock.patch(MPATH + "subp.subp") +class TestTransportVmwareGuestinfo(CiTestCase): + """Test the com.vmware.guestInfo transport implemented in + transport_vmware_guestinfo.""" + + rpctool = 'vmware-rpctool' + with_logs = True + rpctool_path = '/not/important/vmware-rpctool' + + def test_without_vmware_rpctool_returns_notfound(self, m_subp, m_which): + m_which.return_value = None + self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) + self.assertEqual(0, m_subp.call_count, + "subp should not be called if no rpctool in path.") + + def test_notfound_on_exit_code_1(self, m_subp, m_which): + """If vmware-rpctool exits 1, then must return not found.""" + m_which.return_value = self.rpctool_path + m_subp.side_effect = subp.ProcessExecutionError( + stdout="", stderr="No value found", exit_code=1, cmd=["unused"]) + self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) + self.assertEqual(1, m_subp.call_count) + self.assertNotIn("WARNING", self.logs.getvalue(), + "exit code of 1 by rpctool should not cause warning.") + + def test_notfound_if_no_content_but_exit_zero(self, m_subp, m_which): + """If vmware-rpctool exited 0 with no stdout is normal not-found. + + This isn't actually a case I've seen. normally on "not found", + rpctool would exit 1 with 'No value found' on stderr. But cover + the case where it exited 0 and just wrote nothing to stdout. 
+ """ + m_which.return_value = self.rpctool_path + m_subp.return_value = ('', '') + self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) + self.assertEqual(1, m_subp.call_count) + + def test_notfound_and_warns_on_unexpected_exit_code(self, m_subp, m_which): + """If vmware-rpctool exits non zero or 1, warnings should be logged.""" + m_which.return_value = self.rpctool_path + m_subp.side_effect = subp.ProcessExecutionError( + stdout=None, stderr="No value found", exit_code=2, cmd=["unused"]) + self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) + self.assertEqual(1, m_subp.call_count) + self.assertIn("WARNING", self.logs.getvalue(), + "exit code of 2 by rpctool should log WARNING.") + + def test_found_when_guestinfo_present(self, m_subp, m_which): + """When there is a ovf info, transport should return it.""" + m_which.return_value = self.rpctool_path + content = fill_properties({}) + m_subp.return_value = (content, '') + self.assertEqual(content, dsovf.transport_vmware_guestinfo()) + self.assertEqual(1, m_subp.call_count) + +# +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_rbx.py b/tests/unittests/sources/test_rbx.py new file mode 100644 index 00000000..c1294c92 --- /dev/null +++ b/tests/unittests/sources/test_rbx.py @@ -0,0 +1,238 @@ +import json + +from cloudinit import helpers +from cloudinit import distros +from cloudinit.sources import DataSourceRbxCloud as ds +from tests.unittests.helpers import mock, CiTestCase, populate_dir +from cloudinit import subp + +DS_PATH = "cloudinit.sources.DataSourceRbxCloud" + +CRYPTO_PASS = "$6$uktth46t$FvpDzFD2iL9YNZIG1Epz7957hJqbH0f" \ + "QKhnzcfBcUhEodGAWRqTy7tYG4nEW7SUOYBjxOSFIQW5" \ + "tToyGP41.s1" + +CLOUD_METADATA = { + "vm": { + "memory": 4, + "cpu": 2, + "name": "vm-image-builder", + "_id": "5beab44f680cffd11f0e60fc" + }, + "additionalMetadata": { + "username": "guru", + "sshKeys": ["ssh-rsa ..."], + "password": { + "sha512": CRYPTO_PASS + } + }, + "disk": [ + {"size": 10, "type": "ssd", + "name": "vm-image-builder-os", + "_id": "5beab450680cffd11f0e60fe"}, + {"size": 2, "type": "ssd", + "name": "ubuntu-1804-bionic", + "_id": "5bef002c680cffd11f107590"} + ], + "netadp": [ + { + "ip": [{"address": "62.181.8.174"}], + "network": { + "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]}, + "routing": [], + "gateway": "62.181.8.1", + "netmask": "255.255.248.0", + "name": "public", + "type": "public", + "_id": "5784e97be2627505227b578c" + }, + "speed": 1000, + "type": "hv", + "macaddress": "00:15:5D:FF:0F:03", + "_id": "5beab450680cffd11f0e6102" + }, + { + "ip": [{"address": "10.209.78.11"}], + "network": { + "dns": {"nameservers": ["9.9.9.9", "8.8.8.8"]}, + "routing": [], + "gateway": "10.209.78.1", + "netmask": "255.255.255.0", + "name": "network-determined-bardeen", + "type": "private", + "_id": "5beaec64680cffd11f0e7c31" + }, + "speed": 1000, + "type": "hv", + "macaddress": "00:15:5D:FF:0F:24", + "_id": "5bec18c6680cffd11f0f0d8b" + } + ], + "dvddrive": [{"iso": {}}] +} + + +class TestRbxDataSource(CiTestCase): + parsed_user = None + allowed_subp = ['bash'] + + def _fetch_distro(self, kind): + cls = distros.fetch(kind) + paths = helpers.Paths({}) + return cls(kind, {}, paths) + + def setUp(self): + super(TestRbxDataSource, self).setUp() + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp} + ) + + # defaults for few tests + self.ds = ds.DataSourceRbxCloud + self.seed_dir = self.paths.seed_dir + self.sys_cfg = {'datasource': {'RbxCloud': {'dsmode': 'local'}}} + + def 
test_seed_read_user_data_callback_empty_file(self): + populate_user_metadata(self.seed_dir, '') + populate_cloud_metadata(self.seed_dir, {}) + results = ds.read_user_data_callback(self.seed_dir) + + self.assertIsNone(results) + + def test_seed_read_user_data_callback_valid_disk(self): + populate_user_metadata(self.seed_dir, '') + populate_cloud_metadata(self.seed_dir, CLOUD_METADATA) + results = ds.read_user_data_callback(self.seed_dir) + + self.assertNotEqual(results, None) + self.assertTrue('userdata' in results) + self.assertTrue('metadata' in results) + self.assertTrue('cfg' in results) + + def test_seed_read_user_data_callback_userdata(self): + userdata = "#!/bin/sh\nexit 1" + populate_user_metadata(self.seed_dir, userdata) + populate_cloud_metadata(self.seed_dir, CLOUD_METADATA) + + results = ds.read_user_data_callback(self.seed_dir) + + self.assertNotEqual(results, None) + self.assertTrue('userdata' in results) + self.assertEqual(results['userdata'], userdata) + + def test_generate_network_config(self): + expected = { + 'version': 1, + 'config': [ + { + 'subnets': [ + {'control': 'auto', + 'dns_nameservers': ['8.8.8.8', '8.8.4.4'], + 'netmask': '255.255.248.0', + 'address': '62.181.8.174', + 'type': 'static', 'gateway': '62.181.8.1'} + ], + 'type': 'physical', + 'name': 'eth0', + 'mac_address': '00:15:5d:ff:0f:03' + }, + { + 'subnets': [ + {'control': 'auto', + 'dns_nameservers': ['9.9.9.9', '8.8.8.8'], + 'netmask': '255.255.255.0', + 'address': '10.209.78.11', + 'type': 'static', + 'gateway': '10.209.78.1'} + ], + 'type': 'physical', + 'name': 'eth1', + 'mac_address': '00:15:5d:ff:0f:24' + } + ] + } + self.assertEqual( + expected, + ds.generate_network_config(CLOUD_METADATA['netadp']) + ) + + @mock.patch(DS_PATH + '.subp.subp') + def test_gratuitous_arp_run_standard_arping(self, m_subp): + """Test that gratuitous_arp runs arping with the expected + parameters.""" + items = [ + { + 'destination': '172.17.0.2', + 'source': '172.16.6.104' + }, + { + 'destination': '172.17.0.2', + 'source': '172.16.6.104', + }, + ] + ds.gratuitous_arp(items, self._fetch_distro('ubuntu')) + self.assertEqual([ + mock.call([ + 'arping', '-c', '2', '-S', + '172.16.6.104', '172.17.0.2' + ]), + mock.call([ + 'arping', '-c', '2', '-S', + '172.16.6.104', '172.17.0.2' + ]) + ], m_subp.call_args_list + ) + + @mock.patch(DS_PATH + '.subp.subp') + def test_handle_rhel_like_arping(self, m_subp): + """Test the arping flags used on RHEL-like distros.""" + items = [ + { + 'source': '172.16.6.104', + 'destination': '172.17.0.2', + } + ] + ds.gratuitous_arp(items, self._fetch_distro('fedora')) + self.assertEqual([ + mock.call( + ['arping', '-c', '2', '-s', '172.16.6.104', '172.17.0.2'] + )], + m_subp.call_args_list + ) + + @mock.patch( + DS_PATH + '.subp.subp', + side_effect=subp.ProcessExecutionError() + ) + def test_continue_on_arping_error(self, m_subp): + """Continue when the arping command fails""" + items = [ + { + 'destination': '172.17.0.2', + 'source': '172.16.6.104' + }, + { + 'destination': '172.17.0.2', + 'source': '172.16.6.104', + }, + ] + ds.gratuitous_arp(items, self._fetch_distro('ubuntu')) + self.assertEqual([ + mock.call([ + 'arping', '-c', '2', '-S', + '172.16.6.104', '172.17.0.2' + ]), + mock.call([ + 'arping', '-c', '2', '-S', + '172.16.6.104', '172.17.0.2' + ]) + ], m_subp.call_args_list + ) + + +def populate_cloud_metadata(path, data): + populate_dir(path, {'cloud.json': json.dumps(data)}) + + +def populate_user_metadata(path, data): + populate_dir(path, {'user.data': data}) diff --git a/tests/unittests/sources/test_scaleway.py 
b/tests/unittests/sources/test_scaleway.py new file mode 100644 index 00000000..33ae26b8 --- /dev/null +++ b/tests/unittests/sources/test_scaleway.py @@ -0,0 +1,473 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import json + +import httpretty +import requests + +from cloudinit import helpers +from cloudinit import settings +from cloudinit import sources +from cloudinit.sources import DataSourceScaleway + +from tests.unittests.helpers import mock, HttprettyTestCase, CiTestCase + + +class DataResponses(object): + """ + Possible responses of the API endpoint + 169.254.42.42/user_data/cloud-init and + 169.254.42.42/vendor_data/cloud-init. + """ + + FAKE_USER_DATA = '#!/bin/bash\necho "user-data"' + + @staticmethod + def rate_limited(method, uri, headers): + return 429, headers, '' + + @staticmethod + def api_error(method, uri, headers): + return 500, headers, '' + + @classmethod + def get_ok(cls, method, uri, headers): + return 200, headers, cls.FAKE_USER_DATA + + @staticmethod + def empty(method, uri, headers): + """ + No user data for this server. + """ + return 404, headers, '' + + +class MetadataResponses(object): + """ + Possible responses of the metadata API. + """ + + FAKE_METADATA = { + 'id': '00000000-0000-0000-0000-000000000000', + 'hostname': 'scaleway.host', + 'tags': [ + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", + ], + 'ssh_public_keys': [{ + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + 'fingerprint': '2048 06:ae:... login (RSA)' + }, { + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'fingerprint': '2048 06:ff:... login2 (RSA)' + }] + } + + @classmethod + def get_ok(cls, method, uri, headers): + return 200, headers, json.dumps(cls.FAKE_METADATA) + + +class TestOnScaleway(CiTestCase): + + def setUp(self): + super(TestOnScaleway, self).setUp() + self.tmp = self.tmp_dir() + + def install_mocks(self, fake_dmi, fake_file_exists, fake_cmdline): + mock, faked = fake_dmi + mock.return_value = 'Scaleway' if faked else 'Whatever' + + mock, faked = fake_file_exists + mock.return_value = faked + + mock, faked = fake_cmdline + mock.return_value = \ + 'initrd=initrd showopts scaleway nousb' if faked \ + else 'BOOT_IMAGE=/vmlinuz-3.11.0-26-generic' + + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('os.path.exists') + @mock.patch('cloudinit.dmi.read_dmi_data') + def test_not_on_scaleway(self, m_read_dmi_data, m_file_exists, + m_get_cmdline): + self.install_mocks( + fake_dmi=(m_read_dmi_data, False), + fake_file_exists=(m_file_exists, False), + fake_cmdline=(m_get_cmdline, False) + ) + self.assertFalse(DataSourceScaleway.on_scaleway()) + + # When not on Scaleway, get_data() returns False. + datasource = DataSourceScaleway.DataSourceScaleway( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}) + ) + self.assertFalse(datasource.get_data()) + + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('os.path.exists') + @mock.patch('cloudinit.dmi.read_dmi_data') + def test_on_scaleway_dmi(self, m_read_dmi_data, m_file_exists, + m_get_cmdline): + """ + dmidecode returns "Scaleway". 
+ """ + # dmidecode returns "Scaleway" + self.install_mocks( + fake_dmi=(m_read_dmi_data, True), + fake_file_exists=(m_file_exists, False), + fake_cmdline=(m_get_cmdline, False) + ) + self.assertTrue(DataSourceScaleway.on_scaleway()) + + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('os.path.exists') + @mock.patch('cloudinit.dmi.read_dmi_data') + def test_on_scaleway_var_run_scaleway(self, m_read_dmi_data, m_file_exists, + m_get_cmdline): + """ + /var/run/scaleway exists. + """ + self.install_mocks( + fake_dmi=(m_read_dmi_data, False), + fake_file_exists=(m_file_exists, True), + fake_cmdline=(m_get_cmdline, False) + ) + self.assertTrue(DataSourceScaleway.on_scaleway()) + + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('os.path.exists') + @mock.patch('cloudinit.dmi.read_dmi_data') + def test_on_scaleway_cmdline(self, m_read_dmi_data, m_file_exists, + m_get_cmdline): + """ + "scaleway" in /proc/cmdline. + """ + self.install_mocks( + fake_dmi=(m_read_dmi_data, False), + fake_file_exists=(m_file_exists, False), + fake_cmdline=(m_get_cmdline, True) + ) + self.assertTrue(DataSourceScaleway.on_scaleway()) + + +def get_source_address_adapter(*args, **kwargs): + """ + Scaleway user/vendor data API requires to be called with a privileged port. + + If the unittests are run as non-root, the user doesn't have the permission + to bind on ports below 1024. + + This function removes the bind on a privileged address, since anyway the + HTTP call is mocked by httpretty. + """ + kwargs.pop('source_address') + return requests.adapters.HTTPAdapter(*args, **kwargs) + + +class TestDataSourceScaleway(HttprettyTestCase): + + def setUp(self): + tmp = self.tmp_dir() + self.datasource = DataSourceScaleway.DataSourceScaleway( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': tmp}) + ) + super(TestDataSourceScaleway, self).setUp() + + self.metadata_url = \ + DataSourceScaleway.BUILTIN_DS_CONFIG['metadata_url'] + self.userdata_url = \ + DataSourceScaleway.BUILTIN_DS_CONFIG['userdata_url'] + self.vendordata_url = \ + DataSourceScaleway.BUILTIN_DS_CONFIG['vendordata_url'] + + self.add_patch('cloudinit.sources.DataSourceScaleway.on_scaleway', + '_m_on_scaleway', return_value=True) + self.add_patch( + 'cloudinit.sources.DataSourceScaleway.net.find_fallback_nic', + '_m_find_fallback_nic', return_value='scalewaynic0') + + @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') + @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', + get_source_address_adapter) + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('time.sleep', return_value=None) + def test_metadata_ok(self, sleep, m_get_cmdline, dhcpv4): + """ + get_data() returns metadata, user data and vendor data. 
+        """
+        m_get_cmdline.return_value = 'scaleway'
+
+        # Make the metadata, user data and vendor data APIs return valid
+        # responses
+        httpretty.register_uri(httpretty.GET, self.metadata_url,
+                               body=MetadataResponses.get_ok)
+        httpretty.register_uri(httpretty.GET, self.userdata_url,
+                               body=DataResponses.get_ok)
+        httpretty.register_uri(httpretty.GET, self.vendordata_url,
+                               body=DataResponses.get_ok)
+        self.datasource.get_data()
+
+        self.assertEqual(self.datasource.get_instance_id(),
+                         MetadataResponses.FAKE_METADATA['id'])
+        self.assertEqual(
+            sorted(self.datasource.get_public_ssh_keys()),
+            sorted([
+                'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
+                'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
+                'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
+            ]))
+        self.assertEqual(self.datasource.get_hostname(),
+                         MetadataResponses.FAKE_METADATA['hostname'])
+        self.assertEqual(self.datasource.get_userdata_raw(),
+                         DataResponses.FAKE_USER_DATA)
+        self.assertEqual(self.datasource.get_vendordata_raw(),
+                         DataResponses.FAKE_USER_DATA)
+        self.assertIsNone(self.datasource.availability_zone)
+        self.assertIsNone(self.datasource.region)
+        self.assertEqual(sleep.call_count, 0)
+
+    def test_ssh_keys_empty(self):
+        """
+        get_public_ssh_keys() should return an empty list if no ssh keys
+        are available
+        """
+        self.datasource.metadata['tags'] = []
+        self.datasource.metadata['ssh_public_keys'] = []
+        self.assertEqual(self.datasource.get_public_ssh_keys(), [])
+
+    def test_ssh_keys_only_tags(self):
+        """
+        get_public_ssh_keys() should return the list of keys available in
+        tags
+        """
+        self.datasource.metadata['tags'] = [
+            "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+            "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABCCCCC",
+        ]
+        self.datasource.metadata['ssh_public_keys'] = []
+        self.assertEqual(
+            sorted(self.datasource.get_public_ssh_keys()),
+            sorted([
+                'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
+                'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
+            ]))
+
+    def test_ssh_keys_only_conf(self):
+        """
+        get_public_ssh_keys() should return the list of keys available in
+        the ssh_public_keys field
+        """
+        self.datasource.metadata['tags'] = []
+        self.datasource.metadata['ssh_public_keys'] = [{
+            'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
+            'fingerprint': '2048 06:ae:... login (RSA)'
+        }, {
+            'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
+            'fingerprint': '2048 06:ff:... login2 (RSA)'
+        }]
+        self.assertEqual(
+            sorted(self.datasource.get_public_ssh_keys()),
+            sorted([
+                'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
+                'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
+            ]))
+
+    def test_ssh_keys_both(self):
+        """
+        get_public_ssh_keys() should return the merged set of keys available
+        in ssh_public_keys and tags
+        """
+        self.datasource.metadata['tags'] = [
+            "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD",
+        ]
+
+        self.datasource.metadata['ssh_public_keys'] = [{
+            'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
+            'fingerprint': '2048 06:ae:... login (RSA)'
+        }, {
+            'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
+            'fingerprint': '2048 06:ff:... login2 (RSA)'
+        }]
+        self.assertEqual(
+            sorted(self.datasource.get_public_ssh_keys()),
+            sorted([
+                'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC',
+                'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD',
+                'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA',
+            ]))
+
+    @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
+    @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
+                get_source_address_adapter)
+    @mock.patch('cloudinit.util.get_cmdline')
+    @mock.patch('time.sleep', return_value=None)
+    def test_metadata_404(self, sleep, m_get_cmdline, dhcpv4):
+        """
+        get_data() returns metadata, but no user data nor vendor data.
+        """
+        m_get_cmdline.return_value = 'scaleway'
+
+        # Make user and vendor data APIs return HTTP/404, which means there is
+        # no user / vendor data for the server.
+        httpretty.register_uri(httpretty.GET, self.metadata_url,
+                               body=MetadataResponses.get_ok)
+        httpretty.register_uri(httpretty.GET, self.userdata_url,
+                               body=DataResponses.empty)
+        httpretty.register_uri(httpretty.GET, self.vendordata_url,
+                               body=DataResponses.empty)
+        self.datasource.get_data()
+        self.assertIsNone(self.datasource.get_userdata_raw())
+        self.assertIsNone(self.datasource.get_vendordata_raw())
+        self.assertEqual(sleep.call_count, 0)
+
+    @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4')
+    @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
+                get_source_address_adapter)
+    @mock.patch('cloudinit.util.get_cmdline')
+    @mock.patch('time.sleep', return_value=None)
+    def test_metadata_rate_limit(self, sleep, m_get_cmdline, dhcpv4):
+        """
+        get_data() is rate limited two times by the metadata API when fetching
+        user data.
+        """
+        m_get_cmdline.return_value = 'scaleway'
+
+        httpretty.register_uri(httpretty.GET, self.metadata_url,
+                               body=MetadataResponses.get_ok)
+        httpretty.register_uri(httpretty.GET, self.vendordata_url,
+                               body=DataResponses.empty)
+
+        httpretty.register_uri(
+            httpretty.GET, self.userdata_url,
+            responses=[
+                httpretty.Response(body=DataResponses.rate_limited),
+                httpretty.Response(body=DataResponses.rate_limited),
+                httpretty.Response(body=DataResponses.get_ok),
+            ]
+        )
+        self.datasource.get_data()
+        self.assertEqual(self.datasource.get_userdata_raw(),
+                         DataResponses.FAKE_USER_DATA)
+        self.assertEqual(sleep.call_count, 2)
+
+    @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
+    @mock.patch('cloudinit.util.get_cmdline')
+    def test_network_config_ok(self, m_get_cmdline, fallback_nic):
+        """
+        network_config will only generate IPv4 config if no ipv6 data is
+        available in the metadata
+        """
+        m_get_cmdline.return_value = 'scaleway'
+        fallback_nic.return_value = 'ens2'
+        self.datasource.metadata['ipv6'] = None
+
+        netcfg = self.datasource.network_config
+        resp = {
+            'version': 1,
+            'config': [
+                {
+                    'type': 'physical',
+                    'name': 'ens2',
+                    'subnets': [{'type': 'dhcp4'}]
+                }
+            ]
+        }
+        self.assertEqual(netcfg, resp)
+
+    @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
+    @mock.patch('cloudinit.util.get_cmdline')
+    def test_network_config_ipv6_ok(self, m_get_cmdline, fallback_nic):
+        """
+        network_config will only generate IPv4/v6 configs if ipv6 data is
+        available in the metadata
+        """
+        m_get_cmdline.return_value = 'scaleway'
+        fallback_nic.return_value = 'ens2'
+        self.datasource.metadata['ipv6'] = {
+            'address': '2000:abc:4444:9876::42:999',
+            'gateway': '2000:abc:4444:9876::42:000',
+            'netmask': '127',
+        }
+
+        netcfg = self.datasource.network_config
+        resp = {
+            'version': 1,
+            'config': [
+                {
+                    'type': 'physical',
+                    'name': 'ens2',
+                    'subnets': [
+                        {
+                            'type': 'dhcp4'
+                        },
+                        {
+                            'type': 'static',
+                            'address': '2000:abc:4444:9876::42:999',
+                            'gateway': '2000:abc:4444:9876::42:000',
+                            'netmask': '127',
+                        }
+                    ]
+                }
+            ]
+        }
+        self.assertEqual(netcfg, resp)
+
+    @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
+    @mock.patch('cloudinit.util.get_cmdline')
+    def test_network_config_existing(self, m_get_cmdline, fallback_nic):
+        """
+        network_config() should return the same data if a network config
+        already exists
+        """
+        m_get_cmdline.return_value = 'scaleway'
+        self.datasource._network_config = '0xdeadbeef'
+
+        netcfg = self.datasource.network_config
+        self.assertEqual(netcfg, '0xdeadbeef')
+
+    @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
+    @mock.patch('cloudinit.util.get_cmdline')
+    def test_network_config_unset(self, m_get_cmdline, fallback_nic):
+        """
+        _network_config will be set to sources.UNSET after the first boot.
+        Make sure it behaves correctly.
+        """
+        m_get_cmdline.return_value = 'scaleway'
+        fallback_nic.return_value = 'ens2'
+        self.datasource.metadata['ipv6'] = None
+        self.datasource._network_config = sources.UNSET
+
+        resp = {
+            'version': 1,
+            'config': [
+                {
+                    'type': 'physical',
+                    'name': 'ens2',
+                    'subnets': [{'type': 'dhcp4'}]
+                }
+            ]
+        }
+
+        netcfg = self.datasource.network_config
+        self.assertEqual(netcfg, resp)
+
+    @mock.patch('cloudinit.sources.DataSourceScaleway.LOG.warning')
+    @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
+    @mock.patch('cloudinit.util.get_cmdline')
+    def test_network_config_cached_none(self, m_get_cmdline, fallback_nic,
+                                        logwarning):
+        """
+        network_config() should return config data if the cached data is None
+        rather than sources.UNSET
+        """
+        m_get_cmdline.return_value = 'scaleway'
+        fallback_nic.return_value = 'ens2'
+        self.datasource.metadata['ipv6'] = None
+        self.datasource._network_config = None
+
+        resp = {
+            'version': 1,
+            'config': [
+                {
+                    'type': 'physical',
+                    'name': 'ens2',
+                    'subnets': [{'type': 'dhcp4'}]
+                }
+            ]
+        }
+
+        netcfg = self.datasource.network_config
+        self.assertEqual(netcfg, resp)
+        logwarning.assert_called_with('Found None as cached _network_config. '
+                                      'Resetting to %s', sources.UNSET)
diff --git a/tests/unittests/sources/test_smartos.py b/tests/unittests/sources/test_smartos.py
new file mode 100644
index 00000000..e306eded
--- /dev/null
+++ b/tests/unittests/sources/test_smartos.py
@@ -0,0 +1,1163 @@
+# Copyright (C) 2013 Canonical Ltd.
+# Copyright 2019 Joyent, Inc.
+#
+# Author: Ben Howard
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+'''This is a testcase for the SmartOS datasource.
+
+It replicates a serial console and acts like the SmartOS console does in
+order to validate return responses.
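+
+The fake frames exchanged below follow the Joyent metadata protocol; see
+https://eng.joyent.com/mdata/protocol.html for the framing details.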
+ +''' + +from binascii import crc32 +import json +import multiprocessing +import os +import os.path +import re +import signal +import stat +import unittest +import uuid + +from cloudinit import serial +from cloudinit.sources import DataSourceSmartOS +from cloudinit.sources.DataSourceSmartOS import ( + convert_smartos_network_data as convert_net, + SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ, + identify_file) +from cloudinit.event import EventScope, EventType + +from cloudinit import helpers as c_helpers +from cloudinit.util import (b64e, write_file) +from cloudinit.subp import (subp, ProcessExecutionError, which) + +from tests.unittests.helpers import ( + CiTestCase, mock, FilesystemMockingTestCase, skipIf) + + +try: + import serial as _pyserial + assert _pyserial # avoid pyflakes error F401: import unused + HAS_PYSERIAL = True +except ImportError: + HAS_PYSERIAL = False + +DSMOS = 'cloudinit.sources.DataSourceSmartOS' +SDC_NICS = json.loads(""" +[ + { + "nic_tag": "external", + "primary": true, + "mtu": 1500, + "model": "virtio", + "gateway": "8.12.42.1", + "netmask": "255.255.255.0", + "ip": "8.12.42.102", + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "gateways": [ + "8.12.42.1" + ], + "vlan_id": 324, + "mac": "90:b8:d0:f5:e4:f5", + "interface": "net0", + "ips": [ + "8.12.42.102/24" + ] + }, + { + "nic_tag": "sdc_overlay/16187209", + "gateway": "192.168.128.1", + "model": "virtio", + "mac": "90:b8:d0:a5:ff:cd", + "netmask": "255.255.252.0", + "ip": "192.168.128.93", + "network_uuid": "4cad71da-09bc-452b-986d-03562a03a0a9", + "gateways": [ + "192.168.128.1" + ], + "vlan_id": 2, + "mtu": 8500, + "interface": "net1", + "ips": [ + "192.168.128.93/22" + ] + } +] +""") + + +SDC_NICS_ALT = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": [ + "8.12.42.1" + ], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": [ + "8.12.42.51/24" + ], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + "mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": [ + "10.210.1.217/24" + ], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_DHCP = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": [ + "8.12.42.1" + ], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": [ + "8.12.42.51/24" + ], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + "mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": [ + "dhcp" + ], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_MIP = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": [ + "8.12.42.1" + ], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": [ + "8.12.42.51/24", + "8.12.42.52/24" + ], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + 
"mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": [ + "10.210.1.217/24", + "10.210.1.151/24" + ], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_MIP_IPV6 = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": [ + "8.12.42.1" + ], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": [ + "2001:4800:78ff:1b:be76:4eff:fe06:96b3/64", + "8.12.42.51/24" + ], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + "mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": [ + "10.210.1.217/24" + ], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_IPV4_IPV6 = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": ["8.12.42.1", "2001::1", "2001::2"], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": ["2001::10/64", "8.12.42.51/24", "2001::11/64", + "8.12.42.52/32"], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + "mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": ["10.210.1.217/24"], + "gateways": ["10.210.1.210"], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_SINGLE_GATEWAY = json.loads(""" +[ + { + "interface":"net0", + "mac":"90:b8:d0:d8:82:b4", + "vlan_id":324, + "nic_tag":"external", + "gateway":"8.12.42.1", + "gateways":["8.12.42.1"], + "netmask":"255.255.255.0", + "ip":"8.12.42.26", + "ips":["8.12.42.26/24"], + "network_uuid":"992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model":"virtio", + "mtu":1500, + "primary":true + }, + { + "interface":"net1", + "mac":"90:b8:d0:0a:51:31", + "vlan_id":600, + "nic_tag":"internal", + "netmask":"255.255.255.0", + "ip":"10.210.1.27", + "ips":["10.210.1.27/24"], + "network_uuid":"98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model":"virtio", + "mtu":1500 + } +] +""") + + +MOCK_RETURNS = { + 'hostname': 'test-host', + 'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname', + 'disable_iptables_flag': None, + 'enable_motd_sys_info': None, + 'test-var1': 'some data', + 'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']), + 'sdc:datacenter_name': 'somewhere2', + 'sdc:operator-script': '\n'.join(['bin/true', '']), + 'sdc:uuid': str(uuid.uuid4()), + 'sdc:vendor-data': '\n'.join(['VENDOR_DATA', '']), + 'user-data': '\n'.join(['something', '']), + 'user-script': '\n'.join(['/bin/true', '']), + 'sdc:nics': json.dumps(SDC_NICS), +} + +DMI_DATA_RETURN = 'smartdc' + +# Useful for calculating the length of a frame body. A SUCCESS body will be +# followed by more characters or be one character less if SUCCESS with no +# payload. See Section 4.3 of https://eng.joyent.com/mdata/protocol.html. 
+SUCCESS_LEN = len('0123abcd SUCCESS ') +NOTFOUND_LEN = len('0123abcd NOTFOUND') + + +class PsuedoJoyentClient(object): + def __init__(self, data=None): + if data is None: + data = MOCK_RETURNS.copy() + self.data = data + self._is_open = False + return + + def get(self, key, default=None, strip=False): + if key in self.data: + r = self.data[key] + if strip: + r = r.strip() + else: + r = default + return r + + def get_json(self, key, default=None): + result = self.get(key, default=default) + if result is None: + return default + return json.loads(result) + + def exists(self): + return True + + def open_transport(self): + assert(not self._is_open) + self._is_open = True + + def close_transport(self): + assert(self._is_open) + self._is_open = False + + +class TestSmartOSDataSource(FilesystemMockingTestCase): + jmc_cfact = None + get_smartos_environ = None + + def setUp(self): + super(TestSmartOSDataSource, self).setUp() + + self.add_patch(DSMOS + ".get_smartos_environ", "get_smartos_environ") + self.add_patch(DSMOS + ".jmc_client_factory", "jmc_cfact") + self.legacy_user_d = self.tmp_path('legacy_user_tmp') + os.mkdir(self.legacy_user_d) + self.add_patch(DSMOS + ".LEGACY_USER_D", "m_legacy_user_d", + autospec=False, new=self.legacy_user_d) + self.add_patch(DSMOS + ".identify_file", "m_identify_file", + return_value="text/plain") + + def _get_ds(self, mockdata=None, mode=DataSourceSmartOS.SMARTOS_ENV_KVM, + sys_cfg=None, ds_cfg=None): + self.jmc_cfact.return_value = PsuedoJoyentClient(mockdata) + self.get_smartos_environ.return_value = mode + + tmpd = self.tmp_dir() + dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd), + 'run_dir': self.tmp_path('run_dir')} + for d in dirs.values(): + os.mkdir(d) + paths = c_helpers.Paths(dirs) + + if sys_cfg is None: + sys_cfg = {} + + if ds_cfg is not None: + sys_cfg['datasource'] = sys_cfg.get('datasource', {}) + sys_cfg['datasource']['SmartOS'] = ds_cfg + + return DataSourceSmartOS.DataSourceSmartOS( + sys_cfg, distro=None, paths=paths) + + def test_no_base64(self): + ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True} + dsrc = self._get_ds(ds_cfg=ds_cfg) + ret = dsrc.get_data() + self.assertTrue(ret) + + def test_uuid(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(MOCK_RETURNS['sdc:uuid'], + dsrc.metadata['instance-id']) + + def test_platform_info(self): + """All platform-related attributes are properly set.""" + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + self.assertEqual('joyent', dsrc.cloud_name) + self.assertEqual('joyent', dsrc.platform_type) + self.assertEqual('serial (/dev/ttyS1)', dsrc.subplatform) + + def test_root_keys(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(MOCK_RETURNS['root_authorized_keys'], + dsrc.metadata['public-keys']) + + def test_hostname_b64(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(MOCK_RETURNS['hostname'], + dsrc.metadata['local-hostname']) + + def test_hostname(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(MOCK_RETURNS['hostname'], + dsrc.metadata['local-hostname']) + + def test_hostname_if_no_sdc_hostname(self): + my_returns = MOCK_RETURNS.copy() + my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname'] + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + 
self.assertEqual(my_returns['hostname'],
+                         dsrc.metadata['local-hostname'])
+
+    def test_sdc_hostname_if_no_hostname(self):
+        my_returns = MOCK_RETURNS.copy()
+        my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname']
+        del my_returns['hostname']
+        dsrc = self._get_ds(mockdata=my_returns)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(my_returns['sdc:hostname'],
+                         dsrc.metadata['local-hostname'])
+
+    def test_sdc_uuid_if_no_hostname_or_sdc_hostname(self):
+        my_returns = MOCK_RETURNS.copy()
+        del my_returns['hostname']
+        dsrc = self._get_ds(mockdata=my_returns)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(my_returns['sdc:uuid'],
+                         dsrc.metadata['local-hostname'])
+
+    def test_userdata(self):
+        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(MOCK_RETURNS['user-data'],
+                         dsrc.metadata['legacy-user-data'])
+        self.assertEqual(MOCK_RETURNS['cloud-init:user-data'],
+                         dsrc.userdata_raw)
+
+    def test_sdc_nics(self):
+        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(json.loads(MOCK_RETURNS['sdc:nics']),
+                         dsrc.metadata['network-data'])
+
+    def test_sdc_scripts(self):
+        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(MOCK_RETURNS['user-script'],
+                         dsrc.metadata['user-script'])
+
+        legacy_script_f = "%s/user-script" % self.legacy_user_d
+        print("legacy_script_f=%s" % legacy_script_f)
+        self.assertTrue(os.path.exists(legacy_script_f))
+        self.assertTrue(os.path.islink(legacy_script_f))
+        user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
+        self.assertEqual(user_script_perm, '700')
+
+    def test_scripts_shebanged(self):
+        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(MOCK_RETURNS['user-script'],
+                         dsrc.metadata['user-script'])
+
+        legacy_script_f = "%s/user-script" % self.legacy_user_d
+        self.assertTrue(os.path.exists(legacy_script_f))
+        self.assertTrue(os.path.islink(legacy_script_f))
+        shebang = None
+        with open(legacy_script_f, 'r') as f:
+            shebang = f.readlines()[0].strip()
+        self.assertEqual(shebang, "#!/bin/bash")
+        user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
+        self.assertEqual(user_script_perm, '700')
+
+    def test_scripts_shebang_not_added(self):
+        """
+        SmartOS requires that plain-text user-scripts be executable. This
+        test makes sure that a script which already starts with a shebang
+        line is passed through without cloud-init adding another one.
+        """
+
+        my_returns = MOCK_RETURNS.copy()
+        my_returns['user-script'] = '\n'.join(['#!/usr/bin/perl',
+                                               'print("hi")', ''])
+
+        dsrc = self._get_ds(mockdata=my_returns)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(my_returns['user-script'],
+                         dsrc.metadata['user-script'])
+
+        legacy_script_f = "%s/user-script" % self.legacy_user_d
+        self.assertTrue(os.path.exists(legacy_script_f))
+        self.assertTrue(os.path.islink(legacy_script_f))
+        shebang = None
+        with open(legacy_script_f, 'r') as f:
+            shebang = f.readlines()[0].strip()
+        self.assertEqual(shebang, "#!/usr/bin/perl")
+
+    def test_userdata_removed(self):
+        """
+        User-data in the SmartOS world is supposed to be written to a file
+        each and every boot. This test makes sure that in the event the
+        legacy user-data is removed, the existing user-data is backed up
+        and there is no /var/db/user-data left.
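+        The walk over the legacy directory below asserts that no
+        mdata-user-data file remains after the datasource runs.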
+ """ + + user_data_f = "%s/mdata-user-data" % self.legacy_user_d + with open(user_data_f, 'w') as f: + f.write("PREVIOUS") + + my_returns = MOCK_RETURNS.copy() + del my_returns['user-data'] + + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertFalse(dsrc.metadata.get('legacy-user-data')) + + found_new = False + for root, _dirs, files in os.walk(self.legacy_user_d): + for name in files: + name_f = os.path.join(root, name) + permissions = oct(os.stat(name_f)[stat.ST_MODE])[-3:] + if re.match(r'.*\/mdata-user-data$', name_f): + found_new = True + print(name_f) + self.assertEqual(permissions, '400') + + self.assertFalse(found_new) + + def test_vendor_data_not_default(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(MOCK_RETURNS['sdc:vendor-data'], + dsrc.metadata['vendor-data']) + + def test_default_vendor_data(self): + my_returns = MOCK_RETURNS.copy() + def_op_script = my_returns['sdc:vendor-data'] + del my_returns['sdc:vendor-data'] + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertNotEqual(def_op_script, dsrc.metadata['vendor-data']) + + # we expect default vendor-data is a boothook + self.assertTrue(dsrc.vendordata_raw.startswith("#cloud-boothook")) + + def test_disable_iptables_flag(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(MOCK_RETURNS['disable_iptables_flag'], + dsrc.metadata['iptables_disable']) + + def test_motd_sys_info(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(MOCK_RETURNS['enable_motd_sys_info'], + dsrc.metadata['motd_sys_info']) + + def test_default_ephemeral(self): + # Test to make sure that the builtin config has the ephemeral + # configuration. 
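+        # (disk_setup/fs_setup are expected to come from the datasource's
+        # builtin cloud config rather than from the mocked metadata.)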
+        dsrc = self._get_ds()
+        cfg = dsrc.get_config_obj()
+
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+
+        assert 'disk_setup' in cfg
+        assert 'fs_setup' in cfg
+        self.assertIsInstance(cfg['disk_setup'], dict)
+        self.assertIsInstance(cfg['fs_setup'], list)
+
+    def test_override_disk_aliases(self):
+        # Test to make sure that the built-in DS config is overridden
+        builtin = DataSourceSmartOS.BUILTIN_DS_CONFIG
+
+        mydscfg = {'disk_aliases': {'FOO': '/dev/bar'}}
+
+        # expect that these values are in builtin, or this is pointless
+        for k in mydscfg:
+            self.assertIn(k, builtin)
+
+        dsrc = self._get_ds(ds_cfg=mydscfg)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+
+        self.assertEqual(mydscfg['disk_aliases']['FOO'],
+                         dsrc.ds_cfg['disk_aliases']['FOO'])
+
+        self.assertEqual(dsrc.device_name_to_device('FOO'),
+                         mydscfg['disk_aliases']['FOO'])
+
+    def test_reconfig_network_on_boot(self):
+        # Test to ensure that network is configured from metadata on each boot
+        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+        self.assertSetEqual(
+            {EventType.BOOT_NEW_INSTANCE,
+             EventType.BOOT,
+             EventType.BOOT_LEGACY},
+            dsrc.default_update_events[EventScope.NETWORK]
+        )
+
+
+class TestIdentifyFile(CiTestCase):
+    """Test the 'identify_file' utility."""
+    @skipIf(not which("file"), "command 'file' not available.")
+    def test_file_happy_path(self):
+        """Test that 'file' is available and functional on plain text."""
+        fname = self.tmp_path("myfile")
+        write_file(fname, "plain text content here\n")
+        with self.allow_subp(["file"]):
+            self.assertEqual("text/plain", identify_file(fname))
+
+    @mock.patch(DSMOS + ".subp.subp")
+    def test_returns_none_on_error(self, m_subp):
+        """On 'file' execution error, None should be returned."""
+        m_subp.side_effect = ProcessExecutionError("FILE_FAILED", exit_code=99)
+        fname = self.tmp_path("myfile")
+        write_file(fname, "plain text content here\n")
+        self.assertEqual(None, identify_file(fname))
+        self.assertEqual(
+            [mock.call(["file", "--brief", "--mime-type", fname])],
+            m_subp.call_args_list)
+
+
+class ShortReader(object):
+    """Implements a 'read' interface for the provided bytes,
+    much like io.BytesIO, but the 'endbyte' behaves like EOF.
+ When it is reached a short will be returned.""" + def __init__(self, initial_bytes, endbyte=b'\0'): + self.data = initial_bytes + self.index = 0 + self.len = len(self.data) + self.endbyte = endbyte + + @property + def emptied(self): + return self.index >= self.len + + def read(self, size=-1): + """Read size bytes but not past a null.""" + if size == 0 or self.index >= self.len: + return b'' + + rsize = size + if size < 0 or size + self.index > self.len: + rsize = self.len - self.index + + next_null = self.data.find(self.endbyte, self.index, rsize) + if next_null >= 0: + rsize = next_null - self.index + 1 + i = self.index + self.index += rsize + ret = self.data[i:i + rsize] + if len(ret) and ret[-1:] == self.endbyte: + ret = ret[:-1] + return ret + + +class TestJoyentMetadataClient(FilesystemMockingTestCase): + + invalid = b'invalid command\n' + failure = b'FAILURE\n' + v2_ok = b'V2_OK\n' + + def setUp(self): + super(TestJoyentMetadataClient, self).setUp() + + self.serial = mock.MagicMock(spec=serial.Serial) + self.request_id = 0xabcdef12 + self.metadata_value = 'value' + self.response_parts = { + 'command': 'SUCCESS', + 'crc': 'b5a9ff00', + 'length': SUCCESS_LEN + len(b64e(self.metadata_value)), + 'payload': b64e(self.metadata_value), + 'request_id': '{0:08x}'.format(self.request_id), + } + + def make_response(): + payloadstr = '' + if 'payload' in self.response_parts: + payloadstr = ' {0}'.format(self.response_parts['payload']) + return ('V2 {length} {crc} {request_id} ' + '{command}{payloadstr}\n'.format( + payloadstr=payloadstr, + **self.response_parts).encode('ascii')) + + self.metasource_data = None + + def read_response(length): + if not self.metasource_data: + self.metasource_data = make_response() + self.metasource_data_len = len(self.metasource_data) + resp = self.metasource_data[:length] + self.metasource_data = self.metasource_data[length:] + return resp + + self.serial.read.side_effect = read_response + self.patched_funcs.enter_context( + mock.patch('cloudinit.sources.DataSourceSmartOS.random.randint', + mock.Mock(return_value=self.request_id))) + + def _get_client(self): + return DataSourceSmartOS.JoyentMetadataClient( + fp=self.serial, smartos_type=DataSourceSmartOS.SMARTOS_ENV_KVM) + + def _get_serial_client(self): + self.serial.timeout = 1 + return DataSourceSmartOS.JoyentMetadataSerialClient(None, + fp=self.serial) + + def assertEndsWith(self, haystack, prefix): + self.assertTrue(haystack.endswith(prefix), + "{0} does not end with '{1}'".format( + repr(haystack), prefix)) + + def assertStartsWith(self, haystack, prefix): + self.assertTrue(haystack.startswith(prefix), + "{0} does not start with '{1}'".format( + repr(haystack), prefix)) + + def assertNoMoreSideEffects(self, obj): + self.assertRaises(StopIteration, obj) + + def test_get_metadata_writes_a_single_line(self): + client = self._get_client() + client.get('some_key') + self.assertEqual(1, self.serial.write.call_count) + written_line = self.serial.write.call_args[0][0] + self.assertEndsWith(written_line.decode('ascii'), + b'\n'.decode('ascii')) + self.assertEqual(1, written_line.count(b'\n')) + + def _get_written_line(self, key='some_key'): + client = self._get_client() + client.get(key) + return self.serial.write.call_args[0][0] + + def test_get_metadata_writes_bytes(self): + self.assertIsInstance(self._get_written_line(), bytes) + + def test_get_metadata_line_starts_with_v2(self): + foo = self._get_written_line() + self.assertStartsWith(foo.decode('ascii'), b'V2'.decode('ascii')) + + def 
test_get_metadata_uses_get_command(self): + parts = self._get_written_line().decode('ascii').strip().split(' ') + self.assertEqual('GET', parts[4]) + + def test_get_metadata_base64_encodes_argument(self): + key = 'my_key' + parts = self._get_written_line(key).decode('ascii').strip().split(' ') + self.assertEqual(b64e(key), parts[5]) + + def test_get_metadata_calculates_length_correctly(self): + parts = self._get_written_line().decode('ascii').strip().split(' ') + expected_length = len(' '.join(parts[3:])) + self.assertEqual(expected_length, int(parts[1])) + + def test_get_metadata_uses_appropriate_request_id(self): + parts = self._get_written_line().decode('ascii').strip().split(' ') + request_id = parts[3] + self.assertEqual(8, len(request_id)) + self.assertEqual(request_id, request_id.lower()) + + def test_get_metadata_uses_random_number_for_request_id(self): + line = self._get_written_line() + request_id = line.decode('ascii').strip().split(' ')[3] + self.assertEqual('{0:08x}'.format(self.request_id), request_id) + + def test_get_metadata_checksums_correctly(self): + parts = self._get_written_line().decode('ascii').strip().split(' ') + expected_checksum = '{0:08x}'.format( + crc32(' '.join(parts[3:]).encode('utf-8')) & 0xffffffff) + checksum = parts[2] + self.assertEqual(expected_checksum, checksum) + + def test_get_metadata_reads_a_line(self): + client = self._get_client() + client.get('some_key') + self.assertEqual(self.metasource_data_len, self.serial.read.call_count) + + def test_get_metadata_returns_valid_value(self): + client = self._get_client() + value = client.get('some_key') + self.assertEqual(self.metadata_value, value) + + def test_get_metadata_throws_exception_for_incorrect_length(self): + self.response_parts['length'] = 0 + client = self._get_client() + self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, + client.get, 'some_key') + + def test_get_metadata_throws_exception_for_incorrect_crc(self): + self.response_parts['crc'] = 'deadbeef' + client = self._get_client() + self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, + client.get, 'some_key') + + def test_get_metadata_throws_exception_for_request_id_mismatch(self): + self.response_parts['request_id'] = 'deadbeef' + client = self._get_client() + client._checksum = lambda _: self.response_parts['crc'] + self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, + client.get, 'some_key') + + def test_get_metadata_returns_None_if_value_not_found(self): + self.response_parts['payload'] = '' + self.response_parts['command'] = 'NOTFOUND' + self.response_parts['length'] = NOTFOUND_LEN + client = self._get_client() + client._checksum = lambda _: self.response_parts['crc'] + self.assertIsNone(client.get('some_key')) + + def test_negotiate(self): + client = self._get_client() + reader = ShortReader(self.v2_ok) + client.fp.read.side_effect = reader.read + client._negotiate() + self.assertTrue(reader.emptied) + + def test_negotiate_short_response(self): + client = self._get_client() + # chopped '\n' from v2_ok. 
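+        # A response that hits NUL before the trailing newline must surface
+        # as a timeout, not as a successful negotiation.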
+ reader = ShortReader(self.v2_ok[:-1] + b'\0') + client.fp.read.side_effect = reader.read + self.assertRaises(DataSourceSmartOS.JoyentMetadataTimeoutException, + client._negotiate) + self.assertTrue(reader.emptied) + + def test_negotiate_bad_response(self): + client = self._get_client() + reader = ShortReader(b'garbage\n' + self.v2_ok) + client.fp.read.side_effect = reader.read + self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, + client._negotiate) + self.assertEqual(self.v2_ok, client.fp.read()) + + def test_serial_open_transport(self): + client = self._get_serial_client() + reader = ShortReader(b'garbage\0' + self.invalid + self.v2_ok) + client.fp.read.side_effect = reader.read + client.open_transport() + self.assertTrue(reader.emptied) + + def test_flush_failure(self): + client = self._get_serial_client() + reader = ShortReader(b'garbage' + b'\0' + self.failure + + self.invalid + self.v2_ok) + client.fp.read.side_effect = reader.read + client.open_transport() + self.assertTrue(reader.emptied) + + def test_flush_many_timeouts(self): + client = self._get_serial_client() + reader = ShortReader(b'\0' * 100 + self.invalid + self.v2_ok) + client.fp.read.side_effect = reader.read + client.open_transport() + self.assertTrue(reader.emptied) + + def test_list_metadata_returns_list(self): + parts = ['foo', 'bar'] + value = b64e('\n'.join(parts)) + self.response_parts['payload'] = value + self.response_parts['crc'] = '40873553' + self.response_parts['length'] = SUCCESS_LEN + len(value) + client = self._get_client() + self.assertEqual(client.list(), parts) + + def test_list_metadata_returns_empty_list_if_no_customer_metadata(self): + del self.response_parts['payload'] + self.response_parts['length'] = SUCCESS_LEN - 1 + self.response_parts['crc'] = '14e563ba' + client = self._get_client() + self.assertEqual(client.list(), []) + + +class TestNetworkConversion(CiTestCase): + def test_convert_simple(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.102/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:f5:e4:f5'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'static', + 'address': '192.168.128.93/22'}], + 'mtu': 8500, 'mac_address': '90:b8:d0:a5:ff:cd'}]} + found = convert_net(SDC_NICS) + self.assertEqual(expected, found) + + def test_convert_simple_alt(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.51/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'static', + 'address': '10.210.1.217/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + found = convert_net(SDC_NICS_ALT) + self.assertEqual(expected, found) + + def test_convert_simple_dhcp(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.51/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'dhcp4'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + found = convert_net(SDC_NICS_DHCP) + self.assertEqual(expected, found) + + def test_convert_simple_multi_ip(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 
'address': '8.12.42.51/24'}, + {'type': 'static', + 'address': '8.12.42.52/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'static', + 'address': '10.210.1.217/24'}, + {'type': 'static', + 'address': '10.210.1.151/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + found = convert_net(SDC_NICS_MIP) + self.assertEqual(expected, found) + + def test_convert_with_dns(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.51/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'dhcp4'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}, + {'type': 'nameserver', + 'address': ['8.8.8.8', '8.8.8.1'], 'search': ["local"]}]} + found = convert_net( + network_data=SDC_NICS_DHCP, dns_servers=['8.8.8.8', '8.8.8.1'], + dns_domain="local") + self.assertEqual(expected, found) + + def test_convert_simple_multi_ipv6(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'address': + '2001:4800:78ff:1b:be76:4eff:fe06:96b3/64'}, + {'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.51/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'static', + 'address': '10.210.1.217/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + found = convert_net(SDC_NICS_MIP_IPV6) + self.assertEqual(expected, found) + + def test_convert_simple_both_ipv4_ipv6(self): + expected = { + 'version': 1, + 'config': [ + {'mac_address': '90:b8:d0:ae:64:51', 'mtu': 1500, + 'name': 'net0', 'type': 'physical', + 'subnets': [{'address': '2001::10/64', 'gateway': '2001::1', + 'type': 'static'}, + {'address': '8.12.42.51/24', + 'gateway': '8.12.42.1', + 'type': 'static'}, + {'address': '2001::11/64', 'type': 'static'}, + {'address': '8.12.42.52/32', 'type': 'static'}]}, + {'mac_address': '90:b8:d0:bd:4f:9c', 'mtu': 1500, + 'name': 'net1', 'type': 'physical', + 'subnets': [{'address': '10.210.1.217/24', + 'type': 'static'}]}]} + found = convert_net(SDC_NICS_IPV4_IPV6) + self.assertEqual(expected, found) + + def test_gateways_not_on_all_nics(self): + expected = { + 'version': 1, + 'config': [ + {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500, + 'name': 'net0', 'type': 'physical', + 'subnets': [{'address': '8.12.42.26/24', + 'gateway': '8.12.42.1', 'type': 'static'}]}, + {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500, + 'name': 'net1', 'type': 'physical', + 'subnets': [{'address': '10.210.1.27/24', + 'type': 'static'}]}]} + found = convert_net(SDC_NICS_SINGLE_GATEWAY) + self.assertEqual(expected, found) + + def test_routes_on_all_nics(self): + routes = [ + {'linklocal': False, 'dst': '3.0.0.0/8', 'gateway': '8.12.42.3'}, + {'linklocal': False, 'dst': '4.0.0.0/8', 'gateway': '10.210.1.4'}] + expected = { + 'version': 1, + 'config': [ + {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500, + 'name': 'net0', 'type': 'physical', + 'subnets': [{'address': '8.12.42.26/24', + 'gateway': '8.12.42.1', 'type': 'static', + 'routes': [{'network': '3.0.0.0/8', + 'gateway': '8.12.42.3'}, + {'network': '4.0.0.0/8', + 'gateway': '10.210.1.4'}]}]}, + {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500, + 'name': 'net1', 'type': 'physical', + 'subnets': [{'address': '10.210.1.27/24', 'type': 'static', + 'routes': [{'network': '3.0.0.0/8', + 
'gateway': '8.12.42.3'}, + {'network': '4.0.0.0/8', + 'gateway': '10.210.1.4'}]}]}]} + found = convert_net(SDC_NICS_SINGLE_GATEWAY, routes=routes) + self.maxDiff = None + self.assertEqual(expected, found) + + +@unittest.skipUnless(get_smartos_environ() == SMARTOS_ENV_KVM, + "Only supported on KVM and bhyve guests under SmartOS") +@unittest.skipUnless(os.access(SERIAL_DEVICE, os.W_OK), + "Requires write access to " + SERIAL_DEVICE) +@unittest.skipUnless(HAS_PYSERIAL is True, "pyserial not available") +class TestSerialConcurrency(CiTestCase): + """ + This class tests locking on an actual serial port, and as such can only + be run in a kvm or bhyve guest running on a SmartOS host. A test run on + a metadata socket will not be valid because a metadata socket ensures + there is only one session over a connection. In contrast, in the + absence of proper locking multiple processes opening the same serial + port can corrupt each others' exchanges with the metadata server. + + This takes on the order of 2 to 3 minutes to run. + """ + allowed_subp = ['mdata-get'] + + def setUp(self): + self.mdata_proc = multiprocessing.Process(target=self.start_mdata_loop) + self.mdata_proc.start() + super(TestSerialConcurrency, self).setUp() + + def tearDown(self): + # os.kill() rather than mdata_proc.terminate() to avoid console spam. + os.kill(self.mdata_proc.pid, signal.SIGKILL) + self.mdata_proc.join() + super(TestSerialConcurrency, self).tearDown() + + def start_mdata_loop(self): + """ + The mdata-get command is repeatedly run in a separate process so + that it may try to race with metadata operations performed in the + main test process. Use of mdata-get is better than two processes + using the protocol implementation in DataSourceSmartOS because we + are testing to be sure that cloud-init and mdata-get respect each + others locks. + """ + rcs = list(range(0, 256)) + while True: + subp(['mdata-get', 'sdc:routes'], rcs=rcs) + + def test_all_keys(self): + self.assertIsNotNone(self.mdata_proc.pid) + ds = DataSourceSmartOS + keys = [tup[0] for tup in ds.SMARTOS_ATTRIB_MAP.values()] + keys.extend(ds.SMARTOS_ATTRIB_JSON.values()) + + client = ds.jmc_client_factory(smartos_type=SMARTOS_ENV_KVM) + self.assertIsNotNone(client) + + # The behavior that we are testing for was observed mdata-get running + # 10 times at roughly the same time as cloud-init fetched each key + # once. cloud-init would regularly see failures before making it + # through all keys once. + for _ in range(0, 3): + for key in keys: + # We don't care about the return value, just that it doesn't + # thrown any exceptions. + client.get(key) + + self.assertIsNone(self.mdata_proc.exitcode) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_upcloud.py b/tests/unittests/sources/test_upcloud.py new file mode 100644 index 00000000..1d792066 --- /dev/null +++ b/tests/unittests/sources/test_upcloud.py @@ -0,0 +1,314 @@ +# Author: Antti Myyrä +# +# This file is part of cloud-init. See LICENSE file for license information. + +import json + +from cloudinit import helpers +from cloudinit import settings +from cloudinit import sources +from cloudinit.sources.DataSourceUpCloud import DataSourceUpCloud, \ + DataSourceUpCloudLocal + +from tests.unittests.helpers import mock, CiTestCase + +UC_METADATA = json.loads(""" +{ + "cloud_name": "upcloud", + "instance_id": "00322b68-0096-4042-9406-faad61922128", + "hostname": "test.example.com", + "platform": "servers", + "subplatform": "metadata (http://169.254.169.254)", + "public_keys": [ + "ssh-rsa AAAAB.... 
test1@example.com", + "ssh-rsa AAAAB.... test2@example.com" + ], + "region": "fi-hel2", + "network": { + "interfaces": [ + { + "index": 1, + "ip_addresses": [ + { + "address": "94.237.105.53", + "dhcp": true, + "dns": [ + "94.237.127.9", + "94.237.40.9" + ], + "family": "IPv4", + "floating": false, + "gateway": "94.237.104.1", + "network": "94.237.104.0/22" + }, + { + "address": "94.237.105.50", + "dhcp": false, + "dns": null, + "family": "IPv4", + "floating": true, + "gateway": "", + "network": "94.237.105.50/32" + } + ], + "mac": "3a:d6:ba:4a:36:e7", + "network_id": "031457f4-0f8c-483c-96f2-eccede02909c", + "type": "public" + }, + { + "index": 2, + "ip_addresses": [ + { + "address": "10.6.3.27", + "dhcp": true, + "dns": null, + "family": "IPv4", + "floating": false, + "gateway": "10.6.0.1", + "network": "10.6.0.0/22" + } + ], + "mac": "3a:d6:ba:4a:84:cc", + "network_id": "03d82553-5bea-4132-b29a-e1cf67ec2dd1", + "type": "utility" + }, + { + "index": 3, + "ip_addresses": [ + { + "address": "2a04:3545:1000:720:38d6:baff:fe4a:63e7", + "dhcp": true, + "dns": [ + "2a04:3540:53::1", + "2a04:3544:53::1" + ], + "family": "IPv6", + "floating": false, + "gateway": "2a04:3545:1000:720::1", + "network": "2a04:3545:1000:720::/64" + } + ], + "mac": "3a:d6:ba:4a:63:e7", + "network_id": "03000000-0000-4000-8046-000000000000", + "type": "public" + }, + { + "index": 4, + "ip_addresses": [ + { + "address": "172.30.1.10", + "dhcp": true, + "dns": null, + "family": "IPv4", + "floating": false, + "gateway": "172.30.1.1", + "network": "172.30.1.0/24" + } + ], + "mac": "3a:d6:ba:4a:8a:e1", + "network_id": "035a0a4a-7704-4de5-820d-189fc8132714", + "type": "private" + } + ], + "dns": [ + "94.237.127.9", + "94.237.40.9" + ] + }, + "storage": { + "disks": [ + { + "id": "014efb65-223b-4d44-8f0a-c29535b88dcf", + "serial": "014efb65223b4d448f0a", + "size": 10240, + "type": "disk", + "tier": "maxiops" + } + ] + }, + "tags": [], + "user_data": "", + "vendor_data": "" +} +""") + +UC_METADATA["user_data"] = b"""#cloud-config +runcmd: +- [touch, /root/cloud-init-worked ] +""" + +MD_URL = 'http://169.254.169.254/metadata/v1.json' + + +def _mock_dmi(): + return True, "00322b68-0096-4042-9406-faad61922128" + + +class TestUpCloudMetadata(CiTestCase): + """ + Test reading the meta-data + """ + def setUp(self): + super(TestUpCloudMetadata, self).setUp() + self.tmp = self.tmp_dir() + + def get_ds(self, get_sysinfo=_mock_dmi): + ds = DataSourceUpCloud( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + if get_sysinfo: + ds._get_sysinfo = get_sysinfo + return ds + + @mock.patch('cloudinit.sources.helpers.upcloud.read_sysinfo') + def test_returns_false_not_on_upcloud(self, m_read_sysinfo): + m_read_sysinfo.return_value = (False, None) + ds = self.get_ds(get_sysinfo=None) + self.assertEqual(False, ds.get_data()) + self.assertTrue(m_read_sysinfo.called) + + @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata') + def test_metadata(self, mock_readmd): + mock_readmd.return_value = UC_METADATA.copy() + + ds = self.get_ds() + ds.perform_dhcp_setup = False + + ret = ds.get_data() + self.assertTrue(ret) + + self.assertTrue(mock_readmd.called) + + self.assertEqual(UC_METADATA.get('user_data'), ds.get_userdata_raw()) + self.assertEqual(UC_METADATA.get('vendor_data'), + ds.get_vendordata_raw()) + self.assertEqual(UC_METADATA.get('region'), ds.availability_zone) + self.assertEqual(UC_METADATA.get('instance_id'), ds.get_instance_id()) + self.assertEqual(UC_METADATA.get('cloud_name'), ds.cloud_name) + + 
self.assertEqual(UC_METADATA.get('public_keys'), + ds.get_public_ssh_keys()) + self.assertIsInstance(ds.get_public_ssh_keys(), list) + + +class TestUpCloudNetworkSetup(CiTestCase): + """ + Test reading the meta-data on networked context + """ + + def setUp(self): + super(TestUpCloudNetworkSetup, self).setUp() + self.tmp = self.tmp_dir() + + def get_ds(self, get_sysinfo=_mock_dmi): + ds = DataSourceUpCloudLocal( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + if get_sysinfo: + ds._get_sysinfo = get_sysinfo + return ds + + @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata') + @mock.patch('cloudinit.net.find_fallback_nic') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + def test_network_configured_metadata(self, m_net, m_dhcp, + m_fallback_nic, mock_readmd): + mock_readmd.return_value = UC_METADATA.copy() + + m_fallback_nic.return_value = 'eth1' + m_dhcp.return_value = [{ + 'interface': 'eth1', 'fixed-address': '10.6.3.27', + 'routers': '10.6.0.1', 'subnet-mask': '22', + 'broadcast-address': '10.6.3.255'} + ] + + ds = self.get_ds() + + ret = ds.get_data() + self.assertTrue(ret) + + self.assertTrue(m_dhcp.called) + m_dhcp.assert_called_with('eth1', None) + + m_net.assert_called_once_with( + broadcast='10.6.3.255', interface='eth1', + ip='10.6.3.27', prefix_or_mask='22', + router='10.6.0.1', static_routes=None + ) + + self.assertTrue(mock_readmd.called) + + self.assertEqual(UC_METADATA.get('region'), ds.availability_zone) + self.assertEqual(UC_METADATA.get('instance_id'), ds.get_instance_id()) + self.assertEqual(UC_METADATA.get('cloud_name'), ds.cloud_name) + + @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata') + @mock.patch('cloudinit.net.get_interfaces_by_mac') + def test_network_configuration(self, m_get_by_mac, mock_readmd): + mock_readmd.return_value = UC_METADATA.copy() + + raw_ifaces = UC_METADATA.get('network').get('interfaces') + self.assertEqual(4, len(raw_ifaces)) + + m_get_by_mac.return_value = { + raw_ifaces[0].get('mac'): 'eth0', + raw_ifaces[1].get('mac'): 'eth1', + raw_ifaces[2].get('mac'): 'eth2', + raw_ifaces[3].get('mac'): 'eth3', + } + + ds = self.get_ds() + ds.perform_dhcp_setup = False + + ret = ds.get_data() + self.assertTrue(ret) + + self.assertTrue(mock_readmd.called) + + netcfg = ds.network_config + + self.assertEqual(1, netcfg.get('version')) + + config = netcfg.get('config') + self.assertIsInstance(config, list) + self.assertEqual(5, len(config)) + self.assertEqual('physical', config[3].get('type')) + + self.assertEqual(raw_ifaces[2].get('mac'), config[2] + .get('mac_address')) + self.assertEqual(1, len(config[2].get('subnets'))) + self.assertEqual('ipv6_dhcpv6-stateless', config[2].get('subnets')[0] + .get('type')) + + self.assertEqual(2, len(config[0].get('subnets'))) + self.assertEqual('static', config[0].get('subnets')[1].get('type')) + + dns = config[4] + self.assertEqual('nameserver', dns.get('type')) + self.assertEqual(2, len(dns.get('address'))) + self.assertEqual( + UC_METADATA.get('network').get('dns')[1], + dns.get('address')[1] + ) + + +class TestUpCloudDatasourceLoading(CiTestCase): + def test_get_datasource_list_returns_in_local(self): + deps = (sources.DEP_FILESYSTEM, ) + ds_list = sources.DataSourceUpCloud.get_datasource_list(deps) + self.assertEqual(ds_list, + [DataSourceUpCloudLocal]) + + def test_get_datasource_list_returns_in_normal(self): + deps = (sources.DEP_FILESYSTEM, sources.DEP_NETWORK) + ds_list = 
sources.DataSourceUpCloud.get_datasource_list(deps) + self.assertEqual(ds_list, + [DataSourceUpCloud]) + + def test_list_sources_finds_ds(self): + found = sources.list_sources( + ['UpCloud'], (sources.DEP_FILESYSTEM, sources.DEP_NETWORK), + ['cloudinit.sources']) + self.assertEqual([DataSourceUpCloud], + found) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_vmware.py b/tests/unittests/sources/test_vmware.py new file mode 100644 index 00000000..d34d7782 --- /dev/null +++ b/tests/unittests/sources/test_vmware.py @@ -0,0 +1,391 @@ +# Copyright (c) 2021 VMware, Inc. All Rights Reserved. +# +# Authors: Andrew Kutz +# +# This file is part of cloud-init. See LICENSE file for license information. + +import base64 +import gzip +import os + +import pytest + +from cloudinit import dmi, helpers, safeyaml +from cloudinit import settings +from cloudinit.sources import DataSourceVMware +from tests.unittests.helpers import ( + mock, + CiTestCase, + FilesystemMockingTestCase, + populate_dir, +) + + +PRODUCT_NAME_FILE_PATH = "/sys/class/dmi/id/product_name" +PRODUCT_NAME = "VMware7,1" +PRODUCT_UUID = "82343CED-E4C7-423B-8F6B-0D34D19067AB" +REROOT_FILES = { + DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, + PRODUCT_NAME_FILE_PATH: PRODUCT_NAME, +} + +VMW_MULTIPLE_KEYS = [ + "ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@vmw.com", + "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@vmw.com", +] +VMW_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@vmw.com" + +VMW_METADATA_YAML = """instance-id: cloud-vm +local-hostname: cloud-vm +network: + version: 2 + ethernets: + nics: + match: + name: ens* + dhcp4: yes +""" + +VMW_USERDATA_YAML = """## template: jinja +#cloud-config +users: +- default +""" + +VMW_VENDORDATA_YAML = """## template: jinja +#cloud-config +runcmd: +- echo "Hello, world." +""" + + +@pytest.yield_fixture(autouse=True) +def common_patches(): + with mock.patch('cloudinit.util.platform.platform', return_value='Linux'): + with mock.patch.multiple( + 'cloudinit.dmi', + is_container=mock.Mock(return_value=False), + is_FreeBSD=mock.Mock(return_value=False) + ): + yield + + +class TestDataSourceVMware(CiTestCase): + """ + Test common functionality that is not transport specific. + """ + + def setUp(self): + super(TestDataSourceVMware, self).setUp() + self.tmp = self.tmp_dir() + + def test_no_data_access_method(self): + ds = get_ds(self.tmp) + ds.vmware_rpctool = None + ret = ds.get_data() + self.assertFalse(ret) + + def test_get_host_info(self): + host_info = DataSourceVMware.get_host_info() + self.assertTrue(host_info) + self.assertTrue(host_info["hostname"]) + self.assertTrue(host_info["local-hostname"]) + self.assertTrue(host_info["local_hostname"]) + self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV4]) + + +class TestDataSourceVMwareEnvVars(FilesystemMockingTestCase): + """ + Test the envvar transport. 
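+    Setting the DataSourceVMware.VMX_GUESTINFO environment variable in
+    setUp() steers the datasource to the envvar access method instead of
+    vmware-rpctool.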
+ """ + + def setUp(self): + super(TestDataSourceVMwareEnvVars, self).setUp() + self.tmp = self.tmp_dir() + os.environ[DataSourceVMware.VMX_GUESTINFO] = "1" + self.create_system_files() + + def tearDown(self): + del os.environ[DataSourceVMware.VMX_GUESTINFO] + return super(TestDataSourceVMwareEnvVars, self).tearDown() + + def create_system_files(self): + rootd = self.tmp_dir() + populate_dir( + rootd, + { + DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, + }, + ) + self.assertTrue(self.reRoot(rootd)) + + def assert_get_data_ok(self, m_fn, m_fn_call_count=6): + ds = get_ds(self.tmp) + ds.vmware_rpctool = None + ret = ds.get_data() + self.assertTrue(ret) + self.assertEqual(m_fn_call_count, m_fn.call_count) + self.assertEqual( + ds.data_access_method, DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR + ) + return ds + + def assert_metadata(self, metadata, m_fn, m_fn_call_count=6): + ds = self.assert_get_data_ok(m_fn, m_fn_call_count) + assert_metadata(self, ds, metadata) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_subplatform(self, m_fn): + m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] + ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4) + self.assertEqual( + ds.subplatform, + "%s (%s)" + % ( + DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR, + DataSourceVMware.get_guestinfo_envvar_key_name("metadata"), + ), + ) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_only(self, m_fn): + m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_userdata_only(self, m_fn): + m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_vendordata_only(self, m_fn): + m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_base64(self, m_fn): + data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) + m_fn.side_effect = [data, "base64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_b64(self, m_fn): + data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) + m_fn.side_effect = [data, "b64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_gzip_base64(self, m_fn): + data = VMW_METADATA_YAML.encode("utf-8") + data = gzip.compress(data) + data = base64.b64encode(data) + m_fn.side_effect = [data, "gzip+base64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_gz_b64(self, m_fn): + data = VMW_METADATA_YAML.encode("utf-8") + data = gzip.compress(data) + data = base64.b64encode(data) + m_fn.side_effect = [data, "gz+b64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_metadata_single_ssh_key(self, m_fn): + metadata = 
DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) + metadata["public_keys"] = VMW_SINGLE_KEY + metadata_yaml = safeyaml.dumps(metadata) + m_fn.side_effect = [metadata_yaml, "", "", ""] + self.assert_metadata(metadata, m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_metadata_multiple_ssh_keys(self, m_fn): + metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) + metadata["public_keys"] = VMW_MULTIPLE_KEYS + metadata_yaml = safeyaml.dumps(metadata) + m_fn.side_effect = [metadata_yaml, "", "", ""] + self.assert_metadata(metadata, m_fn, m_fn_call_count=4) + + +class TestDataSourceVMwareGuestInfo(FilesystemMockingTestCase): + """ + Test the guestinfo transport on a VMware platform. + """ + + def setUp(self): + super(TestDataSourceVMwareGuestInfo, self).setUp() + self.tmp = self.tmp_dir() + self.create_system_files() + + def create_system_files(self): + rootd = self.tmp_dir() + populate_dir( + rootd, + { + DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, + PRODUCT_NAME_FILE_PATH: PRODUCT_NAME, + }, + ) + self.assertTrue(self.reRoot(rootd)) + + def assert_get_data_ok(self, m_fn, m_fn_call_count=6): + ds = get_ds(self.tmp) + ds.vmware_rpctool = "vmware-rpctool" + ret = ds.get_data() + self.assertTrue(ret) + self.assertEqual(m_fn_call_count, m_fn.call_count) + self.assertEqual( + ds.data_access_method, + DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO, + ) + return ds + + def assert_metadata(self, metadata, m_fn, m_fn_call_count=6): + ds = self.assert_get_data_ok(m_fn, m_fn_call_count) + assert_metadata(self, ds, metadata) + + def test_ds_valid_on_vmware_platform(self): + system_type = dmi.read_dmi_data("system-product-name") + self.assertEqual(system_type, PRODUCT_NAME) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_subplatform(self, m_fn): + m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] + ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4) + self.assertEqual( + ds.subplatform, + "%s (%s)" + % ( + DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO, + DataSourceVMware.get_guestinfo_key_name("metadata"), + ), + ) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_userdata_only(self, m_fn): + m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_vendordata_only(self, m_fn): + m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_metadata_single_ssh_key(self, m_fn): + metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) + metadata["public_keys"] = VMW_SINGLE_KEY + metadata_yaml = safeyaml.dumps(metadata) + m_fn.side_effect = [metadata_yaml, "", "", ""] + self.assert_metadata(metadata, m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_metadata_multiple_ssh_keys(self, m_fn): + metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) + metadata["public_keys"] = VMW_MULTIPLE_KEYS + metadata_yaml = safeyaml.dumps(metadata) + m_fn.side_effect = [metadata_yaml, "", "", ""] + self.assert_metadata(metadata, m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_metadata_base64(self, m_fn): + data = 
base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) + m_fn.side_effect = [data, "base64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_metadata_b64(self, m_fn): + data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) + m_fn.side_effect = [data, "b64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_metadata_gzip_base64(self, m_fn): + data = VMW_METADATA_YAML.encode("utf-8") + data = gzip.compress(data) + data = base64.b64encode(data) + m_fn.side_effect = [data, "gzip+base64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_metadata_gz_b64(self, m_fn): + data = VMW_METADATA_YAML.encode("utf-8") + data = gzip.compress(data) + data = base64.b64encode(data) + m_fn.side_effect = [data, "gz+b64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + +class TestDataSourceVMwareGuestInfo_InvalidPlatform(FilesystemMockingTestCase): + """ + Test the guestinfo transport on a non-VMware platform. + """ + + def setUp(self): + super(TestDataSourceVMwareGuestInfo_InvalidPlatform, self).setUp() + self.tmp = self.tmp_dir() + self.create_system_files() + + def create_system_files(self): + rootd = self.tmp_dir() + populate_dir( + rootd, + { + DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, + }, + ) + self.assertTrue(self.reRoot(rootd)) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_ds_invalid_on_non_vmware_platform(self, m_fn): + system_type = dmi.read_dmi_data("system-product-name") + self.assertEqual(system_type, None) + + m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] + ds = get_ds(self.tmp) + ds.vmware_rpctool = "vmware-rpctool" + ret = ds.get_data() + self.assertFalse(ret) + + +def assert_metadata(test_obj, ds, metadata): + test_obj.assertEqual(metadata.get("instance-id"), ds.get_instance_id()) + test_obj.assertEqual(metadata.get("local-hostname"), ds.get_hostname()) + + expected_public_keys = metadata.get("public_keys") + if not isinstance(expected_public_keys, list): + expected_public_keys = [expected_public_keys] + + test_obj.assertEqual(expected_public_keys, ds.get_public_ssh_keys()) + test_obj.assertIsInstance(ds.get_public_ssh_keys(), list) + + +def get_ds(temp_dir): + ds = DataSourceVMware.DataSourceVMware( + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": temp_dir}) + ) + ds.vmware_rpctool = "vmware-rpctool" + return ds + + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_vultr.py b/tests/unittests/sources/test_vultr.py new file mode 100644 index 00000000..40594b95 --- /dev/null +++ b/tests/unittests/sources/test_vultr.py @@ -0,0 +1,337 @@ +# Author: Eric Benner +# +# This file is part of cloud-init. See LICENSE file for license information. 
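+
+# The fixtures below mirror two shapes of the version 1 metadata payload:
+# VULTR_V1_1 describes a single public NIC, while VULTR_V1_2 adds a
+# private NIC and the newer instance-v2-id field.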
+ +# Vultr Metadata API: +# https://www.vultr.com/metadata/ + +import json + +from cloudinit import helpers +from cloudinit import settings +from cloudinit.sources import DataSourceVultr +from cloudinit.sources.helpers import vultr + +from tests.unittests.helpers import mock, CiTestCase + +# Vultr metadata test data +VULTR_V1_1 = { + 'bgp': { + 'ipv4': { + 'my-address': '', + 'my-asn': '', + 'peer-address': '', + 'peer-asn': '' + }, + 'ipv6': { + 'my-address': '', + 'my-asn': '', + 'peer-address': '', + 'peer-asn': '' + } + }, + 'hostname': 'CLOUDINIT_1', + 'instanceid': '42506325', + 'interfaces': [ + { + 'ipv4': { + 'additional': [ + ], + 'address': '108.61.89.242', + 'gateway': '108.61.89.1', + 'netmask': '255.255.255.0' + }, + 'ipv6': { + 'additional': [ + ], + 'address': '2001:19f0:5:56c2:5400:03ff:fe15:c465', + 'network': '2001:19f0:5:56c2::', + 'prefix': '64' + }, + 'mac': '56:00:03:15:c4:65', + 'network-type': 'public' + } + ], + 'public-keys': [ + 'ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key' + ], + 'region': { + 'regioncode': 'EWR' + }, + 'user-defined': [ + ], + 'startup-script': 'echo No configured startup script', + 'raid1-script': '', + 'user-data': [ + ], + 'vendor-data': [ + { + 'package_upgrade': 'true', + 'disable_root': 0, + 'ssh_pwauth': 1, + 'chpasswd': { + 'expire': False, + 'list': [ + 'root:$6$S2Smuj.../VqxmIR9Urw0jPZ88i4yvB/' + ] + }, + 'system_info': { + 'default_user': { + 'name': 'root' + } + } + } + ] +} + +VULTR_V1_2 = { + 'bgp': { + 'ipv4': { + 'my-address': '', + 'my-asn': '', + 'peer-address': '', + 'peer-asn': '' + }, + 'ipv6': { + 'my-address': '', + 'my-asn': '', + 'peer-address': '', + 'peer-asn': '' + } + }, + 'hostname': 'CLOUDINIT_2', + 'instance-v2-id': '29bea708-2e6e-480a-90ad-0e6b5d5ad62f', + 'instanceid': '42872224', + 'interfaces': [ + { + 'ipv4': { + 'additional': [ + ], + 'address':'45.76.7.171', + 'gateway':'45.76.6.1', + 'netmask':'255.255.254.0' + }, + 'ipv6':{ + 'additional': [ + ], + 'address':'2001:19f0:5:28a7:5400:03ff:fe1b:4eca', + 'network':'2001:19f0:5:28a7::', + 'prefix':'64' + }, + 'mac':'56:00:03:1b:4e:ca', + 'network-type':'public' + }, + { + 'ipv4': { + 'additional': [ + ], + 'address':'10.1.112.3', + 'gateway':'', + 'netmask':'255.255.240.0' + }, + 'ipv6':{ + 'additional': [ + ], + 'network':'', + 'prefix':'' + }, + 'mac':'5a:00:03:1b:4e:ca', + 'network-type':'private', + 'network-v2-id':'fbbe2b5b-b986-4396-87f5-7246660ccb64', + 'networkid':'net5e7155329d730' + } + ], + 'public-keys': [ + 'ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key' + ], + 'region': { + 'regioncode': 'EWR' + }, + 'user-defined': [ + ], + 'startup-script': 'echo No configured startup script', + 'user-data': [ + ], + + 'vendor-data': [ + { + 'package_upgrade': 'true', + 'disable_root': 0, + 'ssh_pwauth': 1, + 'chpasswd': { + 'expire': False, + 'list': [ + 'root:$6$SxXx...k2mJNIzZB5vMCDBlYT1' + ] + }, + 'system_info': { + 'default_user': { + 'name': 'root' + } + } + } + ] +} + +SSH_KEYS_1 = [ + "ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key" +] + +# Expected generated objects + +# Expected config +EXPECTED_VULTR_CONFIG = { + 'package_upgrade': 'true', + 'disable_root': 0, + 'ssh_pwauth': 1, + 'chpasswd': { + 'expire': False, + 'list': [ + 'root:$6$SxXx...k2mJNIzZB5vMCDBlYT1' + ] + }, + 'system_info': { + 'default_user': { + 'name': 'root' + } + } +} + +# Expected network config object from generator +EXPECTED_VULTR_NETWORK_1 = { + 'version': 1, + 'config': [ + { + 'type': 'nameserver', + 'address': ['108.61.10.10'] + }, + { + 'name': 
'eth0',
+            'type': 'physical',
+            'mac_address': '56:00:03:15:c4:65',
+            'accept-ra': 1,
+            'subnets': [
+                {'type': 'dhcp', 'control': 'auto'},
+                {'type': 'ipv6_slaac', 'control': 'auto'}
+            ],
+        }
+    ]
+}
+
+EXPECTED_VULTR_NETWORK_2 = {
+    'version': 1,
+    'config': [
+        {
+            'type': 'nameserver',
+            'address': ['108.61.10.10']
+        },
+        {
+            'name': 'eth0',
+            'type': 'physical',
+            'mac_address': '56:00:03:1b:4e:ca',
+            'accept-ra': 1,
+            'subnets': [
+                {'type': 'dhcp', 'control': 'auto'},
+                {'type': 'ipv6_slaac', 'control': 'auto'}
+            ],
+        },
+        {
+            'name': 'eth1',
+            'type': 'physical',
+            'mac_address': '5a:00:03:1b:4e:ca',
+            'subnets': [
+                {
+                    "type": "static",
+                    "control": "auto",
+                    "address": "10.1.112.3",
+                    "netmask": "255.255.240.0"
+                }
+            ],
+        }
+    ]
+}
+
+
+INTERFACE_MAP = {
+    '56:00:03:15:c4:65': 'eth0',
+    '56:00:03:1b:4e:ca': 'eth0',
+    '5a:00:03:1b:4e:ca': 'eth1'
+}
+
+
+class TestDataSourceVultr(CiTestCase):
+    def setUp(self):
+        super(TestDataSourceVultr, self).setUp()
+
+        # Stored as a dict to make it easier to maintain
+        raw1 = json.dumps(VULTR_V1_1['vendor-data'][0])
+        raw2 = json.dumps(VULTR_V1_2['vendor-data'][0])
+
+        # Make expected format
+        VULTR_V1_1['vendor-data'] = [raw1]
+        VULTR_V1_2['vendor-data'] = [raw2]
+
+        self.tmp = self.tmp_dir()
+
+    # Test the datasource itself
+    @mock.patch('cloudinit.net.get_interfaces_by_mac')
+    @mock.patch('cloudinit.sources.helpers.vultr.is_vultr')
+    @mock.patch('cloudinit.sources.helpers.vultr.get_metadata')
+    def test_datasource(self,
+                        mock_getmeta,
+                        mock_isvultr,
+                        mock_netmap):
+        mock_getmeta.return_value = VULTR_V1_2
+        mock_isvultr.return_value = True
+        mock_netmap.return_value = INTERFACE_MAP
+
+        source = DataSourceVultr.DataSourceVultr(
+            settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+
+        # Test that data is fetched successfully
+        self.assertEqual(True, source._get_data())
+
+        # Test instance id
+        self.assertEqual("42872224", source.metadata['instanceid'])
+
+        # Test hostname
+        self.assertEqual("CLOUDINIT_2", source.metadata['local-hostname'])
+
+        # Test ssh keys
+        self.assertEqual(SSH_KEYS_1, source.metadata['public-keys'])
+
+        # Test vendor data generation
+        orig_val = self.maxDiff
+        self.maxDiff = None
+
+        vendordata = source.vendordata_raw
+
+        # Test vendor config
+        self.assertEqual(
+            EXPECTED_VULTR_CONFIG,
+            json.loads(vendordata[0].replace("#cloud-config", "")))
+
+        self.maxDiff = orig_val
+
+        # Test network config generation
+        self.assertEqual(EXPECTED_VULTR_NETWORK_2, source.network_config)
+
+    # Test network config generation
+    @mock.patch('cloudinit.net.get_interfaces_by_mac')
+    def test_network_config(self, mock_netmap):
+        mock_netmap.return_value = INTERFACE_MAP
+        interf = VULTR_V1_1['interfaces']
+
+        self.assertEqual(EXPECTED_VULTR_NETWORK_1,
+                         vultr.generate_network_config(interf))
+
+    # Test Private Networking config generation
+    @mock.patch('cloudinit.net.get_interfaces_by_mac')
+    def test_private_network_config(self, mock_netmap):
+        mock_netmap.return_value = INTERFACE_MAP
+        interf = VULTR_V1_2['interfaces']
+
+        self.assertEqual(EXPECTED_VULTR_NETWORK_2,
+                         vultr.generate_network_config(interf))
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/vmware/__init__.py b/tests/unittests/sources/vmware/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/unittests/sources/vmware/test_custom_script.py b/tests/unittests/sources/vmware/test_custom_script.py
new file mode 100644
index 00000000..fcbb9cd5
--- /dev/null
+++ b/tests/unittests/sources/vmware/test_custom_script.py
@@ -0,0 +1,109 @@
+# Copyright (C) 
2015 Canonical Ltd.
+# Copyright (C) 2017-2019 VMware INC.
+#
+# Author: Maitreyee Saikia
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+import stat
+from cloudinit import util
+from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
+    CustomScriptConstant,
+    CustomScriptNotFound,
+    PreCustomScript,
+    PostCustomScript,
+)
+from tests.unittests.helpers import CiTestCase, mock
+
+
+class TestVmwareCustomScript(CiTestCase):
+    def setUp(self):
+        self.tmpDir = self.tmp_dir()
+        # Mock the tmpDir as the root dir in VM.
+        self.execDir = os.path.join(self.tmpDir, ".customization")
+        self.execScript = os.path.join(self.execDir,
+                                       ".customize.sh")
+
+    def test_prepare_custom_script(self):
+        """
+        This test is designed to verify the behavior based on the presence
+        of the custom script. It is mainly needed for the scenario where a
+        custom script is expected but was not properly copied; a
+        "CustomScriptNotFound" exception is raised in such cases.
+        """
+        # Custom script does not exist.
+        preCust = PreCustomScript("random-vmw-test", self.tmpDir)
+        self.assertEqual("random-vmw-test", preCust.scriptname)
+        self.assertEqual(self.tmpDir, preCust.directory)
+        self.assertEqual(self.tmp_path("random-vmw-test", self.tmpDir),
+                         preCust.scriptpath)
+        with self.assertRaises(CustomScriptNotFound):
+            preCust.prepare_script()
+
+        # Custom script exists.
+        custScript = self.tmp_path("test-cust", self.tmpDir)
+        util.write_file(custScript, "test-CR-strip\r\r")
+        with mock.patch.object(CustomScriptConstant,
+                               "CUSTOM_TMP_DIR",
+                               self.execDir):
+            with mock.patch.object(CustomScriptConstant,
+                                   "CUSTOM_SCRIPT",
+                                   self.execScript):
+                postCust = PostCustomScript("test-cust",
+                                            self.tmpDir,
+                                            self.tmpDir)
+                self.assertEqual("test-cust", postCust.scriptname)
+                self.assertEqual(self.tmpDir, postCust.directory)
+                self.assertEqual(custScript, postCust.scriptpath)
+                postCust.prepare_script()
+
+                # Custom script is copied with exec privilege
+                self.assertTrue(os.path.exists(self.execScript))
+                st = os.stat(self.execScript)
+                self.assertTrue(st.st_mode & stat.S_IEXEC)
+                with open(self.execScript, "r") as f:
+                    content = f.read()
+                self.assertEqual(content, "test-CR-strip")
+                # Check that all carriage returns are stripped from
+                # the script.
+                self.assertFalse("\r" in content)
+
+    def test_execute_post_cust(self):
+        """
+        This test is designed to verify the behavior after executing post
+        customization.
+        """
+        # Prepare the customize package
+        postCustRun = self.tmp_path("post-customize-guest.sh", self.tmpDir)
+        util.write_file(postCustRun, "This is the script to run post cust")
+        userScript = self.tmp_path("test-cust", self.tmpDir)
+        util.write_file(userScript, "This is the post cust script")
+
+        # Mock the cc_scripts_per_instance dir and marker file.
+        # Create another tmp dir for cc_scripts_per_instance.
+        ccScriptDir = self.tmp_dir()
+        ccScript = os.path.join(ccScriptDir, "post-customize-guest.sh")
+        markerFile = os.path.join(self.tmpDir, ".markerFile")
+        with mock.patch.object(CustomScriptConstant,
+                               "CUSTOM_TMP_DIR",
+                               self.execDir):
+            with mock.patch.object(CustomScriptConstant,
+                                   "CUSTOM_SCRIPT",
+                                   self.execScript):
+                with mock.patch.object(CustomScriptConstant,
+                                       "POST_CUSTOM_PENDING_MARKER",
+                                       markerFile):
+                    postCust = PostCustomScript("test-cust",
+                                                self.tmpDir,
+                                                ccScriptDir)
+                    postCust.execute()
+                    # Check cc_scripts_per_instance and marker file
+                    # are created.
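+                    # execute() copies the packaged post-customize script
+                    # into the per-instance scripts dir and leaves a
+                    # pending marker behind; both are asserted below.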
+ self.assertTrue(os.path.exists(ccScript)) + with open(ccScript, "r") as f: + content = f.read() + self.assertEqual(content, + "This is the script to run post cust") + self.assertTrue(os.path.exists(markerFile)) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/vmware/test_guestcust_util.py b/tests/unittests/sources/vmware/test_guestcust_util.py new file mode 100644 index 00000000..9114f0b9 --- /dev/null +++ b/tests/unittests/sources/vmware/test_guestcust_util.py @@ -0,0 +1,98 @@ +# Copyright (C) 2019 Canonical Ltd. +# Copyright (C) 2019 VMware INC. +# +# Author: Xiaofeng Wang +# +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import subp +from cloudinit.sources.helpers.vmware.imc.config import Config +from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile +from cloudinit.sources.helpers.vmware.imc.guestcust_util import ( + get_tools_config, + set_gc_status, +) +from tests.unittests.helpers import CiTestCase, mock + + +class TestGuestCustUtil(CiTestCase): + def test_get_tools_config_not_installed(self): + """ + This test is designed to verify the behavior if vmware-toolbox-cmd + is not installed. + """ + with mock.patch.object(subp, 'which', return_value=None): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), 'defaultVal') + + def test_get_tools_config_internal_exception(self): + """ + This test is designed to verify the behavior if internal exception + is raised. + """ + with mock.patch.object(subp, 'which', return_value='/dummy/path'): + with mock.patch.object(subp, 'subp', + return_value=('key=value', b''), + side_effect=subp.ProcessExecutionError( + "subp failed", exit_code=99)): + # verify return value is 'defaultVal', not 'value'. + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'defaultVal') + + def test_get_tools_config_normal(self): + """ + This test is designed to verify the value could be parsed from + key = value of the given [section] + """ + with mock.patch.object(subp, 'which', return_value='/dummy/path'): + # value is not blank + with mock.patch.object(subp, 'subp', + return_value=('key = value ', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'value') + # value is blank + with mock.patch.object(subp, 'subp', + return_value=('key = ', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + '') + # value contains = + with mock.patch.object(subp, 'subp', + return_value=('key=Bar=Wark', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'Bar=Wark') + + # value contains specific characters + with mock.patch.object(subp, 'subp', + return_value=('[a] b.c_d=e-f', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'e-f') + + def test_set_gc_status(self): + """ + This test is designed to verify the behavior of set_gc_status + """ + # config is None, return None + self.assertEqual(set_gc_status(None, 'Successful'), None) + + # post gc status is NO, return None + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertEqual(set_gc_status(conf, 'Successful'), None) + + # post gc status is YES, subp is called to execute command + cf._insertKey("MISC|POST-GC-STATUS", "YES") + conf = Config(cf) + with mock.patch.object(subp, 'subp', + return_value=('ok', b'')) as mockobj: + self.assertEqual( + set_gc_status(conf, 'Successful'), ('ok', b'')) + mockobj.assert_called_once_with( + ['vmware-rpctool', 'info-set 
guestinfo.gc.status Successful'], + rcs=[0]) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/vmware/test_vmware_config_file.py b/tests/unittests/sources/vmware/test_vmware_config_file.py new file mode 100644 index 00000000..54de113e --- /dev/null +++ b/tests/unittests/sources/vmware/test_vmware_config_file.py @@ -0,0 +1,545 @@ +# Copyright (C) 2015 Canonical Ltd. +# Copyright (C) 2016 VMware INC. +# +# Author: Sankar Tanguturi +# Pengpeng Sun +# +# This file is part of cloud-init. See LICENSE file for license information. + +import logging +import os +import sys +import tempfile +import textwrap + +from cloudinit.sources.DataSourceOVF import get_network_config_from_conf +from cloudinit.sources.DataSourceOVF import read_vmware_imc +from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProtoEnum +from cloudinit.sources.helpers.vmware.imc.config import Config +from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile +from cloudinit.sources.helpers.vmware.imc.config_nic import gen_subnet +from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator +from tests.unittests.helpers import CiTestCase + +logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) +logger = logging.getLogger(__name__) + + +class TestVmwareConfigFile(CiTestCase): + + def test_utility_methods(self): + """Tests basic utility methods of ConfigFile class""" + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + + cf.clear() + + self.assertEqual(0, len(cf), "clear size") + + cf._insertKey(" PASSWORD|-PASS ", " foo ") + cf._insertKey("BAR", " ") + + self.assertEqual(2, len(cf), "insert size") + self.assertEqual('foo', cf["PASSWORD|-PASS"], "password") + self.assertTrue("PASSWORD|-PASS" in cf, "hasPassword") + self.assertFalse(cf.should_keep_current_value("PASSWORD|-PASS"), + "keepPassword") + self.assertFalse(cf.should_remove_current_value("PASSWORD|-PASS"), + "removePassword") + self.assertFalse("FOO" in cf, "hasFoo") + self.assertTrue(cf.should_keep_current_value("FOO"), "keepFoo") + self.assertFalse(cf.should_remove_current_value("FOO"), "removeFoo") + self.assertTrue("BAR" in cf, "hasBar") + self.assertFalse(cf.should_keep_current_value("BAR"), "keepBar") + self.assertTrue(cf.should_remove_current_value("BAR"), "removeBar") + + def test_datasource_instance_id(self): + """Tests instance id for the DatasourceOVF""" + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + + instance_id_prefix = 'iid-vmware-' + + conf = Config(cf) + + (md1, _, _) = read_vmware_imc(conf) + self.assertIn(instance_id_prefix, md1["instance-id"]) + self.assertEqual(md1["instance-id"], 'iid-vmware-imc') + + (md2, _, _) = read_vmware_imc(conf) + self.assertIn(instance_id_prefix, md2["instance-id"]) + self.assertEqual(md2["instance-id"], 'iid-vmware-imc') + + self.assertEqual(md2["instance-id"], md1["instance-id"]) + + def test_configfile_static_2nics(self): + """Tests Config class for a configuration with two static NICs.""" + cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg") + + conf = Config(cf) + + self.assertEqual('myhost1', conf.host_name, "hostName") + self.assertEqual('Africa/Abidjan', conf.timezone, "tz") + self.assertTrue(conf.utc, "utc") + + self.assertEqual(['10.20.145.1', '10.20.145.2'], + conf.name_servers, + "dns") + self.assertEqual(['eng.vmware.com', 'proxy.vmware.com'], + conf.dns_suffixes, + "suffixes") + + nics = conf.nics + ipv40 = nics[0].staticIpv4 + + self.assertEqual(2, len(nics), "nics") + self.assertEqual('NIC1', nics[0].name, "nic0") + 
self.assertEqual('00:50:56:a6:8c:08', nics[0].mac, "mac0") + self.assertEqual(BootProtoEnum.STATIC, nics[0].bootProto, "bootproto0") + self.assertEqual('10.20.87.154', ipv40[0].ip, "ipv4Addr0") + self.assertEqual('255.255.252.0', ipv40[0].netmask, "ipv4Mask0") + self.assertEqual(2, len(ipv40[0].gateways), "ipv4Gw0") + self.assertEqual('10.20.87.253', ipv40[0].gateways[0], "ipv4Gw0_0") + self.assertEqual('10.20.87.105', ipv40[0].gateways[1], "ipv4Gw0_1") + + self.assertEqual(1, len(nics[0].staticIpv6), "ipv6Cnt0") + self.assertEqual('fc00:10:20:87::154', + nics[0].staticIpv6[0].ip, + "ipv6Addr0") + + self.assertEqual('NIC2', nics[1].name, "nic1") + self.assertTrue(not nics[1].staticIpv6, "ipv61 dhcp") + + def test_config_file_dhcp_2nics(self): + """Tests Config class for a configuration with two DHCP NICs.""" + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + + conf = Config(cf) + nics = conf.nics + self.assertEqual(2, len(nics), "nics") + self.assertEqual('NIC1', nics[0].name, "nic0") + self.assertEqual('00:50:56:a6:8c:08', nics[0].mac, "mac0") + self.assertEqual(BootProtoEnum.DHCP, nics[0].bootProto, "bootproto0") + + def test_config_password(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + + cf._insertKey("PASSWORD|-PASS", "test-password") + cf._insertKey("PASSWORD|RESET", "no") + + conf = Config(cf) + self.assertEqual('test-password', conf.admin_password, "password") + self.assertFalse(conf.reset_password, "do not reset password") + + def test_config_reset_passwd(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + + cf._insertKey("PASSWORD|-PASS", "test-password") + cf._insertKey("PASSWORD|RESET", "random") + + conf = Config(cf) + with self.assertRaises(ValueError): + pw = conf.reset_password + self.assertIsNone(pw) + + cf.clear() + cf._insertKey("PASSWORD|RESET", "yes") + self.assertEqual(1, len(cf), "insert size") + + conf = Config(cf) + self.assertTrue(conf.reset_password, "reset password") + + def test_get_config_nameservers(self): + """Tests DNS and nameserver settings in a configuration.""" + cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg") + + config = Config(cf) + + network_config = get_network_config_from_conf(config, False) + + self.assertEqual(1, network_config.get('version')) + + config_types = network_config.get('config') + name_servers = None + dns_suffixes = None + + for type in config_types: + if type.get('type') == 'nameserver': + name_servers = type.get('address') + dns_suffixes = type.get('search') + break + + self.assertEqual(['10.20.145.1', '10.20.145.2'], + name_servers, + "dns") + self.assertEqual(['eng.vmware.com', 'proxy.vmware.com'], + dns_suffixes, + "suffixes") + + def test_gen_subnet(self): + """Tests if gen_subnet properly calculates network subnet from + IPv4 address and netmask""" + ip_subnet_list = [['10.20.87.253', '255.255.252.0', '10.20.84.0'], + ['10.20.92.105', '255.255.252.0', '10.20.92.0'], + ['192.168.0.10', '255.255.0.0', '192.168.0.0']] + for entry in ip_subnet_list: + self.assertEqual(entry[2], gen_subnet(entry[0], entry[1]), + "Subnet for a specified ip and netmask") + + def test_get_config_dns_suffixes(self): + """Tests if get_network_config_from_conf properly + generates nameservers and dns settings from a + specified configuration""" + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + + config = Config(cf) + + network_config = get_network_config_from_conf(config, False) + + self.assertEqual(1, network_config.get('version')) + + config_types = network_config.get('config') + 
name_servers = None
+        dns_suffixes = None
+
+        for type in config_types:
+            if type.get('type') == 'nameserver':
+                name_servers = type.get('address')
+                dns_suffixes = type.get('search')
+                break
+
+        self.assertEqual([],
+                         name_servers,
+                         "dns")
+        self.assertEqual(['eng.vmware.com'],
+                         dns_suffixes,
+                         "suffixes")
+
+    def test_get_nics_list_dhcp(self):
+        """Tests if NicConfigurator properly calculates network subnets
+        for a configuration with a list of DHCP NICs"""
+        cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+
+        config = Config(cf)
+
+        nicConfigurator = NicConfigurator(config.nics, False)
+        nics_cfg_list = nicConfigurator.generate()
+
+        self.assertEqual(2, len(nics_cfg_list), "number of config elements")
+
+        nic1 = {'name': 'NIC1'}
+        nic2 = {'name': 'NIC2'}
+        for cfg in nics_cfg_list:
+            if cfg.get('name') == nic1.get('name'):
+                nic1.update(cfg)
+            elif cfg.get('name') == nic2.get('name'):
+                nic2.update(cfg)
+
+        self.assertEqual('physical', nic1.get('type'), 'type of NIC1')
+        self.assertEqual('NIC1', nic1.get('name'), 'name of NIC1')
+        self.assertEqual('00:50:56:a6:8c:08', nic1.get('mac_address'),
+                         'mac address of NIC1')
+        subnets = nic1.get('subnets')
+        self.assertEqual(1, len(subnets), 'number of subnets for NIC1')
+        subnet = subnets[0]
+        self.assertEqual('dhcp', subnet.get('type'), 'DHCP type for NIC1')
+        self.assertEqual('auto', subnet.get('control'), 'NIC1 Control type')
+
+        self.assertEqual('physical', nic2.get('type'), 'type of NIC2')
+        self.assertEqual('NIC2', nic2.get('name'), 'name of NIC2')
+        self.assertEqual('00:50:56:a6:5a:de', nic2.get('mac_address'),
+                         'mac address of NIC2')
+        subnets = nic2.get('subnets')
+        self.assertEqual(1, len(subnets), 'number of subnets for NIC2')
+        subnet = subnets[0]
+        self.assertEqual('dhcp', subnet.get('type'), 'DHCP type for NIC2')
+        self.assertEqual('auto', subnet.get('control'), 'NIC2 Control type')
+
+    def test_get_nics_list_static(self):
+        """Tests if NicConfigurator properly calculates network subnets
+        for a configuration with 2 static NICs"""
+        cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg")
+
+        config = Config(cf)
+
+        nicConfigurator = NicConfigurator(config.nics, False)
+        nics_cfg_list = nicConfigurator.generate()
+
+        self.assertEqual(2, len(nics_cfg_list), "number of elements")
+
+        nic1 = {'name': 'NIC1'}
+        nic2 = {'name': 'NIC2'}
+        route_list = []
+        for cfg in nics_cfg_list:
+            cfg_type = cfg.get('type')
+            if cfg_type == 'physical':
+                if cfg.get('name') == nic1.get('name'):
+                    nic1.update(cfg)
+                elif cfg.get('name') == nic2.get('name'):
+                    nic2.update(cfg)
+
+        self.assertEqual('physical', nic1.get('type'), 'type of NIC1')
+        self.assertEqual('NIC1', nic1.get('name'), 'name of NIC1')
+        self.assertEqual('00:50:56:a6:8c:08', nic1.get('mac_address'),
+                         'mac address of NIC1')
+
+        subnets = nic1.get('subnets')
+        self.assertEqual(2, len(subnets), 'Number of subnets')
+
+        static_subnet = []
+        static6_subnet = []
+
+        for subnet in subnets:
+            subnet_type = subnet.get('type')
+            if subnet_type == 'static':
+                static_subnet.append(subnet)
+            elif subnet_type == 'static6':
+                static6_subnet.append(subnet)
+            else:
+                self.assertEqual(True, False, 'Unknown type')
+            # Collect any routes attached to this subnet.
+            if 'routes' in subnet:
+                for route in subnet.get('routes'):
+                    route_list.append(route)
+
+        self.assertEqual(1, len(static_subnet), 'Number of static subnet')
+        self.assertEqual(1, len(static6_subnet), 'Number of static6 subnet')
+
+        subnet = static_subnet[0]
+        self.assertEqual('10.20.87.154', subnet.get('address'),
+                         'IPv4 address of static subnet')
+        
self.assertEqual('255.255.252.0', subnet.get('netmask'), + 'NetMask of static subnet') + self.assertEqual('auto', subnet.get('control'), + 'control for static subnet') + + subnet = static6_subnet[0] + self.assertEqual('fc00:10:20:87::154', subnet.get('address'), + 'IPv6 address of static subnet') + self.assertEqual('64', subnet.get('netmask'), + 'NetMask of static6 subnet') + + route_set = set(['10.20.87.253', '10.20.87.105', '192.168.0.10']) + for route in route_list: + self.assertEqual(10000, route.get('metric'), 'metric of route') + gateway = route.get('gateway') + if gateway in route_set: + route_set.discard(gateway) + else: + self.assertEqual(True, False, 'invalid gateway %s' % (gateway)) + + self.assertEqual('physical', nic2.get('type'), 'type of NIC2') + self.assertEqual('NIC2', nic2.get('name'), 'name of NIC2') + self.assertEqual('00:50:56:a6:ef:7d', nic2.get('mac_address'), + 'mac address of NIC2') + + subnets = nic2.get('subnets') + self.assertEqual(1, len(subnets), 'Number of subnets for NIC2') + + subnet = subnets[0] + self.assertEqual('static', subnet.get('type'), 'Subnet type') + self.assertEqual('192.168.6.102', subnet.get('address'), + 'Subnet address') + self.assertEqual('255.255.0.0', subnet.get('netmask'), + 'Subnet netmask') + + def test_custom_script(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertIsNone(conf.custom_script_name) + cf._insertKey("CUSTOM-SCRIPT|SCRIPT-NAME", "test-script") + conf = Config(cf) + self.assertEqual("test-script", conf.custom_script_name) + + def test_post_gc_status(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertFalse(conf.post_gc_status) + cf._insertKey("MISC|POST-GC-STATUS", "YES") + conf = Config(cf) + self.assertTrue(conf.post_gc_status) + + def test_no_default_run_post_script(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertFalse(conf.default_run_post_script) + cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "NO") + conf = Config(cf) + self.assertFalse(conf.default_run_post_script) + + def test_yes_default_run_post_script(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "yes") + conf = Config(cf) + self.assertTrue(conf.default_run_post_script) + + +class TestVmwareNetConfig(CiTestCase): + """Test conversion of vmware config to cloud-init config.""" + + maxDiff = None + + def _get_NicConfigurator(self, text): + fp = None + try: + with tempfile.NamedTemporaryFile(mode="w", dir=self.tmp_dir(), + delete=False) as fp: + fp.write(text) + fp.close() + cfg = Config(ConfigFile(fp.name)) + return NicConfigurator(cfg.nics, use_system_devices=False) + finally: + if fp: + os.unlink(fp.name) + + def test_non_primary_nic_without_gateway(self): + """A non primary nic set is not required to have a gateway.""" + config = textwrap.dedent("""\ + [NETWORK] + NETWORKING = yes + BOOTPROTO = dhcp + HOSTNAME = myhost1 + DOMAINNAME = eng.vmware.com + + [NIC-CONFIG] + NICS = NIC1 + + [NIC1] + MACADDR = 00:50:56:a6:8c:08 + ONBOOT = yes + IPv4_MODE = BACKWARDS_COMPATIBLE + BOOTPROTO = static + IPADDR = 10.20.87.154 + NETMASK = 255.255.252.0 + """) + nc = self._get_NicConfigurator(config) + self.assertEqual( + [{'type': 'physical', 'name': 'NIC1', + 'mac_address': '00:50:56:a6:8c:08', + 'subnets': [ + {'control': 'auto', 'type': 'static', + 'address': '10.20.87.154', 'netmask': '255.255.252.0'}]}], + nc.generate()) + + def 
test_non_primary_nic_with_gateway(self): + """A non primary nic set can have a gateway.""" + config = textwrap.dedent("""\ + [NETWORK] + NETWORKING = yes + BOOTPROTO = dhcp + HOSTNAME = myhost1 + DOMAINNAME = eng.vmware.com + + [NIC-CONFIG] + NICS = NIC1 + + [NIC1] + MACADDR = 00:50:56:a6:8c:08 + ONBOOT = yes + IPv4_MODE = BACKWARDS_COMPATIBLE + BOOTPROTO = static + IPADDR = 10.20.87.154 + NETMASK = 255.255.252.0 + GATEWAY = 10.20.87.253 + """) + nc = self._get_NicConfigurator(config) + self.assertEqual( + [{'type': 'physical', 'name': 'NIC1', + 'mac_address': '00:50:56:a6:8c:08', + 'subnets': [ + {'control': 'auto', 'type': 'static', + 'address': '10.20.87.154', 'netmask': '255.255.252.0', + 'routes': + [{'type': 'route', 'destination': '10.20.84.0/22', + 'gateway': '10.20.87.253', 'metric': 10000}]}]}], + nc.generate()) + + def test_cust_non_primary_nic_with_gateway_(self): + """A customer non primary nic set can have a gateway.""" + config = textwrap.dedent("""\ + [NETWORK] + NETWORKING = yes + BOOTPROTO = dhcp + HOSTNAME = static-debug-vm + DOMAINNAME = cluster.local + + [NIC-CONFIG] + NICS = NIC1 + + [NIC1] + MACADDR = 00:50:56:ac:d1:8a + ONBOOT = yes + IPv4_MODE = BACKWARDS_COMPATIBLE + BOOTPROTO = static + IPADDR = 100.115.223.75 + NETMASK = 255.255.255.0 + GATEWAY = 100.115.223.254 + + + [DNS] + DNSFROMDHCP=no + + NAMESERVER|1 = 8.8.8.8 + + [DATETIME] + UTC = yes + """) + nc = self._get_NicConfigurator(config) + self.assertEqual( + [{'type': 'physical', 'name': 'NIC1', + 'mac_address': '00:50:56:ac:d1:8a', + 'subnets': [ + {'control': 'auto', 'type': 'static', + 'address': '100.115.223.75', 'netmask': '255.255.255.0', + 'routes': + [{'type': 'route', 'destination': '100.115.223.0/24', + 'gateway': '100.115.223.254', 'metric': 10000}]}]}], + nc.generate()) + + def test_a_primary_nic_with_gateway(self): + """A primary nic set can have a gateway.""" + config = textwrap.dedent("""\ + [NETWORK] + NETWORKING = yes + BOOTPROTO = dhcp + HOSTNAME = myhost1 + DOMAINNAME = eng.vmware.com + + [NIC-CONFIG] + NICS = NIC1 + + [NIC1] + MACADDR = 00:50:56:a6:8c:08 + ONBOOT = yes + IPv4_MODE = BACKWARDS_COMPATIBLE + BOOTPROTO = static + IPADDR = 10.20.87.154 + NETMASK = 255.255.252.0 + PRIMARY = true + GATEWAY = 10.20.87.253 + """) + nc = self._get_NicConfigurator(config) + self.assertEqual( + [{'type': 'physical', 'name': 'NIC1', + 'mac_address': '00:50:56:a6:8c:08', + 'subnets': [ + {'control': 'auto', 'type': 'static', + 'address': '10.20.87.154', 'netmask': '255.255.252.0', + 'gateway': '10.20.87.253'}]}], + nc.generate()) + + def test_meta_data(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertIsNone(conf.meta_data_name) + cf._insertKey("CLOUDINIT|METADATA", "test-metadata") + conf = Config(cf) + self.assertEqual("test-metadata", conf.meta_data_name) + + def test_user_data(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertIsNone(conf.user_data_name) + cf._insertKey("CLOUDINIT|USERDATA", "test-userdata") + conf = Config(cf) + self.assertEqual("test-userdata", conf.user_data_name) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index 739bbebf..4382a078 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -12,7 +12,7 @@ from cloudinit import settings from cloudinit import url_helper from cloudinit import util -from cloudinit.tests.helpers import TestCase, CiTestCase, ExitStack, mock +from tests.unittests.helpers 
import TestCase, CiTestCase, ExitStack, mock
 
 
 class FakeModule(handlers.Handler):
diff --git a/tests/unittests/test_atomic_helper.py b/tests/unittests/test_atomic_helper.py
index 0101b0e3..0c8b8e53 100644
--- a/tests/unittests/test_atomic_helper.py
+++ b/tests/unittests/test_atomic_helper.py
@@ -6,7 +6,7 @@ import stat
 
 from cloudinit import atomic_helper
 
-from cloudinit.tests.helpers import CiTestCase
+from tests.unittests.helpers import CiTestCase
 
 
 class TestAtomicHelper(CiTestCase):
diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
index 230866b9..cf2c0a4d 100644
--- a/tests/unittests/test_builtin_handlers.py
+++ b/tests/unittests/test_builtin_handlers.py
@@ -11,7 +11,7 @@ import tempfile
 
 from textwrap import dedent
 
-from cloudinit.tests.helpers import (
+from tests.unittests.helpers import (
     FilesystemMockingTestCase, CiTestCase, mock, skipUnlessJinja)
 
 from cloudinit import handlers
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index 1459fd9c..fd717f34 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -5,7 +5,7 @@ import io
 from collections import namedtuple
 
 from cloudinit.cmd import main as cli
-from cloudinit.tests import helpers as test_helpers
+from tests.unittests import helpers as test_helpers
 
 from cloudinit.util import load_file, load_json
 
diff --git a/tests/unittests/test_conftest.py b/tests/unittests/test_conftest.py
new file mode 100644
index 00000000..2e02b7a7
--- /dev/null
+++ b/tests/unittests/test_conftest.py
@@ -0,0 +1,65 @@
+import pytest
+
+from cloudinit import subp
+from tests.unittests.helpers import CiTestCase
+
+
+class TestDisableSubpUsage:
+    """Test that the disable_subp_usage fixture behaves as expected."""
+
+    def test_using_subp_raises_assertion_error(self):
+        with pytest.raises(AssertionError):
+            subp.subp(["some", "args"])
+
+    def test_typeerrors_on_incorrect_usage(self):
+        with pytest.raises(TypeError):
+            # We are intentionally passing no value for a parameter, so:
+            # pylint: disable=no-value-for-parameter
+            subp.subp()
+
+    @pytest.mark.allow_all_subp
+    def test_subp_usage_can_be_reenabled(self):
+        subp.subp(['whoami'])
+
+    @pytest.mark.allow_subp_for("whoami")
+    def test_subp_usage_can_be_conditionally_reenabled(self):
+        # The two parameters test each potential invocation with a single
+        # argument
+        with pytest.raises(AssertionError) as excinfo:
+            subp.subp(["some", "args"])
+        assert "allowed: whoami" in str(excinfo.value)
+        subp.subp(['whoami'])
+
+    @pytest.mark.allow_subp_for("whoami", "bash")
+    def test_subp_usage_can_be_conditionally_reenabled_for_multiple_cmds(self):
+        with pytest.raises(AssertionError) as excinfo:
+            subp.subp(["some", "args"])
+        assert "allowed: whoami,bash" in str(excinfo.value)
+        subp.subp(['bash', '-c', 'true'])
+        subp.subp(['whoami'])
+
+    @pytest.mark.allow_all_subp
+    @pytest.mark.allow_subp_for("bash")
+    def test_both_marks_raise_an_error(self):
+        with pytest.raises(AssertionError, match="marked both"):
+            subp.subp(["bash"])
+
+
+class TestDisableSubpUsageInTestSubclass(CiTestCase):
+    """Test that disable_subp_usage doesn't impact CiTestCase's subp logic."""
+
+    def test_using_subp_raises_exception(self):
+        with pytest.raises(Exception):
+            subp.subp(["some", "args"])
+
+    def test_typeerrors_on_incorrect_usage(self):
+        with pytest.raises(TypeError):
+            subp.subp()
+
+    def test_subp_usage_can_be_reenabled(self):
+        _old_allowed_subp = self.allowed_subp
+        self.allowed_subp = True
+        try:
+            subp.subp(['bash', '-c', 'true'])
+        finally:
+            
self.allowed_subp = _old_allowed_subp diff --git a/tests/unittests/test_cs_util.py b/tests/unittests/test_cs_util.py index bfd07ecf..be9da40c 100644 --- a/tests/unittests/test_cs_util.py +++ b/tests/unittests/test_cs_util.py @@ -1,6 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit.tests import helpers as test_helpers +from tests.unittests import helpers as test_helpers from cloudinit.cs_utils import Cepko diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index 8c968ae9..2ee09bbb 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -25,7 +25,7 @@ from cloudinit import user_data as ud from cloudinit import safeyaml from cloudinit import util -from cloudinit.tests import helpers +from tests.unittests import helpers INSTANCE_ID = "i-testing" diff --git a/tests/unittests/test_datasource/__init__.py b/tests/unittests/test_datasource/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py deleted file mode 100644 index cab1ac2b..00000000 --- a/tests/unittests/test_datasource/test_aliyun.py +++ /dev/null @@ -1,248 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import functools -import httpretty -import os -from unittest import mock - -from cloudinit import helpers -from cloudinit.sources import DataSourceAliYun as ay -from cloudinit.sources.DataSourceEc2 import convert_ec2_metadata_network_config -from cloudinit.tests import helpers as test_helpers - -DEFAULT_METADATA = { - 'instance-id': 'aliyun-test-vm-00', - 'eipv4': '10.0.0.1', - 'hostname': 'test-hostname', - 'image-id': 'm-test', - 'launch-index': '0', - 'mac': '00:16:3e:00:00:00', - 'network-type': 'vpc', - 'private-ipv4': '192.168.0.1', - 'serial-number': 'test-string', - 'vpc-cidr-block': '192.168.0.0/16', - 'vpc-id': 'test-vpc', - 'vswitch-id': 'test-vpc', - 'vswitch-cidr-block': '192.168.0.0/16', - 'zone-id': 'test-zone-1', - 'ntp-conf': {'ntp_servers': [ - 'ntp1.aliyun.com', - 'ntp2.aliyun.com', - 'ntp3.aliyun.com']}, - 'source-address': ['http://mirrors.aliyun.com', - 'http://mirrors.aliyuncs.com'], - 'public-keys': {'key-pair-1': {'openssh-key': 'ssh-rsa AAAAB3...'}, - 'key-pair-2': {'openssh-key': 'ssh-rsa AAAAB3...'}} -} - -DEFAULT_USERDATA = """\ -#cloud-config - -hostname: localhost""" - - -def register_mock_metaserver(base_url, data): - def register_helper(register, base_url, body): - if isinstance(body, str): - register(base_url, body) - elif isinstance(body, list): - register(base_url.rstrip('/'), '\n'.join(body) + '\n') - elif isinstance(body, dict): - if not body: - register(base_url.rstrip('/') + '/', 'not found', - status_code=404) - vals = [] - for k, v in body.items(): - if isinstance(v, (str, list)): - suffix = k.rstrip('/') - else: - suffix = k.rstrip('/') + '/' - vals.append(suffix) - url = base_url.rstrip('/') + '/' + suffix - register_helper(register, url, v) - register(base_url, '\n'.join(vals) + '\n') - - register = functools.partial(httpretty.register_uri, httpretty.GET) - register_helper(register, base_url, data) - - -class TestAliYunDatasource(test_helpers.HttprettyTestCase): - def setUp(self): - super(TestAliYunDatasource, self).setUp() - cfg = {'datasource': {'AliYun': {'timeout': '1', 'max_wait': '1'}}} - distro = {} - paths = helpers.Paths({'run_dir': self.tmp_dir()}) - self.ds = ay.DataSourceAliYun(cfg, distro, paths) - self.metadata_address = 
self.ds.metadata_urls[0] - - @property - def default_metadata(self): - return DEFAULT_METADATA - - @property - def default_userdata(self): - return DEFAULT_USERDATA - - @property - def metadata_url(self): - return os.path.join( - self.metadata_address, - self.ds.min_metadata_version, 'meta-data') + '/' - - @property - def userdata_url(self): - return os.path.join( - self.metadata_address, - self.ds.min_metadata_version, 'user-data') - - # EC2 provides an instance-identity document which must return 404 here - # for this test to pass. - @property - def default_identity(self): - return {} - - @property - def identity_url(self): - return os.path.join(self.metadata_address, - self.ds.min_metadata_version, - 'dynamic', 'instance-identity') - - def regist_default_server(self): - register_mock_metaserver(self.metadata_url, self.default_metadata) - register_mock_metaserver(self.userdata_url, self.default_userdata) - register_mock_metaserver(self.identity_url, self.default_identity) - - def _test_get_data(self): - self.assertEqual(self.ds.metadata, self.default_metadata) - self.assertEqual(self.ds.userdata_raw, - self.default_userdata.encode('utf8')) - - def _test_get_sshkey(self): - pub_keys = [v['openssh-key'] for (_, v) in - self.default_metadata['public-keys'].items()] - self.assertEqual(self.ds.get_public_ssh_keys(), pub_keys) - - def _test_get_iid(self): - self.assertEqual(self.default_metadata['instance-id'], - self.ds.get_instance_id()) - - def _test_host_name(self): - self.assertEqual(self.default_metadata['hostname'], - self.ds.get_hostname()) - - @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") - def test_with_mock_server(self, m_is_aliyun): - m_is_aliyun.return_value = True - self.regist_default_server() - ret = self.ds.get_data() - self.assertEqual(True, ret) - self.assertEqual(1, m_is_aliyun.call_count) - self._test_get_data() - self._test_get_sshkey() - self._test_get_iid() - self._test_host_name() - self.assertEqual('aliyun', self.ds.cloud_name) - self.assertEqual('ec2', self.ds.platform) - self.assertEqual( - 'metadata (http://100.100.100.200)', self.ds.subplatform) - - @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") - def test_returns_false_when_not_on_aliyun(self, m_is_aliyun): - """If is_aliyun returns false, then get_data should return False.""" - m_is_aliyun.return_value = False - self.regist_default_server() - ret = self.ds.get_data() - self.assertEqual(1, m_is_aliyun.call_count) - self.assertEqual(False, ret) - - def test_parse_public_keys(self): - public_keys = {} - self.assertEqual(ay.parse_public_keys(public_keys), []) - - public_keys = {'key-pair-0': 'ssh-key-0'} - self.assertEqual(ay.parse_public_keys(public_keys), - [public_keys['key-pair-0']]) - - public_keys = {'key-pair-0': 'ssh-key-0', 'key-pair-1': 'ssh-key-1'} - self.assertEqual(set(ay.parse_public_keys(public_keys)), - set([public_keys['key-pair-0'], - public_keys['key-pair-1']])) - - public_keys = {'key-pair-0': ['ssh-key-0', 'ssh-key-1']} - self.assertEqual(ay.parse_public_keys(public_keys), - public_keys['key-pair-0']) - - public_keys = {'key-pair-0': {'openssh-key': []}} - self.assertEqual(ay.parse_public_keys(public_keys), []) - - public_keys = {'key-pair-0': {'openssh-key': 'ssh-key-0'}} - self.assertEqual(ay.parse_public_keys(public_keys), - [public_keys['key-pair-0']['openssh-key']]) - - public_keys = {'key-pair-0': {'openssh-key': ['ssh-key-0', - 'ssh-key-1']}} - self.assertEqual(ay.parse_public_keys(public_keys), - public_keys['key-pair-0']['openssh-key']) - - def 
test_route_metric_calculated_without_device_number(self): - """Test that route-metric code works without `device-number` - - `device-number` is part of EC2 metadata, but not supported on aliyun. - Attempting to access it will raise a KeyError. - - LP: #1917875 - """ - netcfg = convert_ec2_metadata_network_config( - {"interfaces": {"macs": { - "06:17:04:d7:26:09": { - "interface-id": "eni-e44ef49e", - }, - "06:17:04:d7:26:08": { - "interface-id": "eni-e44ef49f", - } - }}}, - macs_to_nics={ - '06:17:04:d7:26:09': 'eth0', - '06:17:04:d7:26:08': 'eth1', - } - ) - - met0 = netcfg['ethernets']['eth0']['dhcp4-overrides']['route-metric'] - met1 = netcfg['ethernets']['eth1']['dhcp4-overrides']['route-metric'] - - # route-metric numbers should be 100 apart - assert 100 == abs(met0 - met1) - - -class TestIsAliYun(test_helpers.CiTestCase): - ALIYUN_PRODUCT = 'Alibaba Cloud ECS' - read_dmi_data_expected = [mock.call('system-product-name')] - - @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data") - def test_true_on_aliyun_product(self, m_read_dmi_data): - """Should return true if the dmi product data has expected value.""" - m_read_dmi_data.return_value = self.ALIYUN_PRODUCT - ret = ay._is_aliyun() - self.assertEqual(self.read_dmi_data_expected, - m_read_dmi_data.call_args_list) - self.assertEqual(True, ret) - - @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data") - def test_false_on_empty_string(self, m_read_dmi_data): - """Should return false on empty value returned.""" - m_read_dmi_data.return_value = "" - ret = ay._is_aliyun() - self.assertEqual(self.read_dmi_data_expected, - m_read_dmi_data.call_args_list) - self.assertEqual(False, ret) - - @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data") - def test_false_on_unknown_string(self, m_read_dmi_data): - """Should return false on an unrelated string.""" - m_read_dmi_data.return_value = "cubs win" - ret = ay._is_aliyun() - self.assertEqual(self.read_dmi_data_expected, - m_read_dmi_data.call_args_list) - self.assertEqual(False, ret) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py deleted file mode 100644 index 7a5393ac..00000000 --- a/tests/unittests/test_datasource/test_altcloud.py +++ /dev/null @@ -1,450 +0,0 @@ -# Copyright (C) 2009-2010 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# Copyright (C) 2012 Yahoo! Inc. -# -# Author: Joe VLcek -# -# This file is part of cloud-init. See LICENSE file for license information. - -''' -This test file exercises the code in sources DataSourceAltCloud.py -''' - -import os -import shutil -import tempfile - -from cloudinit import dmi -from cloudinit import helpers -from cloudinit import subp -from cloudinit import util - -from cloudinit.tests.helpers import CiTestCase, mock - -import cloudinit.sources.DataSourceAltCloud as dsac - -OS_UNAME_ORIG = getattr(os, 'uname') - - -def _write_user_data_files(mount_dir, value): - ''' - Populate the deltacloud_user_data_file the user_data_file - which would be populated with user data. 
- ''' - deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt' - user_data_file = mount_dir + '/user-data.txt' - - udfile = open(deltacloud_user_data_file, 'w') - udfile.write(value) - udfile.close() - os.chmod(deltacloud_user_data_file, 0o664) - - udfile = open(user_data_file, 'w') - udfile.write(value) - udfile.close() - os.chmod(user_data_file, 0o664) - - -def _remove_user_data_files(mount_dir, - dc_file=True, - non_dc_file=True): - ''' - Remove the test files: deltacloud_user_data_file and - user_data_file - ''' - deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt' - user_data_file = mount_dir + '/user-data.txt' - - # Ignore any failures removeing files that are already gone. - if dc_file: - try: - os.remove(deltacloud_user_data_file) - except OSError: - pass - - if non_dc_file: - try: - os.remove(user_data_file) - except OSError: - pass - - -def _dmi_data(expected): - ''' - Spoof the data received over DMI - ''' - def _data(key): - return expected - - return _data - - -class TestGetCloudType(CiTestCase): - '''Test to exercise method: DataSourceAltCloud.get_cloud_type()''' - - with_logs = True - - def setUp(self): - '''Set up.''' - super(TestGetCloudType, self).setUp() - self.tmp = self.tmp_dir() - self.paths = helpers.Paths({'cloud_dir': self.tmp}) - self.dmi_data = dmi.read_dmi_data - # We have a different code path for arm to deal with LP1243287 - # We have to switch arch to x86_64 to avoid test failure - force_arch('x86_64') - - def tearDown(self): - # Reset - dmi.read_dmi_data = self.dmi_data - force_arch() - - def test_cloud_info_file_ioerror(self): - """Return UNKNOWN when /etc/sysconfig/cloud-info exists but errors.""" - self.assertEqual('/etc/sysconfig/cloud-info', dsac.CLOUD_INFO_FILE) - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - # Attempting to read the directory generates IOError - with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.tmp): - self.assertEqual('UNKNOWN', dsrc.get_cloud_type()) - self.assertIn( - "[Errno 21] Is a directory: '%s'" % self.tmp, - self.logs.getvalue()) - - def test_cloud_info_file(self): - """Return uppercase stripped content from /etc/sysconfig/cloud-info.""" - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - cloud_info = self.tmp_path('cloud-info', dir=self.tmp) - util.write_file(cloud_info, ' OverRiDdeN CloudType ') - # Attempting to read the directory generates IOError - with mock.patch.object(dsac, 'CLOUD_INFO_FILE', cloud_info): - self.assertEqual('OVERRIDDEN CLOUDTYPE', dsrc.get_cloud_type()) - - def test_rhev(self): - ''' - Test method get_cloud_type() for RHEVm systems. - Forcing read_dmi_data return to match a RHEVm system: RHEV Hypervisor - ''' - dmi.read_dmi_data = _dmi_data('RHEV') - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - self.assertEqual('RHEV', dsrc.get_cloud_type()) - - def test_vsphere(self): - ''' - Test method get_cloud_type() for vSphere systems. - Forcing read_dmi_data return to match a vSphere system: RHEV Hypervisor - ''' - dmi.read_dmi_data = _dmi_data('VMware Virtual Platform') - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - self.assertEqual('VSPHERE', dsrc.get_cloud_type()) - - def test_unknown(self): - ''' - Test method get_cloud_type() for unknown systems. - Forcing read_dmi_data return to match an unrecognized return. 
- ''' - dmi.read_dmi_data = _dmi_data('Unrecognized Platform') - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - self.assertEqual('UNKNOWN', dsrc.get_cloud_type()) - - -class TestGetDataCloudInfoFile(CiTestCase): - ''' - Test to exercise method: DataSourceAltCloud.get_data() - With a contrived CLOUD_INFO_FILE - ''' - def setUp(self): - '''Set up.''' - self.tmp = self.tmp_dir() - self.paths = helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp}) - self.cloud_info_file = self.tmp_path('cloud-info', dir=self.tmp) - - def test_rhev(self): - '''Success Test module get_data() forcing RHEV.''' - - util.write_file(self.cloud_info_file, 'RHEV') - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_rhevm = lambda: True - with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): - self.assertEqual(True, dsrc.get_data()) - self.assertEqual('altcloud', dsrc.cloud_name) - self.assertEqual('altcloud', dsrc.platform_type) - self.assertEqual('rhev (/dev/fd0)', dsrc.subplatform) - - def test_vsphere(self): - '''Success Test module get_data() forcing VSPHERE.''' - - util.write_file(self.cloud_info_file, 'VSPHERE') - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_vsphere = lambda: True - with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): - self.assertEqual(True, dsrc.get_data()) - self.assertEqual('altcloud', dsrc.cloud_name) - self.assertEqual('altcloud', dsrc.platform_type) - self.assertEqual('vsphere (unknown)', dsrc.subplatform) - - def test_fail_rhev(self): - '''Failure Test module get_data() forcing RHEV.''' - - util.write_file(self.cloud_info_file, 'RHEV') - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_rhevm = lambda: False - with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): - self.assertEqual(False, dsrc.get_data()) - - def test_fail_vsphere(self): - '''Failure Test module get_data() forcing VSPHERE.''' - - util.write_file(self.cloud_info_file, 'VSPHERE') - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_vsphere = lambda: False - with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): - self.assertEqual(False, dsrc.get_data()) - - def test_unrecognized(self): - '''Failure Test module get_data() forcing unrecognized.''' - - util.write_file(self.cloud_info_file, 'unrecognized') - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): - self.assertEqual(False, dsrc.get_data()) - - -class TestGetDataNoCloudInfoFile(CiTestCase): - ''' - Test to exercise method: DataSourceAltCloud.get_data() - Without a CLOUD_INFO_FILE - ''' - def setUp(self): - '''Set up.''' - self.tmp = self.tmp_dir() - self.paths = helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp}) - self.dmi_data = dmi.read_dmi_data - dsac.CLOUD_INFO_FILE = \ - 'no such file' - # We have a different code path for arm to deal with LP1243287 - # We have to switch arch to x86_64 to avoid test failure - force_arch('x86_64') - - def tearDown(self): - # Reset - dsac.CLOUD_INFO_FILE = \ - '/etc/sysconfig/cloud-info' - dmi.read_dmi_data = self.dmi_data - # Return back to original arch - force_arch() - - def test_rhev_no_cloud_file(self): - '''Test No cloud info file module get_data() forcing RHEV.''' - - dmi.read_dmi_data = _dmi_data('RHEV Hypervisor') - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_rhevm = lambda: True - self.assertEqual(True, dsrc.get_data()) - - def 
test_vsphere_no_cloud_file(self):
-        '''Test get_data() with no cloud info file, forcing VSPHERE.'''
-
-        dmi.read_dmi_data = _dmi_data('VMware Virtual Platform')
-        dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
-        dsrc.user_data_vsphere = lambda: True
-        self.assertEqual(True, dsrc.get_data())
-
-    def test_failure_no_cloud_file(self):
-        '''Test get_data() with no cloud info file, forcing unrecognized.'''
-
-        dmi.read_dmi_data = _dmi_data('Unrecognized Platform')
-        dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
-        self.assertEqual(False, dsrc.get_data())
-
-
-class TestUserDataRhevm(CiTestCase):
-    '''
-    Test to exercise method: DataSourceAltCloud.user_data_rhevm()
-    '''
-    def setUp(self):
-        '''Set up.'''
-        self.paths = helpers.Paths({'cloud_dir': '/tmp'})
-        self.mount_dir = self.tmp_dir()
-        _write_user_data_files(self.mount_dir, 'test user data')
-        self.add_patch(
-            'cloudinit.sources.DataSourceAltCloud.modprobe_floppy',
-            'm_modprobe_floppy', return_value=None)
-        self.add_patch(
-            'cloudinit.sources.DataSourceAltCloud.util.udevadm_settle',
-            'm_udevadm_settle', return_value=('', ''))
-        self.add_patch(
-            'cloudinit.sources.DataSourceAltCloud.util.mount_cb',
-            'm_mount_cb')
-
-    def test_mount_cb_fails(self):
-        '''Test user_data_rhevm() where mount_cb fails.'''
-
-        self.m_mount_cb.side_effect = util.MountFailedError("Failed Mount")
-        dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
-        self.assertEqual(False, dsrc.user_data_rhevm())
-
-    def test_modprobe_fails(self):
-        '''Test user_data_rhevm() where modprobe fails.'''
-
-        self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError(
-            "Failed modprobe")
-        dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
-        self.assertEqual(False, dsrc.user_data_rhevm())
-
-    def test_no_modprobe_cmd(self):
-        '''Test user_data_rhevm() with no modprobe command.'''
-
-        self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError(
-            "No such file or dir")
-        dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
-        self.assertEqual(False, dsrc.user_data_rhevm())
-
-    def test_udevadm_fails(self):
-        '''Test user_data_rhevm() where udevadm fails.'''
-
-        self.m_udevadm_settle.side_effect = subp.ProcessExecutionError(
-            "Failed settle.")
-        dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
-        self.assertEqual(False, dsrc.user_data_rhevm())
-
-    def test_no_udevadm_cmd(self):
-        '''Test user_data_rhevm() with no udevadm command.'''
-
-        self.m_udevadm_settle.side_effect = OSError("No such file or dir")
-        dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
-        self.assertEqual(False, dsrc.user_data_rhevm())
-
-
-class TestUserDataVsphere(CiTestCase):
-    '''
-    Test to exercise method: DataSourceAltCloud.user_data_vsphere()
-    '''
-    def setUp(self):
-        '''Set up.'''
-        self.tmp = self.tmp_dir()
-        self.paths = helpers.Paths({'cloud_dir': self.tmp})
-        self.mount_dir = tempfile.mkdtemp()
-
-        _write_user_data_files(self.mount_dir, 'test user data')
-
-    def tearDown(self):
-        # Reset
-
-        _remove_user_data_files(self.mount_dir)
-
-        # Attempt to remove the temp dir, ignoring errors
-        try:
-            shutil.rmtree(self.mount_dir)
-        except OSError:
-            pass
-
-        dsac.CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
-
-    @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with")
-    @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb")
-    def test_user_data_vsphere_no_cdrom(self, m_mount_cb, m_find_devs_with):
-        '''Test user_data_vsphere() when no CDROM devices are found.'''
-
-        m_mount_cb.return_value = []
-        dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
-
self.assertEqual(False, dsrc.user_data_vsphere()) - self.assertEqual(0, m_mount_cb.call_count) - - @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with") - @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb") - def test_user_data_vsphere_mcb_fail(self, m_mount_cb, m_find_devs_with): - '''Test user_data_vsphere() where mount_cb fails.''' - - m_find_devs_with.return_value = ["/dev/mock/cdrom"] - m_mount_cb.side_effect = util.MountFailedError("Unable To mount") - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - self.assertEqual(False, dsrc.user_data_vsphere()) - self.assertEqual(1, m_find_devs_with.call_count) - self.assertEqual(1, m_mount_cb.call_count) - - @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with") - @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb") - def test_user_data_vsphere_success(self, m_mount_cb, m_find_devs_with): - """Test user_data_vsphere() where successful.""" - m_find_devs_with.return_value = ["/dev/mock/cdrom"] - m_mount_cb.return_value = 'raw userdata from cdrom' - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - cloud_info = self.tmp_path('cloud-info', dir=self.tmp) - util.write_file(cloud_info, 'VSPHERE') - self.assertEqual(True, dsrc.user_data_vsphere()) - m_find_devs_with.assert_called_once_with('LABEL=CDROM') - m_mount_cb.assert_called_once_with( - '/dev/mock/cdrom', dsac.read_user_data_callback) - with mock.patch.object(dsrc, 'get_cloud_type', return_value='VSPHERE'): - self.assertEqual('vsphere (/dev/mock/cdrom)', dsrc.subplatform) - - -class TestReadUserDataCallback(CiTestCase): - ''' - Test to exercise method: DataSourceAltCloud.read_user_data_callback() - ''' - def setUp(self): - '''Set up.''' - self.paths = helpers.Paths({'cloud_dir': '/tmp'}) - self.mount_dir = tempfile.mkdtemp() - - _write_user_data_files(self.mount_dir, 'test user data') - - def tearDown(self): - # Reset - - _remove_user_data_files(self.mount_dir) - - # Attempt to remove the temp dir ignoring errors - try: - shutil.rmtree(self.mount_dir) - except OSError: - pass - - def test_callback_both(self): - '''Test read_user_data_callback() with both files.''' - - self.assertEqual('test user data', - dsac.read_user_data_callback(self.mount_dir)) - - def test_callback_dc(self): - '''Test read_user_data_callback() with only DC file.''' - - _remove_user_data_files(self.mount_dir, - dc_file=False, - non_dc_file=True) - - self.assertEqual('test user data', - dsac.read_user_data_callback(self.mount_dir)) - - def test_callback_non_dc(self): - '''Test read_user_data_callback() with only non-DC file.''' - - _remove_user_data_files(self.mount_dir, - dc_file=True, - non_dc_file=False) - - self.assertEqual('test user data', - dsac.read_user_data_callback(self.mount_dir)) - - def test_callback_none(self): - '''Test read_user_data_callback() no files are found.''' - - _remove_user_data_files(self.mount_dir) - self.assertIsNone(dsac.read_user_data_callback(self.mount_dir)) - - -def force_arch(arch=None): - - def _os_uname(): - return ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', arch) - - if arch: - setattr(os, 'uname', _os_uname) - elif arch is None: - setattr(os, 'uname', OS_UNAME_ORIG) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py deleted file mode 100644 index 995d2b10..00000000 --- a/tests/unittests/test_datasource/test_azure.py +++ /dev/null @@ -1,3394 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
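Editor's note: the force_arch() helper above swaps os.uname for a fake
5-tuple and relies on the module-level OS_UNAME_ORIG backup plus a second
call in tearDown() to undo the patch. A minimal sketch of the same idea as
a context manager, so os.uname is restored even when an assertion fails
mid-test; the name forced_arch is hypothetical and is not part of this
patch:

    import contextlib
    import os

    @contextlib.contextmanager
    def forced_arch(arch):
        # Back up the real os.uname and substitute a fake 5-tuple whose
        # last field is the machine architecture under test.
        orig_uname = os.uname
        os.uname = lambda: ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', arch)
        try:
            yield
        finally:
            os.uname = orig_uname

    # usage:
    # with forced_arch('x86_64'):
    #     ...run the DMI-dependent assertions...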
-
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import url_helper
-from cloudinit.sources import (
-    UNSET, DataSourceAzure as dsaz, InvalidMetaDataException)
-from cloudinit.util import (b64e, decode_binary, load_file, write_file,
-                            MountFailedError, json_dumps, load_json)
-from cloudinit.version import version_string as vs
-from cloudinit.tests.helpers import (
-    HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call,
-    ExitStack, resourceLocation)
-from cloudinit.sources.helpers import netlink
-
-import copy
-import crypt
-import httpretty
-import json
-import os
-import requests
-import stat
-import xml.etree.ElementTree as ET
-import yaml
-
-
-def construct_valid_ovf_env(data=None, pubkeys=None,
-                            userdata=None, platform_settings=None):
-    if data is None:
-        data = {'HostName': 'FOOHOST'}
-    if pubkeys is None:
-        pubkeys = {}
-
-    content = """<?xml version="1.0" encoding="utf-8"?>
-<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
- xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
- xmlns:wa="http://schemas.microsoft.com/windowsazure"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
- <wa:ProvisioningSection><wa:Version>1.0</wa:Version>
- <LinuxProvisioningConfigurationSet
-  xmlns="http://schemas.microsoft.com/windowsazure"
-  xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
-  <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>
-"""
-    for key, dval in data.items():
-        if isinstance(dval, dict):
-            val = dict(dval).get('text')
-            attrs = ' ' + ' '.join(["%s='%s'" % (k, v) for k, v
-                                    in dict(dval).items() if k != 'text'])
-        else:
-            val = dval
-            attrs = ""
-        content += "<%s%s>%s</%s>\n" % (key, attrs, val, key)
-
-    if userdata:
-        content += "<UserData>%s</UserData>\n" % (b64e(userdata))
-
-    if pubkeys:
-        content += "<SSH><PublicKeys>\n"
-        for fp, path, value in pubkeys:
-            content += " <PublicKey>"
-            if fp and path:
-                content += ("<Fingerprint>%s</Fingerprint>"
-                            "<Path>%s</Path>" % (fp, path))
-            if value:
-                content += "<Value>%s</Value>" % value
-            content += "</PublicKey>\n"
-        content += "</PublicKeys></SSH>"
-    content += """
- </LinuxProvisioningConfigurationSet>
- </wa:ProvisioningSection>
- <wa:PlatformSettingsSection><wa:Version>1.0</wa:Version>
- <PlatformSettings>
-  <KmsServerHostname>kms.core.windows.net</KmsServerHostname>
-  <ProvisionGuestAgent>false</ProvisionGuestAgent>
-"""
-    if platform_settings:
-        for k, v in platform_settings.items():
-            content += "<%s>%s</%s>\n" % (k, v, k)
-        if "PreprovisionedVMType" not in platform_settings:
-            content += """<PreprovisionedVMType i:nil="true" />"""
-    content += """</PlatformSettings></wa:PlatformSettingsSection>
-</Environment>
-"""
-
-    return content
-
-
-NETWORK_METADATA = {
-    "compute": {
-        "location": "eastus2",
-        "name": "my-hostname",
-        "offer": "UbuntuServer",
-        "osType": "Linux",
-        "placementGroupId": "",
-        "platformFaultDomain": "0",
-        "platformUpdateDomain": "0",
-        "publisher": "Canonical",
-        "resourceGroupName": "srugroup1",
-        "sku": "19.04-DAILY",
-        "subscriptionId": "12aad61c-6de4-4e53-a6c6-5aff52a83777",
-        "tags": "",
-        "version": "19.04.201906190",
-        "vmId": "ff702a6b-cb6a-4fcd-ad68-b4ce38227642",
-        "vmScaleSetName": "",
-        "vmSize": "Standard_DS1_v2",
-        "zone": "",
-        "publicKeys": [
-            {
-                "keyData": "ssh-rsa key1",
-                "path": "path1"
-            }
-        ]
-    },
-    "network": {
-        "interface": [
-            {
-                "macAddress": "000D3A047598",
-                "ipv6": {
-                    "ipAddress": []
-                },
-                "ipv4": {
-                    "subnet": [
-                        {
-                            "prefix": "24",
-                            "address": "10.0.0.0"
-                        }
-                    ],
-                    "ipAddress": [
-                        {
-                            "privateIpAddress": "10.0.0.4",
-                            "publicIpAddress": "104.46.124.81"
-                        }
-                    ]
-                }
-            }
-        ]
-    }
-}
-
-SECONDARY_INTERFACE = {
-    "macAddress": "220D3A047598",
-    "ipv6": {
-        "ipAddress": []
-    },
-    "ipv4": {
-        "subnet": [
-            {
-                "prefix": "24",
-                "address": "10.0.1.0"
-            }
-        ],
-        "ipAddress": [
-            {
-                "privateIpAddress": "10.0.1.5",
-            }
-        ]
-    }
-}
-
-SECONDARY_INTERFACE_NO_IP = {
-    "macAddress": "220D3A047598",
-    "ipv6": {
-        "ipAddress": []
-    },
-    "ipv4": {
-        "subnet": [
-            {
-                "prefix": "24",
-                "address": "10.0.1.0"
-            }
-        ],
-        "ipAddress": []
-    }
-}
-
-IMDS_NETWORK_METADATA = {
-    "interface": [
-        {
-            "macAddress": "000D3A047598",
-            "ipv6": {
-                "ipAddress": []
-            },
-            "ipv4": {
-                "subnet": [
-                    {
-                        "prefix": "24",
-                        "address": "10.0.0.0"
-                    }
-                ],
-                "ipAddress": [
-                    {
-                        "privateIpAddress": "10.0.0.4",
-                        "publicIpAddress": "104.46.124.81"
-                    }
-                ]
-            }
-        }
-    ]
-}
-
-MOCKPATH = 'cloudinit.sources.DataSourceAzure.'
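Editor's note: NETWORK_METADATA and IMDS_NETWORK_METADATA above mirror the
JSON returned by the Azure Instance Metadata Service (IMDS). The tests
further down assert the exact request shape: a GET of
http://169.254.169.254/metadata/instance?api-version=2019-06-01 carrying a
'Metadata: true' header. A minimal stdlib sketch of that request, for
orientation only; fetch_imds is a hypothetical helper, it is not part of
this patch, and it only works from inside an Azure VM (the timeout value is
an arbitrary choice):

    import json
    import urllib.request

    def fetch_imds(api_version='2019-06-01'):
        # IMDS is reachable only on the link-local address from within the
        # VM, and it rejects requests lacking the Metadata: true header.
        url = ('http://169.254.169.254/metadata/instance'
               '?api-version=' + api_version)
        req = urllib.request.Request(url, headers={'Metadata': 'true'})
        with urllib.request.urlopen(req, timeout=2) as resp:
            return json.loads(resp.read().decode('utf-8'))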
-EXAMPLE_UUID = 'd0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8' - - -class TestParseNetworkConfig(CiTestCase): - - maxDiff = None - fallback_config = { - 'version': 1, - 'config': [{ - 'type': 'physical', 'name': 'eth0', - 'mac_address': '00:11:22:33:44:55', - 'params': {'driver': 'hv_netsvc'}, - 'subnets': [{'type': 'dhcp'}], - }] - } - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - def test_single_ipv4_nic_configuration(self, m_driver): - """parse_network_config emits dhcp on single nic with ipv4""" - expected = {'ethernets': { - 'eth0': {'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': False, - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'set-name': 'eth0'}}, 'version': 2} - self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA)) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - def test_increases_route_metric_for_non_primary_nics(self, m_driver): - """parse_network_config increases route-metric for each nic""" - expected = {'ethernets': { - 'eth0': {'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': False, - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'set-name': 'eth0'}, - 'eth1': {'set-name': 'eth1', - 'match': {'macaddress': '22:0d:3a:04:75:98'}, - 'dhcp6': False, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 200}}, - 'eth2': {'set-name': 'eth2', - 'match': {'macaddress': '33:0d:3a:04:75:98'}, - 'dhcp6': False, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 300}}}, 'version': 2} - imds_data = copy.deepcopy(NETWORK_METADATA) - imds_data['network']['interface'].append(SECONDARY_INTERFACE) - third_intf = copy.deepcopy(SECONDARY_INTERFACE) - third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') - third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' - third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' - imds_data['network']['interface'].append(third_intf) - self.assertEqual(expected, dsaz.parse_network_config(imds_data)) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - def test_ipv4_and_ipv6_route_metrics_match_for_nics(self, m_driver): - """parse_network_config emits matching ipv4 and ipv6 route-metrics.""" - expected = {'ethernets': { - 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/128'], - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': True, - 'dhcp6-overrides': {'route-metric': 100}, - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'set-name': 'eth0'}, - 'eth1': {'set-name': 'eth1', - 'match': {'macaddress': '22:0d:3a:04:75:98'}, - 'dhcp4': True, - 'dhcp6': False, - 'dhcp4-overrides': {'route-metric': 200}}, - 'eth2': {'set-name': 'eth2', - 'match': {'macaddress': '33:0d:3a:04:75:98'}, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 300}, - 'dhcp6': True, - 'dhcp6-overrides': {'route-metric': 300}}}, 'version': 2} - imds_data = copy.deepcopy(NETWORK_METADATA) - nic1 = imds_data['network']['interface'][0] - nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) - - nic1['ipv6'] = { - "subnet": [{"address": "2001:dead:beef::16"}], - "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}, - {"privateIpAddress": "2001:dead:beef::2"}] - } - imds_data['network']['interface'].append(SECONDARY_INTERFACE) - third_intf = copy.deepcopy(SECONDARY_INTERFACE) - third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') - third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' - 
third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' - third_intf['ipv6'] = { - "subnet": [{"prefix": "64", "address": "2001:dead:beef::2"}], - "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}] - } - imds_data['network']['interface'].append(third_intf) - self.assertEqual(expected, dsaz.parse_network_config(imds_data)) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - def test_ipv4_secondary_ips_will_be_static_addrs(self, m_driver): - """parse_network_config emits primary ipv4 as dhcp others are static""" - expected = {'ethernets': { - 'eth0': {'addresses': ['10.0.0.5/24'], - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': True, - 'dhcp6-overrides': {'route-metric': 100}, - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'set-name': 'eth0'}}, 'version': 2} - imds_data = copy.deepcopy(NETWORK_METADATA) - nic1 = imds_data['network']['interface'][0] - nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) - - nic1['ipv6'] = { - "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}], - "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}] - } - self.assertEqual(expected, dsaz.parse_network_config(imds_data)) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - def test_ipv6_secondary_ips_will_be_static_cidrs(self, m_driver): - """parse_network_config emits primary ipv6 as dhcp others are static""" - expected = {'ethernets': { - 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/10'], - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': True, - 'dhcp6-overrides': {'route-metric': 100}, - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'set-name': 'eth0'}}, 'version': 2} - imds_data = copy.deepcopy(NETWORK_METADATA) - nic1 = imds_data['network']['interface'][0] - nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) - - # Secondary ipv6 addresses currently ignored/unconfigured - nic1['ipv6'] = { - "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}], - "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}, - {"privateIpAddress": "2001:dead:beef::2"}] - } - self.assertEqual(expected, dsaz.parse_network_config(imds_data)) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value='hv_netvsc') - def test_match_driver_for_netvsc(self, m_driver): - """parse_network_config emits driver when using netvsc.""" - expected = {'ethernets': { - 'eth0': { - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': False, - 'match': { - 'macaddress': '00:0d:3a:04:75:98', - 'driver': 'hv_netvsc', - }, - 'set-name': 'eth0' - }}, 'version': 2} - self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA)) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - @mock.patch('cloudinit.net.generate_fallback_config') - def test_parse_network_config_uses_fallback_cfg_when_no_network_metadata( - self, m_fallback_config, m_driver): - """parse_network_config generates fallback network config when the - IMDS instance metadata is corrupted/invalid, such as when - network metadata is not present. 
- """ - imds_metadata_missing_network_metadata = copy.deepcopy( - NETWORK_METADATA) - del imds_metadata_missing_network_metadata['network'] - m_fallback_config.return_value = self.fallback_config - self.assertEqual( - self.fallback_config, - dsaz.parse_network_config( - imds_metadata_missing_network_metadata)) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - @mock.patch('cloudinit.net.generate_fallback_config') - def test_parse_network_config_uses_fallback_cfg_when_no_interface_metadata( - self, m_fallback_config, m_driver): - """parse_network_config generates fallback network config when the - IMDS instance metadata is corrupted/invalid, such as when - network interface metadata is not present. - """ - imds_metadata_missing_interface_metadata = copy.deepcopy( - NETWORK_METADATA) - del imds_metadata_missing_interface_metadata['network']['interface'] - m_fallback_config.return_value = self.fallback_config - self.assertEqual( - self.fallback_config, - dsaz.parse_network_config( - imds_metadata_missing_interface_metadata)) - - -class TestGetMetadataFromIMDS(HttprettyTestCase): - - with_logs = True - - def setUp(self): - super(TestGetMetadataFromIMDS, self).setUp() - self.network_md_url = "{}/instance?api-version=2019-06-01".format( - dsaz.IMDS_URL - ) - - @mock.patch(MOCKPATH + 'readurl') - @mock.patch(MOCKPATH + 'EphemeralDHCPv4', autospec=True) - @mock.patch(MOCKPATH + 'net.is_up', autospec=True) - def test_get_metadata_does_not_dhcp_if_network_is_up( - self, m_net_is_up, m_dhcp, m_readurl): - """Do not perform DHCP setup when nic is already up.""" - m_net_is_up.return_value = True - m_readurl.return_value = url_helper.StringResponse( - json.dumps(NETWORK_METADATA).encode('utf-8')) - self.assertEqual( - NETWORK_METADATA, - dsaz.get_metadata_from_imds('eth9', retries=3)) - - m_net_is_up.assert_called_with('eth9') - m_dhcp.assert_not_called() - self.assertIn( - "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time - self.logs.getvalue()) - - @mock.patch(MOCKPATH + 'readurl', autospec=True) - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') - @mock.patch(MOCKPATH + 'net.is_up') - def test_get_metadata_uses_instance_url( - self, m_net_is_up, m_dhcp, m_readurl): - """Make sure readurl is called with the correct url when accessing - metadata""" - m_net_is_up.return_value = True - m_readurl.return_value = url_helper.StringResponse( - json.dumps(IMDS_NETWORK_METADATA).encode('utf-8')) - - dsaz.get_metadata_from_imds( - 'eth0', retries=3, md_type=dsaz.metadata_type.all) - m_readurl.assert_called_with( - "http://169.254.169.254/metadata/instance?api-version=" - "2019-06-01", exception_cb=mock.ANY, - headers=mock.ANY, retries=mock.ANY, - timeout=mock.ANY, infinite=False) - - @mock.patch(MOCKPATH + 'readurl', autospec=True) - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') - @mock.patch(MOCKPATH + 'net.is_up') - def test_get_network_metadata_uses_network_url( - self, m_net_is_up, m_dhcp, m_readurl): - """Make sure readurl is called with the correct url when accessing - network metadata""" - m_net_is_up.return_value = True - m_readurl.return_value = url_helper.StringResponse( - json.dumps(IMDS_NETWORK_METADATA).encode('utf-8')) - - dsaz.get_metadata_from_imds( - 'eth0', retries=3, md_type=dsaz.metadata_type.network) - m_readurl.assert_called_with( - "http://169.254.169.254/metadata/instance/network?api-version=" - "2019-06-01", exception_cb=mock.ANY, - headers=mock.ANY, retries=mock.ANY, - timeout=mock.ANY, infinite=False) - - @mock.patch(MOCKPATH + 'readurl', 
autospec=True) - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') - @mock.patch(MOCKPATH + 'net.is_up') - def test_get_default_metadata_uses_instance_url( - self, m_net_is_up, m_dhcp, m_readurl): - """Make sure readurl is called with the correct url when accessing - metadata""" - m_net_is_up.return_value = True - m_readurl.return_value = url_helper.StringResponse( - json.dumps(IMDS_NETWORK_METADATA).encode('utf-8')) - - dsaz.get_metadata_from_imds( - 'eth0', retries=3) - m_readurl.assert_called_with( - "http://169.254.169.254/metadata/instance?api-version=" - "2019-06-01", exception_cb=mock.ANY, - headers=mock.ANY, retries=mock.ANY, - timeout=mock.ANY, infinite=False) - - @mock.patch(MOCKPATH + 'readurl', autospec=True) - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') - @mock.patch(MOCKPATH + 'net.is_up') - def test_get_metadata_uses_extended_url( - self, m_net_is_up, m_dhcp, m_readurl): - """Make sure readurl is called with the correct url when accessing - metadata""" - m_net_is_up.return_value = True - m_readurl.return_value = url_helper.StringResponse( - json.dumps(IMDS_NETWORK_METADATA).encode('utf-8')) - - dsaz.get_metadata_from_imds( - 'eth0', retries=3, md_type=dsaz.metadata_type.all, - api_version="2021-08-01") - m_readurl.assert_called_with( - "http://169.254.169.254/metadata/instance?api-version=" - "2021-08-01&extended=true", exception_cb=mock.ANY, - headers=mock.ANY, retries=mock.ANY, - timeout=mock.ANY, infinite=False) - - @mock.patch(MOCKPATH + 'readurl', autospec=True) - @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting', autospec=True) - @mock.patch(MOCKPATH + 'net.is_up', autospec=True) - def test_get_metadata_performs_dhcp_when_network_is_down( - self, m_net_is_up, m_dhcp, m_readurl): - """Perform DHCP setup when nic is not up.""" - m_net_is_up.return_value = False - m_readurl.return_value = url_helper.StringResponse( - json.dumps(NETWORK_METADATA).encode('utf-8')) - - self.assertEqual( - NETWORK_METADATA, - dsaz.get_metadata_from_imds('eth9', retries=2)) - - m_net_is_up.assert_called_with('eth9') - m_dhcp.assert_called_with(mock.ANY, 'eth9') - self.assertIn( - "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time - self.logs.getvalue()) - - m_readurl.assert_called_with( - self.network_md_url, exception_cb=mock.ANY, - headers={'Metadata': 'true'}, retries=2, - timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, infinite=False) - - @mock.patch('cloudinit.url_helper.time.sleep') - @mock.patch(MOCKPATH + 'net.is_up', autospec=True) - def test_get_metadata_from_imds_empty_when_no_imds_present( - self, m_net_is_up, m_sleep): - """Return empty dict when IMDS network metadata is absent.""" - httpretty.register_uri( - httpretty.GET, - dsaz.IMDS_URL + '/instance?api-version=2017-12-01', - body={}, status=404) - - m_net_is_up.return_value = True # skips dhcp - - self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=2)) - - m_net_is_up.assert_called_with('eth9') - self.assertEqual([mock.call(1), mock.call(1)], m_sleep.call_args_list) - self.assertIn( - "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time - self.logs.getvalue()) - - @mock.patch('requests.Session.request') - @mock.patch('cloudinit.url_helper.time.sleep') - @mock.patch(MOCKPATH + 'net.is_up', autospec=True) - def test_get_metadata_from_imds_retries_on_timeout( - self, m_net_is_up, m_sleep, m_request): - """Retry IMDS network metadata on timeout errors.""" - - self.attempt = 0 - m_request.side_effect = requests.Timeout('Fake Connection Timeout') - - def retry_callback(request, uri, headers): - 
self.attempt += 1 - raise requests.Timeout('Fake connection timeout') - - httpretty.register_uri( - httpretty.GET, - dsaz.IMDS_URL + 'instance?api-version=2017-12-01', - body=retry_callback) - - m_net_is_up.return_value = True # skips dhcp - - self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=3)) - - m_net_is_up.assert_called_with('eth9') - self.assertEqual([mock.call(1)]*3, m_sleep.call_args_list) - self.assertIn( - "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time - self.logs.getvalue()) - - -class TestAzureDataSource(CiTestCase): - - with_logs = True - - def setUp(self): - super(TestAzureDataSource, self).setUp() - self.tmp = self.tmp_dir() - - # patch cloud_dir, so our 'seed_dir' is guaranteed empty - self.paths = helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp}) - self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') - - self.patches = ExitStack() - self.addCleanup(self.patches.close) - - self.patches.enter_context(mock.patch.object( - dsaz, '_get_random_seed', return_value='wild')) - self.m_get_metadata_from_imds = self.patches.enter_context( - mock.patch.object( - dsaz, 'get_metadata_from_imds', - mock.MagicMock(return_value=NETWORK_METADATA))) - self.m_fallback_nic = self.patches.enter_context( - mock.patch('cloudinit.sources.net.find_fallback_nic', - return_value='eth9')) - self.m_remove_ubuntu_network_scripts = self.patches.enter_context( - mock.patch.object( - dsaz, 'maybe_remove_ubuntu_network_config_scripts', - mock.MagicMock())) - super(TestAzureDataSource, self).setUp() - - def apply_patches(self, patches): - for module, name, new in patches: - self.patches.enter_context(mock.patch.object(module, name, new)) - - def _get_mockds(self): - sysctl_out = "dev.storvsc.3.%pnpinfo: "\ - "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\ - "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n" - sysctl_out += "dev.storvsc.2.%pnpinfo: "\ - "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\ - "deviceid=f8b3781a-1e82-4818-a1c3-63d806ec15bb\n" - sysctl_out += "dev.storvsc.1.%pnpinfo: "\ - "classid=32412632-86cb-44a2-9b5c-50d1417354f5 "\ - "deviceid=00000000-0001-8899-0000-000000000000\n" - camctl_devbus = """ -scbus0 on ata0 bus 0 -scbus1 on ata1 bus 0 -scbus2 on blkvsc0 bus 0 -scbus3 on blkvsc1 bus 0 -scbus4 on storvsc2 bus 0 -scbus5 on storvsc3 bus 0 -scbus-1 on xpt0 bus 0 - """ - camctl_dev = """ - at scbus1 target 0 lun 0 (cd0,pass0) - at scbus2 target 0 lun 0 (da0,pass1) - at scbus3 target 1 lun 0 (da1,pass2) - """ - self.apply_patches([ - (dsaz, 'get_dev_storvsc_sysctl', mock.MagicMock( - return_value=sysctl_out)), - (dsaz, 'get_camcontrol_dev_bus', mock.MagicMock( - return_value=camctl_devbus)), - (dsaz, 'get_camcontrol_dev', mock.MagicMock( - return_value=camctl_dev)) - ]) - return dsaz - - def _get_ds(self, data, distro='ubuntu', - apply_network=None, instance_id=None): - - def _wait_for_files(flist, _maxwait=None, _naplen=None): - data['waited'] = flist - return [] - - def _load_possible_azure_ds(seed_dir, cache_dir): - yield seed_dir - yield dsaz.DEFAULT_PROVISIONING_ISO_DEV - yield from data.get('dsdevs', []) - if cache_dir: - yield cache_dir - - seed_dir = os.path.join(self.paths.seed_dir, "azure") - if data.get('ovfcontent') is not None: - populate_dir(seed_dir, - {'ovf-env.xml': data['ovfcontent']}) - - dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d - - self.m_is_platform_viable = mock.MagicMock(autospec=True) - self.m_get_metadata_from_fabric = mock.MagicMock( - return_value={'public-keys': []}) - 
self.m_report_failure_to_fabric = mock.MagicMock(autospec=True) - self.m_ephemeral_dhcpv4 = mock.MagicMock() - self.m_ephemeral_dhcpv4_with_reporting = mock.MagicMock() - self.m_list_possible_azure_ds = mock.MagicMock( - side_effect=_load_possible_azure_ds) - - if instance_id: - self.instance_id = instance_id - else: - self.instance_id = EXAMPLE_UUID - - def _dmi_mocks(key): - if key == 'system-uuid': - return self.instance_id - elif key == 'chassis-asset-tag': - return '7783-7084-3265-9085-8269-3286-77' - - self.apply_patches([ - (dsaz, 'list_possible_azure_ds', - self.m_list_possible_azure_ds), - (dsaz, 'perform_hostname_bounce', mock.MagicMock()), - (dsaz, 'get_hostname', mock.MagicMock()), - (dsaz, 'set_hostname', mock.MagicMock()), - (dsaz, '_is_platform_viable', - self.m_is_platform_viable), - (dsaz, 'get_metadata_from_fabric', - self.m_get_metadata_from_fabric), - (dsaz, 'report_failure_to_fabric', - self.m_report_failure_to_fabric), - (dsaz, 'EphemeralDHCPv4', self.m_ephemeral_dhcpv4), - (dsaz, 'EphemeralDHCPv4WithReporting', - self.m_ephemeral_dhcpv4_with_reporting), - (dsaz, 'get_boot_telemetry', mock.MagicMock()), - (dsaz, 'get_system_info', mock.MagicMock()), - (dsaz.subp, 'which', lambda x: True), - (dsaz.dmi, 'read_dmi_data', mock.MagicMock( - side_effect=_dmi_mocks)), - (dsaz.util, 'wait_for_files', mock.MagicMock( - side_effect=_wait_for_files)), - ]) - - if isinstance(distro, str): - distro_cls = distros.fetch(distro) - distro = distro_cls(distro, data.get('sys_cfg', {}), self.paths) - dsrc = dsaz.DataSourceAzure( - data.get('sys_cfg', {}), distro=distro, paths=self.paths) - if apply_network is not None: - dsrc.ds_cfg['apply_network_config'] = apply_network - - return dsrc - - def _get_and_setup(self, dsrc): - ret = dsrc.get_data() - if ret: - dsrc.setup(True) - return ret - - def xml_equals(self, oxml, nxml): - """Compare two sets of XML to make sure they are equal""" - - def create_tag_index(xml): - et = ET.fromstring(xml) - ret = {} - for x in et.iter(): - ret[x.tag] = x - return ret - - def tags_exists(x, y): - for tag in x.keys(): - assert tag in y - for tag in y.keys(): - assert tag in x - - def tags_equal(x, y): - for x_val in x.values(): - y_val = y.get(x_val.tag) - assert x_val.text == y_val.text - - old_cnt = create_tag_index(oxml) - new_cnt = create_tag_index(nxml) - tags_exists(old_cnt, new_cnt) - tags_equal(old_cnt, new_cnt) - - def xml_notequals(self, oxml, nxml): - try: - self.xml_equals(oxml, nxml) - except AssertionError: - return - raise AssertionError("XML is the same") - - def test_get_resource_disk(self): - ds = self._get_mockds() - dev = ds.get_resource_disk_on_freebsd(1) - self.assertEqual("da1", dev) - - def test_not_is_platform_viable_seed_should_return_no_datasource(self): - """Check seed_dir using _is_platform_viable and return False.""" - # Return a non-matching asset tag value - data = {} - dsrc = self._get_ds(data) - self.m_is_platform_viable.return_value = False - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ - mock.patch.object(dsrc, '_report_failure') as m_report_failure: - ret = dsrc.get_data() - self.m_is_platform_viable.assert_called_with(dsrc.seed_dir) - self.assertFalse(ret) - # Assert that for non viable platforms, - # there is no communication with the Azure datasource. 
- self.assertEqual( - 0, - m_crawl_metadata.call_count) - self.assertEqual( - 0, - m_report_failure.call_count) - - def test_platform_viable_but_no_devs_should_return_no_datasource(self): - """For platforms where the Azure platform is viable - (which is indicated by the matching asset tag), - the absence of any devs at all (devs == candidate sources - for crawling Azure datasource) is NOT expected. - Report failure to Azure as this is an unexpected fatal error. - """ - data = {} - dsrc = self._get_ds(data) - with mock.patch.object(dsrc, '_report_failure') as m_report_failure: - self.m_is_platform_viable.return_value = True - ret = dsrc.get_data() - self.m_is_platform_viable.assert_called_with(dsrc.seed_dir) - self.assertFalse(ret) - self.assertEqual( - 1, - m_report_failure.call_count) - - def test_crawl_metadata_exception_returns_no_datasource(self): - data = {} - dsrc = self._get_ds(data) - self.m_is_platform_viable.return_value = True - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: - m_crawl_metadata.side_effect = Exception - ret = dsrc.get_data() - self.m_is_platform_viable.assert_called_with(dsrc.seed_dir) - self.assertEqual( - 1, - m_crawl_metadata.call_count) - self.assertFalse(ret) - - def test_crawl_metadata_exception_should_report_failure_with_msg(self): - data = {} - dsrc = self._get_ds(data) - self.m_is_platform_viable.return_value = True - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ - mock.patch.object(dsrc, '_report_failure') as m_report_failure: - m_crawl_metadata.side_effect = Exception - dsrc.get_data() - self.assertEqual( - 1, - m_crawl_metadata.call_count) - m_report_failure.assert_called_once_with( - description=dsaz.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE) - - def test_crawl_metadata_exc_should_log_could_not_crawl_msg(self): - data = {} - dsrc = self._get_ds(data) - self.m_is_platform_viable.return_value = True - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: - m_crawl_metadata.side_effect = Exception - dsrc.get_data() - self.assertEqual( - 1, - m_crawl_metadata.call_count) - self.assertIn( - "Could not crawl Azure metadata", - self.logs.getvalue()) - - def test_basic_seed_dir(self): - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} - dsrc = self._get_ds(data) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(dsrc.userdata_raw, "") - self.assertEqual(dsrc.metadata['local-hostname'], odata['HostName']) - self.assertTrue(os.path.isfile( - os.path.join(self.waagent_d, 'ovf-env.xml'))) - self.assertEqual('azure', dsrc.cloud_name) - self.assertEqual('azure', dsrc.platform_type) - self.assertEqual( - 'seed-dir (%s/seed/azure)' % self.tmp, dsrc.subplatform) - - def test_basic_dev_file(self): - """When a device path is used, present that in subplatform.""" - data = {'sys_cfg': {}, 'dsdevs': ['/dev/cd0']} - dsrc = self._get_ds(data) - # DSAzure will attempt to mount /dev/sr0 first, which should - # fail with mount error since the list of devices doesn't have - # /dev/sr0 - with mock.patch(MOCKPATH + 'util.mount_cb') as m_mount_cb: - m_mount_cb.side_effect = [ - MountFailedError("fail"), - ({'local-hostname': 'me'}, 'ud', {'cfg': ''}, {}) - ] - self.assertTrue(dsrc.get_data()) - self.assertEqual(dsrc.userdata_raw, 'ud') - self.assertEqual(dsrc.metadata['local-hostname'], 'me') - self.assertEqual('azure', dsrc.cloud_name) - self.assertEqual('azure', dsrc.platform_type) - self.assertEqual('config-disk 
(/dev/cd0)', dsrc.subplatform) - - def test_get_data_non_ubuntu_will_not_remove_network_scripts(self): - """get_data on non-Ubuntu will not remove ubuntu net scripts.""" - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} - - dsrc = self._get_ds(data, distro='debian') - dsrc.get_data() - self.m_remove_ubuntu_network_scripts.assert_not_called() - - def test_get_data_on_ubuntu_will_remove_network_scripts(self): - """get_data will remove ubuntu net scripts on Ubuntu distro.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} - - dsrc = self._get_ds(data, distro='ubuntu') - dsrc.get_data() - self.m_remove_ubuntu_network_scripts.assert_called_once_with() - - def test_get_data_on_ubuntu_will_not_remove_network_scripts_disabled(self): - """When apply_network_config false, do not remove scripts on Ubuntu.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} - - dsrc = self._get_ds(data, distro='ubuntu') - dsrc.get_data() - self.m_remove_ubuntu_network_scripts.assert_not_called() - - def test_crawl_metadata_returns_structured_data_and_caches_nothing(self): - """Return all structured metadata and cache no class attributes.""" - yaml_cfg = "" - odata = {'HostName': "myhost", 'UserName': "myuser", - 'UserData': {'text': 'FOOBAR', 'encoding': 'plain'}, - 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} - dsrc = self._get_ds(data) - expected_cfg = { - 'PreprovisionedVMType': None, - 'PreprovisionedVm': False, - 'datasource': {'Azure': {}}, - 'system_info': {'default_user': {'name': 'myuser'}}} - expected_metadata = { - 'azure_data': { - 'configurationsettype': 'LinuxProvisioningConfiguration'}, - 'imds': NETWORK_METADATA, - 'instance-id': EXAMPLE_UUID, - 'local-hostname': 'myhost', - 'random_seed': 'wild'} - - crawled_metadata = dsrc.crawl_metadata() - - self.assertCountEqual( - crawled_metadata.keys(), - ['cfg', 'files', 'metadata', 'userdata_raw']) - self.assertEqual(crawled_metadata['cfg'], expected_cfg) - self.assertEqual( - list(crawled_metadata['files'].keys()), ['ovf-env.xml']) - self.assertIn( - b'myhost', - crawled_metadata['files']['ovf-env.xml']) - self.assertEqual(crawled_metadata['metadata'], expected_metadata) - self.assertEqual(crawled_metadata['userdata_raw'], 'FOOBAR') - self.assertEqual(dsrc.userdata_raw, None) - self.assertEqual(dsrc.metadata, {}) - self.assertEqual(dsrc._metadata_imds, UNSET) - self.assertFalse(os.path.isfile( - os.path.join(self.waagent_d, 'ovf-env.xml'))) - - def test_crawl_metadata_raises_invalid_metadata_on_error(self): - """crawl_metadata raises an exception on invalid ovf-env.xml.""" - data = {'ovfcontent': "BOGUS", 'sys_cfg': {}} - dsrc = self._get_ds(data) - error_msg = ('BrokenAzureDataSource: Invalid ovf-env.xml:' - ' syntax error: line 1, column 0') - with self.assertRaises(InvalidMetaDataException) as cm: - dsrc.crawl_metadata() - self.assertEqual(str(cm.exception), error_msg) - - def test_crawl_metadata_call_imds_once_no_reprovision(self): - """If reprovisioning, report ready at the end""" - ovfenv = construct_valid_ovf_env( - platform_settings={"PreprovisionedVm": "False"} - ) - - 
data = { - 'ovfcontent': ovfenv, - 'sys_cfg': {} - } - dsrc = self._get_ds(data) - dsrc.crawl_metadata() - self.assertEqual(1, self.m_get_metadata_from_imds.call_count) - - @mock.patch( - 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting') - @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') - @mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') - @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds') - def test_crawl_metadata_call_imds_twice_with_reprovision( - self, poll_imds_func, m_report_ready, m_write, m_dhcp - ): - """If reprovisioning, imds metadata will be fetched twice""" - ovfenv = construct_valid_ovf_env( - platform_settings={"PreprovisionedVm": "True"} - ) - - data = { - 'ovfcontent': ovfenv, - 'sys_cfg': {} - } - dsrc = self._get_ds(data) - poll_imds_func.return_value = ovfenv - dsrc.crawl_metadata() - self.assertEqual(2, self.m_get_metadata_from_imds.call_count) - - @mock.patch( - 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting') - @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') - @mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') - @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds') - def test_crawl_metadata_on_reprovision_reports_ready( - self, poll_imds_func, m_report_ready, m_write, m_dhcp - ): - """If reprovisioning, report ready at the end""" - ovfenv = construct_valid_ovf_env( - platform_settings={"PreprovisionedVm": "True"} - ) - - data = { - 'ovfcontent': ovfenv, - 'sys_cfg': {} - } - dsrc = self._get_ds(data) - poll_imds_func.return_value = ovfenv - dsrc.crawl_metadata() - self.assertEqual(1, m_report_ready.call_count) - - @mock.patch( - 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting') - @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') - @mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') - @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds') - @mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure.' - '_wait_for_all_nics_ready') - def test_crawl_metadata_waits_for_nic_on_savable_vms( - self, detect_nics, poll_imds_func, report_ready_func, m_write, m_dhcp - ): - """If reprovisioning, report ready at the end""" - ovfenv = construct_valid_ovf_env( - platform_settings={"PreprovisionedVMType": "Savable", - "PreprovisionedVm": "True"} - ) - - data = { - 'ovfcontent': ovfenv, - 'sys_cfg': {} - } - dsrc = self._get_ds(data) - poll_imds_func.return_value = ovfenv - dsrc.crawl_metadata() - self.assertEqual(1, report_ready_func.call_count) - self.assertEqual(1, detect_nics.call_count) - - @mock.patch( - 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting') - @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') - @mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') - @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds') - @mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure.' 
- '_wait_for_all_nics_ready') - @mock.patch('os.path.isfile') - def test_detect_nics_when_marker_present( - self, is_file, detect_nics, poll_imds_func, report_ready_func, m_write, - m_dhcp): - """If reprovisioning, wait for nic attach if marker present""" - - def is_file_ret(key): - return key == dsaz.REPROVISION_NIC_ATTACH_MARKER_FILE - - is_file.side_effect = is_file_ret - ovfenv = construct_valid_ovf_env() - - data = { - 'ovfcontent': ovfenv, - 'sys_cfg': {} - } - - dsrc = self._get_ds(data) - poll_imds_func.return_value = ovfenv - dsrc.crawl_metadata() - self.assertEqual(1, report_ready_func.call_count) - self.assertEqual(1, detect_nics.call_count) - - @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') - @mock.patch('cloudinit.sources.helpers.netlink.' - 'wait_for_media_disconnect_connect') - @mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') - @mock.patch('cloudinit.sources.DataSourceAzure.readurl') - def test_crawl_metadata_on_reprovision_reports_ready_using_lease( - self, m_readurl, m_report_ready, - m_media_switch, m_write - ): - """If reprovisioning, report ready using the obtained lease""" - ovfenv = construct_valid_ovf_env( - platform_settings={"PreprovisionedVm": "True"} - ) - - data = { - 'ovfcontent': ovfenv, - 'sys_cfg': {} - } - dsrc = self._get_ds(data) - - with mock.patch.object(dsrc.distro.networking, 'is_up') \ - as m_dsrc_distro_networking_is_up: - - # For this mock, net should not be up, - # so that cached ephemeral won't be used. - # This is so that a NEW ephemeral dhcp lease will be discovered - # and used instead. - m_dsrc_distro_networking_is_up.return_value = False - - lease = { - 'interface': 'eth9', 'fixed-address': '192.168.2.9', - 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', - 'unknown-245': '624c3620'} - self.m_ephemeral_dhcpv4_with_reporting.return_value \ - .__enter__.return_value = lease - m_media_switch.return_value = None - - reprovision_ovfenv = construct_valid_ovf_env() - m_readurl.return_value = url_helper.StringResponse( - reprovision_ovfenv.encode('utf-8')) - - dsrc.crawl_metadata() - self.assertEqual(2, m_report_ready.call_count) - m_report_ready.assert_called_with(lease=lease) - - def test_waagent_d_has_0700_perms(self): - # we expect /var/lib/waagent to be created 0700 - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertTrue(os.path.isdir(self.waagent_d)) - self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0o700) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - def test_network_config_set_from_imds(self, m_driver): - """Datasource.network_config returns IMDS network data.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} - expected_network_config = { - 'ethernets': { - 'eth0': {'set-name': 'eth0', - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'dhcp6': False, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}}}, - 'version': 2} - dsrc = self._get_ds(data) - dsrc.get_data() - self.assertEqual(expected_network_config, dsrc.network_config) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - def test_network_config_set_from_imds_route_metric_for_secondary_nic( - self, m_driver): - """Datasource.network_config adds route-metric to secondary nics.""" - sys_cfg = {'datasource': {'Azure': 
{'apply_network_config': True}}} - odata = {} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} - expected_network_config = { - 'ethernets': { - 'eth0': {'set-name': 'eth0', - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'dhcp6': False, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}}, - 'eth1': {'set-name': 'eth1', - 'match': {'macaddress': '22:0d:3a:04:75:98'}, - 'dhcp6': False, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 200}}, - 'eth2': {'set-name': 'eth2', - 'match': {'macaddress': '33:0d:3a:04:75:98'}, - 'dhcp6': False, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 300}}}, - 'version': 2} - imds_data = copy.deepcopy(NETWORK_METADATA) - imds_data['network']['interface'].append(SECONDARY_INTERFACE) - third_intf = copy.deepcopy(SECONDARY_INTERFACE) - third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') - third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' - third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' - imds_data['network']['interface'].append(third_intf) - - self.m_get_metadata_from_imds.return_value = imds_data - dsrc = self._get_ds(data) - dsrc.get_data() - self.assertEqual(expected_network_config, dsrc.network_config) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - def test_network_config_set_from_imds_for_secondary_nic_no_ip( - self, m_driver): - """If an IP address is empty then there should no config for it.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} - expected_network_config = { - 'ethernets': { - 'eth0': {'set-name': 'eth0', - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'dhcp6': False, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}}}, - 'version': 2} - imds_data = copy.deepcopy(NETWORK_METADATA) - imds_data['network']['interface'].append(SECONDARY_INTERFACE_NO_IP) - self.m_get_metadata_from_imds.return_value = imds_data - dsrc = self._get_ds(data) - dsrc.get_data() - self.assertEqual(expected_network_config, dsrc.network_config) - - def test_availability_zone_set_from_imds(self): - """Datasource.availability returns IMDS platformFaultDomain.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} - dsrc = self._get_ds(data) - dsrc.get_data() - self.assertEqual('0', dsrc.availability_zone) - - def test_region_set_from_imds(self): - """Datasource.region returns IMDS region location.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} - dsrc = self._get_ds(data) - dsrc.get_data() - self.assertEqual('eastus2', dsrc.region) - - def test_sys_cfg_set_never_destroy_ntfs(self): - sys_cfg = {'datasource': {'Azure': { - 'never_destroy_ntfs': 'user-supplied-value'}}} - data = {'ovfcontent': construct_valid_ovf_env(data={}), - 'sys_cfg': sys_cfg} - - dsrc = self._get_ds(data) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(dsrc.ds_cfg.get(dsaz.DS_CFG_KEY_PRESERVE_NTFS), - 'user-supplied-value') - - def test_username_used(self): - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} - - dsrc = self._get_ds(data) - ret = dsrc.get_data() - self.assertTrue(ret) - 
self.assertEqual(dsrc.cfg['system_info']['default_user']['name'], - "myuser") - - def test_password_given(self): - odata = {'HostName': "myhost", 'UserName': "myuser", - 'UserPassword': "mypass"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} - - dsrc = self._get_ds(data) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertIn('default_user', dsrc.cfg['system_info']) - defuser = dsrc.cfg['system_info']['default_user'] - - # default user should be updated username and should not be locked. - self.assertEqual(defuser['name'], odata['UserName']) - self.assertFalse(defuser['lock_passwd']) - # passwd is crypt formated string $id$salt$encrypted - # encrypting plaintext with salt value of everything up to final '$' - # should equal that after the '$' - pos = defuser['passwd'].rfind("$") + 1 - self.assertEqual(defuser['passwd'], - crypt.crypt(odata['UserPassword'], - defuser['passwd'][0:pos])) - - # the same hashed value should also be present in cfg['password'] - self.assertEqual(defuser['passwd'], dsrc.cfg['password']) - - def test_user_not_locked_if_password_redacted(self): - odata = {'HostName': "myhost", 'UserName': "myuser", - 'UserPassword': dsaz.DEF_PASSWD_REDACTION} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} - - dsrc = self._get_ds(data) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertIn('default_user', dsrc.cfg['system_info']) - defuser = dsrc.cfg['system_info']['default_user'] - - # default user should be updated username and should not be locked. - self.assertEqual(defuser['name'], odata['UserName']) - self.assertIn('lock_passwd', defuser) - self.assertFalse(defuser['lock_passwd']) - - def test_userdata_plain(self): - mydata = "FOOBAR" - odata = {'UserData': {'text': mydata, 'encoding': 'plain'}} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} - - dsrc = self._get_ds(data) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(decode_binary(dsrc.userdata_raw), mydata) - - def test_userdata_found(self): - mydata = "FOOBAR" - odata = {'UserData': {'text': b64e(mydata), 'encoding': 'base64'}} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} - - dsrc = self._get_ds(data) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(dsrc.userdata_raw, mydata.encode('utf-8')) - - def test_default_ephemeral_configs_ephemeral_exists(self): - # make sure the ephemeral configs are correct if disk present - odata = {} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} - - orig_exists = dsaz.os.path.exists - - def changed_exists(path): - return True if path == dsaz.RESOURCE_DISK_PATH else orig_exists( - path) - - with mock.patch(MOCKPATH + 'os.path.exists', new=changed_exists): - dsrc = self._get_ds(data) - ret = dsrc.get_data() - self.assertTrue(ret) - cfg = dsrc.get_config_obj() - - self.assertEqual(dsrc.device_name_to_device("ephemeral0"), - dsaz.RESOURCE_DISK_PATH) - assert 'disk_setup' in cfg - assert 'fs_setup' in cfg - self.assertIsInstance(cfg['disk_setup'], dict) - self.assertIsInstance(cfg['fs_setup'], list) - - def test_default_ephemeral_configs_ephemeral_does_not_exist(self): - # make sure the ephemeral configs are correct if disk not present - odata = {} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} - - orig_exists = dsaz.os.path.exists - - def changed_exists(path): - return False if path == dsaz.RESOURCE_DISK_PATH else orig_exists( - path) - - with mock.patch(MOCKPATH + 'os.path.exists', new=changed_exists): - dsrc = self._get_ds(data) 
- ret = dsrc.get_data() - self.assertTrue(ret) - cfg = dsrc.get_config_obj() - - assert 'disk_setup' not in cfg - assert 'fs_setup' not in cfg - - def test_provide_disk_aliases(self): - # Make sure that user can affect disk aliases - dscfg = {'disk_aliases': {'ephemeral0': '/dev/sdc'}} - odata = {'HostName': "myhost", 'UserName': "myuser", - 'dscfg': {'text': b64e(yaml.dump(dscfg)), - 'encoding': 'base64'}} - usercfg = {'disk_setup': {'/dev/sdc': {'something': '...'}, - 'ephemeral0': False}} - userdata = '#cloud-config' + yaml.dump(usercfg) + "\n" - - ovfcontent = construct_valid_ovf_env(data=odata, userdata=userdata) - data = {'ovfcontent': ovfcontent, 'sys_cfg': {}} - - dsrc = self._get_ds(data) - ret = dsrc.get_data() - self.assertTrue(ret) - cfg = dsrc.get_config_obj() - self.assertTrue(cfg) - - def test_userdata_arrives(self): - userdata = "This is my user-data" - xml = construct_valid_ovf_env(data={}, userdata=userdata) - data = {'ovfcontent': xml} - dsrc = self._get_ds(data) - dsrc.get_data() - - self.assertEqual(userdata.encode('us-ascii'), dsrc.userdata_raw) - - def test_password_redacted_in_ovf(self): - odata = {'HostName': "myhost", 'UserName': "myuser", - 'UserPassword': "mypass"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} - dsrc = self._get_ds(data) - ret = dsrc.get_data() - - self.assertTrue(ret) - ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml') - - # The XML should not be same since the user password is redacted - on_disk_ovf = load_file(ovf_env_path) - self.xml_notequals(data['ovfcontent'], on_disk_ovf) - - # Make sure that the redacted password on disk is not used by CI - self.assertNotEqual(dsrc.cfg.get('password'), - dsaz.DEF_PASSWD_REDACTION) - - # Make sure that the password was really encrypted - et = ET.fromstring(on_disk_ovf) - for elem in et.iter(): - if 'UserPassword' in elem.tag: - self.assertEqual(dsaz.DEF_PASSWD_REDACTION, elem.text) - - def test_ovf_env_arrives_in_waagent_dir(self): - xml = construct_valid_ovf_env(data={}, userdata="FOODATA") - dsrc = self._get_ds({'ovfcontent': xml}) - dsrc.get_data() - - # 'data_dir' is '/var/lib/waagent' (walinux-agent's state dir) - # we expect that the ovf-env.xml file is copied there. 
- ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml') - self.assertTrue(os.path.exists(ovf_env_path)) - self.xml_equals(xml, load_file(ovf_env_path)) - - def test_ovf_can_include_unicode(self): - xml = construct_valid_ovf_env(data={}) - xml = '\ufeff{0}'.format(xml) - dsrc = self._get_ds({'ovfcontent': xml}) - dsrc.get_data() - - def test_dsaz_report_ready_returns_true_when_report_succeeds( - self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - self.assertTrue(dsrc._report_ready(lease=mock.MagicMock())) - - def test_dsaz_report_ready_returns_false_and_does_not_propagate_exc( - self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - self.m_get_metadata_from_fabric.side_effect = Exception - self.assertFalse(dsrc._report_ready(lease=mock.MagicMock())) - - def test_dsaz_report_failure_returns_true_when_report_succeeds(self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: - # mock crawl metadata failure to cause report failure - m_crawl_metadata.side_effect = Exception - - self.assertTrue(dsrc._report_failure()) - self.assertEqual( - 1, - self.m_report_failure_to_fabric.call_count) - - def test_dsaz_report_failure_returns_false_and_does_not_propagate_exc( - self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ - mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \ - as m_ephemeral_dhcp_ctx, \ - mock.patch.object(dsrc.distro.networking, 'is_up') \ - as m_dsrc_distro_networking_is_up: - # mock crawl metadata failure to cause report failure - m_crawl_metadata.side_effect = Exception - - # setup mocks to allow using cached ephemeral dhcp lease - m_dsrc_distro_networking_is_up.return_value = True - test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245' - test_lease = {'unknown-245': test_lease_dhcp_option_245} - m_ephemeral_dhcp_ctx.lease = test_lease - - # We expect 3 calls to report_failure_to_fabric, - # because we try 3 different methods of calling report failure. - # The different methods are attempted in the following order: - # 1. Using cached ephemeral dhcp context to report failure to Azure - # 2. Using new ephemeral dhcp to report failure to Azure - # 3. 
Using fallback lease to report failure to Azure - self.m_report_failure_to_fabric.side_effect = Exception - self.assertFalse(dsrc._report_failure()) - self.assertEqual( - 3, - self.m_report_failure_to_fabric.call_count) - - def test_dsaz_report_failure_description_msg(self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: - # mock crawl metadata failure to cause report failure - m_crawl_metadata.side_effect = Exception - - test_msg = 'Test report failure description message' - self.assertTrue(dsrc._report_failure(description=test_msg)) - self.m_report_failure_to_fabric.assert_called_once_with( - dhcp_opts=mock.ANY, description=test_msg) - - def test_dsaz_report_failure_no_description_msg(self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: - m_crawl_metadata.side_effect = Exception - - self.assertTrue(dsrc._report_failure()) # no description msg - self.m_report_failure_to_fabric.assert_called_once_with( - dhcp_opts=mock.ANY, description=None) - - def test_dsaz_report_failure_uses_cached_ephemeral_dhcp_ctx_lease(self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ - mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \ - as m_ephemeral_dhcp_ctx, \ - mock.patch.object(dsrc.distro.networking, 'is_up') \ - as m_dsrc_distro_networking_is_up: - # mock crawl metadata failure to cause report failure - m_crawl_metadata.side_effect = Exception - - # setup mocks to allow using cached ephemeral dhcp lease - m_dsrc_distro_networking_is_up.return_value = True - test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245' - test_lease = {'unknown-245': test_lease_dhcp_option_245} - m_ephemeral_dhcp_ctx.lease = test_lease - - self.assertTrue(dsrc._report_failure()) - - # ensure called with cached ephemeral dhcp lease option 245 - self.m_report_failure_to_fabric.assert_called_once_with( - description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245) - - # ensure cached ephemeral is cleaned - self.assertEqual( - 1, - m_ephemeral_dhcp_ctx.clean_network.call_count) - - def test_dsaz_report_failure_no_net_uses_new_ephemeral_dhcp_lease(self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ - mock.patch.object(dsrc.distro.networking, 'is_up') \ - as m_dsrc_distro_networking_is_up: - # mock crawl metadata failure to cause report failure - m_crawl_metadata.side_effect = Exception - - # net is not up and cannot use cached ephemeral dhcp - m_dsrc_distro_networking_is_up.return_value = False - # setup ephemeral dhcp lease discovery mock - test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245' - test_lease = {'unknown-245': test_lease_dhcp_option_245} - self.m_ephemeral_dhcpv4_with_reporting.return_value \ - .__enter__.return_value = test_lease - - self.assertTrue(dsrc._report_failure()) - - # ensure called with the newly discovered - # ephemeral dhcp lease option 245 - self.m_report_failure_to_fabric.assert_called_once_with( - description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245) - - def test_dsaz_report_failure_no_net_and_no_dhcp_uses_fallback_lease( - self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ - mock.patch.object(dsrc.distro.networking, 'is_up') \ - as 
m_dsrc_distro_networking_is_up: - # mock crawl metadata failure to cause report failure - m_crawl_metadata.side_effect = Exception - - # net is not up and cannot use cached ephemeral dhcp - m_dsrc_distro_networking_is_up.return_value = False - # ephemeral dhcp discovery failure, - # so cannot use a new ephemeral dhcp - self.m_ephemeral_dhcpv4_with_reporting.return_value \ - .__enter__.side_effect = Exception - - self.assertTrue(dsrc._report_failure()) - - # ensure called with fallback lease - self.m_report_failure_to_fabric.assert_called_once_with( - description=mock.ANY, - fallback_lease_file=dsrc.dhclient_lease_file) - - def test_exception_fetching_fabric_data_doesnt_propagate(self): - """Errors communicating with fabric should warn, but return True.""" - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - self.m_get_metadata_from_fabric.side_effect = Exception - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - - def test_fabric_data_included_in_metadata(self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - self.m_get_metadata_from_fabric.return_value = {'test': 'value'} - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual('value', dsrc.metadata['test']) - - def test_instance_id_case_insensitive(self): - """Return the previous iid when current is a case-insensitive match.""" - lower_iid = EXAMPLE_UUID.lower() - upper_iid = EXAMPLE_UUID.upper() - # lowercase current UUID - ds = self._get_ds( - {'ovfcontent': construct_valid_ovf_env()}, instance_id=lower_iid - ) - # UPPERCASE previous - write_file( - os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), - upper_iid) - ds.get_data() - self.assertEqual(upper_iid, ds.metadata['instance-id']) - - # UPPERCASE current UUID - ds = self._get_ds( - {'ovfcontent': construct_valid_ovf_env()}, instance_id=upper_iid - ) - # lowercase previous - write_file( - os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), - lower_iid) - ds.get_data() - self.assertEqual(lower_iid, ds.metadata['instance-id']) - - def test_instance_id_endianness(self): - """Return the previous iid when dmi uuid is the byteswapped iid.""" - ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - # byte-swapped previous - write_file( - os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), - '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') - ds.get_data() - self.assertEqual( - '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8', ds.metadata['instance-id']) - # not byte-swapped previous - write_file( - os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), - '644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') - ds.get_data() - self.assertEqual(self.instance_id, ds.metadata['instance-id']) - - def test_instance_id_from_dmidecode_used(self): - ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - ds.get_data() - self.assertEqual(self.instance_id, ds.metadata['instance-id']) - - def test_instance_id_from_dmidecode_used_for_builtin(self): - ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - ds.get_data() - self.assertEqual(self.instance_id, ds.metadata['instance-id']) - - @mock.patch(MOCKPATH + 'util.is_FreeBSD') - @mock.patch(MOCKPATH + '_check_freebsd_cdrom') - def test_list_possible_azure_ds(self, m_check_fbsd_cdrom, - m_is_FreeBSD): - """On FreeBSD, possible devs should show /dev/cd0.""" - m_is_FreeBSD.return_value = True - m_check_fbsd_cdrom.return_value = True - possible_ds = [] - for src in dsaz.list_possible_azure_ds( - "seed_dir", "cache_dir"): - possible_ds.append(src) - self.assertEqual(possible_ds, 
["seed_dir", - dsaz.DEFAULT_PROVISIONING_ISO_DEV, - "/dev/cd0", - "cache_dir"]) - self.assertEqual( - [mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - @mock.patch('cloudinit.net.generate_fallback_config') - def test_imds_network_config(self, mock_fallback, m_driver): - """Network config is generated from IMDS network data when present.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} - - dsrc = self._get_ds(data) - ret = dsrc.get_data() - self.assertTrue(ret) - - expected_cfg = { - 'ethernets': { - 'eth0': {'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': False, - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'set-name': 'eth0'}}, - 'version': 2} - - self.assertEqual(expected_cfg, dsrc.network_config) - mock_fallback.assert_not_called() - - @mock.patch('cloudinit.net.get_interface_mac') - @mock.patch('cloudinit.net.get_devicelist') - @mock.patch('cloudinit.net.device_driver') - @mock.patch('cloudinit.net.generate_fallback_config') - def test_imds_network_ignored_when_apply_network_config_false( - self, mock_fallback, mock_dd, mock_devlist, mock_get_mac): - """When apply_network_config is False, use fallback instead of IMDS.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} - fallback_config = { - 'version': 1, - 'config': [{ - 'type': 'physical', 'name': 'eth0', - 'mac_address': '00:11:22:33:44:55', - 'params': {'driver': 'hv_netsvc'}, - 'subnets': [{'type': 'dhcp'}], - }] - } - mock_fallback.return_value = fallback_config - - mock_devlist.return_value = ['eth0'] - mock_dd.return_value = ['hv_netsvc'] - mock_get_mac.return_value = '00:11:22:33:44:55' - - dsrc = self._get_ds(data) - self.assertTrue(dsrc.get_data()) - self.assertEqual(dsrc.network_config, fallback_config) - - @mock.patch('cloudinit.net.get_interface_mac') - @mock.patch('cloudinit.net.get_devicelist') - @mock.patch('cloudinit.net.device_driver') - @mock.patch('cloudinit.net.generate_fallback_config', autospec=True) - def test_fallback_network_config(self, mock_fallback, mock_dd, - mock_devlist, mock_get_mac): - """On absent IMDS network data, generate network fallback config.""" - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} - - fallback_config = { - 'version': 1, - 'config': [{ - 'type': 'physical', 'name': 'eth0', - 'mac_address': '00:11:22:33:44:55', - 'params': {'driver': 'hv_netsvc'}, - 'subnets': [{'type': 'dhcp'}], - }] - } - mock_fallback.return_value = fallback_config - - mock_devlist.return_value = ['eth0'] - mock_dd.return_value = ['hv_netsvc'] - mock_get_mac.return_value = '00:11:22:33:44:55' - - dsrc = self._get_ds(data) - # Represent empty response from network imds - self.m_get_metadata_from_imds.return_value = {} - ret = dsrc.get_data() - self.assertTrue(ret) - - netconfig = dsrc.network_config - self.assertEqual(netconfig, fallback_config) - mock_fallback.assert_called_with( - blacklist_drivers=['mlx4_core', 'mlx5_core'], - config_driver=True) - - @mock.patch(MOCKPATH + 'net.get_interfaces', autospec=True) - def test_blacklist_through_distro( - self, m_net_get_interfaces): - """Verify 
Azure DS updates blacklist drivers in the distro's - networking object.""" - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} - - distro_cls = distros.fetch('ubuntu') - distro = distro_cls('ubuntu', {}, self.paths) - dsrc = self._get_ds(data, distro=distro) - dsrc.get_data() - self.assertEqual(distro.networking.blacklist_drivers, - dsaz.BLACKLIST_DRIVERS) - - distro.networking.get_interfaces_by_mac() - m_net_get_interfaces.assert_called_with( - blacklist_drivers=dsaz.BLACKLIST_DRIVERS) - - @mock.patch(MOCKPATH + 'subp.subp', autospec=True) - def test_get_hostname_with_no_args(self, m_subp): - dsaz.get_hostname() - m_subp.assert_called_once_with(("hostname",), capture=True) - - @mock.patch(MOCKPATH + 'subp.subp', autospec=True) - def test_get_hostname_with_string_arg(self, m_subp): - dsaz.get_hostname(hostname_command="hostname") - m_subp.assert_called_once_with(("hostname",), capture=True) - - @mock.patch(MOCKPATH + 'subp.subp', autospec=True) - def test_get_hostname_with_iterable_arg(self, m_subp): - dsaz.get_hostname(hostname_command=("hostname",)) - m_subp.assert_called_once_with(("hostname",), capture=True) - - @mock.patch( - 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates') - def test_get_public_ssh_keys_with_imds(self, m_parse_certificates): - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = { - 'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg - } - dsrc = self._get_ds(data) - dsrc.get_data() - dsrc.setup(True) - ssh_keys = dsrc.get_public_ssh_keys() - self.assertEqual(ssh_keys, ["ssh-rsa key1"]) - self.assertEqual(m_parse_certificates.call_count, 0) - - def test_key_without_crlf_valid(self): - test_key = 'ssh-rsa somerandomkeystuff some comment' - assert True is dsaz._key_is_openssh_formatted(test_key) - - def test_key_with_crlf_invalid(self): - test_key = 'ssh-rsa someran\r\ndomkeystuff some comment' - assert False is dsaz._key_is_openssh_formatted(test_key) - - def test_key_endswith_crlf_valid(self): - test_key = 'ssh-rsa somerandomkeystuff some comment\r\n' - assert True is dsaz._key_is_openssh_formatted(test_key) - - @mock.patch( - 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates') - @mock.patch(MOCKPATH + 'get_metadata_from_imds') - def test_get_public_ssh_keys_with_no_openssh_format( - self, - m_get_metadata_from_imds, - m_parse_certificates): - imds_data = copy.deepcopy(NETWORK_METADATA) - imds_data['compute']['publicKeys'][0]['keyData'] = 'no-openssh-format' - m_get_metadata_from_imds.return_value = imds_data - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = { - 'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg - } - dsrc = self._get_ds(data) - dsrc.get_data() - dsrc.setup(True) - ssh_keys = dsrc.get_public_ssh_keys() - self.assertEqual(ssh_keys, []) - self.assertEqual(m_parse_certificates.call_count, 0) - - @mock.patch(MOCKPATH + 'get_metadata_from_imds') - def test_get_public_ssh_keys_without_imds( - self, - m_get_metadata_from_imds): - m_get_metadata_from_imds.return_value = dict() - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = { - 'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg - } - dsrc = self._get_ds(data) - 
dsaz.get_metadata_from_fabric.return_value = {'public-keys': ['key2']} - dsrc.get_data() - dsrc.setup(True) - ssh_keys = dsrc.get_public_ssh_keys() - self.assertEqual(ssh_keys, ['key2']) - - @mock.patch(MOCKPATH + 'get_metadata_from_imds') - def test_imds_api_version_wanted_nonexistent( - self, - m_get_metadata_from_imds): - def get_metadata_from_imds_side_eff(*args, **kwargs): - if kwargs['api_version'] == dsaz.IMDS_VER_WANT: - raise url_helper.UrlError("No IMDS version", code=400) - return NETWORK_METADATA - m_get_metadata_from_imds.side_effect = get_metadata_from_imds_side_eff - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = { - 'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg - } - dsrc = self._get_ds(data) - dsrc.get_data() - self.assertIsNotNone(dsrc.metadata) - self.assertTrue(dsrc.failed_desired_api_version) - - @mock.patch( - MOCKPATH + 'get_metadata_from_imds', return_value=NETWORK_METADATA) - def test_imds_api_version_wanted_exists(self, m_get_metadata_from_imds): - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = { - 'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg - } - dsrc = self._get_ds(data) - dsrc.get_data() - self.assertIsNotNone(dsrc.metadata) - self.assertFalse(dsrc.failed_desired_api_version) - - @mock.patch(MOCKPATH + 'get_metadata_from_imds') - def test_hostname_from_imds(self, m_get_metadata_from_imds): - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = { - 'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg - } - imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA) - imds_data_with_os_profile["compute"]["osProfile"] = dict( - adminUsername="username1", - computerName="hostname1", - disablePasswordAuthentication="true" - ) - m_get_metadata_from_imds.return_value = imds_data_with_os_profile - dsrc = self._get_ds(data) - dsrc.get_data() - self.assertEqual(dsrc.metadata["local-hostname"], "hostname1") - - @mock.patch(MOCKPATH + 'get_metadata_from_imds') - def test_username_from_imds(self, m_get_metadata_from_imds): - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = { - 'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg - } - imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA) - imds_data_with_os_profile["compute"]["osProfile"] = dict( - adminUsername="username1", - computerName="hostname1", - disablePasswordAuthentication="true" - ) - m_get_metadata_from_imds.return_value = imds_data_with_os_profile - dsrc = self._get_ds(data) - dsrc.get_data() - self.assertEqual( - dsrc.cfg["system_info"]["default_user"]["name"], - "username1" - ) - - @mock.patch(MOCKPATH + 'get_metadata_from_imds') - def test_disable_password_from_imds(self, m_get_metadata_from_imds): - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = { - 'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg - } - imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA) - imds_data_with_os_profile["compute"]["osProfile"] = dict( - adminUsername="username1", - computerName="hostname1", - disablePasswordAuthentication="true" - ) - m_get_metadata_from_imds.return_value = 
imds_data_with_os_profile - dsrc = self._get_ds(data) - dsrc.get_data() - self.assertTrue(dsrc.metadata["disable_password"]) - - @mock.patch(MOCKPATH + 'get_metadata_from_imds') - def test_userdata_from_imds(self, m_get_metadata_from_imds): - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = { - 'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg - } - userdata = "userdataImds" - imds_data = copy.deepcopy(NETWORK_METADATA) - imds_data["compute"]["osProfile"] = dict( - adminUsername="username1", - computerName="hostname1", - disablePasswordAuthentication="true", - ) - imds_data["compute"]["userData"] = b64e(userdata) - m_get_metadata_from_imds.return_value = imds_data - dsrc = self._get_ds(data) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(dsrc.userdata_raw, userdata.encode('utf-8')) - - @mock.patch(MOCKPATH + 'get_metadata_from_imds') - def test_userdata_from_imds_with_customdata_from_OVF( - self, m_get_metadata_from_imds): - userdataOVF = "userdataOVF" - odata = { - 'HostName': "myhost", 'UserName': "myuser", - 'UserData': {'text': b64e(userdataOVF), 'encoding': 'base64'} - } - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - data = { - 'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg - } - - userdataImds = "userdataImds" - imds_data = copy.deepcopy(NETWORK_METADATA) - imds_data["compute"]["osProfile"] = dict( - adminUsername="username1", - computerName="hostname1", - disablePasswordAuthentication="true", - ) - imds_data["compute"]["userData"] = b64e(userdataImds) - m_get_metadata_from_imds.return_value = imds_data - dsrc = self._get_ds(data) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(dsrc.userdata_raw, userdataOVF.encode('utf-8')) - - -class TestAzureBounce(CiTestCase): - - with_logs = True - - def mock_out_azure_moving_parts(self): - - def _load_possible_azure_ds(seed_dir, cache_dir): - yield seed_dir - yield dsaz.DEFAULT_PROVISIONING_ISO_DEV - if cache_dir: - yield cache_dir - - self.patches.enter_context( - mock.patch.object(dsaz.util, 'wait_for_files')) - self.patches.enter_context( - mock.patch.object( - dsaz, 'list_possible_azure_ds', - mock.MagicMock(side_effect=_load_possible_azure_ds))) - self.patches.enter_context( - mock.patch.object(dsaz, 'get_metadata_from_fabric', - mock.MagicMock(return_value={}))) - self.patches.enter_context( - mock.patch.object(dsaz, 'get_metadata_from_imds', - mock.MagicMock(return_value={}))) - self.patches.enter_context( - mock.patch.object(dsaz.subp, 'which', lambda x: True)) - self.patches.enter_context(mock.patch.object( - dsaz, '_get_random_seed', return_value='wild')) - - def _dmi_mocks(key): - if key == 'system-uuid': - return 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' - elif key == 'chassis-asset-tag': - return '7783-7084-3265-9085-8269-3286-77' - raise RuntimeError('should not get here') - - self.patches.enter_context( - mock.patch.object(dsaz.dmi, 'read_dmi_data', - mock.MagicMock(side_effect=_dmi_mocks))) - - def setUp(self): - super(TestAzureBounce, self).setUp() - self.tmp = self.tmp_dir() - self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') - self.paths = helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp}) - dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d - self.patches = ExitStack() - self.mock_out_azure_moving_parts() - self.get_hostname = self.patches.enter_context( - mock.patch.object(dsaz, 'get_hostname')) - 
self.set_hostname = self.patches.enter_context( - mock.patch.object(dsaz, 'set_hostname')) - self.subp = self.patches.enter_context( - mock.patch(MOCKPATH + 'subp.subp')) - self.find_fallback_nic = self.patches.enter_context( - mock.patch('cloudinit.net.find_fallback_nic', return_value='eth9')) - - def tearDown(self): - self.patches.close() - super(TestAzureBounce, self).tearDown() - - def _get_ds(self, ovfcontent=None): - if ovfcontent is not None: - populate_dir(os.path.join(self.paths.seed_dir, "azure"), - {'ovf-env.xml': ovfcontent}) - dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) - return dsrc - - def _get_and_setup(self, dsrc): - ret = dsrc.get_data() - if ret: - dsrc.setup(True) - return ret - - def get_ovf_env_with_dscfg(self, hostname, cfg): - odata = { - 'HostName': hostname, - 'dscfg': { - 'text': b64e(yaml.dump(cfg)), - 'encoding': 'base64' - } - } - return construct_valid_ovf_env(data=odata) - - def test_disabled_bounce_does_not_change_hostname(self): - cfg = {'hostname_bounce': {'policy': 'off'}} - ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)) - ds.get_data() - self.assertEqual(0, self.set_hostname.call_count) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_disabled_bounce_does_not_perform_bounce( - self, perform_hostname_bounce): - cfg = {'hostname_bounce': {'policy': 'off'}} - ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)) - ds.get_data() - self.assertEqual(0, perform_hostname_bounce.call_count) - - def test_same_hostname_does_not_change_hostname(self): - host_name = 'unchanged-host-name' - self.get_hostname.return_value = host_name - cfg = {'hostname_bounce': {'policy': 'yes'}} - ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) - ds.get_data() - self.assertEqual(0, self.set_hostname.call_count) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_unchanged_hostname_does_not_perform_bounce( - self, perform_hostname_bounce): - host_name = 'unchanged-host-name' - self.get_hostname.return_value = host_name - cfg = {'hostname_bounce': {'policy': 'yes'}} - ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) - ds.get_data() - self.assertEqual(0, perform_hostname_bounce.call_count) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_force_performs_bounce_regardless(self, perform_hostname_bounce): - host_name = 'unchanged-host-name' - self.get_hostname.return_value = host_name - cfg = {'hostname_bounce': {'policy': 'force'}} - dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(1, perform_hostname_bounce.call_count) - - def test_bounce_skipped_on_ifupdown_absent(self): - host_name = 'unchanged-host-name' - self.get_hostname.return_value = host_name - cfg = {'hostname_bounce': {'policy': 'force'}} - dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) - patch_path = MOCKPATH + 'subp.which' - with mock.patch(patch_path) as m_which: - m_which.return_value = None - ret = self._get_and_setup(dsrc) - self.assertEqual([mock.call('ifup')], m_which.call_args_list) - self.assertTrue(ret) - self.assertIn( - "Skipping network bounce: ifupdown utils aren't present.", - self.logs.getvalue()) - - def test_different_hostnames_sets_hostname(self): - expected_hostname = 'azure-expected-host-name' - self.get_hostname.return_value = 'default-host-name' - dsrc = self._get_ds( - self.get_ovf_env_with_dscfg(expected_hostname, {})) - ret = self._get_and_setup(dsrc) - 
self.assertTrue(ret) - self.assertEqual(expected_hostname, - self.set_hostname.call_args_list[0][0][0]) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_different_hostnames_performs_bounce( - self, perform_hostname_bounce): - expected_hostname = 'azure-expected-host-name' - self.get_hostname.return_value = 'default-host-name' - dsrc = self._get_ds( - self.get_ovf_env_with_dscfg(expected_hostname, {})) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(1, perform_hostname_bounce.call_count) - - def test_different_hostnames_sets_hostname_back(self): - initial_host_name = 'default-host-name' - self.get_hostname.return_value = initial_host_name - dsrc = self._get_ds( - self.get_ovf_env_with_dscfg('some-host-name', {})) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(initial_host_name, - self.set_hostname.call_args_list[-1][0][0]) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_failure_in_bounce_still_resets_host_name( - self, perform_hostname_bounce): - perform_hostname_bounce.side_effect = Exception - initial_host_name = 'default-host-name' - self.get_hostname.return_value = initial_host_name - dsrc = self._get_ds( - self.get_ovf_env_with_dscfg('some-host-name', {})) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(initial_host_name, - self.set_hostname.call_args_list[-1][0][0]) - - @mock.patch.object(dsaz, 'get_boot_telemetry') - def test_environment_correct_for_bounce_command( - self, mock_get_boot_telemetry): - interface = 'int0' - hostname = 'my-new-host' - old_hostname = 'my-old-host' - self.get_hostname.return_value = old_hostname - cfg = {'hostname_bounce': {'interface': interface, 'policy': 'force'}} - data = self.get_ovf_env_with_dscfg(hostname, cfg) - dsrc = self._get_ds(data) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(1, self.subp.call_count) - bounce_env = self.subp.call_args[1]['env'] - self.assertEqual(interface, bounce_env['interface']) - self.assertEqual(hostname, bounce_env['hostname']) - self.assertEqual(old_hostname, bounce_env['old_hostname']) - - @mock.patch.object(dsaz, 'get_boot_telemetry') - def test_default_bounce_command_ifup_used_by_default( - self, mock_get_boot_telemetry): - cfg = {'hostname_bounce': {'policy': 'force'}} - data = self.get_ovf_env_with_dscfg('some-hostname', cfg) - dsrc = self._get_ds(data) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(1, self.subp.call_count) - bounce_args = self.subp.call_args[1]['args'] - self.assertEqual( - dsaz.BOUNCE_COMMAND_IFUP, bounce_args) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_set_hostname_option_can_disable_bounce( - self, perform_hostname_bounce): - cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}} - data = self.get_ovf_env_with_dscfg('some-hostname', cfg) - self._get_ds(data).get_data() - - self.assertEqual(0, perform_hostname_bounce.call_count) - - def test_set_hostname_option_can_disable_hostname_set(self): - cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}} - data = self.get_ovf_env_with_dscfg('some-hostname', cfg) - self._get_ds(data).get_data() - - self.assertEqual(0, self.set_hostname.call_count) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_set_hostname_failed_disable_bounce( - self, perform_hostname_bounce): - cfg = {'set_hostname': True, 'hostname_bounce': {'policy': 'force'}} - self.get_hostname.return_value = "old-hostname" - 
self.set_hostname.side_effect = Exception
-        data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
-        self._get_ds(data).get_data()
-
-        self.assertEqual(0, perform_hostname_bounce.call_count)
-
-
-class TestLoadAzureDsDir(CiTestCase):
-    """Tests for load_azure_ds_dir."""
-
-    def setUp(self):
-        self.source_dir = self.tmp_dir()
-        super(TestLoadAzureDsDir, self).setUp()
-
-    def test_missing_ovf_env_xml_raises_non_azure_datasource_error(self):
-        """load_azure_ds_dir raises an error when ovf-env.xml doesn't exist."""
-        with self.assertRaises(dsaz.NonAzureDataSource) as context_manager:
-            dsaz.load_azure_ds_dir(self.source_dir)
-        self.assertEqual(
-            'No ovf-env file found',
-            str(context_manager.exception))
-
-    def test_wb_invalid_ovf_env_xml_calls_read_azure_ovf(self):
-        """load_azure_ds_dir calls read_azure_ovf to parse the xml."""
-        ovf_path = os.path.join(self.source_dir, 'ovf-env.xml')
-        with open(ovf_path, 'wb') as stream:
-            stream.write(b'invalid xml')
-        with self.assertRaises(dsaz.BrokenAzureDataSource) as context_manager:
-            dsaz.load_azure_ds_dir(self.source_dir)
-        self.assertEqual(
-            'Invalid ovf-env.xml: syntax error: line 1, column 0',
-            str(context_manager.exception))
-
-
-class TestReadAzureOvf(CiTestCase):
-
-    def test_invalid_xml_raises_non_azure_ds(self):
-        invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
-        self.assertRaises(dsaz.BrokenAzureDataSource,
-                          dsaz.read_azure_ovf, invalid_xml)
-
-    def test_load_with_pubkeys(self):
-        mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
-        pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
-        content = construct_valid_ovf_env(pubkeys=pubkeys)
-        (_md, _ud, cfg) = dsaz.read_azure_ovf(content)
-        for mypk in mypklist:
-            self.assertIn(mypk, cfg['_pubkeys'])
-
-
-class TestCanDevBeReformatted(CiTestCase):
-    warning_file = 'dataloss_warning_readme.txt'
-
-    def _domock(self, mockpath, sattr=None):
-        patcher = mock.patch(mockpath)
-        setattr(self, sattr, patcher.start())
-        self.addCleanup(patcher.stop)
-
-    def patchup(self, devs):
-        bypath = {}
-        for path, data in devs.items():
-            bypath[path] = data
-            if 'realpath' in data:
-                bypath[data['realpath']] = data
-            for ppath, pdata in data.get('partitions', {}).items():
-                bypath[ppath] = pdata
-                if 'realpath' in data:
-                    bypath[pdata['realpath']] = pdata
-
-        def realpath(d):
-            return bypath[d].get('realpath', d)
-
-        def partitions_on_device(devpath):
-            parts = bypath.get(devpath, {}).get('partitions', {})
-            ret = []
-            for path, data in parts.items():
-                ret.append((data.get('num'), realpath(path)))
-            # return sorted by partition number
-            return sorted(ret, key=lambda d: d[0])
-
-        def mount_cb(device, callback, mtype, update_env_for_mount):
-            self.assertEqual('ntfs', mtype)
-            self.assertEqual('C', update_env_for_mount.get('LANG'))
-            p = self.tmp_dir()
-            for f in bypath.get(device).get('files', []):
-                write_file(os.path.join(p, f), content=f)
-            return callback(p)
-
-        def has_ntfs_fs(device):
-            return bypath.get(device, {}).get('fs') == 'ntfs'
-
-        p = MOCKPATH
-        self._domock(p + "_partitions_on_device", 'm_partitions_on_device')
-        self._domock(p + "_has_ntfs_filesystem", 'm_has_ntfs_filesystem')
-        self._domock(p + "util.mount_cb", 'm_mount_cb')
-        self._domock(p + "os.path.realpath", 'm_realpath')
-        self._domock(p + "os.path.exists", 'm_exists')
-        self._domock(p + "util.SeLinuxGuard", 'm_selguard')
-
-        self.m_exists.side_effect = lambda p: p in bypath
-        self.m_realpath.side_effect = realpath
-        self.m_has_ntfs_filesystem.side_effect = has_ntfs_fs
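-        # Route the patched helpers through the fake 'bypath' device tree
-        # built above, so can_dev_be_reformatted() sees one consistent view
-        # of partitions, filesystems and mount attempts.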
-        self.m_mount_cb.side_effect = mount_cb
-        self.m_partitions_on_device.side_effect = partitions_on_device
-        self.m_selguard.__enter__ = mock.Mock(return_value=False)
-        self.m_selguard.__exit__ = mock.Mock()
-
-    def test_three_partitions_is_false(self):
-        """A disk with 3 partitions can not be formatted."""
-        self.patchup({
-            '/dev/sda': {
-                'partitions': {
-                    '/dev/sda1': {'num': 1},
-                    '/dev/sda2': {'num': 2},
-                    '/dev/sda3': {'num': 3},
-                }}})
-        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-                                                 preserve_ntfs=False)
-        self.assertFalse(value)
-        self.assertIn("3 or more", msg.lower())
-
-    def test_no_partitions_is_false(self):
-        """A disk with no partitions can not be formatted."""
-        self.patchup({'/dev/sda': {}})
-        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-                                                 preserve_ntfs=False)
-        self.assertFalse(value)
-        self.assertIn("not partitioned", msg.lower())
-
-    def test_two_partitions_not_ntfs_false(self):
-        """2 partitions and 2nd not ntfs can not be formatted."""
-        self.patchup({
-            '/dev/sda': {
-                'partitions': {
-                    '/dev/sda1': {'num': 1},
-                    '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []},
-                }}})
-        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-                                                 preserve_ntfs=False)
-        self.assertFalse(value)
-        self.assertIn("not ntfs", msg.lower())
-
-    def test_two_partitions_ntfs_populated_false(self):
-        """2 partitions and populated ntfs fs on 2nd can not be formatted."""
-        self.patchup({
-            '/dev/sda': {
-                'partitions': {
-                    '/dev/sda1': {'num': 1},
-                    '/dev/sda2': {'num': 2, 'fs': 'ntfs',
-                                  'files': ['secret.txt']},
-                }}})
-        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-                                                 preserve_ntfs=False)
-        self.assertFalse(value)
-        self.assertIn("files on it", msg.lower())
-
-    def test_two_partitions_ntfs_empty_is_true(self):
-        """2 partitions and empty ntfs fs on 2nd can be formatted."""
-        self.patchup({
-            '/dev/sda': {
-                'partitions': {
-                    '/dev/sda1': {'num': 1},
-                    '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': []},
-                }}})
-        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-                                                 preserve_ntfs=False)
-        self.assertTrue(value)
-        self.assertIn("safe for", msg.lower())
-
-    def test_one_partition_not_ntfs_false(self):
-        """1 partition with fs other than ntfs can not be formatted."""
-        self.patchup({
-            '/dev/sda': {
-                'partitions': {
-                    '/dev/sda1': {'num': 1, 'fs': 'zfs'},
-                }}})
-        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-                                                 preserve_ntfs=False)
-        self.assertFalse(value)
-        self.assertIn("not ntfs", msg.lower())
-
-    def test_one_partition_ntfs_populated_false(self):
-        """1 mountable ntfs partition with many files can not be formatted."""
-        self.patchup({
-            '/dev/sda': {
-                'partitions': {
-                    '/dev/sda1': {'num': 1, 'fs': 'ntfs',
-                                  'files': ['file1.txt', 'file2.exe']},
-                }}})
-        with mock.patch.object(dsaz.LOG, 'warning') as warning:
-            value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-                                                     preserve_ntfs=False)
-            wmsg = warning.call_args[0][0]
-            self.assertIn("looks like you're using NTFS on the ephemeral disk",
-                          wmsg)
-            self.assertFalse(value)
-            self.assertIn("files on it", msg.lower())
-
-    def test_one_partition_ntfs_empty_is_true(self):
-        """1 mountable ntfs partition and no files can be formatted."""
-        self.patchup({
-            '/dev/sda': {
-                'partitions': {
-                    '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []}
-                }}})
-        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-                                                 preserve_ntfs=False)
-        self.assertTrue(value)
-        self.assertIn("safe for", msg.lower())
-
-    def test_one_partition_ntfs_empty_with_dataloss_file_is_true(self):
-        """1 mountable ntfs partition and only warn file can be
formatted.""" - self.patchup({ - '/dev/sda': { - 'partitions': { - '/dev/sda1': {'num': 1, 'fs': 'ntfs', - 'files': ['dataloss_warning_readme.txt']} - }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda", - preserve_ntfs=False) - self.assertTrue(value) - self.assertIn("safe for", msg.lower()) - - def test_one_partition_through_realpath_is_true(self): - """A symlink to a device with 1 ntfs partition can be formatted.""" - epath = '/dev/disk/cloud/azure_resource' - self.patchup({ - epath: { - 'realpath': '/dev/sdb', - 'partitions': { - epath + '-part1': { - 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], - 'realpath': '/dev/sdb1'} - }}}) - value, msg = dsaz.can_dev_be_reformatted(epath, - preserve_ntfs=False) - self.assertTrue(value) - self.assertIn("safe for", msg.lower()) - - def test_three_partition_through_realpath_is_false(self): - """A symlink to a device with 3 partitions can not be formatted.""" - epath = '/dev/disk/cloud/azure_resource' - self.patchup({ - epath: { - 'realpath': '/dev/sdb', - 'partitions': { - epath + '-part1': { - 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], - 'realpath': '/dev/sdb1'}, - epath + '-part2': {'num': 2, 'fs': 'ext3', - 'realpath': '/dev/sdb2'}, - epath + '-part3': {'num': 3, 'fs': 'ext', - 'realpath': '/dev/sdb3'} - }}}) - value, msg = dsaz.can_dev_be_reformatted(epath, - preserve_ntfs=False) - self.assertFalse(value) - self.assertIn("3 or more", msg.lower()) - - def test_ntfs_mount_errors_true(self): - """can_dev_be_reformatted does not fail if NTFS is unknown fstype.""" - self.patchup({ - '/dev/sda': { - 'partitions': { - '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []} - }}}) - - error_msgs = [ - "Stderr: mount: unknown filesystem type 'ntfs'", # RHEL - "Stderr: mount: /dev/sdb1: unknown filesystem type 'ntfs'" # SLES - ] - - for err_msg in error_msgs: - self.m_mount_cb.side_effect = MountFailedError( - "Failed mounting %s to %s due to: \nUnexpected.\n%s" % - ('/dev/sda', '/fake-tmp/dir', err_msg)) - - value, msg = dsaz.can_dev_be_reformatted('/dev/sda', - preserve_ntfs=False) - self.assertTrue(value) - self.assertIn('cannot mount NTFS, assuming', msg) - - def test_never_destroy_ntfs_config_false(self): - """Normally formattable situation with never_destroy_ntfs set.""" - self.patchup({ - '/dev/sda': { - 'partitions': { - '/dev/sda1': {'num': 1, 'fs': 'ntfs', - 'files': ['dataloss_warning_readme.txt']} - }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda", - preserve_ntfs=True) - self.assertFalse(value) - self.assertIn("config says to never destroy NTFS " - "(datasource.Azure.never_destroy_ntfs)", msg) - - -class TestClearCachedData(CiTestCase): - - def test_clear_cached_attrs_clears_imds(self): - """All class attributes are reset to defaults, including imds data.""" - tmp = self.tmp_dir() - paths = helpers.Paths( - {'cloud_dir': tmp, 'run_dir': tmp}) - dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=paths) - clean_values = [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds] - dsrc.metadata = 'md' - dsrc.userdata = 'ud' - dsrc._metadata_imds = 'imds' - dsrc._dirty_cache = True - dsrc.clear_cached_attrs() - self.assertEqual( - [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds], - clean_values) - - -class TestAzureNetExists(CiTestCase): - - def test_azure_net_must_exist_for_legacy_objpkl(self): - """DataSourceAzureNet must exist for old obj.pkl files - that reference it.""" - self.assertTrue(hasattr(dsaz, "DataSourceAzureNet")) - - -class TestPreprovisioningReadAzureOvfFlag(CiTestCase): - - def 
test_read_azure_ovf_with_true_flag(self):
-        """The read_azure_ovf method should set the PreprovisionedVM
-        cfg flag if the proper setting is present."""
-        content = construct_valid_ovf_env(
-            platform_settings={"PreprovisionedVm": "True"})
-        ret = dsaz.read_azure_ovf(content)
-        cfg = ret[2]
-        self.assertTrue(cfg['PreprovisionedVm'])
-
-    def test_read_azure_ovf_with_false_flag(self):
-        """The read_azure_ovf method should set the PreprovisionedVM
-        cfg flag to false if the proper setting is false."""
-        content = construct_valid_ovf_env(
-            platform_settings={"PreprovisionedVm": "False"})
-        ret = dsaz.read_azure_ovf(content)
-        cfg = ret[2]
-        self.assertFalse(cfg['PreprovisionedVm'])
-
-    def test_read_azure_ovf_without_flag(self):
-        """The read_azure_ovf method should not set the
-        PreprovisionedVM cfg flag."""
-        content = construct_valid_ovf_env()
-        ret = dsaz.read_azure_ovf(content)
-        cfg = ret[2]
-        self.assertFalse(cfg['PreprovisionedVm'])
-        self.assertEqual(None, cfg["PreprovisionedVMType"])
-
-    def test_read_azure_ovf_with_running_type(self):
-        """The read_azure_ovf method should set PreprovisionedVMType
-        cfg flag to Running."""
-        content = construct_valid_ovf_env(
-            platform_settings={"PreprovisionedVMType": "Running",
-                               "PreprovisionedVm": "True"})
-        ret = dsaz.read_azure_ovf(content)
-        cfg = ret[2]
-        self.assertTrue(cfg['PreprovisionedVm'])
-        self.assertEqual("Running", cfg['PreprovisionedVMType'])
-
-    def test_read_azure_ovf_with_savable_type(self):
-        """The read_azure_ovf method should set PreprovisionedVMType
-        cfg flag to Savable."""
-        content = construct_valid_ovf_env(
-            platform_settings={"PreprovisionedVMType": "Savable",
-                               "PreprovisionedVm": "True"})
-        ret = dsaz.read_azure_ovf(content)
-        cfg = ret[2]
-        self.assertTrue(cfg['PreprovisionedVm'])
-        self.assertEqual("Savable", cfg['PreprovisionedVMType'])
-
-
-@mock.patch('os.path.isfile')
-class TestPreprovisioningShouldReprovision(CiTestCase):
-
-    def setUp(self):
-        super(TestPreprovisioningShouldReprovision, self).setUp()
-        tmp = self.tmp_dir()
-        self.waagent_d = self.tmp_path('/var/lib/waagent', tmp)
-        self.paths = helpers.Paths({'cloud_dir': tmp})
-        dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-
-    @mock.patch(MOCKPATH + 'util.write_file')
-    def test__should_reprovision_with_true_cfg(self, isfile, write_f):
-        """The _should_reprovision method should return True with the config
-        flag present."""
-        isfile.return_value = False
-        dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
-        self.assertTrue(dsa._should_reprovision(
-            (None, None, {'PreprovisionedVm': True}, None)))
-
-    def test__should_reprovision_with_file_existing(self, isfile):
-        """The _should_reprovision method should return True if the sentinel
-        exists."""
-        isfile.return_value = True
-        dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
-        self.assertTrue(dsa._should_reprovision(
-            (None, None, {'preprovisionedvm': False}, None)))
-
-    def test__should_reprovision_returns_false(self, isfile):
-        """The _should_reprovision method should return False
-        if config and sentinel are not present."""
-        isfile.return_value = False
-        dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
-        self.assertFalse(dsa._should_reprovision((None, None, {}, None)))
-
-    @mock.patch(MOCKPATH + 'util.write_file', autospec=True)
-    def test__should_reprovision_uses_imds_md(self, write_file, isfile):
-        """The _should_reprovision method should be able to
-        retrieve the preprovisioning VM type from imds metadata"""
-        isfile.return_value = False
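-        # _should_reprovision() also accepts optional IMDS metadata, as the
-        # calls below exercise: a ppsType of 'Running' under extended/compute
-        # marks the VM preprovisioned even with no flag in the OVF cfg.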
- dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) - self.assertTrue(dsa._should_reprovision( - (None, None, {}, None), - {'extended': {'compute': {'ppsType': 'Running'}}})) - self.assertFalse(dsa._should_reprovision( - (None, None, {}, None), - {})) - self.assertFalse(dsa._should_reprovision( - (None, None, {}, None), - {'extended': {'compute': {"hasCustomData": False}}})) - - @mock.patch(MOCKPATH + 'DataSourceAzure._poll_imds') - def test_reprovision_calls__poll_imds(self, _poll_imds, isfile): - """_reprovision will poll IMDS.""" - isfile.return_value = False - hostname = "myhost" - username = "myuser" - odata = {'HostName': hostname, 'UserName': username} - _poll_imds.return_value = construct_valid_ovf_env(data=odata) - dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) - dsa._reprovision() - _poll_imds.assert_called_with() - - -class TestPreprovisioningHotAttachNics(CiTestCase): - - def setUp(self): - super(TestPreprovisioningHotAttachNics, self).setUp() - self.tmp = self.tmp_dir() - self.waagent_d = self.tmp_path('/var/lib/waagent', self.tmp) - self.paths = helpers.Paths({'cloud_dir': self.tmp}) - dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d - self.paths = helpers.Paths({'cloud_dir': self.tmp}) - - @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_detach_event', - autospec=True) - @mock.patch(MOCKPATH + 'util.write_file', autospec=True) - def test_nic_detach_writes_marker(self, m_writefile, m_detach): - """When we detect that a nic gets detached, we write a marker for it""" - dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) - nl_sock = mock.MagicMock() - dsa._wait_for_nic_detach(nl_sock) - m_detach.assert_called_with(nl_sock) - self.assertEqual(1, m_detach.call_count) - m_writefile.assert_called_with( - dsaz.REPROVISION_NIC_DETACHED_MARKER_FILE, mock.ANY) - - @mock.patch(MOCKPATH + 'util.write_file', autospec=True) - @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface') - @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting') - @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready') - @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach') - def test_detect_nic_attach_reports_ready_and_waits_for_detach( - self, m_detach, m_report_ready, m_dhcp, m_fallback_if, - m_writefile): - """Report ready first and then wait for nic detach""" - dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) - dsa._wait_for_all_nics_ready() - m_fallback_if.return_value = "Dummy interface" - self.assertEqual(1, m_report_ready.call_count) - self.assertEqual(1, m_detach.call_count) - self.assertEqual(1, m_writefile.call_count) - self.assertEqual(1, m_dhcp.call_count) - m_writefile.assert_called_with(dsaz.REPORTED_READY_MARKER_FILE, - mock.ANY) - - @mock.patch('os.path.isfile') - @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface') - @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting') - @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready') - @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach') - def test_detect_nic_attach_skips_report_ready_when_marker_present( - self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile): - """Skip reporting ready if we already have a marker file.""" - dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) - - def isfile(key): - return key == dsaz.REPORTED_READY_MARKER_FILE - - m_isfile.side_effect = isfile - dsa._wait_for_all_nics_ready() - m_fallback_if.return_value = "Dummy interface" - self.assertEqual(0, m_report_ready.call_count) - 
self.assertEqual(0, m_dhcp.call_count) - self.assertEqual(1, m_detach.call_count) - - @mock.patch('os.path.isfile') - @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface') - @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting') - @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready') - @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach') - def test_detect_nic_attach_skips_nic_detach_when_marker_present( - self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile): - """Skip wait for nic detach if it already happened.""" - dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) - - m_isfile.return_value = True - dsa._wait_for_all_nics_ready() - m_fallback_if.return_value = "Dummy interface" - self.assertEqual(0, m_report_ready.call_count) - self.assertEqual(0, m_dhcp.call_count) - self.assertEqual(0, m_detach.call_count) - - @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up', autospec=True) - @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event') - @mock.patch('cloudinit.sources.net.find_fallback_nic') - @mock.patch(MOCKPATH + 'get_metadata_from_imds') - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') - @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach') - @mock.patch('os.path.isfile') - def test_wait_for_nic_attach_if_no_fallback_interface( - self, m_isfile, m_detach, m_dhcpv4, m_imds, m_fallback_if, - m_attach, m_link_up): - """Wait for nic attach if we do not have a fallback interface""" - dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) - lease = { - 'interface': 'eth9', 'fixed-address': '192.168.2.9', - 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', - 'unknown-245': '624c3620'} - - m_isfile.return_value = True - m_attach.return_value = "eth0" - dhcp_ctx = mock.MagicMock(lease=lease) - dhcp_ctx.obtain_lease.return_value = lease - m_dhcpv4.return_value = dhcp_ctx - m_imds.return_value = IMDS_NETWORK_METADATA - m_fallback_if.return_value = None - - dsa._wait_for_all_nics_ready() - - self.assertEqual(0, m_detach.call_count) - self.assertEqual(1, m_attach.call_count) - self.assertEqual(1, m_dhcpv4.call_count) - self.assertEqual(1, m_imds.call_count) - self.assertEqual(1, m_link_up.call_count) - m_link_up.assert_called_with(mock.ANY, "eth0") - - @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up') - @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event') - @mock.patch('cloudinit.sources.net.find_fallback_nic') - @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback') - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') - @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach') - @mock.patch('os.path.isfile') - def test_wait_for_nic_attach_multinic_attach( - self, m_isfile, m_detach, m_dhcpv4, m_imds, m_fallback_if, - m_attach, m_link_up): - """Wait for nic attach if we do not have a fallback interface""" - dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) - lease = { - 'interface': 'eth9', 'fixed-address': '192.168.2.9', - 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', - 'unknown-245': '624c3620'} - m_attach_call_count = 0 - - def nic_attach_ret(nl_sock, nics_found): - nonlocal m_attach_call_count - m_attach_call_count = m_attach_call_count + 1 - if m_attach_call_count == 1: - return "eth0" - elif m_attach_call_count == 2: - return "eth1" - raise RuntimeError("Must have found primary nic by now.") - - # Simulate two NICs by adding the same one twice. 
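-        # Duplicating interface[0] is enough here: only the interface count
-        # matters, and the fake IMDS reply below answers for eth0 while
-        # timing out for every other nic.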
- md = { - "interface": [ - IMDS_NETWORK_METADATA['interface'][0], - IMDS_NETWORK_METADATA['interface'][0] - ] - } - - def network_metadata_ret(ifname, retries, type, exc_cb, infinite): - if ifname == "eth0": - return md - raise requests.Timeout('Fake connection timeout') - - m_isfile.return_value = True - m_attach.side_effect = nic_attach_ret - dhcp_ctx = mock.MagicMock(lease=lease) - dhcp_ctx.obtain_lease.return_value = lease - m_dhcpv4.return_value = dhcp_ctx - m_imds.side_effect = network_metadata_ret - m_fallback_if.return_value = None - - dsa._wait_for_all_nics_ready() - - self.assertEqual(0, m_detach.call_count) - self.assertEqual(2, m_attach.call_count) - # DHCP and network metadata calls will only happen on the primary NIC. - self.assertEqual(1, m_dhcpv4.call_count) - self.assertEqual(1, m_imds.call_count) - self.assertEqual(2, m_link_up.call_count) - - @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback') - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') - def test_check_if_nic_is_primary_retries_on_failures( - self, m_dhcpv4, m_imds): - """Retry polling for network metadata on all failures except timeout - and network unreachable errors""" - dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) - lease = { - 'interface': 'eth9', 'fixed-address': '192.168.2.9', - 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', - 'unknown-245': '624c3620'} - - eth0Retries = [] - eth1Retries = [] - # Simulate two NICs by adding the same one twice. - md = { - "interface": [ - IMDS_NETWORK_METADATA['interface'][0], - IMDS_NETWORK_METADATA['interface'][0] - ] - } - - def network_metadata_ret(ifname, retries, type, exc_cb, infinite): - nonlocal eth0Retries, eth1Retries - - # Simulate readurl functionality with retries and - # exception callbacks so that the callback logic can be - # validated. - if ifname == "eth0": - cause = requests.HTTPError() - for _ in range(0, 15): - error = url_helper.UrlError(cause=cause, code=410) - eth0Retries.append(exc_cb("No goal state.", error)) - else: - for _ in range(0, 10): - # We are expected to retry for a certain period for both - # timeout errors and network unreachable errors. - if _ < 5: - cause = requests.Timeout('Fake connection timeout') - else: - cause = requests.ConnectionError('Network Unreachable') - error = url_helper.UrlError(cause=cause) - eth1Retries.append(exc_cb("Connection timeout", error)) - # Should stop retrying after 10 retries - eth1Retries.append(exc_cb("Connection timeout", error)) - raise cause - return md - - m_imds.side_effect = network_metadata_ret - - dhcp_ctx = mock.MagicMock(lease=lease) - dhcp_ctx.obtain_lease.return_value = lease - m_dhcpv4.return_value = dhcp_ctx - - is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth0") - self.assertEqual(True, is_primary) - self.assertEqual(2, expected_nic_count) - - # All Eth0 errors are non-timeout errors. So we should have been - # retrying indefinitely until success. - for i in eth0Retries: - self.assertTrue(i) - - is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth1") - self.assertEqual(False, is_primary) - - # All Eth1 errors are timeout errors. Retry happens for a max of 10 and - # then we should have moved on assuming it is not the primary nic. 
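-        # exc_cb() returns True while an error is still considered
-        # retryable, so the recorded booleans pinpoint where retrying stops.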
-        for i in range(0, 10):
-            self.assertTrue(eth1Retries[i])
-        self.assertFalse(eth1Retries[10])
-
-    @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
-    def test_wait_for_link_up_returns_if_already_up(
-            self, m_is_link_up):
-        """Waiting for link to be up should return immediately if the link is
-        already up."""
-
-        distro_cls = distros.fetch('ubuntu')
-        distro = distro_cls('ubuntu', {}, self.paths)
-        dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
-        m_is_link_up.return_value = True
-
-        dsa.wait_for_link_up("eth0")
-        self.assertEqual(1, m_is_link_up.call_count)
-
-    @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
-    @mock.patch(MOCKPATH + 'util.write_file')
-    @mock.patch('cloudinit.net.read_sys_net')
-    @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
-    def test_wait_for_link_up_checks_link_after_sleep(
-            self, m_try_set_link_up, m_read_sys_net, m_writefile, m_is_up):
-        """If the link is not yet up, wait_for_link_up should sleep and
-        check again."""
-
-        distro_cls = distros.fetch('ubuntu')
-        distro = distro_cls('ubuntu', {}, self.paths)
-        dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
-        m_try_set_link_up.return_value = False
-
-        callcount = 0
-
-        def is_up_mock(key):
-            nonlocal callcount
-            if callcount == 0:
-                callcount += 1
-                return False
-            return True
-
-        m_is_up.side_effect = is_up_mock
-
-        dsa.wait_for_link_up("eth0")
-        self.assertEqual(2, m_try_set_link_up.call_count)
-        self.assertEqual(2, m_is_up.call_count)
-
-    @mock.patch(MOCKPATH + 'util.write_file')
-    @mock.patch('cloudinit.net.read_sys_net')
-    @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
-    def test_wait_for_link_up_writes_to_device_file(
-            self, m_is_link_up, m_read_sys_net, m_writefile):
-        """If bringing the link up fails, wait_for_link_up should write to
-        the device file before checking again."""
-
-        distro_cls = distros.fetch('ubuntu')
-        distro = distro_cls('ubuntu', {}, self.paths)
-        dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
-
-        callcount = 0
-
-        def linkup(key):
-            nonlocal callcount
-            if callcount == 0:
-                callcount += 1
-                return False
-            return True
-
-        m_is_link_up.side_effect = linkup
-
-        dsa.wait_for_link_up("eth0")
-        self.assertEqual(2, m_is_link_up.call_count)
-        self.assertEqual(1, m_read_sys_net.call_count)
-        self.assertEqual(2, m_writefile.call_count)
-
-    @mock.patch('cloudinit.sources.helpers.netlink.'
-                'create_bound_netlink_socket')
-    def test_wait_for_all_nics_ready_raises_if_socket_fails(self, m_socket):
-        """Waiting for all nics should raise exception if netlink socket
-        creation fails."""
-
-        m_socket.side_effect = netlink.NetlinkCreateSocketError
-        distro_cls = distros.fetch('ubuntu')
-        distro = distro_cls('ubuntu', {}, self.paths)
-        dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
-
-        self.assertRaises(netlink.NetlinkCreateSocketError,
-                          dsa._wait_for_all_nics_ready)
-
-
-@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
-@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
-@mock.patch('cloudinit.sources.helpers.netlink.'
-            'wait_for_media_disconnect_connect')
-@mock.patch('requests.Session.request')
-@mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
-class TestPreprovisioningPollIMDS(CiTestCase):
-
-    def setUp(self):
-        super(TestPreprovisioningPollIMDS, self).setUp()
-        self.tmp = self.tmp_dir()
-        self.waagent_d = self.tmp_path('/var/lib/waagent', self.tmp)
-        self.paths = helpers.Paths({'cloud_dir': self.tmp})
-        dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-
-    @mock.patch('time.sleep', mock.MagicMock())
-    @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
-    def test_poll_imds_re_dhcp_on_timeout(self, m_dhcpv4, m_report_ready,
-                                          m_request, m_media_switch, m_dhcp,
-                                          m_net):
-        """The _poll_imds method will retry DHCP on IMDS timeout."""
-        report_file = self.tmp_path('report_marker', self.tmp)
-        lease = {
-            'interface': 'eth9', 'fixed-address': '192.168.2.9',
-            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
-            'unknown-245': '624c3620'}
-        m_dhcp.return_value = [lease]
-        m_media_switch.return_value = None
-        dhcp_ctx = mock.MagicMock(lease=lease)
-        dhcp_ctx.obtain_lease.return_value = lease
-        m_dhcpv4.return_value = dhcp_ctx
-
-        self.tries = 0
-
-        def fake_timeout_once(**kwargs):
-            self.tries += 1
-            if self.tries == 1:
-                raise requests.Timeout('Fake connection timeout')
-            elif self.tries in (2, 3):
-                response = requests.Response()
-                response.status_code = 404 if self.tries == 2 else 410
-                raise requests.exceptions.HTTPError(
-                    "fake {}".format(response.status_code), response=response
-                )
-            # The fourth request should succeed and stop retries or re-dhcp
-            return mock.MagicMock(status_code=200, text="good", content="good")
-
-        m_request.side_effect = fake_timeout_once
-
-        dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
-        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
-            dsa._poll_imds()
-        self.assertEqual(m_report_ready.call_count, 1)
-        m_report_ready.assert_called_with(lease=lease)
-        self.assertEqual(3, m_dhcpv4.call_count, 'Expected 3 DHCP calls')
-        self.assertEqual(4, self.tries, 'Expected 4 total reads from IMDS')
-
-    @mock.patch('os.path.isfile')
-    def test_poll_imds_skips_dhcp_if_ctx_present(
-            self, m_isfile, report_ready_func, fake_resp, m_media_switch,
-            m_dhcp, m_net):
-        """The poll_imds function should reuse the dhcp ctx if it is already
-        present. This happens when we wait for nic to be hot-attached before
-        polling for reprovisiondata. Note that if this ctx is set when
-        _poll_imds is called, then it is not expected to be waiting for
-        media_disconnect_connect either."""
-        report_file = self.tmp_path('report_marker', self.tmp)
-        m_isfile.return_value = True
-        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-        dsa._ephemeral_dhcp_ctx = "Dummy dhcp ctx"
-        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
-            dsa._poll_imds()
-        self.assertEqual(0, m_dhcp.call_count)
-        self.assertEqual(0, m_media_switch.call_count)
-
-    @mock.patch('os.path.isfile')
-    @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
-    def test_poll_imds_does_dhcp_on_retries_if_ctx_present(
-            self, m_ephemeral_dhcpv4, m_isfile, report_ready_func, m_request,
-            m_media_switch, m_dhcp, m_net):
-        """The poll_imds function should reuse the dhcp ctx if it is already
-        present. This happens when we wait for nic to be hot-attached before
-        polling for reprovisiondata. Note that if this ctx is set when
-        _poll_imds is called, then it is not expected to be waiting for
-        media_disconnect_connect either."""
-
-        tries = 0
-
-        def fake_timeout_once(**kwargs):
-            nonlocal tries
-            tries += 1
-            if tries == 1:
-                raise requests.Timeout('Fake connection timeout')
-            return mock.MagicMock(status_code=200, text="good", content="good")
-
-        m_request.side_effect = fake_timeout_once
-        report_file = self.tmp_path('report_marker', self.tmp)
-        m_isfile.return_value = True
-        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file),\
-                mock.patch.object(dsa, '_ephemeral_dhcp_ctx') as m_dhcp_ctx:
-            m_dhcp_ctx.obtain_lease.return_value = "Dummy lease"
-            dsa._ephemeral_dhcp_ctx = m_dhcp_ctx
-            dsa._poll_imds()
-        self.assertEqual(1, m_dhcp_ctx.clean_network.call_count)
-        self.assertEqual(1, m_ephemeral_dhcpv4.call_count)
-        self.assertEqual(0, m_media_switch.call_count)
-        self.assertEqual(2, m_request.call_count)
-
-    def test_does_not_poll_imds_report_ready_when_marker_file_exists(
-            self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
-        """poll_imds should not call report ready when the reported ready
-        marker file exists"""
-        report_file = self.tmp_path('report_marker', self.tmp)
-        write_file(report_file, content='dont run report_ready :)')
-        m_dhcp.return_value = [{
-            'interface': 'eth9', 'fixed-address': '192.168.2.9',
-            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
-            'unknown-245': '624c3620'}]
-        m_media_switch.return_value = None
-        dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
-        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
-            dsa._poll_imds()
-        self.assertEqual(m_report_ready.call_count, 0)
-
-    def test_poll_imds_report_ready_success_writes_marker_file(
-            self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
-        """poll_imds should write the report_ready marker file if
-        reporting ready succeeds"""
-        report_file = self.tmp_path('report_marker', self.tmp)
-        m_dhcp.return_value = [{
-            'interface': 'eth9', 'fixed-address': '192.168.2.9',
-            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
-            'unknown-245': '624c3620'}]
-        m_media_switch.return_value = None
-        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-        self.assertFalse(os.path.exists(report_file))
-        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
-            dsa._poll_imds()
-        self.assertEqual(m_report_ready.call_count, 1)
-        self.assertTrue(os.path.exists(report_file))
-
-    def test_poll_imds_report_ready_failure_raises_exc_and_doesnt_write_marker(
-            self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
-        """poll_imds should not write the report_ready marker file and should
-        raise InvalidMetaDataException when reporting ready fails"""
-        report_file = self.tmp_path('report_marker', self.tmp)
-        m_dhcp.return_value = [{
-            'interface': 'eth9', 'fixed-address': '192.168.2.9',
-            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
-            'unknown-245': '624c3620'}]
-        m_media_switch.return_value = None
-        m_report_ready.return_value = False
-        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-        self.assertFalse(os.path.exists(report_file))
-        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
-            self.assertRaises(
-                InvalidMetaDataException,
-                dsa._poll_imds)
-        self.assertEqual(m_report_ready.call_count, 1)
-        self.assertFalse(os.path.exists(report_file))
-
-
-@mock.patch(MOCKPATH + 'DataSourceAzure._report_ready', mock.MagicMock())
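-# Patches given an explicit mock.MagicMock() apply wholesale and inject no
-# test arguments; the remaining class patches below are handed to each test
-# bottom-up, so requests.Session.request arrives as the first mock argument.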
-@mock.patch(MOCKPATH + 'subp.subp', mock.MagicMock()) -@mock.patch(MOCKPATH + 'util.write_file', mock.MagicMock()) -@mock.patch('cloudinit.sources.helpers.netlink.' - 'wait_for_media_disconnect_connect') -@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network', autospec=True) -@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') -@mock.patch('requests.Session.request') -class TestAzureDataSourcePreprovisioning(CiTestCase): - - def setUp(self): - super(TestAzureDataSourcePreprovisioning, self).setUp() - tmp = self.tmp_dir() - self.waagent_d = self.tmp_path('/var/lib/waagent', tmp) - self.paths = helpers.Paths({'cloud_dir': tmp}) - dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d - - def test_poll_imds_returns_ovf_env(self, m_request, - m_dhcp, m_net, - m_media_switch): - """The _poll_imds method should return the ovf_env.xml.""" - m_media_switch.return_value = None - m_dhcp.return_value = [{ - 'interface': 'eth9', 'fixed-address': '192.168.2.9', - 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0'}] - url = 'http://{0}/metadata/reprovisiondata?api-version=2019-06-01' - host = "169.254.169.254" - full_url = url.format(host) - m_request.return_value = mock.MagicMock(status_code=200, text="ovf", - content="ovf") - dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) - self.assertTrue(len(dsa._poll_imds()) > 0) - self.assertEqual(m_request.call_args_list, - [mock.call(allow_redirects=True, - headers={'Metadata': 'true', - 'User-Agent': - 'Cloud-Init/%s' % vs() - }, method='GET', - timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, - url=full_url)]) - self.assertEqual(m_dhcp.call_count, 2) - m_net.assert_any_call( - broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', - prefix_or_mask='255.255.255.0', router='192.168.2.1', - static_routes=None) - self.assertEqual(m_net.call_count, 2) - - def test__reprovision_calls__poll_imds(self, m_request, - m_dhcp, m_net, - m_media_switch): - """The _reprovision method should call poll IMDS.""" - m_media_switch.return_value = None - m_dhcp.return_value = [{ - 'interface': 'eth9', 'fixed-address': '192.168.2.9', - 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', - 'unknown-245': '624c3620'}] - url = 'http://{0}/metadata/reprovisiondata?api-version=2019-06-01' - host = "169.254.169.254" - full_url = url.format(host) - hostname = "myhost" - username = "myuser" - odata = {'HostName': hostname, 'UserName': username} - content = construct_valid_ovf_env(data=odata) - m_request.return_value = mock.MagicMock(status_code=200, text=content, - content=content) - dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) - md, _ud, cfg, _d = dsa._reprovision() - self.assertEqual(md['local-hostname'], hostname) - self.assertEqual(cfg['system_info']['default_user']['name'], username) - self.assertIn( - mock.call( - allow_redirects=True, - headers={ - 'Metadata': 'true', - 'User-Agent': 'Cloud-Init/%s' % vs() - }, - method='GET', - timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, - url=full_url - ), - m_request.call_args_list) - self.assertEqual(m_dhcp.call_count, 2) - m_net.assert_any_call( - broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', - prefix_or_mask='255.255.255.0', router='192.168.2.1', - static_routes=None) - self.assertEqual(m_net.call_count, 2) - - -class TestRemoveUbuntuNetworkConfigScripts(CiTestCase): - - with_logs = True - - def setUp(self): - super(TestRemoveUbuntuNetworkConfigScripts, self).setUp() - self.tmp = self.tmp_dir() - - def test_remove_network_scripts_removes_both_files_and_directories(self): 
- """Any files or directories in paths are removed when present.""" - file1 = self.tmp_path('file1', dir=self.tmp) - subdir = self.tmp_path('sub1', dir=self.tmp) - subfile = self.tmp_path('leaf1', dir=subdir) - write_file(file1, 'file1content') - write_file(subfile, 'leafcontent') - dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[subdir, file1]) - - for path in (file1, subdir, subfile): - self.assertFalse(os.path.exists(path), - 'Found unremoved: %s' % path) - - expected_logs = [ - 'INFO: Removing Ubuntu extended network scripts because cloud-init' - ' updates Azure network configuration on the following events:' - " ['boot', 'boot-legacy']", - 'Recursively deleting %s' % subdir, - 'Attempting to remove %s' % file1] - for log in expected_logs: - self.assertIn(log, self.logs.getvalue()) - - def test_remove_network_scripts_only_attempts_removal_if_path_exists(self): - """Any files or directories absent are skipped without error.""" - dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[ - self.tmp_path('nodirhere/', dir=self.tmp), - self.tmp_path('notfilehere', dir=self.tmp)]) - self.assertNotIn('/not/a', self.logs.getvalue()) # No delete logs - - @mock.patch(MOCKPATH + 'os.path.exists') - def test_remove_network_scripts_default_removes_stock_scripts(self, - m_exists): - """Azure's stock ubuntu image scripts and artifacts are removed.""" - # Report path absent on all to avoid delete operation - m_exists.return_value = False - dsaz.maybe_remove_ubuntu_network_config_scripts() - calls = m_exists.call_args_list - for path in dsaz.UBUNTU_EXTENDED_NETWORK_SCRIPTS: - self.assertIn(mock.call(path), calls) - - -class TestWBIsPlatformViable(CiTestCase): - """White box tests for _is_platform_viable.""" - with_logs = True - - @mock.patch(MOCKPATH + 'dmi.read_dmi_data') - def test_true_on_non_azure_chassis(self, m_read_dmi_data): - """Return True if DMI chassis-asset-tag is AZURE_CHASSIS_ASSET_TAG.""" - m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG - self.assertTrue(dsaz._is_platform_viable('doesnotmatter')) - - @mock.patch(MOCKPATH + 'os.path.exists') - @mock.patch(MOCKPATH + 'dmi.read_dmi_data') - def test_true_on_azure_ovf_env_in_seed_dir(self, m_read_dmi_data, m_exist): - """Return True if ovf-env.xml exists in known seed dirs.""" - # Non-matching Azure chassis-asset-tag - m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + 'X' - - m_exist.return_value = True - self.assertTrue(dsaz._is_platform_viable('/some/seed/dir')) - m_exist.called_once_with('/other/seed/dir') - - def test_false_on_no_matching_azure_criteria(self): - """Report non-azure on unmatched asset tag, ovf-env absent and no dev. - - Return False when the asset tag doesn't match Azure's static - AZURE_CHASSIS_ASSET_TAG, no ovf-env.xml files exist in known seed dirs - and no devices have a label starting with prefix 'rd_rdfe_'. 
- """ - self.assertFalse(wrap_and_call( - MOCKPATH, - {'os.path.exists': False, - # Non-matching Azure chassis-asset-tag - 'dmi.read_dmi_data': dsaz.AZURE_CHASSIS_ASSET_TAG + 'X', - 'subp.which': None}, - dsaz._is_platform_viable, 'doesnotmatter')) - self.assertIn( - "DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format( - dsaz.AZURE_CHASSIS_ASSET_TAG + 'X'), - self.logs.getvalue()) - - -class TestRandomSeed(CiTestCase): - """Test proper handling of random_seed""" - - def test_non_ascii_seed_is_serializable(self): - """Pass if a random string from the Azure infrastructure which - contains at least one non-Unicode character can be converted to/from - JSON without alteration and without throwing an exception. - """ - path = resourceLocation("azure/non_unicode_random_string") - result = dsaz._get_random_seed(path) - - obj = {'seed': result} - try: - serialized = json_dumps(obj) - deserialized = load_json(serialized) - except UnicodeDecodeError: - self.fail("Non-serializable random seed returned") - - self.assertEqual(deserialized['seed'], result) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py deleted file mode 100644 index ab4f0b50..00000000 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ /dev/null @@ -1,1441 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import copy -import os -import re -import unittest -from textwrap import dedent -from xml.etree import ElementTree -from xml.sax.saxutils import escape, unescape - -from cloudinit.sources.helpers import azure as azure_helper -from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir - -from cloudinit.util import load_file -from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim - -GOAL_STATE_TEMPLATE = """\ - - - 2012-11-30 - {incarnation} - - Started - 300000 - - 16001 - - FALSE - - - {container_id} - - - {instance_id} - Started - - - http://100.86.192.70:80/...hostingEnvironmentConfig... - - http://100.86.192.70:80/..SharedConfig.. - - http://100.86.192.70:80/...extensionsConfig... - - http://100.86.192.70:80/...fullConfig... - {certificates_url} - 68ce47.0.68ce47.0.utl-trusty--292258.1.xml - - - - - -""" - -HEALTH_REPORT_XML_TEMPLATE = '''\ - - - {incarnation} - - {container_id} - - - {instance_id} - - {health_status} - {health_detail_subsection} - - - - - -''' - -HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = dedent('''\ -
- {health_substatus} - {health_description} -
- ''') - -HEALTH_REPORT_DESCRIPTION_TRIM_LEN = 512 - - -class SentinelException(Exception): - pass - - -class TestFindEndpoint(CiTestCase): - - def setUp(self): - super(TestFindEndpoint, self).setUp() - patches = ExitStack() - self.addCleanup(patches.close) - - self.load_file = patches.enter_context( - mock.patch.object(azure_helper.util, 'load_file')) - - self.dhcp_options = patches.enter_context( - mock.patch.object(wa_shim, '_load_dhclient_json')) - - self.networkd_leases = patches.enter_context( - mock.patch.object(wa_shim, '_networkd_get_value_from_leases')) - self.networkd_leases.return_value = None - - def test_missing_file(self): - """wa_shim find_endpoint uses default endpoint if leasefile not found - """ - self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16") - - def test_missing_special_azure_line(self): - """wa_shim find_endpoint uses default endpoint if leasefile is found - but does not contain DHCP Option 245 (whose value is the endpoint) - """ - self.load_file.return_value = '' - self.dhcp_options.return_value = {'eth0': {'key': 'value'}} - self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16") - - @staticmethod - def _build_lease_content(encoded_address): - endpoint = azure_helper._get_dhcp_endpoint_option_name() - return '\n'.join([ - 'lease {', - ' interface "eth0";', - ' option {0} {1};'.format(endpoint, encoded_address), - '}']) - - def test_from_dhcp_client(self): - self.dhcp_options.return_value = {"eth0": {"unknown_245": "5:4:3:2"}} - self.assertEqual('5.4.3.2', wa_shim.find_endpoint(None)) - - def test_latest_lease_used(self): - encoded_addresses = ['5:4:3:2', '4:3:2:1'] - file_content = '\n'.join([self._build_lease_content(encoded_address) - for encoded_address in encoded_addresses]) - self.load_file.return_value = file_content - self.assertEqual(encoded_addresses[-1].replace(':', '.'), - wa_shim.find_endpoint("foobar")) - - -class TestExtractIpAddressFromLeaseValue(CiTestCase): - - def test_hex_string(self): - ip_address, encoded_address = '98.76.54.32', '62:4c:36:20' - self.assertEqual( - ip_address, wa_shim.get_ip_from_lease_value(encoded_address)) - - def test_hex_string_with_single_character_part(self): - ip_address, encoded_address = '4.3.2.1', '4:3:2:1' - self.assertEqual( - ip_address, wa_shim.get_ip_from_lease_value(encoded_address)) - - def test_packed_string(self): - ip_address, encoded_address = '98.76.54.32', 'bL6 ' - self.assertEqual( - ip_address, wa_shim.get_ip_from_lease_value(encoded_address)) - - def test_packed_string_with_escaped_quote(self): - ip_address, encoded_address = '100.72.34.108', 'dH\\"l' - self.assertEqual( - ip_address, wa_shim.get_ip_from_lease_value(encoded_address)) - - def test_packed_string_containing_a_colon(self): - ip_address, encoded_address = '100.72.58.108', 'dH:l' - self.assertEqual( - ip_address, wa_shim.get_ip_from_lease_value(encoded_address)) - - -class TestGoalStateParsing(CiTestCase): - - default_parameters = { - 'incarnation': 1, - 'container_id': 'MyContainerId', - 'instance_id': 'MyInstanceId', - 'certificates_url': 'MyCertificatesUrl', - } - - def _get_formatted_goal_state_xml_string(self, **kwargs): - parameters = self.default_parameters.copy() - parameters.update(kwargs) - xml = GOAL_STATE_TEMPLATE.format(**parameters) - if parameters['certificates_url'] is None: - new_xml_lines = [] - for line in xml.splitlines(): - if 'Certificates' in line: - continue - new_xml_lines.append(line) - xml = '\n'.join(new_xml_lines) - return xml - - def _get_goal_state(self, m_azure_endpoint_client=None, **kwargs): - 
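-        # Build a GoalState from the rendered template; tests may pass a
-        # preconfigured endpoint-client mock, otherwise a MagicMock stands in.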
-        if m_azure_endpoint_client is None:
-            m_azure_endpoint_client = mock.MagicMock()
-        xml = self._get_formatted_goal_state_xml_string(**kwargs)
-        return azure_helper.GoalState(xml, m_azure_endpoint_client)
-
-    def test_incarnation_parsed_correctly(self):
-        incarnation = '123'
-        goal_state = self._get_goal_state(incarnation=incarnation)
-        self.assertEqual(incarnation, goal_state.incarnation)
-
-    def test_container_id_parsed_correctly(self):
-        container_id = 'TestContainerId'
-        goal_state = self._get_goal_state(container_id=container_id)
-        self.assertEqual(container_id, goal_state.container_id)
-
-    def test_instance_id_parsed_correctly(self):
-        instance_id = 'TestInstanceId'
-        goal_state = self._get_goal_state(instance_id=instance_id)
-        self.assertEqual(instance_id, goal_state.instance_id)
-
-    def test_instance_id_byte_swap(self):
-        """Return true when previous_iid is byteswapped current_iid"""
-        previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
-        current_iid = "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8"
-        self.assertTrue(
-            azure_helper.is_byte_swapped(previous_iid, current_iid))
-
-    def test_instance_id_no_byte_swap_same_instance_id(self):
-        previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
-        current_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
-        self.assertFalse(
-            azure_helper.is_byte_swapped(previous_iid, current_iid))
-
-    def test_instance_id_no_byte_swap_diff_instance_id(self):
-        previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
-        current_iid = "G0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
-        self.assertFalse(
-            azure_helper.is_byte_swapped(previous_iid, current_iid))
-
-    def test_certificates_xml_parsed_and_fetched_correctly(self):
-        m_azure_endpoint_client = mock.MagicMock()
-        certificates_url = 'TestCertificatesUrl'
-        goal_state = self._get_goal_state(
-            m_azure_endpoint_client=m_azure_endpoint_client,
-            certificates_url=certificates_url)
-        certificates_xml = goal_state.certificates_xml
-        self.assertEqual(1, m_azure_endpoint_client.get.call_count)
-        self.assertEqual(
-            certificates_url,
-            m_azure_endpoint_client.get.call_args[0][0])
-        self.assertTrue(
-            m_azure_endpoint_client.get.call_args[1].get(
-                'secure', False))
-        self.assertEqual(
-            m_azure_endpoint_client.get.return_value.contents,
-            certificates_xml)
-
-    def test_missing_certificates_skips_http_get(self):
-        m_azure_endpoint_client = mock.MagicMock()
-        goal_state = self._get_goal_state(
-            m_azure_endpoint_client=m_azure_endpoint_client,
-            certificates_url=None)
-        certificates_xml = goal_state.certificates_xml
-        self.assertEqual(0, m_azure_endpoint_client.get.call_count)
-        self.assertIsNone(certificates_xml)
-
-    def test_invalid_goal_state_xml_raises_parse_error(self):
-        xml = 'random non-xml data'
-        with self.assertRaises(ElementTree.ParseError):
-            azure_helper.GoalState(xml, mock.MagicMock())
-
-    def test_missing_container_id_in_goal_state_xml_raises_exc(self):
-        xml = self._get_formatted_goal_state_xml_string()
-        xml = re.sub('<ContainerId>.*</ContainerId>', '', xml)
-        with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
-            azure_helper.GoalState(xml, mock.MagicMock())
-
-    def test_missing_instance_id_in_goal_state_xml_raises_exc(self):
-        xml = self._get_formatted_goal_state_xml_string()
-        xml = re.sub('<InstanceId>.*</InstanceId>', '', xml)
-        with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
-            azure_helper.GoalState(xml, mock.MagicMock())
-
-    def test_missing_incarnation_in_goal_state_xml_raises_exc(self):
-        xml = self._get_formatted_goal_state_xml_string()
-        xml = re.sub('<Incarnation>.*</Incarnation>', '', xml)
-        with
self.assertRaises(azure_helper.InvalidGoalStateXMLException): - azure_helper.GoalState(xml, mock.MagicMock()) - - -class TestAzureEndpointHttpClient(CiTestCase): - - regular_headers = { - 'x-ms-agent-name': 'WALinuxAgent', - 'x-ms-version': '2012-11-30', - } - - def setUp(self): - super(TestAzureEndpointHttpClient, self).setUp() - patches = ExitStack() - self.addCleanup(patches.close) - self.m_http_with_retries = patches.enter_context( - mock.patch.object(azure_helper, 'http_with_retries')) - - def test_non_secure_get(self): - client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) - url = 'MyTestUrl' - response = client.get(url, secure=False) - self.assertEqual(1, self.m_http_with_retries.call_count) - self.assertEqual(self.m_http_with_retries.return_value, response) - self.assertEqual( - mock.call(url, headers=self.regular_headers), - self.m_http_with_retries.call_args) - - def test_non_secure_get_raises_exception(self): - client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) - url = 'MyTestUrl' - self.m_http_with_retries.side_effect = SentinelException - self.assertRaises(SentinelException, client.get, url, secure=False) - self.assertEqual(1, self.m_http_with_retries.call_count) - - def test_secure_get(self): - url = 'MyTestUrl' - m_certificate = mock.MagicMock() - expected_headers = self.regular_headers.copy() - expected_headers.update({ - "x-ms-cipher-name": "DES_EDE3_CBC", - "x-ms-guest-agent-public-x509-cert": m_certificate, - }) - client = azure_helper.AzureEndpointHttpClient(m_certificate) - response = client.get(url, secure=True) - self.assertEqual(1, self.m_http_with_retries.call_count) - self.assertEqual(self.m_http_with_retries.return_value, response) - self.assertEqual( - mock.call(url, headers=expected_headers), - self.m_http_with_retries.call_args) - - def test_secure_get_raises_exception(self): - url = 'MyTestUrl' - client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) - self.m_http_with_retries.side_effect = SentinelException - self.assertRaises(SentinelException, client.get, url, secure=True) - self.assertEqual(1, self.m_http_with_retries.call_count) - - def test_post(self): - m_data = mock.MagicMock() - url = 'MyTestUrl' - client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) - response = client.post(url, data=m_data) - self.assertEqual(1, self.m_http_with_retries.call_count) - self.assertEqual(self.m_http_with_retries.return_value, response) - self.assertEqual( - mock.call(url, data=m_data, headers=self.regular_headers), - self.m_http_with_retries.call_args) - - def test_post_raises_exception(self): - m_data = mock.MagicMock() - url = 'MyTestUrl' - client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) - self.m_http_with_retries.side_effect = SentinelException - self.assertRaises(SentinelException, client.post, url, data=m_data) - self.assertEqual(1, self.m_http_with_retries.call_count) - - def test_post_with_extra_headers(self): - url = 'MyTestUrl' - client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) - extra_headers = {'test': 'header'} - client.post(url, extra_headers=extra_headers) - expected_headers = self.regular_headers.copy() - expected_headers.update(extra_headers) - self.assertEqual(1, self.m_http_with_retries.call_count) - self.assertEqual( - mock.call(url, data=mock.ANY, headers=expected_headers), - self.m_http_with_retries.call_args) - - def test_post_with_sleep_with_extra_headers_raises_exception(self): - m_data = mock.MagicMock() - url = 'MyTestUrl' - extra_headers = {'test': 'header'} - client = 
azure_helper.AzureEndpointHttpClient(mock.MagicMock()) - self.m_http_with_retries.side_effect = SentinelException - self.assertRaises( - SentinelException, client.post, - url, data=m_data, extra_headers=extra_headers) - self.assertEqual(1, self.m_http_with_retries.call_count) - - -class TestAzureHelperHttpWithRetries(CiTestCase): - - with_logs = True - - max_readurl_attempts = 240 - default_readurl_timeout = 5 - sleep_duration_between_retries = 5 - periodic_logging_attempts = 12 - - def setUp(self): - super(TestAzureHelperHttpWithRetries, self).setUp() - patches = ExitStack() - self.addCleanup(patches.close) - - self.m_readurl = patches.enter_context( - mock.patch.object( - azure_helper.url_helper, 'readurl', mock.MagicMock())) - self.m_sleep = patches.enter_context( - mock.patch.object(azure_helper.time, 'sleep', autospec=True)) - - def test_http_with_retries(self): - self.m_readurl.return_value = 'TestResp' - self.assertEqual( - azure_helper.http_with_retries('testurl'), - self.m_readurl.return_value) - self.assertEqual(self.m_readurl.call_count, 1) - - def test_http_with_retries_propagates_readurl_exc_and_logs_exc( - self): - self.m_readurl.side_effect = SentinelException - - self.assertRaises( - SentinelException, azure_helper.http_with_retries, 'testurl') - self.assertEqual(self.m_readurl.call_count, self.max_readurl_attempts) - - self.assertIsNotNone( - re.search( - r'Failed HTTP request with Azure endpoint \S* during ' - r'attempt \d+ with exception: \S*', - self.logs.getvalue())) - self.assertIsNone( - re.search( - r'Successful HTTP request with Azure endpoint \S* after ' - r'\d+ attempts', - self.logs.getvalue())) - - def test_http_with_retries_delayed_success_due_to_temporary_readurl_exc( - self): - self.m_readurl.side_effect = \ - [SentinelException] * self.periodic_logging_attempts + \ - ['TestResp'] - self.m_readurl.return_value = 'TestResp' - - response = azure_helper.http_with_retries('testurl') - self.assertEqual( - response, - self.m_readurl.return_value) - self.assertEqual( - self.m_readurl.call_count, - self.periodic_logging_attempts + 1) - - # Ensure that cloud-init did sleep between each failed request - self.assertEqual( - self.m_sleep.call_count, - self.periodic_logging_attempts) - self.m_sleep.assert_called_with(self.sleep_duration_between_retries) - - def test_http_with_retries_long_delay_logs_periodic_failure_msg(self): - self.m_readurl.side_effect = \ - [SentinelException] * self.periodic_logging_attempts + \ - ['TestResp'] - self.m_readurl.return_value = 'TestResp' - - azure_helper.http_with_retries('testurl') - - self.assertEqual( - self.m_readurl.call_count, - self.periodic_logging_attempts + 1) - self.assertIsNotNone( - re.search( - r'Failed HTTP request with Azure endpoint \S* during ' - r'attempt \d+ with exception: \S*', - self.logs.getvalue())) - self.assertIsNotNone( - re.search( - r'Successful HTTP request with Azure endpoint \S* after ' - r'\d+ attempts', - self.logs.getvalue())) - - def test_http_with_retries_short_delay_does_not_log_periodic_failure_msg( - self): - self.m_readurl.side_effect = \ - [SentinelException] * \ - (self.periodic_logging_attempts - 1) + \ - ['TestResp'] - self.m_readurl.return_value = 'TestResp' - - azure_helper.http_with_retries('testurl') - self.assertEqual( - self.m_readurl.call_count, - self.periodic_logging_attempts) - - self.assertIsNone( - re.search( - r'Failed HTTP request with Azure endpoint \S* during ' - r'attempt \d+ with exception: \S*', - self.logs.getvalue())) - self.assertIsNotNone( - re.search( - r'Successful 
HTTP request with Azure endpoint \S* after ' - r'\d+ attempts', - self.logs.getvalue())) - - def test_http_with_retries_calls_url_helper_readurl_with_args_kwargs(self): - testurl = mock.MagicMock() - kwargs = { - 'headers': mock.MagicMock(), - 'data': mock.MagicMock(), - # timeout kwarg should not be modified or deleted if present - 'timeout': mock.MagicMock() - } - azure_helper.http_with_retries(testurl, **kwargs) - self.m_readurl.assert_called_once_with(testurl, **kwargs) - - def test_http_with_retries_adds_timeout_kwarg_if_not_present(self): - testurl = mock.MagicMock() - kwargs = { - 'headers': mock.MagicMock(), - 'data': mock.MagicMock() - } - expected_kwargs = copy.deepcopy(kwargs) - expected_kwargs['timeout'] = self.default_readurl_timeout - - azure_helper.http_with_retries(testurl, **kwargs) - self.m_readurl.assert_called_once_with(testurl, **expected_kwargs) - - def test_http_with_retries_deletes_retries_kwargs_passed_in( - self): - """http_with_retries already implements retry logic, - so url_helper.readurl should not have retries. - http_with_retries should delete kwargs that - cause url_helper.readurl to retry. - """ - testurl = mock.MagicMock() - kwargs = { - 'headers': mock.MagicMock(), - 'data': mock.MagicMock(), - 'timeout': mock.MagicMock(), - 'retries': mock.MagicMock(), - 'infinite': mock.MagicMock() - } - expected_kwargs = copy.deepcopy(kwargs) - expected_kwargs.pop('retries', None) - expected_kwargs.pop('infinite', None) - - azure_helper.http_with_retries(testurl, **kwargs) - self.m_readurl.assert_called_once_with(testurl, **expected_kwargs) - self.assertIn( - 'retries kwarg passed in for communication with Azure endpoint.', - self.logs.getvalue()) - self.assertIn( - 'infinite kwarg passed in for communication with Azure endpoint.', - self.logs.getvalue()) - - -class TestOpenSSLManager(CiTestCase): - - def setUp(self): - super(TestOpenSSLManager, self).setUp() - patches = ExitStack() - self.addCleanup(patches.close) - - self.subp = patches.enter_context( - mock.patch.object(azure_helper.subp, 'subp')) - try: - self.open = patches.enter_context( - mock.patch('__builtin__.open')) - except ImportError: - self.open = patches.enter_context( - mock.patch('builtins.open')) - - @mock.patch.object(azure_helper, 'cd', mock.MagicMock()) - @mock.patch.object(azure_helper.temp_utils, 'mkdtemp') - def test_openssl_manager_creates_a_tmpdir(self, mkdtemp): - manager = azure_helper.OpenSSLManager() - self.assertEqual(mkdtemp.return_value, manager.tmpdir) - - def test_generate_certificate_uses_tmpdir(self): - subp_directory = {} - - def capture_directory(*args, **kwargs): - subp_directory['path'] = os.getcwd() - - self.subp.side_effect = capture_directory - manager = azure_helper.OpenSSLManager() - self.assertEqual(manager.tmpdir, subp_directory['path']) - manager.clean_up() - - @mock.patch.object(azure_helper, 'cd', mock.MagicMock()) - @mock.patch.object(azure_helper.temp_utils, 'mkdtemp', mock.MagicMock()) - @mock.patch.object(azure_helper.util, 'del_dir') - def test_clean_up(self, del_dir): - manager = azure_helper.OpenSSLManager() - manager.clean_up() - self.assertEqual([mock.call(manager.tmpdir)], del_dir.call_args_list) - - -class TestOpenSSLManagerActions(CiTestCase): - - def setUp(self): - super(TestOpenSSLManagerActions, self).setUp() - - self.allowed_subp = True - - def _data_file(self, name): - path = 'tests/data/azure' - return os.path.join(path, name) - - @unittest.skip("todo move to cloud_test") - def test_pubkey_extract(self): - cert = 
load_file(self._data_file('pubkey_extract_cert')) - good_key = load_file(self._data_file('pubkey_extract_ssh_key')) - sslmgr = azure_helper.OpenSSLManager() - key = sslmgr._get_ssh_key_from_cert(cert) - self.assertEqual(good_key, key) - - good_fingerprint = '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' - fingerprint = sslmgr._get_fingerprint_from_cert(cert) - self.assertEqual(good_fingerprint, fingerprint) - - @unittest.skip("todo move to cloud_test") - @mock.patch.object(azure_helper.OpenSSLManager, '_decrypt_certs_from_xml') - def test_parse_certificates(self, mock_decrypt_certs): - """Azure control plane puts private keys as well as certificates - into the Certificates XML object. Make sure only the public keys - from certs are extracted and that fingerprints are converted to - the form specified in the ovf-env.xml file. - """ - cert_contents = load_file(self._data_file('parse_certificates_pem')) - fingerprints = load_file(self._data_file( - 'parse_certificates_fingerprints') - ).splitlines() - mock_decrypt_certs.return_value = cert_contents - sslmgr = azure_helper.OpenSSLManager() - keys_by_fp = sslmgr.parse_certificates('') - for fp in keys_by_fp.keys(): - self.assertIn(fp, fingerprints) - for fp in fingerprints: - self.assertIn(fp, keys_by_fp) - - -class TestGoalStateHealthReporter(CiTestCase): - - maxDiff = None - - default_parameters = { - 'incarnation': 1634, - 'container_id': 'MyContainerId', - 'instance_id': 'MyInstanceId' - } - - test_azure_endpoint = 'TestEndpoint' - test_health_report_url = 'http://{0}/machine?comp=health'.format( - test_azure_endpoint) - test_default_headers = {'Content-Type': 'text/xml; charset=utf-8'} - - provisioning_success_status = 'Ready' - provisioning_not_ready_status = 'NotReady' - provisioning_failure_substatus = 'ProvisioningFailed' - provisioning_failure_err_description = ( - 'Test error message containing provisioning failure details') - - def setUp(self): - super(TestGoalStateHealthReporter, self).setUp() - patches = ExitStack() - self.addCleanup(patches.close) - - patches.enter_context( - mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock())) - self.read_file_or_url = patches.enter_context( - mock.patch.object(azure_helper.url_helper, 'read_file_or_url')) - - self.post = patches.enter_context( - mock.patch.object(azure_helper.AzureEndpointHttpClient, - 'post')) - - self.GoalState = patches.enter_context( - mock.patch.object(azure_helper, 'GoalState')) - self.GoalState.return_value.container_id = \ - self.default_parameters['container_id'] - self.GoalState.return_value.instance_id = \ - self.default_parameters['instance_id'] - self.GoalState.return_value.incarnation = \ - self.default_parameters['incarnation'] - - def _text_from_xpath_in_xroot(self, xroot, xpath): - element = xroot.find(xpath) - if element is not None: - return element.text - return None - - def _get_formatted_health_report_xml_string(self, **kwargs): - return HEALTH_REPORT_XML_TEMPLATE.format(**kwargs) - - def _get_formatted_health_detail_subsection_xml_string(self, **kwargs): - return HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(**kwargs) - - def _get_report_ready_health_document(self): - return self._get_formatted_health_report_xml_string( - incarnation=escape(str(self.default_parameters['incarnation'])), - container_id=escape(self.default_parameters['container_id']), - instance_id=escape(self.default_parameters['instance_id']), - health_status=escape(self.provisioning_success_status), - health_detail_subsection='') - - def _get_report_failure_health_document(self): - 
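-        # A failure report is a NotReady health document whose Details
-        # subsection carries the ProvisioningFailed substatus plus the
-        # error description.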
health_detail_subsection = \ - self._get_formatted_health_detail_subsection_xml_string( - health_substatus=escape(self.provisioning_failure_substatus), - health_description=escape( - self.provisioning_failure_err_description)) - - return self._get_formatted_health_report_xml_string( - incarnation=escape(str(self.default_parameters['incarnation'])), - container_id=escape(self.default_parameters['container_id']), - instance_id=escape(self.default_parameters['instance_id']), - health_status=escape(self.provisioning_not_ready_status), - health_detail_subsection=health_detail_subsection) - - def test_send_ready_signal_sends_post_request(self): - with mock.patch.object( - azure_helper.GoalStateHealthReporter, - 'build_report') as m_build_report: - client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) - reporter = azure_helper.GoalStateHealthReporter( - azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), - client, self.test_azure_endpoint) - reporter.send_ready_signal() - - self.assertEqual(1, self.post.call_count) - self.assertEqual( - mock.call( - self.test_health_report_url, - data=m_build_report.return_value, - extra_headers=self.test_default_headers), - self.post.call_args) - - def test_send_failure_signal_sends_post_request(self): - with mock.patch.object( - azure_helper.GoalStateHealthReporter, - 'build_report') as m_build_report: - client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) - reporter = azure_helper.GoalStateHealthReporter( - azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), - client, self.test_azure_endpoint) - reporter.send_failure_signal( - description=self.provisioning_failure_err_description) - - self.assertEqual(1, self.post.call_count) - self.assertEqual( - mock.call( - self.test_health_report_url, - data=m_build_report.return_value, - extra_headers=self.test_default_headers), - self.post.call_args) - - def test_build_report_for_ready_signal_health_document(self): - health_document = self._get_report_ready_health_document() - reporter = azure_helper.GoalStateHealthReporter( - azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), - azure_helper.AzureEndpointHttpClient(mock.MagicMock()), - self.test_azure_endpoint) - generated_health_document = reporter.build_report( - incarnation=self.default_parameters['incarnation'], - container_id=self.default_parameters['container_id'], - instance_id=self.default_parameters['instance_id'], - status=self.provisioning_success_status) - - self.assertEqual(health_document, generated_health_document) - - generated_xroot = ElementTree.fromstring(generated_health_document) - self.assertEqual( - self._text_from_xpath_in_xroot( - generated_xroot, './GoalStateIncarnation'), - str(self.default_parameters['incarnation'])) - self.assertEqual( - self._text_from_xpath_in_xroot( - generated_xroot, './Container/ContainerId'), - str(self.default_parameters['container_id'])) - self.assertEqual( - self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/InstanceId'), - str(self.default_parameters['instance_id'])) - self.assertEqual( - self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/Health/State'), - escape(self.provisioning_success_status)) - self.assertIsNone( - self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/Health/Details')) - self.assertIsNone( - self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/Health/Details/SubStatus')) - self.assertIsNone( - 
self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/Health/Details/Description') - ) - - def test_build_report_for_failure_signal_health_document(self): - health_document = self._get_report_failure_health_document() - reporter = azure_helper.GoalStateHealthReporter( - azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), - azure_helper.AzureEndpointHttpClient(mock.MagicMock()), - self.test_azure_endpoint) - generated_health_document = reporter.build_report( - incarnation=self.default_parameters['incarnation'], - container_id=self.default_parameters['container_id'], - instance_id=self.default_parameters['instance_id'], - status=self.provisioning_not_ready_status, - substatus=self.provisioning_failure_substatus, - description=self.provisioning_failure_err_description) - - self.assertEqual(health_document, generated_health_document) - - generated_xroot = ElementTree.fromstring(generated_health_document) - self.assertEqual( - self._text_from_xpath_in_xroot( - generated_xroot, './GoalStateIncarnation'), - str(self.default_parameters['incarnation'])) - self.assertEqual( - self._text_from_xpath_in_xroot( - generated_xroot, './Container/ContainerId'), - self.default_parameters['container_id']) - self.assertEqual( - self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/InstanceId'), - self.default_parameters['instance_id']) - self.assertEqual( - self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/Health/State'), - escape(self.provisioning_not_ready_status)) - self.assertEqual( - self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/Health/Details/' - 'SubStatus'), - escape(self.provisioning_failure_substatus)) - self.assertEqual( - self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/Health/Details/' - 'Description'), - escape(self.provisioning_failure_err_description)) - - def test_send_ready_signal_calls_build_report(self): - with mock.patch.object( - azure_helper.GoalStateHealthReporter, 'build_report' - ) as m_build_report: - reporter = azure_helper.GoalStateHealthReporter( - azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), - azure_helper.AzureEndpointHttpClient(mock.MagicMock()), - self.test_azure_endpoint) - reporter.send_ready_signal() - - self.assertEqual(1, m_build_report.call_count) - self.assertEqual( - mock.call( - incarnation=self.default_parameters['incarnation'], - container_id=self.default_parameters['container_id'], - instance_id=self.default_parameters['instance_id'], - status=self.provisioning_success_status), - m_build_report.call_args) - - def test_send_failure_signal_calls_build_report(self): - with mock.patch.object( - azure_helper.GoalStateHealthReporter, 'build_report' - ) as m_build_report: - reporter = azure_helper.GoalStateHealthReporter( - azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), - azure_helper.AzureEndpointHttpClient(mock.MagicMock()), - self.test_azure_endpoint) - reporter.send_failure_signal( - description=self.provisioning_failure_err_description) - - self.assertEqual(1, m_build_report.call_count) - self.assertEqual( - mock.call( - incarnation=self.default_parameters['incarnation'], - container_id=self.default_parameters['container_id'], - instance_id=self.default_parameters['instance_id'], - status=self.provisioning_not_ready_status, - substatus=self.provisioning_failure_substatus, - description=self.provisioning_failure_err_description), - 
-            m_build_report.call_args)
-
-    def test_build_report_escapes_chars(self):
-        incarnation = 'jd8\'9*&^<\'A>'
-        # container_id and health_status are representative fixtures; any
-        # strings containing XML-special characters exercise the escaping
-        container_id = 'i&ii<my\'ct>\"ID\"'
-        instance_id = 'Opo>>>jas\'&d;[p&fp\"a<&aa\'sd!@&!)((*<&>'
-        health_status = 'Not&Ready<\'yet\">'
-        health_substatus = '&as\"d<d<\'^@!5&6<7'
-        health_description = '&&&>!#$\"&&><>&\"sd<67<]>>'
-
-        health_detail_subsection = \
-            self._get_formatted_health_detail_subsection_xml_string(
-                health_substatus=escape(health_substatus),
-                health_description=escape(health_description))
-        health_document = self._get_formatted_health_report_xml_string(
-            incarnation=escape(incarnation),
-            container_id=escape(container_id),
-            instance_id=escape(instance_id),
-            health_status=escape(health_status),
-            health_detail_subsection=health_detail_subsection)
-
-        reporter = azure_helper.GoalStateHealthReporter(
-            azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
-            azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
-            self.test_azure_endpoint)
-        generated_health_document = reporter.build_report(
-            incarnation=incarnation,
-            container_id=container_id,
-            instance_id=instance_id,
-            status=health_status,
-            substatus=health_substatus,
-            description=health_description)
-
-        self.assertEqual(health_document, generated_health_document)
-
-    def test_build_report_conforms_to_length_limits(self):
-        reporter = azure_helper.GoalStateHealthReporter(
-            azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
-            azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
-            self.test_azure_endpoint)
-        long_err_msg = 'a9&ea8>>>e as1< d\"q2*&(^%\'a=5<' * 100
-        generated_health_document = reporter.build_report(
-            incarnation=self.default_parameters['incarnation'],
-            container_id=self.default_parameters['container_id'],
-            instance_id=self.default_parameters['instance_id'],
-            status=self.provisioning_not_ready_status,
-            substatus=self.provisioning_failure_substatus,
-            description=long_err_msg)
-
-        generated_xroot = ElementTree.fromstring(generated_health_document)
-        generated_health_report_description = self._text_from_xpath_in_xroot(
-            generated_xroot,
-            './Container/RoleInstanceList/Role/Health/Details/Description')
-        self.assertEqual(
-            len(unescape(generated_health_report_description)),
-            HEALTH_REPORT_DESCRIPTION_TRIM_LEN)
-
-    def test_trim_description_then_escape_conforms_to_len_limits_worst_case(
-            self):
-        """When unescaped characters are XML-escaped, the length increases.
-        Char      Escape String
-        <         &lt;
-        >         &gt;
-        "         &quot;
-        '         &apos;
-        &         &amp;
-
-        We (step 1) trim the health report XML's description field,
-        and then (step 2) XML-escape the health report XML's description
-        field.
-
-        The health report XML's description field limit within cloud-init
-        is HEALTH_REPORT_DESCRIPTION_TRIM_LEN.
-
-        The Azure platform's limit on the health report XML's description
-        field is 4096 chars.
-
-        For worst-case chars, there is a 6x blowup in length
-        when the chars are XML-escaped:
-        ' and " escape to the six-char entities &apos; and &quot;.
-
-        Ensure that (1) trimming and then (2) XML-escaping does not blow past
-        the Azure platform's limit for health report XML's description field
-        (4096 chars).
- """ - reporter = azure_helper.GoalStateHealthReporter( - azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), - azure_helper.AzureEndpointHttpClient(mock.MagicMock()), - self.test_azure_endpoint) - long_err_msg = '\'\"' * 10000 - generated_health_document = reporter.build_report( - incarnation=self.default_parameters['incarnation'], - container_id=self.default_parameters['container_id'], - instance_id=self.default_parameters['instance_id'], - status=self.provisioning_not_ready_status, - substatus=self.provisioning_failure_substatus, - description=long_err_msg) - - generated_xroot = ElementTree.fromstring(generated_health_document) - generated_health_report_description = self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/Health/Details/Description') - # The escaped description string should be less than - # the Azure platform limit for the escaped description string. - self.assertLessEqual(len(generated_health_report_description), 4096) - - -class TestWALinuxAgentShim(CiTestCase): - - def setUp(self): - super(TestWALinuxAgentShim, self).setUp() - patches = ExitStack() - self.addCleanup(patches.close) - - self.AzureEndpointHttpClient = patches.enter_context( - mock.patch.object(azure_helper, 'AzureEndpointHttpClient')) - self.find_endpoint = patches.enter_context( - mock.patch.object(wa_shim, 'find_endpoint')) - self.GoalState = patches.enter_context( - mock.patch.object(azure_helper, 'GoalState')) - self.OpenSSLManager = patches.enter_context( - mock.patch.object(azure_helper, 'OpenSSLManager', autospec=True)) - patches.enter_context( - mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock())) - - self.test_incarnation = 'TestIncarnation' - self.test_container_id = 'TestContainerId' - self.test_instance_id = 'TestInstanceId' - self.GoalState.return_value.incarnation = self.test_incarnation - self.GoalState.return_value.container_id = self.test_container_id - self.GoalState.return_value.instance_id = self.test_instance_id - - def test_eject_iso_is_called(self): - shim = wa_shim() - with mock.patch.object( - shim, 'eject_iso', autospec=True - ) as m_eject_iso: - shim.register_with_azure_and_fetch_data(iso_dev="/dev/sr0") - m_eject_iso.assert_called_once_with("/dev/sr0") - - def test_http_client_does_not_use_certificate_for_report_ready(self): - shim = wa_shim() - shim.register_with_azure_and_fetch_data() - self.assertEqual( - [mock.call(None)], - self.AzureEndpointHttpClient.call_args_list) - - def test_http_client_does_not_use_certificate_for_report_failure(self): - shim = wa_shim() - shim.register_with_azure_and_report_failure(description='TestDesc') - self.assertEqual( - [mock.call(None)], - self.AzureEndpointHttpClient.call_args_list) - - def test_correct_url_used_for_goalstate_during_report_ready(self): - self.find_endpoint.return_value = 'test_endpoint' - shim = wa_shim() - shim.register_with_azure_and_fetch_data() - m_get = self.AzureEndpointHttpClient.return_value.get - self.assertEqual( - [mock.call('http://test_endpoint/machine/?comp=goalstate')], - m_get.call_args_list) - self.assertEqual( - [mock.call( - m_get.return_value.contents, - self.AzureEndpointHttpClient.return_value, - False - )], - self.GoalState.call_args_list) - - def test_correct_url_used_for_goalstate_during_report_failure(self): - self.find_endpoint.return_value = 'test_endpoint' - shim = wa_shim() - shim.register_with_azure_and_report_failure(description='TestDesc') - m_get = self.AzureEndpointHttpClient.return_value.get - self.assertEqual( - 
[mock.call('http://test_endpoint/machine/?comp=goalstate')], - m_get.call_args_list) - self.assertEqual( - [mock.call( - m_get.return_value.contents, - self.AzureEndpointHttpClient.return_value, - False - )], - self.GoalState.call_args_list) - - def test_certificates_used_to_determine_public_keys(self): - # if register_with_azure_and_fetch_data() isn't passed some info about - # the user's public keys, there's no point in even trying to parse the - # certificates - shim = wa_shim() - mypk = [{'fingerprint': 'fp1', 'path': 'path1'}, - {'fingerprint': 'fp3', 'path': 'path3', 'value': ''}] - certs = {'fp1': 'expected-key', - 'fp2': 'should-not-be-found', - 'fp3': 'expected-no-value-key', - } - sslmgr = self.OpenSSLManager.return_value - sslmgr.parse_certificates.return_value = certs - data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) - self.assertEqual( - [mock.call(self.GoalState.return_value.certificates_xml)], - sslmgr.parse_certificates.call_args_list) - self.assertIn('expected-key', data['public-keys']) - self.assertIn('expected-no-value-key', data['public-keys']) - self.assertNotIn('should-not-be-found', data['public-keys']) - - def test_absent_certificates_produces_empty_public_keys(self): - mypk = [{'fingerprint': 'fp1', 'path': 'path1'}] - self.GoalState.return_value.certificates_xml = None - shim = wa_shim() - data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) - self.assertEqual([], data['public-keys']) - - def test_correct_url_used_for_report_ready(self): - self.find_endpoint.return_value = 'test_endpoint' - shim = wa_shim() - shim.register_with_azure_and_fetch_data() - expected_url = 'http://test_endpoint/machine?comp=health' - self.assertEqual( - [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)], - self.AzureEndpointHttpClient.return_value.post - .call_args_list) - - def test_correct_url_used_for_report_failure(self): - self.find_endpoint.return_value = 'test_endpoint' - shim = wa_shim() - shim.register_with_azure_and_report_failure(description='TestDesc') - expected_url = 'http://test_endpoint/machine?comp=health' - self.assertEqual( - [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)], - self.AzureEndpointHttpClient.return_value.post - .call_args_list) - - def test_goal_state_values_used_for_report_ready(self): - shim = wa_shim() - shim.register_with_azure_and_fetch_data() - posted_document = ( - self.AzureEndpointHttpClient.return_value.post - .call_args[1]['data'] - ) - self.assertIn(self.test_incarnation, posted_document) - self.assertIn(self.test_container_id, posted_document) - self.assertIn(self.test_instance_id, posted_document) - - def test_goal_state_values_used_for_report_failure(self): - shim = wa_shim() - shim.register_with_azure_and_report_failure(description='TestDesc') - posted_document = ( - self.AzureEndpointHttpClient.return_value.post - .call_args[1]['data'] - ) - self.assertIn(self.test_incarnation, posted_document) - self.assertIn(self.test_container_id, posted_document) - self.assertIn(self.test_instance_id, posted_document) - - def test_xml_elems_in_report_ready_post(self): - shim = wa_shim() - shim.register_with_azure_and_fetch_data() - health_document = HEALTH_REPORT_XML_TEMPLATE.format( - incarnation=escape(self.test_incarnation), - container_id=escape(self.test_container_id), - instance_id=escape(self.test_instance_id), - health_status=escape('Ready'), - health_detail_subsection='') - posted_document = ( - self.AzureEndpointHttpClient.return_value.post - .call_args[1]['data']) - 
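-        # The posted body should be exactly the rendered health template.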
self.assertEqual(health_document, posted_document) - - def test_xml_elems_in_report_failure_post(self): - shim = wa_shim() - shim.register_with_azure_and_report_failure(description='TestDesc') - health_document = HEALTH_REPORT_XML_TEMPLATE.format( - incarnation=escape(self.test_incarnation), - container_id=escape(self.test_container_id), - instance_id=escape(self.test_instance_id), - health_status=escape('NotReady'), - health_detail_subsection=HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE - .format( - health_substatus=escape('ProvisioningFailed'), - health_description=escape('TestDesc'))) - posted_document = ( - self.AzureEndpointHttpClient.return_value.post - .call_args[1]['data']) - self.assertEqual(health_document, posted_document) - - @mock.patch.object(azure_helper, 'GoalStateHealthReporter', autospec=True) - def test_register_with_azure_and_fetch_data_calls_send_ready_signal( - self, m_goal_state_health_reporter): - shim = wa_shim() - shim.register_with_azure_and_fetch_data() - self.assertEqual( - 1, - m_goal_state_health_reporter.return_value.send_ready_signal - .call_count) - - @mock.patch.object(azure_helper, 'GoalStateHealthReporter', autospec=True) - def test_register_with_azure_and_report_failure_calls_send_failure_signal( - self, m_goal_state_health_reporter): - shim = wa_shim() - shim.register_with_azure_and_report_failure(description='TestDesc') - m_goal_state_health_reporter.return_value.send_failure_signal \ - .assert_called_once_with(description='TestDesc') - - def test_register_with_azure_and_report_failure_does_not_need_certificates( - self): - shim = wa_shim() - with mock.patch.object( - shim, '_fetch_goal_state_from_azure', autospec=True - ) as m_fetch_goal_state_from_azure: - shim.register_with_azure_and_report_failure(description='TestDesc') - m_fetch_goal_state_from_azure.assert_called_once_with( - need_certificate=False) - - def test_clean_up_can_be_called_at_any_time(self): - shim = wa_shim() - shim.clean_up() - - def test_openssl_manager_not_instantiated_by_shim_report_status(self): - shim = wa_shim() - shim.register_with_azure_and_fetch_data() - shim.register_with_azure_and_report_failure(description='TestDesc') - shim.clean_up() - self.OpenSSLManager.assert_not_called() - - def test_clean_up_after_report_ready(self): - shim = wa_shim() - shim.register_with_azure_and_fetch_data() - shim.clean_up() - self.OpenSSLManager.return_value.clean_up.assert_not_called() - - def test_clean_up_after_report_failure(self): - shim = wa_shim() - shim.register_with_azure_and_report_failure(description='TestDesc') - shim.clean_up() - self.OpenSSLManager.return_value.clean_up.assert_not_called() - - def test_fetch_goalstate_during_report_ready_raises_exc_on_get_exc(self): - self.AzureEndpointHttpClient.return_value.get \ - .side_effect = SentinelException - shim = wa_shim() - self.assertRaises(SentinelException, - shim.register_with_azure_and_fetch_data) - - def test_fetch_goalstate_during_report_failure_raises_exc_on_get_exc(self): - self.AzureEndpointHttpClient.return_value.get \ - .side_effect = SentinelException - shim = wa_shim() - self.assertRaises(SentinelException, - shim.register_with_azure_and_report_failure, - description='TestDesc') - - def test_fetch_goalstate_during_report_ready_raises_exc_on_parse_exc(self): - self.GoalState.side_effect = SentinelException - shim = wa_shim() - self.assertRaises(SentinelException, - shim.register_with_azure_and_fetch_data) - - def test_fetch_goalstate_during_report_failure_raises_exc_on_parse_exc( - self): - self.GoalState.side_effect = 
SentinelException - shim = wa_shim() - self.assertRaises(SentinelException, - shim.register_with_azure_and_report_failure, - description='TestDesc') - - def test_failure_to_send_report_ready_health_doc_bubbles_up(self): - self.AzureEndpointHttpClient.return_value.post \ - .side_effect = SentinelException - shim = wa_shim() - self.assertRaises(SentinelException, - shim.register_with_azure_and_fetch_data) - - def test_failure_to_send_report_failure_health_doc_bubbles_up(self): - self.AzureEndpointHttpClient.return_value.post \ - .side_effect = SentinelException - shim = wa_shim() - self.assertRaises(SentinelException, - shim.register_with_azure_and_report_failure, - description='TestDesc') - - -class TestGetMetadataGoalStateXMLAndReportReadyToFabric(CiTestCase): - - def setUp(self): - super(TestGetMetadataGoalStateXMLAndReportReadyToFabric, self).setUp() - patches = ExitStack() - self.addCleanup(patches.close) - - self.m_shim = patches.enter_context( - mock.patch.object(azure_helper, 'WALinuxAgentShim')) - - def test_data_from_shim_returned(self): - ret = azure_helper.get_metadata_from_fabric() - self.assertEqual( - self.m_shim.return_value.register_with_azure_and_fetch_data - .return_value, - ret) - - def test_success_calls_clean_up(self): - azure_helper.get_metadata_from_fabric() - self.assertEqual(1, self.m_shim.return_value.clean_up.call_count) - - def test_failure_in_registration_propagates_exc_and_calls_clean_up( - self): - self.m_shim.return_value.register_with_azure_and_fetch_data \ - .side_effect = SentinelException - self.assertRaises(SentinelException, - azure_helper.get_metadata_from_fabric) - self.assertEqual(1, self.m_shim.return_value.clean_up.call_count) - - def test_calls_shim_register_with_azure_and_fetch_data(self): - m_pubkey_info = mock.MagicMock() - azure_helper.get_metadata_from_fabric( - pubkey_info=m_pubkey_info, iso_dev="/dev/sr0") - self.assertEqual( - 1, - self.m_shim.return_value - .register_with_azure_and_fetch_data.call_count) - self.assertEqual( - mock.call(iso_dev="/dev/sr0", pubkey_info=m_pubkey_info), - self.m_shim.return_value - .register_with_azure_and_fetch_data.call_args) - - def test_instantiates_shim_with_kwargs(self): - m_fallback_lease_file = mock.MagicMock() - m_dhcp_options = mock.MagicMock() - azure_helper.get_metadata_from_fabric( - fallback_lease_file=m_fallback_lease_file, - dhcp_opts=m_dhcp_options) - self.assertEqual(1, self.m_shim.call_count) - self.assertEqual( - mock.call( - fallback_lease_file=m_fallback_lease_file, - dhcp_options=m_dhcp_options), - self.m_shim.call_args) - - -class TestGetMetadataGoalStateXMLAndReportFailureToFabric(CiTestCase): - - def setUp(self): - super( - TestGetMetadataGoalStateXMLAndReportFailureToFabric, self).setUp() - patches = ExitStack() - self.addCleanup(patches.close) - - self.m_shim = patches.enter_context( - mock.patch.object(azure_helper, 'WALinuxAgentShim')) - - def test_success_calls_clean_up(self): - azure_helper.report_failure_to_fabric() - self.assertEqual( - 1, - self.m_shim.return_value.clean_up.call_count) - - def test_failure_in_shim_report_failure_propagates_exc_and_calls_clean_up( - self): - self.m_shim.return_value.register_with_azure_and_report_failure \ - .side_effect = SentinelException - self.assertRaises(SentinelException, - azure_helper.report_failure_to_fabric) - self.assertEqual( - 1, - self.m_shim.return_value.clean_up.call_count) - - def test_report_failure_to_fabric_with_desc_calls_shim_report_failure( - self): - azure_helper.report_failure_to_fabric(description='TestDesc') - 
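-        # The caller-supplied description must reach the shim unchanged.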
self.m_shim.return_value.register_with_azure_and_report_failure \ - .assert_called_once_with(description='TestDesc') - - def test_report_failure_to_fabric_with_no_desc_calls_shim_report_failure( - self): - azure_helper.report_failure_to_fabric() - # default err message description should be shown to the user - # if no description is passed in - self.m_shim.return_value.register_with_azure_and_report_failure \ - .assert_called_once_with( - description=azure_helper - .DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE) - - def test_report_failure_to_fabric_empty_desc_calls_shim_report_failure( - self): - azure_helper.report_failure_to_fabric(description='') - # default err message description should be shown to the user - # if an empty description is passed in - self.m_shim.return_value.register_with_azure_and_report_failure \ - .assert_called_once_with( - description=azure_helper - .DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE) - - def test_instantiates_shim_with_kwargs(self): - m_fallback_lease_file = mock.MagicMock() - m_dhcp_options = mock.MagicMock() - azure_helper.report_failure_to_fabric( - fallback_lease_file=m_fallback_lease_file, - dhcp_opts=m_dhcp_options) - self.m_shim.assert_called_once_with( - fallback_lease_file=m_fallback_lease_file, - dhcp_options=m_dhcp_options) - - -class TestExtractIpAddressFromNetworkd(CiTestCase): - - azure_lease = dedent("""\ - # This is private data. Do not parse. - ADDRESS=10.132.0.5 - NETMASK=255.255.255.255 - ROUTER=10.132.0.1 - SERVER_ADDRESS=169.254.169.254 - NEXT_SERVER=10.132.0.1 - MTU=1460 - T1=43200 - T2=75600 - LIFETIME=86400 - DNS=169.254.169.254 - NTP=169.254.169.254 - DOMAINNAME=c.ubuntu-foundations.internal - DOMAIN_SEARCH_LIST=c.ubuntu-foundations.internal google.internal - HOSTNAME=tribaal-test-171002-1349.c.ubuntu-foundations.internal - ROUTES=10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1 - CLIENTID=ff405663a200020000ab11332859494d7a8b4c - OPTION_245=624c3620 - """) - - def setUp(self): - super(TestExtractIpAddressFromNetworkd, self).setUp() - self.lease_d = self.tmp_dir() - - def test_no_valid_leases_is_none(self): - """No valid leases should return None.""" - self.assertIsNone( - wa_shim._networkd_get_value_from_leases(self.lease_d)) - - def test_option_245_is_found_in_single(self): - """A single valid lease with 245 option should return it.""" - populate_dir(self.lease_d, {'9': self.azure_lease}) - self.assertEqual( - '624c3620', wa_shim._networkd_get_value_from_leases(self.lease_d)) - - def test_option_245_not_found_returns_None(self): - """A valid lease, but no option 245 should return None.""" - populate_dir( - self.lease_d, - {'9': self.azure_lease.replace("OPTION_245", "OPTION_999")}) - self.assertIsNone( - wa_shim._networkd_get_value_from_leases(self.lease_d)) - - def test_multiple_returns_first(self): - """Somewhat arbitrarily return the first address when multiple. - - Most important at the moment is that this is consistent behavior - rather than changing randomly as in order of a dictionary.""" - myval = "624c3601" - populate_dir( - self.lease_d, - {'9': self.azure_lease, - '2': self.azure_lease.replace("624c3620", myval)}) - self.assertEqual( - myval, wa_shim._networkd_get_value_from_leases(self.lease_d)) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py deleted file mode 100644 index 7aa3b1d1..00000000 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ /dev/null @@ -1,137 +0,0 @@ -# This file is part of cloud-init. 
See LICENSE file for license information. - -import copy - -from cloudinit.cs_utils import Cepko -from cloudinit import distros -from cloudinit import helpers -from cloudinit import sources -from cloudinit.sources import DataSourceCloudSigma - -from cloudinit.tests import helpers as test_helpers - -SERVER_CONTEXT = { - "cpu": 1000, - "cpus_instead_of_cores": False, - "global_context": {"some_global_key": "some_global_val"}, - "mem": 1073741824, - "meta": { - "ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe", - "cloudinit-user-data": "#cloud-config\n\n...", - }, - "name": "test_server", - "requirements": [], - "smp": 1, - "tags": ["much server", "very performance"], - "uuid": "65b2fb23-8c03-4187-a3ba-8b7c919e8890", - "vnc_password": "9e84d6cb49e46379", - "vendor_data": { - "location": "zrh", - "cloudinit": "#cloud-config\n\n...", - } -} - -DS_PATH = 'cloudinit.sources.DataSourceCloudSigma.DataSourceCloudSigma' - - -class CepkoMock(Cepko): - def __init__(self, mocked_context): - self.result = mocked_context - - def all(self): - return self - - -class DataSourceCloudSigmaTest(test_helpers.CiTestCase): - def setUp(self): - super(DataSourceCloudSigmaTest, self).setUp() - self.paths = helpers.Paths({'run_dir': self.tmp_dir()}) - self.add_patch(DS_PATH + '.is_running_in_cloudsigma', - "m_is_container", return_value=True) - - distro_cls = distros.fetch("ubuntu") - distro = distro_cls("ubuntu", cfg={}, paths=self.paths) - self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( - sys_cfg={}, distro=distro, paths=self.paths) - self.datasource.cepko = CepkoMock(SERVER_CONTEXT) - - def test_get_hostname(self): - self.datasource.get_data() - self.assertEqual("test_server", self.datasource.get_hostname()) - self.datasource.metadata['name'] = '' - self.assertEqual("65b2fb23", self.datasource.get_hostname()) - utf8_hostname = b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82'.decode('utf-8') - self.datasource.metadata['name'] = utf8_hostname - self.assertEqual("65b2fb23", self.datasource.get_hostname()) - - def test_get_public_ssh_keys(self): - self.datasource.get_data() - self.assertEqual([SERVER_CONTEXT['meta']['ssh_public_key']], - self.datasource.get_public_ssh_keys()) - - def test_get_instance_id(self): - self.datasource.get_data() - self.assertEqual(SERVER_CONTEXT['uuid'], - self.datasource.get_instance_id()) - - def test_platform(self): - """All platform-related attributes are set.""" - self.datasource.get_data() - self.assertEqual(self.datasource.cloud_name, 'cloudsigma') - self.assertEqual(self.datasource.platform_type, 'cloudsigma') - self.assertEqual(self.datasource.subplatform, 'cepko (/dev/ttyS1)') - - def test_metadata(self): - self.datasource.get_data() - self.assertEqual(self.datasource.metadata, SERVER_CONTEXT) - - def test_user_data(self): - self.datasource.get_data() - self.assertEqual(self.datasource.userdata_raw, - SERVER_CONTEXT['meta']['cloudinit-user-data']) - - def test_encoded_user_data(self): - encoded_context = copy.deepcopy(SERVER_CONTEXT) - encoded_context['meta']['base64_fields'] = 'cloudinit-user-data' - encoded_context['meta']['cloudinit-user-data'] = 'aGkgd29ybGQK' - self.datasource.cepko = CepkoMock(encoded_context) - self.datasource.get_data() - - self.assertEqual(self.datasource.userdata_raw, b'hi world\n') - - def test_vendor_data(self): - self.datasource.get_data() - self.assertEqual(self.datasource.vendordata_raw, - SERVER_CONTEXT['vendor_data']['cloudinit']) - - def test_lack_of_vendor_data(self): - stripped_context = copy.deepcopy(SERVER_CONTEXT) - del 
stripped_context["vendor_data"] - self.datasource.cepko = CepkoMock(stripped_context) - self.datasource.get_data() - - self.assertIsNone(self.datasource.vendordata_raw) - - def test_lack_of_cloudinit_key_in_vendor_data(self): - stripped_context = copy.deepcopy(SERVER_CONTEXT) - del stripped_context["vendor_data"]["cloudinit"] - self.datasource.cepko = CepkoMock(stripped_context) - self.datasource.get_data() - - self.assertIsNone(self.datasource.vendordata_raw) - - -class DsLoads(test_helpers.TestCase): - def test_get_datasource_list_returns_in_local(self): - deps = (sources.DEP_FILESYSTEM,) - ds_list = DataSourceCloudSigma.get_datasource_list(deps) - self.assertEqual(ds_list, - [DataSourceCloudSigma.DataSourceCloudSigma]) - - def test_list_sources_finds_ds(self): - found = sources.list_sources( - ['CloudSigma'], (sources.DEP_FILESYSTEM,), ['cloudinit.sources']) - self.assertEqual([DataSourceCloudSigma.DataSourceCloudSigma], - found) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/test_datasource/test_cloudstack.py deleted file mode 100644 index e68168f2..00000000 --- a/tests/unittests/test_datasource/test_cloudstack.py +++ /dev/null @@ -1,186 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit import helpers -from cloudinit import util -from cloudinit.sources.DataSourceCloudStack import ( - DataSourceCloudStack, get_latest_lease) - -from cloudinit.tests.helpers import CiTestCase, ExitStack, mock - -import os -import time - -MOD_PATH = 'cloudinit.sources.DataSourceCloudStack' -DS_PATH = MOD_PATH + '.DataSourceCloudStack' - - -class TestCloudStackPasswordFetching(CiTestCase): - - def setUp(self): - super(TestCloudStackPasswordFetching, self).setUp() - self.patches = ExitStack() - self.addCleanup(self.patches.close) - mod_name = MOD_PATH - self.patches.enter_context(mock.patch('{0}.ec2'.format(mod_name))) - self.patches.enter_context(mock.patch('{0}.uhelp'.format(mod_name))) - default_gw = "192.201.20.0" - get_latest_lease = mock.MagicMock(return_value=None) - self.patches.enter_context(mock.patch( - mod_name + '.get_latest_lease', get_latest_lease)) - - get_default_gw = mock.MagicMock(return_value=default_gw) - self.patches.enter_context(mock.patch( - mod_name + '.get_default_gateway', get_default_gw)) - - get_networkd_server_address = mock.MagicMock(return_value=None) - self.patches.enter_context(mock.patch( - mod_name + '.dhcp.networkd_get_option_from_leases', - get_networkd_server_address)) - self.tmp = self.tmp_dir() - - def _set_password_server_response(self, response_string): - subp = mock.MagicMock(return_value=(response_string, '')) - self.patches.enter_context( - mock.patch('cloudinit.sources.DataSourceCloudStack.subp.subp', - subp)) - return subp - - def test_empty_password_doesnt_create_config(self): - self._set_password_server_response('') - ds = DataSourceCloudStack( - {}, None, helpers.Paths({'run_dir': self.tmp})) - ds.get_data() - self.assertEqual({}, ds.get_config_obj()) - - def test_saved_password_doesnt_create_config(self): - self._set_password_server_response('saved_password') - ds = DataSourceCloudStack( - {}, None, helpers.Paths({'run_dir': self.tmp})) - ds.get_data() - self.assertEqual({}, ds.get_config_obj()) - - @mock.patch(DS_PATH + '.wait_for_metadata_service') - def test_password_sets_password(self, m_wait): - m_wait.return_value = True - password = 'SekritSquirrel' - self._set_password_server_response(password) - ds = DataSourceCloudStack( - {}, None, 
helpers.Paths({'run_dir': self.tmp})) - ds.get_data() - self.assertEqual(password, ds.get_config_obj()['password']) - - @mock.patch(DS_PATH + '.wait_for_metadata_service') - def test_bad_request_doesnt_stop_ds_from_working(self, m_wait): - m_wait.return_value = True - self._set_password_server_response('bad_request') - ds = DataSourceCloudStack( - {}, None, helpers.Paths({'run_dir': self.tmp})) - self.assertTrue(ds.get_data()) - - def assertRequestTypesSent(self, subp, expected_request_types): - request_types = [] - for call in subp.call_args_list: - args = call[0][0] - for arg in args: - if arg.startswith('DomU_Request'): - request_types.append(arg.split()[1]) - self.assertEqual(expected_request_types, request_types) - - @mock.patch(DS_PATH + '.wait_for_metadata_service') - def test_valid_response_means_password_marked_as_saved(self, m_wait): - m_wait.return_value = True - password = 'SekritSquirrel' - subp = self._set_password_server_response(password) - ds = DataSourceCloudStack( - {}, None, helpers.Paths({'run_dir': self.tmp})) - ds.get_data() - self.assertRequestTypesSent(subp, - ['send_my_password', 'saved_password']) - - def _check_password_not_saved_for(self, response_string): - subp = self._set_password_server_response(response_string) - ds = DataSourceCloudStack( - {}, None, helpers.Paths({'run_dir': self.tmp})) - with mock.patch(DS_PATH + '.wait_for_metadata_service') as m_wait: - m_wait.return_value = True - ds.get_data() - self.assertRequestTypesSent(subp, ['send_my_password']) - - def test_password_not_saved_if_empty(self): - self._check_password_not_saved_for('') - - def test_password_not_saved_if_already_saved(self): - self._check_password_not_saved_for('saved_password') - - def test_password_not_saved_if_bad_request(self): - self._check_password_not_saved_for('bad_request') - - -class TestGetLatestLease(CiTestCase): - - def _populate_dir_list(self, bdir, files): - """populate_dir_list([(name, data), (name, data)]) - - writes files to bdir, and updates timestamps to ensure - that their mtime increases with each file.""" - - start = int(time.time()) - for num, fname in enumerate(reversed(files)): - fpath = os.path.sep.join((bdir, fname)) - util.write_file(fpath, fname.encode()) - os.utime(fpath, (start - num, start - num)) - - def _pop_and_test(self, files, expected): - lease_d = self.tmp_dir() - self._populate_dir_list(lease_d, files) - self.assertEqual(self.tmp_path(expected, lease_d), - get_latest_lease(lease_d)) - - def test_skips_dhcpv6_files(self): - """files started with dhclient6 should be skipped.""" - expected = "dhclient.lease" - self._pop_and_test([expected, "dhclient6.lease"], expected) - - def test_selects_dhclient_dot_files(self): - """files named dhclient.lease or dhclient.leases should be used. - - Ubuntu names files dhclient.eth0.leases dhclient6.leases and - sometimes dhclient.leases.""" - self._pop_and_test(["dhclient.lease"], "dhclient.lease") - self._pop_and_test(["dhclient.leases"], "dhclient.leases") - - def test_selects_dhclient_dash_files(self): - """files named dhclient-lease or dhclient-leases should be used. - - Redhat/Centos names files with dhclient--eth0.lease (centos 7) or - dhclient-eth0.leases (centos 6). 
- """ - self._pop_and_test(["dhclient-eth0.lease"], "dhclient-eth0.lease") - self._pop_and_test(["dhclient--eth0.lease"], "dhclient--eth0.lease") - - def test_ignores_by_extension(self): - """only .lease or .leases file should be considered.""" - - self._pop_and_test(["dhclient.lease", "dhclient.lease.bk", - "dhclient.lease-old", "dhclient.leaselease"], - "dhclient.lease") - - def test_selects_newest_matching(self): - """If multiple files match, the newest written should be used.""" - lease_d = self.tmp_dir() - valid_1 = "dhclient.leases" - valid_2 = "dhclient.lease" - valid_1_path = self.tmp_path(valid_1, lease_d) - valid_2_path = self.tmp_path(valid_2, lease_d) - - self._populate_dir_list(lease_d, [valid_1, valid_2]) - self.assertEqual(valid_2_path, get_latest_lease(lease_d)) - - # now update mtime on valid_2 to be older than valid_1 and re-check. - mtime = int(os.path.getmtime(valid_1_path)) - 1 - os.utime(valid_2_path, (mtime, mtime)) - - self.assertEqual(valid_1_path, get_latest_lease(lease_d)) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py deleted file mode 100644 index 9089e5de..00000000 --- a/tests/unittests/test_datasource/test_common.py +++ /dev/null @@ -1,121 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit import settings -from cloudinit import sources -from cloudinit import type_utils -from cloudinit.sources import ( - DataSource, - DataSourceAliYun as AliYun, - DataSourceAltCloud as AltCloud, - DataSourceAzure as Azure, - DataSourceBigstep as Bigstep, - DataSourceCloudSigma as CloudSigma, - DataSourceCloudStack as CloudStack, - DataSourceConfigDrive as ConfigDrive, - DataSourceDigitalOcean as DigitalOcean, - DataSourceEc2 as Ec2, - DataSourceExoscale as Exoscale, - DataSourceGCE as GCE, - DataSourceHetzner as Hetzner, - DataSourceIBMCloud as IBMCloud, - DataSourceLXD as LXD, - DataSourceMAAS as MAAS, - DataSourceNoCloud as NoCloud, - DataSourceOpenNebula as OpenNebula, - DataSourceOpenStack as OpenStack, - DataSourceOracle as Oracle, - DataSourceOVF as OVF, - DataSourceRbxCloud as RbxCloud, - DataSourceScaleway as Scaleway, - DataSourceSmartOS as SmartOS, - DataSourceUpCloud as UpCloud, - DataSourceVultr as Vultr, - DataSourceVMware as VMware, -) -from cloudinit.sources import DataSourceNone as DSNone - -from cloudinit.tests import helpers as test_helpers - -DEFAULT_LOCAL = [ - Azure.DataSourceAzure, - CloudSigma.DataSourceCloudSigma, - ConfigDrive.DataSourceConfigDrive, - DigitalOcean.DataSourceDigitalOcean, - GCE.DataSourceGCELocal, - Hetzner.DataSourceHetzner, - IBMCloud.DataSourceIBMCloud, - LXD.DataSourceLXD, - NoCloud.DataSourceNoCloud, - OpenNebula.DataSourceOpenNebula, - Oracle.DataSourceOracle, - OVF.DataSourceOVF, - SmartOS.DataSourceSmartOS, - Vultr.DataSourceVultr, - Ec2.DataSourceEc2Local, - OpenStack.DataSourceOpenStackLocal, - RbxCloud.DataSourceRbxCloud, - Scaleway.DataSourceScaleway, - UpCloud.DataSourceUpCloudLocal, - VMware.DataSourceVMware, -] - -DEFAULT_NETWORK = [ - AliYun.DataSourceAliYun, - AltCloud.DataSourceAltCloud, - Bigstep.DataSourceBigstep, - CloudStack.DataSourceCloudStack, - DSNone.DataSourceNone, - Ec2.DataSourceEc2, - Exoscale.DataSourceExoscale, - GCE.DataSourceGCE, - MAAS.DataSourceMAAS, - NoCloud.DataSourceNoCloudNet, - OpenStack.DataSourceOpenStack, - OVF.DataSourceOVFNet, - UpCloud.DataSourceUpCloud, - VMware.DataSourceVMware, -] - - -class ExpectedDataSources(test_helpers.TestCase): - 
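-    # These attributes mirror the arguments cloud-init itself passes to
-    # sources.list_sources(): the builtin datasource list from settings,
-    # the set of satisfied dependencies, and the package to search for
-    # DataSource modules.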
builtin_list = settings.CFG_BUILTIN['datasource_list'] - deps_local = [sources.DEP_FILESYSTEM] - deps_network = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK] - pkg_list = [type_utils.obj_name(sources)] - - def test_expected_default_local_sources_found(self): - found = sources.list_sources( - self.builtin_list, self.deps_local, self.pkg_list) - self.assertEqual(set(DEFAULT_LOCAL), set(found)) - - def test_expected_default_network_sources_found(self): - found = sources.list_sources( - self.builtin_list, self.deps_network, self.pkg_list) - self.assertEqual(set(DEFAULT_NETWORK), set(found)) - - def test_expected_nondefault_network_sources_found(self): - found = sources.list_sources( - ['AliYun'], self.deps_network, self.pkg_list) - self.assertEqual(set([AliYun.DataSourceAliYun]), set(found)) - - -class TestDataSourceInvariants(test_helpers.TestCase): - def test_data_sources_have_valid_network_config_sources(self): - for ds in DEFAULT_LOCAL + DEFAULT_NETWORK: - for cfg_src in ds.network_config_sources: - fail_msg = ('{} has an invalid network_config_sources entry:' - ' {}'.format(str(ds), cfg_src)) - self.assertTrue(hasattr(sources.NetworkConfigSource, cfg_src), - fail_msg) - - def test_expected_dsname_defined(self): - for ds in DEFAULT_LOCAL + DEFAULT_NETWORK: - fail_msg = ( - '{} has an invalid / missing dsname property: {}'.format( - str(ds), str(ds.dsname) - ) - ) - self.assertNotEqual(ds.dsname, DataSource.dsname, fail_msg) - self.assertIsNotNone(ds.dsname) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py deleted file mode 100644 index be13165c..00000000 --- a/tests/unittests/test_datasource/test_configdrive.py +++ /dev/null @@ -1,844 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
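-# Note on datasource discovery, exercised by test_common.py above: a
-# datasource class is selected only when its declared dependency tuple
-# matches the currently satisfied dependencies exactly. A minimal sketch
-# of that matching (assumed helper name and shape; the real logic lives
-# in cloudinit/sources/__init__.py):
-#
-#     def list_from_depends(depends, pairs):
-#         # pairs: [(cls, deps), ...] as returned by a module's
-#         # get_datasource_list(); keep a class only when its dependency
-#         # set equals the satisfied set.
-#         return [cls for (cls, deps) in pairs
-#                 if set(deps) == set(depends)]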
- -from copy import copy, deepcopy -import json -import os - -from cloudinit import helpers -from cloudinit.net import eni -from cloudinit.net import network_state -from cloudinit import settings -from cloudinit.sources import DataSourceConfigDrive as ds -from cloudinit.sources.helpers import openstack -from cloudinit import util - -from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir - - -PUBKEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' -EC2_META = { - 'ami-id': 'ami-00000001', - 'ami-launch-index': 0, - 'ami-manifest-path': 'FIXME', - 'block-device-mapping': { - 'ami': 'sda1', - 'ephemeral0': 'sda2', - 'root': '/dev/sda1', - 'swap': 'sda3'}, - 'hostname': 'sm-foo-test.novalocal', - 'instance-action': 'none', - 'instance-id': 'i-00000001', - 'instance-type': 'm1.tiny', - 'local-hostname': 'sm-foo-test.novalocal', - 'local-ipv4': None, - 'placement': {'availability-zone': 'nova'}, - 'public-hostname': 'sm-foo-test.novalocal', - 'public-ipv4': '', - 'public-keys': {'0': {'openssh-key': PUBKEY}}, - 'reservation-id': 'r-iru5qm4m', - 'security-groups': ['default'] -} -USER_DATA = b'#!/bin/sh\necho This is user data\n' -OSTACK_META = { - 'availability_zone': 'nova', - 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'}, - {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}], - 'hostname': 'sm-foo-test.novalocal', - 'meta': {'dsmode': 'local', 'my-meta': 'my-value'}, - 'name': 'sm-foo-test', - 'public_keys': {'mykey': PUBKEY}, - 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'} - -CONTENT_0 = b'This is contents of /etc/foo.cfg\n' -CONTENT_1 = b'# this is /etc/bar/bar.cfg\n' -NETWORK_DATA = { - 'services': [ - {'type': 'dns', 'address': '199.204.44.24'}, - {'type': 'dns', 'address': '199.204.47.54'} - ], - 'links': [ - {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd', - 'ethernet_mac_address': 'fa:16:3e:69:b0:58', - 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'}, - {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33', - 'ethernet_mac_address': 'fa:16:3e:d4:57:ad', - 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'}, - {'vif_id': '1a5382f8-04c5-4d75-ab98-d666c1ef52cc', - 'ethernet_mac_address': 'fa:16:3e:05:30:fe', - 'type': 'ovs', 'mtu': None, 'id': 'tap1a5382f8-04', 'name': 'nic0'} - ], - 'networks': [ - {'link': 'tap2ecc7709-b3', 'type': 'ipv4_dhcp', - 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235', - 'id': 'network0'}, - {'link': 'tap2f88d109-5b', 'type': 'ipv4_dhcp', - 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54', - 'id': 'network1'}, - {'link': 'tap1a5382f8-04', 'type': 'ipv4_dhcp', - 'network_id': 'dab2ba57-cae2-4311-a5ed-010b263891f5', - 'id': 'network2'} - ] -} - -NETWORK_DATA_2 = { - "services": [ - {"type": "dns", "address": "1.1.1.191"}, - {"type": "dns", "address": "1.1.1.4"}], - "networks": [ - {"network_id": "d94bbe94-7abc-48d4-9c82-4628ea26164a", "type": "ipv4", - "netmask": "255.255.255.248", "link": "eth0", - "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", - "gateway": "2.2.2.9"}], - "ip_address": "2.2.2.10", "id": "network0-ipv4"}, - {"network_id": "ca447c83-6409-499b-aaef-6ad1ae995348", "type": "ipv4", - "netmask": "255.255.255.224", "link": "eth1", - "routes": [], "ip_address": "3.3.3.24", "id": "network1-ipv4"}], - "links": [ - {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": 1500, - "type": "vif", "id": "eth0", "vif_id": "vif-foo1"}, - {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": 1500, - "type": "vif", "id": "eth1", "vif_id": "vif-foo2"}] -} - -# This network 
data ha 'tap' or null type for a link. -NETWORK_DATA_3 = { - "services": [{"type": "dns", "address": "172.16.36.11"}, - {"type": "dns", "address": "172.16.36.12"}], - "networks": [ - {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e", - "type": "ipv4", "netmask": "255.255.255.128", - "link": "tap77a0dc5b-72", "ip_address": "172.17.48.18", - "id": "network0", - "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", - "gateway": "172.17.48.1"}]}, - {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e", - "type": "ipv6", "netmask": "ffff:ffff:ffff:ffff::", - "link": "tap77a0dc5b-72", - "ip_address": "fdb8:52d0:9d14:0:f816:3eff:fe9f:70d", - "id": "network1", - "routes": [{"netmask": "::", "network": "::", - "gateway": "fdb8:52d0:9d14::1"}]}, - {"network_id": "1f53cb0e-72d3-47c7-94b9-ff4397c5fe54", - "type": "ipv4", "netmask": "255.255.255.128", - "link": "tap7d6b7bec-93", "ip_address": "172.16.48.13", - "id": "network2", - "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", - "gateway": "172.16.48.1"}, - {"netmask": "255.255.0.0", "network": "172.16.0.0", - "gateway": "172.16.48.1"}]}], - "links": [ - {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": None, - "type": "tap", "id": "tap77a0dc5b-72", - "vif_id": "77a0dc5b-720e-41b7-bfa7-1b2ff62e0d48"}, - {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": None, - "type": None, "id": "tap7d6b7bec-93", - "vif_id": "7d6b7bec-93e6-4c03-869a-ddc5014892d5"} - ] -} - -BOND_MAC = "fa:16:3e:b3:72:36" -NETWORK_DATA_BOND = { - "services": [ - {"type": "dns", "address": "1.1.1.191"}, - {"type": "dns", "address": "1.1.1.4"}, - ], - "networks": [ - {"id": "network2-ipv4", "ip_address": "2.2.2.13", - "link": "vlan2", "netmask": "255.255.255.248", - "network_id": "4daf5ce8-38cf-4240-9f1a-04e86d7c6117", - "type": "ipv4", - "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", - "gateway": "2.2.2.9"}]}, - {"id": "network3-ipv4", "ip_address": "10.0.1.5", - "link": "vlan3", "netmask": "255.255.255.248", - "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d", - "type": "ipv4", - "routes": [{"netmask": "255.255.255.255", - "network": "192.168.1.0", "gateway": "10.0.1.1"}]} - ], - "links": [ - {"ethernet_mac_address": "0c:c4:7a:34:6e:3c", - "id": "eth0", "mtu": 1500, "type": "phy"}, - {"ethernet_mac_address": "0c:c4:7a:34:6e:3d", - "id": "eth1", "mtu": 1500, "type": "phy"}, - {"bond_links": ["eth0", "eth1"], - "bond_miimon": 100, "bond_mode": "4", - "bond_xmit_hash_policy": "layer3+4", - "ethernet_mac_address": BOND_MAC, - "id": "bond0", "type": "bond"}, - {"ethernet_mac_address": "fa:16:3e:b3:72:30", - "id": "vlan2", "type": "vlan", "vlan_id": 602, - "vlan_link": "bond0", "vlan_mac_address": "fa:16:3e:b3:72:30"}, - {"ethernet_mac_address": "fa:16:3e:66:ab:a6", - "id": "vlan3", "type": "vlan", "vlan_id": 612, "vlan_link": "bond0", - "vlan_mac_address": "fa:16:3e:66:ab:a6"} - ] -} - -NETWORK_DATA_VLAN = { - "services": [{"type": "dns", "address": "1.1.1.191"}], - "networks": [ - {"id": "network1-ipv4", "ip_address": "10.0.1.5", - "link": "vlan1", "netmask": "255.255.255.248", - "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d", - "type": "ipv4", - "routes": [{"netmask": "255.255.255.255", - "network": "192.168.1.0", "gateway": "10.0.1.1"}]} - ], - "links": [ - {"ethernet_mac_address": "fa:16:3e:69:b0:58", - "id": "eth0", "mtu": 1500, "type": "phy"}, - {"ethernet_mac_address": "fa:16:3e:b3:72:30", - "id": "vlan1", "type": "vlan", "vlan_id": 602, - "vlan_link": "eth0", "vlan_mac_address": "fa:16:3e:b3:72:30"}, - ] -} - -KNOWN_MACS = { - 
'fa:16:3e:69:b0:58': 'enp0s1', - 'fa:16:3e:d4:57:ad': 'enp0s2', - 'fa:16:3e:dd:50:9a': 'foo1', - 'fa:16:3e:a8:14:69': 'foo2', - 'fa:16:3e:ed:9a:59': 'foo3', - '0c:c4:7a:34:6e:3d': 'oeth1', - '0c:c4:7a:34:6e:3c': 'oeth0', -} - -CFG_DRIVE_FILES_V2 = { - 'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META), - 'ec2/2009-04-04/user-data': USER_DATA, - 'ec2/latest/meta-data.json': json.dumps(EC2_META), - 'ec2/latest/user-data': USER_DATA, - 'openstack/2012-08-10/meta_data.json': json.dumps(OSTACK_META), - 'openstack/2012-08-10/user_data': USER_DATA, - 'openstack/content/0000': CONTENT_0, - 'openstack/content/0001': CONTENT_1, - 'openstack/latest/meta_data.json': json.dumps(OSTACK_META), - 'openstack/latest/user_data': USER_DATA, - 'openstack/latest/network_data.json': json.dumps(NETWORK_DATA), - 'openstack/2015-10-15/meta_data.json': json.dumps(OSTACK_META), - 'openstack/2015-10-15/user_data': USER_DATA, - 'openstack/2015-10-15/network_data.json': json.dumps(NETWORK_DATA)} - -M_PATH = "cloudinit.sources.DataSourceConfigDrive." - - -class TestConfigDriveDataSource(CiTestCase): - - def setUp(self): - super(TestConfigDriveDataSource, self).setUp() - self.add_patch( - M_PATH + "util.find_devs_with", - "m_find_devs_with", return_value=[]) - self.tmp = self.tmp_dir() - - def test_ec2_metadata(self): - populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - found = ds.read_config_drive(self.tmp) - self.assertTrue('ec2-metadata' in found) - ec2_md = found['ec2-metadata'] - self.assertEqual(EC2_META, ec2_md) - - def test_dev_os_remap(self): - populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, - None, - helpers.Paths({})) - found = ds.read_config_drive(self.tmp) - cfg_ds.metadata = found['metadata'] - name_tests = { - 'ami': '/dev/vda1', - 'root': '/dev/vda1', - 'ephemeral0': '/dev/vda2', - 'swap': '/dev/vda3', - } - for name, dev_name in name_tests.items(): - with ExitStack() as mocks: - provided_name = dev_name[len('/dev/'):] - provided_name = "s" + provided_name[1:] - find_mock = mocks.enter_context( - mock.patch.object(util, 'find_devs_with', - return_value=[provided_name])) - # We want os.path.exists() to return False on its first call, - # and True on its second call. We use a handy generator as - # the mock side effect for this. The mocked function returns - # what the side effect returns. 
- - def exists_side_effect(): - yield False - yield True - exists_mock = mocks.enter_context( - mock.patch.object(os.path, 'exists', - side_effect=exists_side_effect())) - self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) - - find_mock.assert_called_once_with(mock.ANY) - self.assertEqual(exists_mock.call_count, 2) - - def test_dev_os_map(self): - populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, - None, - helpers.Paths({})) - found = ds.read_config_drive(self.tmp) - os_md = found['metadata'] - cfg_ds.metadata = os_md - name_tests = { - 'ami': '/dev/vda1', - 'root': '/dev/vda1', - 'ephemeral0': '/dev/vda2', - 'swap': '/dev/vda3', - } - for name, dev_name in name_tests.items(): - with ExitStack() as mocks: - find_mock = mocks.enter_context( - mock.patch.object(util, 'find_devs_with', - return_value=[dev_name])) - exists_mock = mocks.enter_context( - mock.patch.object(os.path, 'exists', - return_value=True)) - self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) - - find_mock.assert_called_once_with(mock.ANY) - exists_mock.assert_called_once_with(mock.ANY) - - def test_dev_ec2_remap(self): - populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, - None, - helpers.Paths({})) - found = ds.read_config_drive(self.tmp) - ec2_md = found['ec2-metadata'] - os_md = found['metadata'] - cfg_ds.ec2_metadata = ec2_md - cfg_ds.metadata = os_md - name_tests = { - 'ami': '/dev/vda1', - 'root': '/dev/vda1', - 'ephemeral0': '/dev/vda2', - 'swap': '/dev/vda3', - None: None, - 'bob': None, - 'root2k': None, - } - for name, dev_name in name_tests.items(): - # We want os.path.exists() to return False on its first call, - # and True on its second call. We use a handy generator as - # the mock side effect for this. The mocked function returns - # what the side effect returns. - def exists_side_effect(): - yield False - yield True - with mock.patch.object(os.path, 'exists', - side_effect=exists_side_effect()): - self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) - # We don't assert the call count for os.path.exists() because - # not all of the entries in name_tests results in two calls to - # that function. Specifically, 'root2k' doesn't seem to call - # it at all. 
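-    # The generator used above relies on a standard unittest.mock idiom:
-    # when side_effect is an iterable, each call to the mock returns the
-    # next item, and an exhausted iterable raises StopIteration. E.g.:
-    #
-    #     m = mock.Mock(side_effect=iter([False, True]))
-    #     m()   # -> False
-    #     m()   # -> True
-    #     m()   # -> raises StopIteration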
- - def test_dev_ec2_map(self): - populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, - None, - helpers.Paths({})) - found = ds.read_config_drive(self.tmp) - ec2_md = found['ec2-metadata'] - os_md = found['metadata'] - cfg_ds.ec2_metadata = ec2_md - cfg_ds.metadata = os_md - name_tests = { - 'ami': '/dev/sda1', - 'root': '/dev/sda1', - 'ephemeral0': '/dev/sda2', - 'swap': '/dev/sda3', - None: None, - 'bob': None, - 'root2k': None, - } - for name, dev_name in name_tests.items(): - with mock.patch.object(os.path, 'exists', return_value=True): - self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) - - def test_dir_valid(self): - """Verify a dir is read as such.""" - - populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - - found = ds.read_config_drive(self.tmp) - - expected_md = copy(OSTACK_META) - expected_md['instance-id'] = expected_md['uuid'] - expected_md['local-hostname'] = expected_md['hostname'] - - self.assertEqual(USER_DATA, found['userdata']) - self.assertEqual(expected_md, found['metadata']) - self.assertEqual(NETWORK_DATA, found['networkdata']) - self.assertEqual(found['files']['/etc/foo.cfg'], CONTENT_0) - self.assertEqual(found['files']['/etc/bar/bar.cfg'], CONTENT_1) - - def test_seed_dir_valid_extra(self): - """Verify extra files do not affect datasource validity.""" - - data = copy(CFG_DRIVE_FILES_V2) - data["myfoofile.txt"] = "myfoocontent" - data["openstack/latest/random-file.txt"] = "random-content" - - populate_dir(self.tmp, data) - - found = ds.read_config_drive(self.tmp) - - expected_md = copy(OSTACK_META) - expected_md['instance-id'] = expected_md['uuid'] - expected_md['local-hostname'] = expected_md['hostname'] - - self.assertEqual(expected_md, found['metadata']) - - def test_seed_dir_bad_json_metadata(self): - """Verify that bad json in metadata raises BrokenConfigDriveDir.""" - data = copy(CFG_DRIVE_FILES_V2) - - data["openstack/2012-08-10/meta_data.json"] = "non-json garbage {}" - data["openstack/2015-10-15/meta_data.json"] = "non-json garbage {}" - data["openstack/latest/meta_data.json"] = "non-json garbage {}" - - populate_dir(self.tmp, data) - - self.assertRaises(openstack.BrokenMetadata, - ds.read_config_drive, self.tmp) - - def test_seed_dir_no_configdrive(self): - """Verify that no metadata raises NonConfigDriveDir.""" - - my_d = os.path.join(self.tmp, "non-configdrive") - data = copy(CFG_DRIVE_FILES_V2) - data["myfoofile.txt"] = "myfoocontent" - data["openstack/latest/random-file.txt"] = "random-content" - data["content/foo"] = "foocontent" - - self.assertRaises(openstack.NonReadable, - ds.read_config_drive, my_d) - - def test_seed_dir_missing(self): - """Verify that missing seed_dir raises NonConfigDriveDir.""" - my_d = os.path.join(self.tmp, "nonexistantdirectory") - self.assertRaises(openstack.NonReadable, - ds.read_config_drive, my_d) - - def test_find_candidates(self): - devs_with_answers = {} - - def my_devs_with(*args, **kwargs): - criteria = args[0] if len(args) else kwargs.pop('criteria', None) - return devs_with_answers.get(criteria, []) - - def my_is_partition(dev): - return dev[-1] in "0123456789" and not dev.startswith("sr") - - try: - orig_find_devs_with = util.find_devs_with - util.find_devs_with = my_devs_with - - orig_is_partition = util.is_partition - util.is_partition = my_is_partition - - devs_with_answers = {"TYPE=vfat": [], - "TYPE=iso9660": ["/dev/vdb"], - "LABEL=config-2": ["/dev/vdb"]} - self.assertEqual(["/dev/vdb"], ds.find_candidate_devs()) - - # add a vfat item - # zdd reverse 
sorts after vdb, but config-2 label is preferred - devs_with_answers['TYPE=vfat'] = ["/dev/zdd"] - self.assertEqual(["/dev/vdb", "/dev/zdd"], - ds.find_candidate_devs()) - - # verify that partitions are considered, that have correct label. - devs_with_answers = {"TYPE=vfat": ["/dev/sda1"], - "TYPE=iso9660": [], - "LABEL=config-2": ["/dev/vdb3"]} - self.assertEqual(["/dev/vdb3"], - ds.find_candidate_devs()) - - # Verify that uppercase labels are also found. - devs_with_answers = {"TYPE=vfat": [], - "TYPE=iso9660": ["/dev/vdb"], - "LABEL=CONFIG-2": ["/dev/vdb"]} - self.assertEqual(["/dev/vdb"], ds.find_candidate_devs()) - - finally: - util.find_devs_with = orig_find_devs_with - util.is_partition = orig_is_partition - - @mock.patch(M_PATH + 'on_first_boot') - def test_pubkeys_v2(self, on_first_boot): - """Verify that public-keys work in config-drive-v2.""" - myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) - self.assertEqual(myds.get_public_ssh_keys(), - [OSTACK_META['public_keys']['mykey']]) - self.assertEqual('configdrive', myds.cloud_name) - self.assertEqual('openstack', myds.platform) - self.assertEqual('seed-dir (%s/seed)' % self.tmp, myds.subplatform) - - def test_subplatform_config_drive_when_starts_with_dev(self): - """subplatform reports config-drive when source starts with /dev/.""" - cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, - None, - helpers.Paths({})) - with mock.patch(M_PATH + 'find_candidate_devs') as m_find_devs: - with mock.patch(M_PATH + 'util.mount_cb'): - with mock.patch(M_PATH + 'on_first_boot'): - m_find_devs.return_value = ['/dev/anything'] - self.assertEqual(True, cfg_ds.get_data()) - self.assertEqual('config-disk (/dev/anything)', cfg_ds.subplatform) - - -@mock.patch( - "cloudinit.net.is_openvswitch_internal_interface", - mock.Mock(return_value=False) -) -class TestNetJson(CiTestCase): - def setUp(self): - super(TestNetJson, self).setUp() - self.tmp = self.tmp_dir() - self.maxDiff = None - - @mock.patch(M_PATH + 'on_first_boot') - def test_network_data_is_found(self, on_first_boot): - """Verify that network_data is present in ds in config-drive-v2.""" - myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) - self.assertIsNotNone(myds.network_json) - - @mock.patch(M_PATH + 'on_first_boot') - def test_network_config_is_converted(self, on_first_boot): - """Verify that network_data is converted and present on ds object.""" - myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) - network_config = openstack.convert_net_json(NETWORK_DATA, - known_macs=KNOWN_MACS) - self.assertEqual(myds.network_config, network_config) - - def test_network_config_conversion_dhcp6(self): - """Test some ipv6 input network json and check the expected - conversions.""" - in_data = { - 'links': [ - {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd', - 'ethernet_mac_address': 'fa:16:3e:69:b0:58', - 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'}, - {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33', - 'ethernet_mac_address': 'fa:16:3e:d4:57:ad', - 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'}, - ], - 'networks': [ - {'link': 'tap2ecc7709-b3', 'type': 'ipv6_dhcpv6-stateless', - 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235', - 'id': 'network0'}, - {'link': 'tap2f88d109-5b', 'type': 'ipv6_dhcpv6-stateful', - 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54', - 'id': 'network1'}, - ] - } - out_data = { - 'version': 1, - 'config': [ - {'mac_address': 'fa:16:3e:69:b0:58', - 'mtu': None, - 'name': 'enp0s1', - 'subnets': [{'type': 
'ipv6_dhcpv6-stateless'}], - 'type': 'physical'}, - {'mac_address': 'fa:16:3e:d4:57:ad', - 'mtu': None, - 'name': 'enp0s2', - 'subnets': [{'type': 'ipv6_dhcpv6-stateful'}], - 'type': 'physical', - 'accept-ra': True} - ], - } - conv_data = openstack.convert_net_json(in_data, known_macs=KNOWN_MACS) - self.assertEqual(out_data, conv_data) - - def test_network_config_conversions(self): - """Tests a bunch of input network json and checks the - expected conversions.""" - in_datas = [ - NETWORK_DATA, - { - 'services': [{'type': 'dns', 'address': '172.19.0.12'}], - 'networks': [{ - 'network_id': 'dacd568d-5be6-4786-91fe-750c374b78b4', - 'type': 'ipv4', - 'netmask': '255.255.252.0', - 'link': 'tap1a81968a-79', - 'routes': [{ - 'netmask': '0.0.0.0', - 'network': '0.0.0.0', - 'gateway': '172.19.3.254', - }], - 'ip_address': '172.19.1.34', - 'id': 'network0', - }], - 'links': [{ - 'type': 'bridge', - 'vif_id': '1a81968a-797a-400f-8a80-567f997eb93f', - 'ethernet_mac_address': 'fa:16:3e:ed:9a:59', - 'id': 'tap1a81968a-79', - 'mtu': None, - }], - }, - ] - out_datas = [ - { - 'version': 1, - 'config': [ - { - 'subnets': [{'type': 'dhcp4'}], - 'type': 'physical', - 'mac_address': 'fa:16:3e:69:b0:58', - 'name': 'enp0s1', - 'mtu': None, - }, - { - 'subnets': [{'type': 'dhcp4'}], - 'type': 'physical', - 'mac_address': 'fa:16:3e:d4:57:ad', - 'name': 'enp0s2', - 'mtu': None, - }, - { - 'subnets': [{'type': 'dhcp4'}], - 'type': 'physical', - 'mac_address': 'fa:16:3e:05:30:fe', - 'name': 'nic0', - 'mtu': None, - }, - { - 'type': 'nameserver', - 'address': '199.204.44.24', - }, - { - 'type': 'nameserver', - 'address': '199.204.47.54', - } - ], - - }, - { - 'version': 1, - 'config': [ - { - 'name': 'foo3', - 'mac_address': 'fa:16:3e:ed:9a:59', - 'mtu': None, - 'type': 'physical', - 'subnets': [ - { - 'address': '172.19.1.34', - 'netmask': '255.255.252.0', - 'type': 'static', - 'ipv4': True, - 'routes': [{ - 'gateway': '172.19.3.254', - 'netmask': '0.0.0.0', - 'network': '0.0.0.0', - }], - } - ] - }, - { - 'type': 'nameserver', - 'address': '172.19.0.12', - } - ], - }, - ] - for in_data, out_data in zip(in_datas, out_datas): - conv_data = openstack.convert_net_json(in_data, - known_macs=KNOWN_MACS) - self.assertEqual(out_data, conv_data) - - -@mock.patch( - "cloudinit.net.is_openvswitch_internal_interface", - mock.Mock(return_value=False) -) -class TestConvertNetworkData(CiTestCase): - - with_logs = True - - def setUp(self): - super(TestConvertNetworkData, self).setUp() - self.tmp = self.tmp_dir() - - def _getnames_in_config(self, ncfg): - return set([n['name'] for n in ncfg['config'] - if n['type'] == 'physical']) - - def test_conversion_fills_names(self): - ncfg = openstack.convert_net_json(NETWORK_DATA, known_macs=KNOWN_MACS) - expected = set(['nic0', 'enp0s1', 'enp0s2']) - found = self._getnames_in_config(ncfg) - self.assertEqual(found, expected) - - @mock.patch('cloudinit.net.get_interfaces_by_mac') - def test_convert_reads_system_prefers_name(self, get_interfaces_by_mac): - macs = KNOWN_MACS.copy() - macs.update({'fa:16:3e:05:30:fe': 'foonic1', - 'fa:16:3e:69:b0:58': 'ens1'}) - get_interfaces_by_mac.return_value = macs - - ncfg = openstack.convert_net_json(NETWORK_DATA) - expected = set(['nic0', 'ens1', 'enp0s2']) - found = self._getnames_in_config(ncfg) - self.assertEqual(found, expected) - - def test_convert_raises_value_error_on_missing_name(self): - macs = {'aa:aa:aa:aa:aa:00': 'ens1'} - self.assertRaises(ValueError, openstack.convert_net_json, - NETWORK_DATA, known_macs=macs) - - def 
test_conversion_with_route(self):
-        ncfg = openstack.convert_net_json(NETWORK_DATA_2,
-                                          known_macs=KNOWN_MACS)
-        # Not the best test, but check that we get a route in the
-        # network config and that it gets rendered to an ENI file.
-        routes = []
-        for n in ncfg['config']:
-            for s in n.get('subnets', []):
-                routes.extend(s.get('routes', []))
-        self.assertIn(
-            {'network': '0.0.0.0', 'netmask': '0.0.0.0', 'gateway': '2.2.2.9'},
-            routes)
-        eni_renderer = eni.Renderer()
-        eni_renderer.render_network_state(
-            network_state.parse_net_config_data(ncfg), target=self.tmp)
-        with open(os.path.join(self.tmp, "etc",
-                               "network", "interfaces"), 'r') as f:
-            eni_rendering = f.read()
-        self.assertIn("route add default gw 2.2.2.9", eni_rendering)
-
-    def test_conversion_with_tap(self):
-        ncfg = openstack.convert_net_json(NETWORK_DATA_3,
-                                          known_macs=KNOWN_MACS)
-        physicals = set()
-        for i in ncfg['config']:
-            if i.get('type') == "physical":
-                physicals.add(i['name'])
-        self.assertEqual(physicals, set(('foo1', 'foo2')))
-
-    def test_bond_conversion(self):
-        # Light testing of bond conversion and ENI rendering of the bond.
-        ncfg = openstack.convert_net_json(NETWORK_DATA_BOND,
-                                          known_macs=KNOWN_MACS)
-        eni_renderer = eni.Renderer()
-
-        eni_renderer.render_network_state(
-            network_state.parse_net_config_data(ncfg), target=self.tmp)
-        with open(os.path.join(self.tmp, "etc",
-                               "network", "interfaces"), 'r') as f:
-            eni_rendering = f.read()
-
-        # Verify there are expected interfaces in the net config.
-        interfaces = sorted(
-            [i['name'] for i in ncfg['config']
-             if i['type'] in ('vlan', 'bond', 'physical')])
-        self.assertEqual(
-            sorted(["oeth0", "oeth1", "bond0", "bond0.602", "bond0.612"]),
-            interfaces)
-
-        words = eni_rendering.split()
-        # 'eth0' and 'eth1' are the ids. Because their MAC addresses
-        # map to other names, we should not see them in the ENI.
-        self.assertNotIn('eth0', words)
-        self.assertNotIn('eth1', words)
-
-        # oeth0 and oeth1 are the interface names for ENI.
-        # bond0 will be generated for the bond. Each should be auto.
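-        # Illustrative shape of the expected bond stanza (not byte-exact;
-        # the exact keys come from the ENI renderer):
-        #
-        #     auto bond0
-        #     iface bond0 inet manual
-        #         hwaddress fa:16:3e:b3:72:36
-        #         bond-mode 4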
- self.assertIn("auto oeth0", eni_rendering) - self.assertIn("auto oeth1", eni_rendering) - self.assertIn("auto bond0", eni_rendering) - # The bond should have the given mac address - pos = eni_rendering.find("auto bond0") - self.assertIn(BOND_MAC, eni_rendering[pos:]) - - def test_vlan(self): - # light testing of vlan config conversion and eni rendering - ncfg = openstack.convert_net_json(NETWORK_DATA_VLAN, - known_macs=KNOWN_MACS) - eni_renderer = eni.Renderer() - eni_renderer.render_network_state( - network_state.parse_net_config_data(ncfg), target=self.tmp) - with open(os.path.join(self.tmp, "etc", - "network", "interfaces"), 'r') as f: - eni_rendering = f.read() - - self.assertIn("iface enp0s1", eni_rendering) - self.assertIn("address 10.0.1.5", eni_rendering) - self.assertIn("auto enp0s1.602", eni_rendering) - - def test_mac_addrs_can_be_upper_case(self): - # input mac addresses on rackspace may be upper case - my_netdata = deepcopy(NETWORK_DATA) - for link in my_netdata['links']: - link['ethernet_mac_address'] = link['ethernet_mac_address'].upper() - - ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS) - config_name2mac = {} - for n in ncfg['config']: - if n['type'] == 'physical': - config_name2mac[n['name']] = n['mac_address'] - - expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58', - 'enp0s2': 'fa:16:3e:d4:57:ad'} - self.assertEqual(expected, config_name2mac) - - def test_unknown_device_types_accepted(self): - # If we don't recognise a link, we should treat it as physical for a - # best-effort boot - my_netdata = deepcopy(NETWORK_DATA) - my_netdata['links'][0]['type'] = 'my-special-link-type' - - ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS) - config_name2mac = {} - for n in ncfg['config']: - if n['type'] == 'physical': - config_name2mac[n['name']] = n['mac_address'] - - expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58', - 'enp0s2': 'fa:16:3e:d4:57:ad'} - self.assertEqual(expected, config_name2mac) - - # We should, however, warn the user that we don't recognise the type - self.assertIn('Unknown network_data link type (my-special-link-type)', - self.logs.getvalue()) - - -def cfg_ds_from_dir(base_d, files=None): - run = os.path.join(base_d, "run") - os.mkdir(run) - cfg_ds = ds.DataSourceConfigDrive( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': run})) - cfg_ds.seed_dir = os.path.join(base_d, "seed") - if files: - populate_dir(cfg_ds.seed_dir, files) - cfg_ds.known_macs = KNOWN_MACS.copy() - if not cfg_ds.get_data(): - raise RuntimeError("Data source did not extract itself from" - " seed directory %s" % cfg_ds.seed_dir) - return cfg_ds - - -def populate_ds_from_read_config(cfg_ds, source, results): - """Patch the DataSourceConfigDrive from the results of - read_config_drive_dir hopefully in line with what it would have - if cfg_ds.get_data had been successfully called""" - cfg_ds.source = source - cfg_ds.metadata = results.get('metadata') - cfg_ds.ec2_metadata = results.get('ec2-metadata') - cfg_ds.userdata_raw = results.get('userdata') - cfg_ds.version = results.get('version') - cfg_ds.network_json = results.get('networkdata') - cfg_ds._network_config = openstack.convert_net_json( - cfg_ds.network_json, known_macs=KNOWN_MACS) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py deleted file mode 100644 index 3127014b..00000000 --- a/tests/unittests/test_datasource/test_digitalocean.py +++ /dev/null @@ 
-1,372 +0,0 @@ -# Copyright (C) 2014 Neal Shrader -# -# Author: Neal Shrader -# Author: Ben Howard -# Author: Scott Moser -# -# This file is part of cloud-init. See LICENSE file for license information. - -import json - -from cloudinit import helpers -from cloudinit import settings -from cloudinit.sources import DataSourceDigitalOcean -from cloudinit.sources.helpers import digitalocean - -from cloudinit.tests.helpers import mock, CiTestCase - -DO_MULTIPLE_KEYS = ["ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co", - "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co"] -DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@do.co" - -# the following JSON was taken from droplet (that's why its a string) -DO_META = json.loads(""" -{ - "droplet_id": "22532410", - "hostname": "utl-96268", - "vendor_data": "vendordata goes here", - "user_data": "userdata goes here", - "public_keys": "", - "auth_key": "authorization_key", - "region": "nyc3", - "interfaces": { - "private": [ - { - "ipv4": { - "ip_address": "10.132.6.205", - "netmask": "255.255.0.0", - "gateway": "10.132.0.1" - }, - "mac": "04:01:57:d1:9e:02", - "type": "private" - } - ], - "public": [ - { - "ipv4": { - "ip_address": "192.0.0.20", - "netmask": "255.255.255.0", - "gateway": "104.236.0.1" - }, - "ipv6": { - "ip_address": "2604:A880:0800:0000:1000:0000:0000:0000", - "cidr": 64, - "gateway": "2604:A880:0800:0000:0000:0000:0000:0001" - }, - "anchor_ipv4": { - "ip_address": "10.0.0.5", - "netmask": "255.255.0.0", - "gateway": "10.0.0.1" - }, - "mac": "04:01:57:d1:9e:01", - "type": "public" - } - ] - }, - "floating_ip": { - "ipv4": { - "active": false - } - }, - "dns": { - "nameservers": [ - "2001:4860:4860::8844", - "2001:4860:4860::8888", - "8.8.8.8" - ] - } -} -""") - -# This has no private interface -DO_META_2 = { - "droplet_id": 27223699, - "hostname": "smtest1", - "vendor_data": "\n".join([ - ('"Content-Type: multipart/mixed; ' - 'boundary=\"===============8645434374073493512==\"'), - 'MIME-Version: 1.0', - '', - '--===============8645434374073493512==', - 'MIME-Version: 1.0' - 'Content-Type: text/cloud-config; charset="us-ascii"' - 'Content-Transfer-Encoding: 7bit' - 'Content-Disposition: attachment; filename="cloud-config"' - '', - '#cloud-config', - 'disable_root: false', - 'manage_etc_hosts: true', - '', - '', - '--===============8645434374073493512==' - ]), - "public_keys": [ - "ssh-rsa AAAAB3NzaN...N3NtHw== smoser@brickies" - ], - "auth_key": "88888888888888888888888888888888", - "region": "nyc3", - "interfaces": { - "public": [{ - "ipv4": { - "ip_address": "45.55.249.133", - "netmask": "255.255.192.0", - "gateway": "45.55.192.1" - }, - "anchor_ipv4": { - "ip_address": "10.17.0.5", - "netmask": "255.255.0.0", - "gateway": "10.17.0.1" - }, - "mac": "ae:cc:08:7c:88:00", - "type": "public" - }] - }, - "floating_ip": {"ipv4": {"active": True, "ip_address": "138.197.59.92"}}, - "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]}, - "tags": None, -} - -DO_META['public_keys'] = DO_SINGLE_KEY - -MD_URL = 'http://169.254.169.254/metadata/v1.json' - - -def _mock_dmi(): - return (True, DO_META.get('id')) - - -class TestDataSourceDigitalOcean(CiTestCase): - """ - Test reading the meta-data - """ - def setUp(self): - super(TestDataSourceDigitalOcean, self).setUp() - self.tmp = self.tmp_dir() - - def get_ds(self, get_sysinfo=_mock_dmi): - ds = DataSourceDigitalOcean.DataSourceDigitalOcean( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) - ds.use_ip4LL = False - if get_sysinfo is not None: - ds._get_sysinfo = get_sysinfo - return ds - - 
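-    # get_ds() builds the datasource with link-local metadata lookups
-    # disabled (use_ip4LL = False) and, unless get_sysinfo=None is passed,
-    # substitutes _get_sysinfo with a stub reporting that the test runs on
-    # a DigitalOcean droplet.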
@mock.patch('cloudinit.sources.helpers.digitalocean.read_sysinfo') - def test_returns_false_not_on_docean(self, m_read_sysinfo): - m_read_sysinfo.return_value = (False, None) - ds = self.get_ds(get_sysinfo=None) - self.assertEqual(False, ds.get_data()) - self.assertTrue(m_read_sysinfo.called) - - @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata') - def test_metadata(self, mock_readmd): - mock_readmd.return_value = DO_META.copy() - - ds = self.get_ds() - ret = ds.get_data() - self.assertTrue(ret) - - self.assertTrue(mock_readmd.called) - - self.assertEqual(DO_META.get('user_data'), ds.get_userdata_raw()) - self.assertEqual(DO_META.get('vendor_data'), ds.get_vendordata_raw()) - self.assertEqual(DO_META.get('region'), ds.availability_zone) - self.assertEqual(DO_META.get('droplet_id'), ds.get_instance_id()) - self.assertEqual(DO_META.get('hostname'), ds.get_hostname()) - - # Single key - self.assertEqual([DO_META.get('public_keys')], - ds.get_public_ssh_keys()) - - self.assertIsInstance(ds.get_public_ssh_keys(), list) - - @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata') - def test_multiple_ssh_keys(self, mock_readmd): - metadata = DO_META.copy() - metadata['public_keys'] = DO_MULTIPLE_KEYS - mock_readmd.return_value = metadata.copy() - - ds = self.get_ds() - ret = ds.get_data() - self.assertTrue(ret) - - self.assertTrue(mock_readmd.called) - - # Multiple keys - self.assertEqual(metadata['public_keys'], ds.get_public_ssh_keys()) - self.assertIsInstance(ds.get_public_ssh_keys(), list) - - -class TestNetworkConvert(CiTestCase): - - def _get_networking(self): - self.m_get_by_mac.return_value = { - '04:01:57:d1:9e:01': 'ens1', - '04:01:57:d1:9e:02': 'ens2', - 'b8:ae:ed:75:5f:9a': 'enp0s25', - 'ae:cc:08:7c:88:00': 'meta2p1'} - netcfg = digitalocean.convert_network_configuration( - DO_META['interfaces'], DO_META['dns']['nameservers']) - self.assertIn('config', netcfg) - return netcfg - - def setUp(self): - super(TestNetworkConvert, self).setUp() - self.add_patch('cloudinit.net.get_interfaces_by_mac', 'm_get_by_mac') - - def test_networking_defined(self): - netcfg = self._get_networking() - self.assertIsNotNone(netcfg) - dns_defined = False - - for part in netcfg.get('config'): - n_type = part.get('type') - print("testing part ", n_type, "\n", json.dumps(part, indent=3)) - - if n_type == 'nameserver': - n_address = part.get('address') - self.assertIsNotNone(n_address) - self.assertEqual(len(n_address), 3) - - dns_resolvers = DO_META["dns"]["nameservers"] - for x in n_address: - self.assertIn(x, dns_resolvers) - dns_defined = True - - else: - n_subnets = part.get('type') - n_name = part.get('name') - n_mac = part.get('mac_address') - - self.assertIsNotNone(n_type) - self.assertIsNotNone(n_subnets) - self.assertIsNotNone(n_name) - self.assertIsNotNone(n_mac) - - self.assertTrue(dns_defined) - - def _get_nic_definition(self, int_type, expected_name): - """helper function to return if_type (i.e. 
public) and the expected - name used by cloud-init (i.e eth0)""" - netcfg = self._get_networking() - meta_def = (DO_META.get('interfaces')).get(int_type)[0] - - self.assertEqual(int_type, meta_def.get('type')) - - for nic_def in netcfg.get('config'): - print(nic_def) - if nic_def.get('name') == expected_name: - return nic_def, meta_def - - def _get_match_subn(self, subnets, ip_addr): - """get the matching subnet definition based on ip address""" - for subn in subnets: - address = subn.get('address') - self.assertIsNotNone(address) - - # equals won't work because of ipv6 addressing being in - # cidr notation, i.e fe00::1/64 - if ip_addr in address: - print(json.dumps(subn, indent=3)) - return subn - - def test_correct_gateways_defined(self): - """test to make sure the eth0 ipv4 and ipv6 gateways are defined""" - netcfg = self._get_networking() - gateways = [] - for nic_def in netcfg.get('config'): - if nic_def.get('type') != 'physical': - continue - for subn in nic_def.get('subnets'): - if 'gateway' in subn: - gateways.append(subn.get('gateway')) - - # we should have two gateways, one ipv4 and ipv6 - self.assertEqual(len(gateways), 2) - - # make that the ipv6 gateway is there - (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') - ipv4_def = meta_def.get('ipv4') - self.assertIn(ipv4_def.get('gateway'), gateways) - - # make sure the the ipv6 gateway is there - ipv6_def = meta_def.get('ipv6') - self.assertIn(ipv6_def.get('gateway'), gateways) - - def test_public_interface_defined(self): - """test that the public interface is defined as eth0""" - (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') - self.assertEqual('eth0', nic_def.get('name')) - self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address')) - self.assertEqual('physical', nic_def.get('type')) - - def test_private_interface_defined(self): - """test that the private interface is defined as eth1""" - (nic_def, meta_def) = self._get_nic_definition('private', 'eth1') - self.assertEqual('eth1', nic_def.get('name')) - self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address')) - self.assertEqual('physical', nic_def.get('type')) - - def test_public_interface_ipv6(self): - """test public ipv6 addressing""" - (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') - ipv6_def = meta_def.get('ipv6') - self.assertIsNotNone(ipv6_def) - - subn_def = self._get_match_subn(nic_def.get('subnets'), - ipv6_def.get('ip_address')) - - cidr_notated_address = "{0}/{1}".format(ipv6_def.get('ip_address'), - ipv6_def.get('cidr')) - - self.assertEqual(cidr_notated_address, subn_def.get('address')) - self.assertEqual(ipv6_def.get('gateway'), subn_def.get('gateway')) - - def test_public_interface_ipv4(self): - """test public ipv4 addressing""" - (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') - ipv4_def = meta_def.get('ipv4') - self.assertIsNotNone(ipv4_def) - - subn_def = self._get_match_subn(nic_def.get('subnets'), - ipv4_def.get('ip_address')) - - self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask')) - self.assertEqual(ipv4_def.get('gateway'), subn_def.get('gateway')) - - def test_public_interface_anchor_ipv4(self): - """test public ipv4 addressing""" - (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') - ipv4_def = meta_def.get('anchor_ipv4') - self.assertIsNotNone(ipv4_def) - - subn_def = self._get_match_subn(nic_def.get('subnets'), - ipv4_def.get('ip_address')) - - self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask')) - self.assertNotIn('gateway', 
subn_def) - - @mock.patch('cloudinit.net.get_interfaces_by_mac') - def test_convert_without_private(self, m_get_by_mac): - m_get_by_mac.return_value = { - 'b8:ae:ed:75:5f:9a': 'enp0s25', - 'ae:cc:08:7c:88:00': 'meta2p1'} - netcfg = digitalocean.convert_network_configuration( - DO_META_2['interfaces'], DO_META_2['dns']['nameservers']) - - # print(netcfg) - byname = {} - for i in netcfg['config']: - if 'name' in i: - if i['name'] in byname: - raise ValueError("name '%s' in config twice: %s" % - (i['name'], netcfg)) - byname[i['name']] = i - self.assertTrue('eth0' in byname) - self.assertTrue('subnets' in byname['eth0']) - eth0 = byname['eth0'] - self.assertEqual( - sorted(['45.55.249.133', '10.17.0.5']), - sorted([i['address'] for i in eth0['subnets']])) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py deleted file mode 100644 index a93f2195..00000000 --- a/tests/unittests/test_datasource/test_ec2.py +++ /dev/null @@ -1,978 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import copy -import httpretty -import json -import requests -from unittest import mock - -from cloudinit import helpers -from cloudinit.sources import DataSourceEc2 as ec2 -from cloudinit.tests import helpers as test_helpers - - -DYNAMIC_METADATA = { - "instance-identity": { - "document": json.dumps({ - "devpayProductCodes": None, - "marketplaceProductCodes": ["1abc2defghijklm3nopqrs4tu"], - "availabilityZone": "us-west-2b", - "privateIp": "10.158.112.84", - "version": "2017-09-30", - "instanceId": "my-identity-id", - "billingProducts": None, - "instanceType": "t2.micro", - "accountId": "123456789012", - "imageId": "ami-5fb8c835", - "pendingTime": "2016-11-19T16:32:11Z", - "architecture": "x86_64", - "kernelId": None, - "ramdiskId": None, - "region": "us-west-2" - }) - } -} - - -# collected from api version 2016-09-02/ with -# python3 -c 'import json -# from cloudinit.ec2_utils import get_instance_metadata as gm -# print(json.dumps(gm("2016-09-02"), indent=1, sort_keys=True))' -# Note that the MAC addresses have been modified to sort in the opposite order -# to the device-number attribute, to test LP: #1876312 -DEFAULT_METADATA = { - "ami-id": "ami-8b92b4ee", - "ami-launch-index": "0", - "ami-manifest-path": "(unknown)", - "block-device-mapping": {"ami": "/dev/sda1", "root": "/dev/sda1"}, - "hostname": "ip-172-31-31-158.us-east-2.compute.internal", - "instance-action": "none", - "instance-id": "i-0a33f80f09c96477f", - "instance-type": "t2.small", - "local-hostname": "ip-172-3-3-15.us-east-2.compute.internal", - "local-ipv4": "172.3.3.15", - "mac": "06:17:04:d7:26:09", - "metrics": {"vhostmd": ""}, - "network": { - "interfaces": { - "macs": { - "06:17:04:d7:26:09": { - "device-number": "0", - "interface-id": "eni-e44ef49e", - "ipv4-associations": {"13.59.77.202": "172.3.3.15"}, - "ipv6s": "2600:1f16:aeb:b20b:9d87:a4af:5cc9:73dc", - "local-hostname": ("ip-172-3-3-15.us-east-2." - "compute.internal"), - "local-ipv4s": "172.3.3.15", - "mac": "06:17:04:d7:26:09", - "owner-id": "950047163771", - "public-hostname": ("ec2-13-59-77-202.us-east-2." 
- "compute.amazonaws.com"), - "public-ipv4s": "13.59.77.202", - "security-group-ids": "sg-5a61d333", - "security-groups": "wide-open", - "subnet-id": "subnet-20b8565b", - "subnet-ipv4-cidr-block": "172.31.16.0/20", - "subnet-ipv6-cidr-blocks": "2600:1f16:aeb:b20b::/64", - "vpc-id": "vpc-87e72bee", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc-ipv6-cidr-blocks": "2600:1f16:aeb:b200::/56" - }, - "06:17:04:d7:26:08": { - "device-number": "1", # Only IPv4 local config - "interface-id": "eni-e44ef49f", - "ipv4-associations": {"": "172.3.3.16"}, - "ipv6s": "", # No IPv6 config - "local-hostname": ("ip-172-3-3-16.us-east-2." - "compute.internal"), - "local-ipv4s": "172.3.3.16", - "mac": "06:17:04:d7:26:08", - "owner-id": "950047163771", - "public-hostname": ("ec2-172-3-3-16.us-east-2." - "compute.amazonaws.com"), - "public-ipv4s": "", # No public ipv4 config - "security-group-ids": "sg-5a61d333", - "security-groups": "wide-open", - "subnet-id": "subnet-20b8565b", - "subnet-ipv4-cidr-block": "172.31.16.0/20", - "subnet-ipv6-cidr-blocks": "", - "vpc-id": "vpc-87e72bee", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc-ipv6-cidr-blocks": "" - } - } - } - }, - "placement": {"availability-zone": "us-east-2b"}, - "profile": "default-hvm", - "public-hostname": "ec2-13-59-77-202.us-east-2.compute.amazonaws.com", - "public-ipv4": "13.59.77.202", - "public-keys": {"brickies": ["ssh-rsa AAAAB3Nz....w== brickies"]}, - "reservation-id": "r-01efbc9996bac1bd6", - "security-groups": "my-wide-open", - "services": {"domain": "amazonaws.com", "partition": "aws"}, -} - -# collected from api version 2018-09-24/ with -# python3 -c 'import json -# from cloudinit.ec2_utils import get_instance_metadata as gm -# print(json.dumps(gm("2018-09-24"), indent=1, sort_keys=True))' - -NIC1_MD_IPV4_IPV6_MULTI_IP = { - "device-number": "0", - "interface-id": "eni-0d6335689899ce9cc", - "ipv4-associations": { - "18.218.219.181": "172.31.44.13" - }, - "ipv6s": [ - "2600:1f16:292:100:c187:593c:4349:136", - "2600:1f16:292:100:f153:12a3:c37c:11f9", - "2600:1f16:292:100:f152:2222:3333:4444" - ], - "local-hostname": ("ip-172-31-44-13.us-east-2." - "compute.internal"), - "local-ipv4s": [ - "172.31.44.13", - "172.31.45.70" - ], - "mac": "0a:07:84:3d:6e:38", - "owner-id": "329910648901", - "public-hostname": ("ec2-18-218-219-181.us-east-2." 
- "compute.amazonaws.com"), - "public-ipv4s": "18.218.219.181", - "security-group-ids": "sg-0c387755222ba8d2e", - "security-groups": "launch-wizard-4", - "subnet-id": "subnet-9d7ba0d1", - "subnet-ipv4-cidr-block": "172.31.32.0/20", - "subnet_ipv6_cidr_blocks": "2600:1f16:292:100::/64", - "vpc-id": "vpc-a07f62c8", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc_ipv6_cidr_blocks": "2600:1f16:292:100::/56" -} - -NIC2_MD = { - "device-number": "1", - "interface-id": "eni-043cdce36ded5e79f", - "local-hostname": "ip-172-31-47-221.us-east-2.compute.internal", - "local-ipv4s": "172.31.47.221", - "mac": "0a:75:69:92:e2:16", - "owner-id": "329910648901", - "security-group-ids": "sg-0d68fef37d8cc9b77", - "security-groups": "launch-wizard-17", - "subnet-id": "subnet-9d7ba0d1", - "subnet-ipv4-cidr-block": "172.31.32.0/20", - "vpc-id": "vpc-a07f62c8", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16" -} - -SECONDARY_IP_METADATA_2018_09_24 = { - "ami-id": "ami-0986c2ac728528ac2", - "ami-launch-index": "0", - "ami-manifest-path": "(unknown)", - "block-device-mapping": { - "ami": "/dev/sda1", - "root": "/dev/sda1" - }, - "events": { - "maintenance": { - "history": "[]", - "scheduled": "[]" - } - }, - "hostname": "ip-172-31-44-13.us-east-2.compute.internal", - "identity-credentials": { - "ec2": { - "info": { - "AccountId": "329910648901", - "Code": "Success", - "LastUpdated": "2019-07-06T14:22:56Z" - } - } - }, - "instance-action": "none", - "instance-id": "i-069e01e8cc43732f8", - "instance-type": "t2.micro", - "local-hostname": "ip-172-31-44-13.us-east-2.compute.internal", - "local-ipv4": "172.31.44.13", - "mac": "0a:07:84:3d:6e:38", - "metrics": { - "vhostmd": "" - }, - "network": { - "interfaces": { - "macs": { - "0a:07:84:3d:6e:38": NIC1_MD_IPV4_IPV6_MULTI_IP, - } - } - }, - "placement": { - "availability-zone": "us-east-2c" - }, - "profile": "default-hvm", - "public-hostname": ( - "ec2-18-218-219-181.us-east-2.compute.amazonaws.com"), - "public-ipv4": "18.218.219.181", - "public-keys": { - "yourkeyname,e": [ - "ssh-rsa AAAAW...DZ yourkeyname" - ] - }, - "reservation-id": "r-09b4917135cdd33be", - "security-groups": "launch-wizard-4", - "services": { - "domain": "amazonaws.com", - "partition": "aws" - } -} - -M_PATH_NET = 'cloudinit.sources.DataSourceEc2.net.' - - -def _register_ssh_keys(rfunc, base_url, keys_data): - """handle ssh key inconsistencies. - - public-keys in the ec2 metadata is inconsistently formated compared - to other entries. - Given keys_data of {name1: pubkey1, name2: pubkey2} - - This registers the following urls: - base_url 0={name1}\n1={name2} # (for each name) - base_url/ 0={name1}\n1={name2} # (for each name) - base_url/0 openssh-key - base_url/0/ openssh-key - base_url/0/openssh-key {pubkey1} - base_url/0/openssh-key/ {pubkey1} - ... - """ - - base_url = base_url.rstrip("/") - odd_index = '\n'.join( - ["{0}={1}".format(n, name) - for n, name in enumerate(sorted(keys_data))]) - - rfunc(base_url, odd_index) - rfunc(base_url + "/", odd_index) - - for n, name in enumerate(sorted(keys_data)): - val = keys_data[name] - if isinstance(val, list): - val = '\n'.join(val) - burl = base_url + "/%s" % n - rfunc(burl, "openssh-key") - rfunc(burl + "/", "openssh-key") - rfunc(burl + "/%s/openssh-key" % name, val) - rfunc(burl + "/%s/openssh-key/" % name, val) - - -def register_mock_metaserver(base_url, data): - """Register with httpretty a ec2 metadata like service serving 'data'. 
- - If given a dictionary, it will populate urls under base_url for - that dictionary. For example, input of - {"instance-id": "i-abc", "mac": "00:16:3e:00:00:00"} - populates - base_url with 'instance-id\nmac' - base_url/ with 'instance-id\nmac' - base_url/instance-id with i-abc - base_url/mac with 00:16:3e:00:00:00 - In the index, references to lists or dictionaries have a trailing /. - """ - def register_helper(register, base_url, body): - if not isinstance(base_url, str): - register(base_url, body) - return - base_url = base_url.rstrip("/") - if isinstance(body, str): - register(base_url, body) - elif isinstance(body, list): - register(base_url, '\n'.join(body) + '\n') - register(base_url + '/', '\n'.join(body) + '\n') - elif isinstance(body, dict): - vals = [] - for k, v in body.items(): - if k == 'public-keys': - _register_ssh_keys( - register, base_url + '/public-keys/', v) - continue - suffix = k.rstrip("/") - if not isinstance(v, (str, list)): - suffix += "/" - vals.append(suffix) - url = base_url + '/' + suffix - register_helper(register, url, v) - register(base_url, '\n'.join(vals) + '\n') - register(base_url + '/', '\n'.join(vals) + '\n') - elif body is None: - register(base_url, 'not found', status=404) - - def myreg(*argc, **kwargs): - url = argc[0] - method = httpretty.PUT if ec2.API_TOKEN_ROUTE in url else httpretty.GET - return httpretty.register_uri(method, *argc, **kwargs) - - register_helper(myreg, base_url, data) - - -class TestEc2(test_helpers.HttprettyTestCase): - with_logs = True - maxDiff = None - - valid_platform_data = { - 'uuid': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412', - 'uuid_source': 'dmi', - 'serial': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412', - } - - def setUp(self): - super(TestEc2, self).setUp() - self.datasource = ec2.DataSourceEc2 - self.metadata_addr = self.datasource.metadata_urls[0] - self.tmp = self.tmp_dir() - - def data_url(self, version, data_item='meta-data'): - """Return a metadata url based on the version provided.""" - return '/'.join([self.metadata_addr, version, data_item]) - - def _patch_add_cleanup(self, mpath, *args, **kwargs): - p = mock.patch(mpath, *args, **kwargs) - p.start() - self.addCleanup(p.stop) - - def _setup_ds(self, sys_cfg, platform_data, md, md_version=None): - self.uris = [] - distro = {} - paths = helpers.Paths({'run_dir': self.tmp}) - if sys_cfg is None: - sys_cfg = {} - ds = self.datasource(sys_cfg=sys_cfg, distro=distro, paths=paths) - if not md_version: - md_version = ds.min_metadata_version - if platform_data is not None: - self._patch_add_cleanup( - "cloudinit.sources.DataSourceEc2._collect_platform_data", - return_value=platform_data) - - if md: - all_versions = ( - [ds.min_metadata_version] + ds.extended_metadata_versions) - token_url = self.data_url('latest', data_item='api/token') - register_mock_metaserver(token_url, 'API-TOKEN') - for version in all_versions: - metadata_url = self.data_url(version) + '/' - if version == md_version: - # Register all metadata for desired version - register_mock_metaserver( - metadata_url, md.get('md', DEFAULT_METADATA)) - userdata_url = self.data_url( - version, data_item='user-data') - register_mock_metaserver(userdata_url, md.get('ud', '')) - identity_url = self.data_url( - version, data_item='dynamic/instance-identity') - register_mock_metaserver( - identity_url, md.get('id', DYNAMIC_METADATA)) - else: - instance_id_url = metadata_url + 'instance-id' - if version == ds.min_metadata_version: - # Add min_metadata_version service availability check - register_mock_metaserver( - 
instance_id_url, DEFAULT_METADATA['instance-id']) - else: - # Register 404s for all unrequested extended versions - register_mock_metaserver(instance_id_url, None) - return ds - - def test_network_config_property_returns_version_2_network_data(self): - """network_config property returns network version 2 for metadata""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md={'md': DEFAULT_METADATA}) - find_fallback_path = M_PATH_NET + 'find_fallback_nic' - with mock.patch(find_fallback_path) as m_find_fallback: - m_find_fallback.return_value = 'eth9' - ds.get_data() - - mac1 = '06:17:04:d7:26:09' # Defined in DEFAULT_METADATA - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': '06:17:04:d7:26:09'}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': True}}} - patch_path = M_PATH_NET + 'get_interfaces_by_mac' - get_interface_mac_path = M_PATH_NET + 'get_interface_mac' - with mock.patch(patch_path) as m_get_interfaces_by_mac: - with mock.patch(find_fallback_path) as m_find_fallback: - with mock.patch(get_interface_mac_path) as m_get_mac: - m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} - m_find_fallback.return_value = 'eth9' - m_get_mac.return_value = mac1 - self.assertEqual(expected, ds.network_config) - - def test_network_config_property_set_dhcp4(self): - """network_config property configures dhcp4 on nics with local-ipv4s. - - Only one device is configured based on get_interfaces_by_mac even when - multiple MACs exist in metadata. - """ - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md={'md': DEFAULT_METADATA}) - find_fallback_path = M_PATH_NET + 'find_fallback_nic' - with mock.patch(find_fallback_path) as m_find_fallback: - m_find_fallback.return_value = 'eth9' - ds.get_data() - - mac1 = '06:17:04:d7:26:08' # IPv4 only in DEFAULT_METADATA - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': mac1.lower()}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': False}}} - patch_path = M_PATH_NET + 'get_interfaces_by_mac' - get_interface_mac_path = M_PATH_NET + 'get_interface_mac' - with mock.patch(patch_path) as m_get_interfaces_by_mac: - with mock.patch(find_fallback_path) as m_find_fallback: - with mock.patch(get_interface_mac_path) as m_get_mac: - m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} - m_find_fallback.return_value = 'eth9' - m_get_mac.return_value = mac1 - self.assertEqual(expected, ds.network_config) - - def test_network_config_property_secondary_private_ips(self): - """network_config property configures any secondary ipv4 addresses. - - Only one device is configured based on get_interfaces_by_mac even when - multiple MACs exist in metadata. 
- """ - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md={'md': SECONDARY_IP_METADATA_2018_09_24}) - find_fallback_path = M_PATH_NET + 'find_fallback_nic' - with mock.patch(find_fallback_path) as m_find_fallback: - m_find_fallback.return_value = 'eth9' - ds.get_data() - - mac1 = '0a:07:84:3d:6e:38' # 1 secondary IPv4 and 2 secondary IPv6 - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': mac1}, 'set-name': 'eth9', - 'addresses': ['172.31.45.70/20', - '2600:1f16:292:100:f152:2222:3333:4444/128', - '2600:1f16:292:100:f153:12a3:c37c:11f9/128'], - 'dhcp4': True, 'dhcp6': True}}} - patch_path = M_PATH_NET + 'get_interfaces_by_mac' - get_interface_mac_path = M_PATH_NET + 'get_interface_mac' - with mock.patch(patch_path) as m_get_interfaces_by_mac: - with mock.patch(find_fallback_path) as m_find_fallback: - with mock.patch(get_interface_mac_path) as m_get_mac: - m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} - m_find_fallback.return_value = 'eth9' - m_get_mac.return_value = mac1 - self.assertEqual(expected, ds.network_config) - - def test_network_config_property_is_cached_in_datasource(self): - """network_config property is cached in DataSourceEc2.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md={'md': DEFAULT_METADATA}) - ds._network_config = {'cached': 'data'} - self.assertEqual({'cached': 'data'}, ds.network_config) - - @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') - def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp): - """Refresh the network_config Ec2 cache if network key is absent. - - This catches an upgrade issue where obj.pkl contained stale metadata - which lacked newly required network key. - """ - old_metadata = copy.deepcopy(DEFAULT_METADATA) - old_metadata.pop('network') - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md={'md': old_metadata}) - self.assertTrue(ds.get_data()) - # Provide new revision of metadata that contains network data - register_mock_metaserver( - 'http://169.254.169.254/2009-04-04/meta-data/', DEFAULT_METADATA) - mac1 = '06:17:04:d7:26:09' # Defined in DEFAULT_METADATA - get_interface_mac_path = M_PATH_NET + 'get_interfaces_by_mac' - ds.fallback_nic = 'eth9' - with mock.patch(get_interface_mac_path) as m_get_interfaces_by_mac: - m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} - nc = ds.network_config # Will re-crawl network metadata - self.assertIsNotNone(nc) - self.assertIn( - 'Refreshing stale metadata from prior to upgrade', - self.logs.getvalue()) - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': True}}} - self.assertEqual(expected, ds.network_config) - - def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self): - """get_instance-id gets DataSourceEc2Local.identity if not present. - - This handles an upgrade case where the old pickled datasource didn't - set up self.identity, but 'systemctl cloud-init init' runs - get_instance_id which traces on missing self.identity. lp:1748354. 
- """ - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) - # Mock 404s on all versions except latest - all_versions = ( - [ds.min_metadata_version] + ds.extended_metadata_versions) - for ver in all_versions[:-1]: - register_mock_metaserver( - 'http://169.254.169.254/{0}/meta-data/instance-id'.format(ver), - None) - ds.metadata_address = 'http://169.254.169.254' - register_mock_metaserver( - '{0}/{1}/meta-data/'.format(ds.metadata_address, all_versions[-1]), - DEFAULT_METADATA) - # Register dynamic/instance-identity document which we now read. - register_mock_metaserver( - '{0}/{1}/dynamic/'.format(ds.metadata_address, all_versions[-1]), - DYNAMIC_METADATA) - ds._cloud_name = ec2.CloudNames.AWS - # Setup cached metadata on the Datasource - ds.metadata = DEFAULT_METADATA - self.assertEqual('my-identity-id', ds.get_instance_id()) - - def test_classic_instance_true(self): - """If no vpc-id in metadata, is_classic_instance must return true.""" - md_copy = copy.deepcopy(DEFAULT_METADATA) - ifaces_md = md_copy.get('network', {}).get('interfaces', {}) - for _mac, mac_data in ifaces_md.get('macs', {}).items(): - if 'vpc-id' in mac_data: - del mac_data['vpc-id'] - - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': md_copy}) - self.assertTrue(ds.get_data()) - self.assertTrue(ds.is_classic_instance()) - - def test_classic_instance_false(self): - """If vpc-id in metadata, is_classic_instance must return false.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) - self.assertTrue(ds.get_data()) - self.assertFalse(ds.is_classic_instance()) - - def test_aws_inaccessible_imds_service_fails_with_retries(self): - """Inaccessibility of http://169.254.169.254 are retried.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md=None) - - conn_error = requests.exceptions.ConnectionError( - '[Errno 113] no route to host' - ) - - mock_success = mock.MagicMock(contents=b'fakesuccess') - mock_success.ok.return_value = True - - with mock.patch('cloudinit.url_helper.readurl') as m_readurl: - m_readurl.side_effect = (conn_error, conn_error, mock_success) - with mock.patch('cloudinit.url_helper.time.sleep'): - self.assertTrue(ds.wait_for_metadata_service()) - - # Just one /latest/api/token request - self.assertEqual(3, len(m_readurl.call_args_list)) - for readurl_call in m_readurl.call_args_list: - self.assertIn('latest/api/token', readurl_call[0][0]) - - def test_aws_token_403_fails_without_retries(self): - """Verify that 403s fetching AWS tokens are not retried.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md=None) - token_url = self.data_url('latest', data_item='api/token') - httpretty.register_uri(httpretty.PUT, token_url, body={}, status=403) - self.assertFalse(ds.get_data()) - # Just one /latest/api/token request - logs = self.logs.getvalue() - failed_put_log = '"PUT /latest/api/token HTTP/1.1" 403 0' - expected_logs = [ - 'WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is' - ' disabled. 
Aborting.', - "WARNING: IMDS's HTTP endpoint is probably disabled", - failed_put_log - ] - for log in expected_logs: - self.assertIn(log, logs) - self.assertEqual( - 1, - len([line for line in logs.splitlines() if failed_put_log in line]) - ) - - def test_aws_token_redacted(self): - """Verify that aws tokens are redacted when logged.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) - self.assertTrue(ds.get_data()) - all_logs = self.logs.getvalue().splitlines() - REDACT_TTL = "'X-aws-ec2-metadata-token-ttl-seconds': 'REDACTED'" - REDACT_TOK = "'X-aws-ec2-metadata-token': 'REDACTED'" - logs_with_redacted_ttl = [log for log in all_logs if REDACT_TTL in log] - logs_with_redacted = [log for log in all_logs if REDACT_TOK in log] - logs_with_token = [log for log in all_logs if 'API-TOKEN' in log] - self.assertEqual(1, len(logs_with_redacted_ttl)) - self.assertEqual(81, len(logs_with_redacted)) - self.assertEqual(0, len(logs_with_token)) - - @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') - def test_valid_platform_with_strict_true(self, m_dhcp): - """Valid platform data should return true with strict_id true.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md={'md': DEFAULT_METADATA}) - ret = ds.get_data() - self.assertTrue(ret) - self.assertEqual(0, m_dhcp.call_count) - self.assertEqual('aws', ds.cloud_name) - self.assertEqual('ec2', ds.platform_type) - self.assertEqual('metadata (%s)' % ds.metadata_address, ds.subplatform) - - def test_valid_platform_with_strict_false(self): - """Valid platform data should return true with strict_id false.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) - ret = ds.get_data() - self.assertTrue(ret) - - def test_unknown_platform_with_strict_true(self): - """Unknown platform data with strict_id true should return False.""" - uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a' - ds = self._setup_ds( - platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''}, - sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md={'md': DEFAULT_METADATA}) - ret = ds.get_data() - self.assertFalse(ret) - - def test_unknown_platform_with_strict_false(self): - """Unknown platform data with strict_id false should return True.""" - uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a' - ds = self._setup_ds( - platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''}, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) - ret = ds.get_data() - self.assertTrue(ret) - - def test_ec2_local_returns_false_on_non_aws(self): - """DataSourceEc2Local returns False when platform is not AWS.""" - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) - platform_attrs = [ - attr for attr in ec2.CloudNames.__dict__.keys() - if not attr.startswith('__')] - for attr_name in platform_attrs: - platform_name = getattr(ec2.CloudNames, attr_name) - if platform_name != 'aws': - ds._cloud_name = platform_name - ret = ds.get_data() - self.assertEqual('ec2', ds.platform_type) - self.assertFalse(ret) - message = ( - "Local Ec2 mode only supported on ('aws',)," - ' not {0}'.format(platform_name)) - self.assertIn(message, 
self.logs.getvalue()) - - @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD') - def test_ec2_local_returns_false_on_bsd(self, m_is_freebsd): - """DataSourceEc2Local returns False on BSD. - - FreeBSD dhclient doesn't support dhclient -sf to run in a sandbox. - """ - m_is_freebsd.return_value = True - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) - ret = ds.get_data() - self.assertFalse(ret) - self.assertIn( - "FreeBSD doesn't support running dhclient with -sf", - self.logs.getvalue()) - - @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') - @mock.patch('cloudinit.net.find_fallback_nic') - @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') - @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD') - def test_ec2_local_performs_dhcp_on_non_bsd(self, m_is_bsd, m_dhcp, - m_fallback_nic, m_net): - """Ec2Local returns True for valid platform data on non-BSD with dhcp. - - DataSourceEc2Local will setup initial IPv4 network via dhcp discovery. - Then the metadata services is crawled for more network config info. - When the platform data is valid, return True. - """ - - m_fallback_nic.return_value = 'eth9' - m_is_bsd.return_value = False - m_dhcp.return_value = [{ - 'interface': 'eth9', 'fixed-address': '192.168.2.9', - 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', - 'broadcast-address': '192.168.2.255'}] - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) - - ret = ds.get_data() - self.assertTrue(ret) - m_dhcp.assert_called_once_with('eth9', None) - m_net.assert_called_once_with( - broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', - prefix_or_mask='255.255.255.0', router='192.168.2.1', - static_routes=None) - self.assertIn('Crawl of metadata service took', self.logs.getvalue()) - - -class TestGetSecondaryAddresses(test_helpers.CiTestCase): - - mac = '06:17:04:d7:26:ff' - with_logs = True - - def test_md_with_no_secondary_addresses(self): - """Empty list is returned when nic metadata contains no secondary ip""" - self.assertEqual([], ec2.get_secondary_addresses(NIC2_MD, self.mac)) - - def test_md_with_secondary_v4_and_v6_addresses(self): - """All secondary addresses are returned from nic metadata""" - self.assertEqual( - ['172.31.45.70/20', '2600:1f16:292:100:f152:2222:3333:4444/128', - '2600:1f16:292:100:f153:12a3:c37c:11f9/128'], - ec2.get_secondary_addresses(NIC1_MD_IPV4_IPV6_MULTI_IP, self.mac)) - - def test_invalid_ipv4_ipv6_cidr_metadata_logged_with_defaults(self): - """Any invalid subnet-ipv(4|6)-cidr-block values use defaults""" - invalid_cidr_md = copy.deepcopy(NIC1_MD_IPV4_IPV6_MULTI_IP) - invalid_cidr_md['subnet-ipv4-cidr-block'] = "something-unexpected" - invalid_cidr_md['subnet-ipv6-cidr-block'] = "not/sure/what/this/is" - self.assertEqual( - ['172.31.45.70/24', '2600:1f16:292:100:f152:2222:3333:4444/128', - '2600:1f16:292:100:f153:12a3:c37c:11f9/128'], - ec2.get_secondary_addresses(invalid_cidr_md, self.mac)) - expected_logs = [ - "WARNING: Could not parse subnet-ipv4-cidr-block" - " something-unexpected for mac 06:17:04:d7:26:ff." - " ipv4 network config prefix defaults to /24", - "WARNING: Could not parse subnet-ipv6-cidr-block" - " not/sure/what/this/is for mac 06:17:04:d7:26:ff." 
- " ipv6 network config prefix defaults to /128" - ] - logs = self.logs.getvalue() - for log in expected_logs: - self.assertIn(log, logs) - - -class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase): - - def setUp(self): - super(TestConvertEc2MetadataNetworkConfig, self).setUp() - self.mac1 = '06:17:04:d7:26:09' - interface_dict = copy.deepcopy( - DEFAULT_METADATA['network']['interfaces']['macs'][self.mac1]) - # These tests are written assuming the base interface doesn't have IPv6 - interface_dict.pop('ipv6s') - self.network_metadata = { - 'interfaces': {'macs': {self.mac1: interface_dict}}} - - def test_convert_ec2_metadata_network_config_skips_absent_macs(self): - """Any mac absent from metadata is skipped by network config.""" - macs_to_nics = {self.mac1: 'eth9', 'DE:AD:BE:EF:FF:FF': 'vitualnic2'} - - # DE:AD:BE:EF:FF:FF represented by OS but not in metadata - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': False}}} - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - self.network_metadata, macs_to_nics)) - - def test_convert_ec2_metadata_network_config_handles_only_dhcp6(self): - """Config dhcp6 when ipv6s is in metadata for a mac.""" - macs_to_nics = {self.mac1: 'eth9'} - network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = ( - network_metadata_ipv6['interfaces']['macs'][self.mac1]) - nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64' - nic1_metadata.pop('public-ipv4s') - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': True}}} - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, macs_to_nics)) - - def test_convert_ec2_metadata_network_config_local_only_dhcp4(self): - """Config dhcp4 when there are no public addresses in public-ipv4s.""" - macs_to_nics = {self.mac1: 'eth9'} - network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = ( - network_metadata_ipv6['interfaces']['macs'][self.mac1]) - nic1_metadata['local-ipv4s'] = '172.3.3.15' - nic1_metadata.pop('public-ipv4s') - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': False}}} - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, macs_to_nics)) - - def test_convert_ec2_metadata_network_config_handles_absent_dhcp4(self): - """Config dhcp4 on fallback_nic when there are no ipv4 addresses.""" - macs_to_nics = {self.mac1: 'eth9'} - network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = ( - network_metadata_ipv6['interfaces']['macs'][self.mac1]) - nic1_metadata['public-ipv4s'] = '' - - # When no ipv4 or ipv6 content but fallback_nic set, set dhcp4 config. 
- expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': False}}} - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, macs_to_nics, fallback_nic='eth9')) - - def test_convert_ec2_metadata_network_config_handles_local_v4_and_v6(self): - """When ipv6s and local-ipv4s are non-empty, enable dhcp6 and dhcp4.""" - macs_to_nics = {self.mac1: 'eth9'} - network_metadata_both = copy.deepcopy(self.network_metadata) - nic1_metadata = ( - network_metadata_both['interfaces']['macs'][self.mac1]) - nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64' - nic1_metadata.pop('public-ipv4s') - nic1_metadata['local-ipv4s'] = '10.0.0.42' # Local ipv4 only on vpc - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': True}}} - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, macs_to_nics)) - - def test_convert_ec2_metadata_network_config_handles_multiple_nics(self): - """DHCP route-metric increases on secondary NICs for IPv4 and IPv6.""" - mac2 = '06:17:04:d7:26:08' - macs_to_nics = {self.mac1: 'eth9', mac2: 'eth10'} - network_metadata_both = copy.deepcopy(self.network_metadata) - # Add 2nd nic info - network_metadata_both['interfaces']['macs'][mac2] = NIC2_MD - nic1_metadata = ( - network_metadata_both['interfaces']['macs'][self.mac1]) - nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64' - nic1_metadata.pop('public-ipv4s') # No public-ipv4 IPs in cfg - nic1_metadata['local-ipv4s'] = '10.0.0.42' # Local ipv4 only on vpc - expected = {'version': 2, 'ethernets': { - 'eth9': { - 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': True, 'dhcp6-overrides': {'route-metric': 100}}, - 'eth10': { - 'match': {'macaddress': mac2}, 'set-name': 'eth10', - 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 200}, - 'dhcp6': False}}} - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, macs_to_nics)) - - def test_convert_ec2_metadata_network_config_handles_dhcp4_and_dhcp6(self): - """Config both dhcp4 and dhcp6 when both vpc-ipv6 and ipv4 exist.""" - macs_to_nics = {self.mac1: 'eth9'} - network_metadata_both = copy.deepcopy(self.network_metadata) - nic1_metadata = ( - network_metadata_both['interfaces']['macs'][self.mac1]) - nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64' - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': True}}} - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, macs_to_nics)) - - def test_convert_ec2_metadata_gets_macs_from_get_interfaces_by_mac(self): - """convert_ec2_metadata_network_config calls get_interfaces_by_mac by default.""" - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': self.mac1}, - 'set-name': 'eth9', 'dhcp4': True, 'dhcp6': False}}} - patch_path = M_PATH_NET + 'get_interfaces_by_mac' - with mock.patch(patch_path) as m_get_interfaces_by_mac: - m_get_interfaces_by_mac.return_value = {self.mac1: 'eth9'} - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config(self.network_metadata)) - - -class TestIdentifyPlatform(test_helpers.CiTestCase): - - def collmock(self, **kwargs): - """return non-special
_collect_platform_data updated with changes.""" - unspecial = { - 'asset_tag': '3857-0037-2746-7462-1818-3997-77', - 'serial': 'H23-C4J3JV-R6', - 'uuid': '81c7e555-6471-4833-9551-1ab366c4cfd2', - 'uuid_source': 'dmi', - 'vendor': 'tothecloud', - } - unspecial.update(**kwargs) - return unspecial - - @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') - def test_identify_zstack(self, m_collect): - """zstack should be identified if chassis-asset-tag ends in .zstack.io - """ - m_collect.return_value = self.collmock(asset_tag='123456.zstack.io') - self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform()) - - @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') - def test_identify_zstack_full_domain_only(self, m_collect): - """zstack asset-tag matching should match only on full domain boundary. - """ - m_collect.return_value = self.collmock(asset_tag='123456.buzzstack.io') - self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) - - @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') - def test_identify_e24cloud(self, m_collect): - """e24cloud identified if vendor is e24cloud""" - m_collect.return_value = self.collmock(vendor='e24cloud') - self.assertEqual(ec2.CloudNames.E24CLOUD, ec2.identify_platform()) - - @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') - def test_identify_e24cloud_negative(self, m_collect): - """e24cloud not identified if vendor is not exactly e24cloud""" - m_collect.return_value = self.collmock(vendor='e24cloudyday') - self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_exoscale.py b/tests/unittests/test_datasource/test_exoscale.py deleted file mode 100644 index f0061199..00000000 --- a/tests/unittests/test_datasource/test_exoscale.py +++ /dev/null @@ -1,211 +0,0 @@ -# Author: Mathieu Corbin -# Author: Christopher Glass -# -# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import helpers -from cloudinit.sources.DataSourceExoscale import ( - API_VERSION, - DataSourceExoscale, - METADATA_URL, - get_password, - PASSWORD_SERVER_PORT, - read_metadata) -from cloudinit.tests.helpers import HttprettyTestCase, mock -from cloudinit import util - -import httpretty -import os -import requests - - -TEST_PASSWORD_URL = "{}:{}/{}/".format(METADATA_URL, - PASSWORD_SERVER_PORT, - API_VERSION) - -TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL, - API_VERSION) - -TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL, - API_VERSION) - - -@httpretty.activate -class TestDatasourceExoscale(HttprettyTestCase): - - def setUp(self): - super(TestDatasourceExoscale, self).setUp() - self.tmp = self.tmp_dir() - self.password_url = TEST_PASSWORD_URL - self.metadata_url = TEST_METADATA_URL - self.userdata_url = TEST_USERDATA_URL - - def test_password_saved(self): - """The password is not set when the metadata service returns - the 'saved_password' sentinel.""" - httpretty.register_uri(httpretty.GET, - self.password_url, - body="saved_password") - self.assertFalse(get_password()) - - def test_password_empty(self): - """No password is set if the metadata service returns - an empty string.""" - httpretty.register_uri(httpretty.GET, - self.password_url, - body="") - self.assertFalse(get_password()) - - def test_password(self): - """The password is set to what is found in the metadata - service.""" - expected_password = "p@ssw0rd" - httpretty.register_uri(httpretty.GET, - self.password_url, - body=expected_password) - password = get_password() - self.assertEqual(expected_password, password) - - def test_activate_removes_set_passwords_semaphore(self): - """Allow set_passwords to run every boot by removing the semaphore.""" - path = helpers.Paths({'cloud_dir': self.tmp}) - sem_dir = self.tmp_path('instance/sem', dir=self.tmp) - util.ensure_dir(sem_dir) - sem_file = os.path.join(sem_dir, 'config_set_passwords') - with open(sem_file, 'w') as stream: - stream.write('') - ds = DataSourceExoscale({}, None, path) - ds.activate(None, None) - self.assertFalse(os.path.exists(sem_file)) - - def test_get_data(self): - """The datasource conforms to expected behavior when supplied - full test data.""" - path = helpers.Paths({'run_dir': self.tmp}) - ds = DataSourceExoscale({}, None, path) - ds._is_platform_viable = lambda: True - expected_password = "p@ssw0rd" - expected_id = "12345" - expected_hostname = "myname" - expected_userdata = "#cloud-config" - httpretty.register_uri(httpretty.GET, - self.userdata_url, - body=expected_userdata) - httpretty.register_uri(httpretty.GET, - self.password_url, - body=expected_password) - httpretty.register_uri(httpretty.GET, - self.metadata_url, - body="instance-id\nlocal-hostname") - httpretty.register_uri(httpretty.GET, - "{}local-hostname".format(self.metadata_url), - body=expected_hostname) - httpretty.register_uri(httpretty.GET, - "{}instance-id".format(self.metadata_url), - body=expected_id) - self.assertTrue(ds._get_data()) - self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") - self.assertEqual(ds.metadata, {"instance-id": expected_id, - "local-hostname": expected_hostname}) - self.assertEqual(ds.get_config_obj(), - {'ssh_pwauth': True, - 'password': expected_password, - 'chpasswd': { - 'expire': False, - }}) - - def test_get_data_saved_password(self): - """The datasource conforms to expected behavior when saved_password is - returned by the password server.""" - path = helpers.Paths({'run_dir': self.tmp}) - ds = DataSourceExoscale({},
None, path) - ds._is_platform_viable = lambda: True - expected_answer = "saved_password" - expected_id = "12345" - expected_hostname = "myname" - expected_userdata = "#cloud-config" - httpretty.register_uri(httpretty.GET, - self.userdata_url, - body=expected_userdata) - httpretty.register_uri(httpretty.GET, - self.password_url, - body=expected_answer) - httpretty.register_uri(httpretty.GET, - self.metadata_url, - body="instance-id\nlocal-hostname") - httpretty.register_uri(httpretty.GET, - "{}local-hostname".format(self.metadata_url), - body=expected_hostname) - httpretty.register_uri(httpretty.GET, - "{}instance-id".format(self.metadata_url), - body=expected_id) - self.assertTrue(ds._get_data()) - self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") - self.assertEqual(ds.metadata, {"instance-id": expected_id, - "local-hostname": expected_hostname}) - self.assertEqual(ds.get_config_obj(), {}) - - def test_get_data_no_password(self): - """The datasource conforms to expected behavior when no password is - returned by the password server.""" - path = helpers.Paths({'run_dir': self.tmp}) - ds = DataSourceExoscale({}, None, path) - ds._is_platform_viable = lambda: True - expected_answer = "" - expected_id = "12345" - expected_hostname = "myname" - expected_userdata = "#cloud-config" - httpretty.register_uri(httpretty.GET, - self.userdata_url, - body=expected_userdata) - httpretty.register_uri(httpretty.GET, - self.password_url, - body=expected_answer) - httpretty.register_uri(httpretty.GET, - self.metadata_url, - body="instance-id\nlocal-hostname") - httpretty.register_uri(httpretty.GET, - "{}local-hostname".format(self.metadata_url), - body=expected_hostname) - httpretty.register_uri(httpretty.GET, - "{}instance-id".format(self.metadata_url), - body=expected_id) - self.assertTrue(ds._get_data()) - self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") - self.assertEqual(ds.metadata, {"instance-id": expected_id, - "local-hostname": expected_hostname}) - self.assertEqual(ds.get_config_obj(), {}) - - @mock.patch('cloudinit.sources.DataSourceExoscale.get_password') - def test_read_metadata_when_password_server_unreachable(self, m_password): - """The read_metadata function returns partial results in case the - password server (only) is unreachable.""" - expected_id = "12345" - expected_hostname = "myname" - expected_userdata = "#cloud-config" - - m_password.side_effect = requests.Timeout('Fake Connection Timeout') - httpretty.register_uri(httpretty.GET, - self.userdata_url, - body=expected_userdata) - httpretty.register_uri(httpretty.GET, - self.metadata_url, - body="instance-id\nlocal-hostname") - httpretty.register_uri(httpretty.GET, - "{}local-hostname".format(self.metadata_url), - body=expected_hostname) - httpretty.register_uri(httpretty.GET, - "{}instance-id".format(self.metadata_url), - body=expected_id) - - result = read_metadata() - - self.assertIsNone(result.get("password")) - self.assertEqual(result.get("user-data").decode("utf-8"), - expected_userdata) - - def test_non_viable_platform(self): - """The datasource fails fast when the platform is not viable.""" - path = helpers.Paths({'run_dir': self.tmp}) - ds = DataSourceExoscale({}, None, path) - ds._is_platform_viable = lambda: False - self.assertFalse(ds._get_data()) diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py deleted file mode 100644 index 1d91b301..00000000 --- a/tests/unittests/test_datasource/test_gce.py +++ /dev/null @@ -1,388 +0,0 @@ -# Copyright 
(C) 2014 Vaidas Jablonskis -# -# Author: Vaidas Jablonskis -# -# This file is part of cloud-init. See LICENSE file for license information. - -import datetime -import httpretty -import json -import re -from unittest import mock -from urllib.parse import urlparse - -from base64 import b64encode, b64decode - -from cloudinit import distros -from cloudinit import helpers -from cloudinit import settings -from cloudinit.sources import DataSourceGCE - -from cloudinit.tests import helpers as test_helpers - - -GCE_META = { - 'instance/id': '123', - 'instance/zone': 'foo/bar', - 'instance/hostname': 'server.project-foo.local', -} - -GCE_META_PARTIAL = { - 'instance/id': '1234', - 'instance/hostname': 'server.project-bar.local', - 'instance/zone': 'bar/baz', -} - -GCE_META_ENCODING = { - 'instance/id': '12345', - 'instance/hostname': 'server.project-baz.local', - 'instance/zone': 'baz/bang', - 'instance/attributes': { - 'user-data': b64encode(b'#!/bin/echo baz\n').decode('utf-8'), - 'user-data-encoding': 'base64', - } -} - -GCE_USER_DATA_TEXT = { - 'instance/id': '12345', - 'instance/hostname': 'server.project-baz.local', - 'instance/zone': 'baz/bang', - 'instance/attributes': { - 'user-data': '#!/bin/sh\necho hi mom\ntouch /run/up-now\n', - } -} - -HEADERS = {'Metadata-Flavor': 'Google'} -MD_URL_RE = re.compile( - r'http://metadata.google.internal/computeMetadata/v1/.*') -GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/' - 'v1/instance/guest-attributes/hostkeys/') - - -def _set_mock_metadata(gce_meta=None): - if gce_meta is None: - gce_meta = GCE_META - - def _request_callback(method, uri, headers): - url_path = urlparse(uri).path - if url_path.startswith('/computeMetadata/v1/'): - path = url_path.split('/computeMetadata/v1/')[1:][0] - recursive = path.endswith('/') - path = path.rstrip('/') - else: - path = None - if path in gce_meta: - response = gce_meta.get(path) - if recursive: - response = json.dumps(response) - return (200, headers, response) - else: - return (404, headers, '') - - # reset is needed. 
https://github.com/gabrielfalcao/HTTPretty/issues/316 - httpretty.register_uri(httpretty.GET, MD_URL_RE, body=_request_callback) - - -@httpretty.activate -class TestDataSourceGCE(test_helpers.HttprettyTestCase): - - def _make_distro(self, dtype, def_user=None): - cfg = dict(settings.CFG_BUILTIN) - cfg['system_info']['distro'] = dtype - paths = helpers.Paths(cfg['system_info']['paths']) - distro_cls = distros.fetch(dtype) - if def_user: - cfg['system_info']['default_user'] = def_user.copy() - distro = distro_cls(dtype, cfg['system_info'], paths) - return distro - - def setUp(self): - tmp = self.tmp_dir() - self.ds = DataSourceGCE.DataSourceGCE( - settings.CFG_BUILTIN, None, - helpers.Paths({'run_dir': tmp})) - ppatch = self.m_platform_reports_gce = mock.patch( - 'cloudinit.sources.DataSourceGCE.platform_reports_gce') - self.m_platform_reports_gce = ppatch.start() - self.m_platform_reports_gce.return_value = True - self.addCleanup(ppatch.stop) - self.add_patch('time.sleep', 'm_sleep') # just to speed up tests - super(TestDataSourceGCE, self).setUp() - - def test_connection(self): - _set_mock_metadata() - success = self.ds.get_data() - self.assertTrue(success) - - req_header = httpretty.last_request().headers - for header_name, expected_value in HEADERS.items(): - self.assertEqual(expected_value, req_header.get(header_name)) - - def test_metadata(self): - # UnicodeDecodeError if set to ds.userdata instead of userdata_raw - meta = GCE_META.copy() - meta['instance/attributes/user-data'] = b'/bin/echo \xff\n' - - _set_mock_metadata() - self.ds.get_data() - - shostname = GCE_META.get('instance/hostname').split('.')[0] - self.assertEqual(shostname, - self.ds.get_hostname()) - - self.assertEqual(GCE_META.get('instance/id'), - self.ds.get_instance_id()) - - self.assertEqual(GCE_META.get('instance/attributes/user-data'), - self.ds.get_userdata_raw()) - - # test partial metadata (missing user-data in particular) - def test_metadata_partial(self): - _set_mock_metadata(GCE_META_PARTIAL) - self.ds.get_data() - - self.assertEqual(GCE_META_PARTIAL.get('instance/id'), - self.ds.get_instance_id()) - - shostname = GCE_META_PARTIAL.get('instance/hostname').split('.')[0] - self.assertEqual(shostname, self.ds.get_hostname()) - - def test_userdata_no_encoding(self): - """check that user-data is read.""" - _set_mock_metadata(GCE_USER_DATA_TEXT) - self.ds.get_data() - self.assertEqual( - GCE_USER_DATA_TEXT['instance/attributes']['user-data'].encode(), - self.ds.get_userdata_raw()) - - def test_metadata_encoding(self): - """user-data is base64 encoded if user-data-encoding is 'base64'.""" - _set_mock_metadata(GCE_META_ENCODING) - self.ds.get_data() - - instance_data = GCE_META_ENCODING.get('instance/attributes') - decoded = b64decode(instance_data.get('user-data')) - self.assertEqual(decoded, self.ds.get_userdata_raw()) - - def test_missing_required_keys_return_false(self): - for required_key in ['instance/id', 'instance/zone', - 'instance/hostname']: - meta = GCE_META_PARTIAL.copy() - del meta[required_key] - _set_mock_metadata(meta) - self.assertEqual(False, self.ds.get_data()) - httpretty.reset() - - def test_no_ssh_keys_metadata(self): - _set_mock_metadata() - self.ds.get_data() - self.assertEqual([], self.ds.get_public_ssh_keys()) - - def test_cloudinit_ssh_keys(self): - valid_key = 'ssh-rsa VALID {0}' - invalid_key = 'ssh-rsa INVALID {0}' - project_attributes = { - 'sshKeys': '\n'.join([ - 'cloudinit:{0}'.format(valid_key.format(0)), - 'user:{0}'.format(invalid_key.format(0)), - ]), - 'ssh-keys': '\n'.join([ - 
'cloudinit:{0}'.format(valid_key.format(1)), - 'user:{0}'.format(invalid_key.format(1)), - ]), - } - instance_attributes = { - 'ssh-keys': '\n'.join([ - 'cloudinit:{0}'.format(valid_key.format(2)), - 'user:{0}'.format(invalid_key.format(2)), - ]), - 'block-project-ssh-keys': 'False', - } - - meta = GCE_META.copy() - meta['project/attributes'] = project_attributes - meta['instance/attributes'] = instance_attributes - - _set_mock_metadata(meta) - self.ds.get_data() - - expected = [valid_key.format(key) for key in range(3)] - self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys())) - - @mock.patch("cloudinit.sources.DataSourceGCE.ug_util") - def test_default_user_ssh_keys(self, mock_ug_util): - mock_ug_util.normalize_users_groups.return_value = None, None - mock_ug_util.extract_default.return_value = 'ubuntu', None - ubuntu_ds = DataSourceGCE.DataSourceGCE( - settings.CFG_BUILTIN, self._make_distro('ubuntu'), - helpers.Paths({'run_dir': self.tmp_dir()})) - - valid_key = 'ssh-rsa VALID {0}' - invalid_key = 'ssh-rsa INVALID {0}' - project_attributes = { - 'sshKeys': '\n'.join([ - 'ubuntu:{0}'.format(valid_key.format(0)), - 'user:{0}'.format(invalid_key.format(0)), - ]), - 'ssh-keys': '\n'.join([ - 'ubuntu:{0}'.format(valid_key.format(1)), - 'user:{0}'.format(invalid_key.format(1)), - ]), - } - instance_attributes = { - 'ssh-keys': '\n'.join([ - 'ubuntu:{0}'.format(valid_key.format(2)), - 'user:{0}'.format(invalid_key.format(2)), - ]), - 'block-project-ssh-keys': 'False', - } - - meta = GCE_META.copy() - meta['project/attributes'] = project_attributes - meta['instance/attributes'] = instance_attributes - - _set_mock_metadata(meta) - ubuntu_ds.get_data() - - expected = [valid_key.format(key) for key in range(3)] - self.assertEqual(set(expected), set(ubuntu_ds.get_public_ssh_keys())) - - def test_instance_ssh_keys_override(self): - valid_key = 'ssh-rsa VALID {0}' - invalid_key = 'ssh-rsa INVALID {0}' - project_attributes = { - 'sshKeys': 'cloudinit:{0}'.format(invalid_key.format(0)), - 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)), - } - instance_attributes = { - 'sshKeys': 'cloudinit:{0}'.format(valid_key.format(0)), - 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(1)), - 'block-project-ssh-keys': 'False', - } - - meta = GCE_META.copy() - meta['project/attributes'] = project_attributes - meta['instance/attributes'] = instance_attributes - - _set_mock_metadata(meta) - self.ds.get_data() - - expected = [valid_key.format(key) for key in range(2)] - self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys())) - - def test_block_project_ssh_keys_override(self): - valid_key = 'ssh-rsa VALID {0}' - invalid_key = 'ssh-rsa INVALID {0}' - project_attributes = { - 'sshKeys': 'cloudinit:{0}'.format(invalid_key.format(0)), - 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)), - } - instance_attributes = { - 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(0)), - 'block-project-ssh-keys': 'True', - } - - meta = GCE_META.copy() - meta['project/attributes'] = project_attributes - meta['instance/attributes'] = instance_attributes - - _set_mock_metadata(meta) - self.ds.get_data() - - expected = [valid_key.format(0)] - self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys())) - - def test_only_last_part_of_zone_used_for_availability_zone(self): - _set_mock_metadata() - r = self.ds.get_data() - self.assertEqual(True, r) - self.assertEqual('bar', self.ds.availability_zone) - - @mock.patch("cloudinit.sources.DataSourceGCE.GoogleMetadataFetcher") - def 
test_get_data_returns_false_if_not_on_gce(self, m_fetcher): - self.m_platform_reports_gce.return_value = False - ret = self.ds.get_data() - self.assertEqual(False, ret) - m_fetcher.assert_not_called() - - def test_has_expired(self): - - def _get_timestamp(days): - format_str = '%Y-%m-%dT%H:%M:%S+0000' - today = datetime.datetime.now() - timestamp = today + datetime.timedelta(days=days) - return timestamp.strftime(format_str) - - past = _get_timestamp(-1) - future = _get_timestamp(1) - ssh_keys = { - None: False, - '': False, - 'Invalid': False, - 'user:ssh-rsa key user@domain.com': False, - 'user:ssh-rsa key google {"expireOn":"%s"}' % past: False, - 'user:ssh-rsa key google-ssh': False, - 'user:ssh-rsa key google-ssh {invalid:json}': False, - 'user:ssh-rsa key google-ssh {"userName":"user"}': False, - 'user:ssh-rsa key google-ssh {"expireOn":"invalid"}': False, - 'user:xyz key google-ssh {"expireOn":"%s"}' % future: False, - 'user:xyz key google-ssh {"expireOn":"%s"}' % past: True, - } - - for key, expired in ssh_keys.items(): - self.assertEqual(DataSourceGCE._has_expired(key), expired) - - def test_parse_public_keys_non_ascii(self): - public_key_data = [ - 'cloudinit:rsa ssh-ke%s invalid' % chr(165), - 'use%sname:rsa ssh-key' % chr(174), - 'cloudinit:test 1', - 'default:test 2', - 'user:test 3', - ] - expected = ['test 1', 'test 2'] - found = DataSourceGCE._parse_public_keys( - public_key_data, default_user='default') - self.assertEqual(sorted(found), sorted(expected)) - - @mock.patch("cloudinit.url_helper.readurl") - def test_publish_host_keys(self, m_readurl): - hostkeys = [('ssh-rsa', 'asdfasdf'), - ('ssh-ed25519', 'qwerqwer')] - readurl_expected_calls = [ - mock.call(check_status=False, data=b'asdfasdf', headers=HEADERS, - request_method='PUT', - url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-rsa')), - mock.call(check_status=False, data=b'qwerqwer', headers=HEADERS, - request_method='PUT', - url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-ed25519')), - ] - self.ds.publish_host_keys(hostkeys) - m_readurl.assert_has_calls(readurl_expected_calls, any_order=True) - - @mock.patch( - "cloudinit.sources.DataSourceGCE.EphemeralDHCPv4", - autospec=True, - ) - @mock.patch( - "cloudinit.sources.DataSourceGCE.DataSourceGCELocal.fallback_interface" - ) - def test_local_datasource_uses_ephemeral_dhcp(self, _m_fallback, m_dhcp): - _set_mock_metadata() - ds = DataSourceGCE.DataSourceGCELocal( - sys_cfg={}, distro=None, paths=None - ) - ds._get_data() - assert m_dhcp.call_count == 1 - - @mock.patch( - "cloudinit.sources.DataSourceGCE.EphemeralDHCPv4", - autospec=True, - ) - def test_datasource_doesnt_use_ephemeral_dhcp(self, m_dhcp): - _set_mock_metadata() - ds = DataSourceGCE.DataSourceGCE(sys_cfg={}, distro=None, paths=None) - ds._get_data() - assert m_dhcp.call_count == 0 - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_hetzner.py b/tests/unittests/test_datasource/test_hetzner.py deleted file mode 100644 index eadb92f1..00000000 --- a/tests/unittests/test_datasource/test_hetzner.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright (C) 2018 Jonas Keidel -# -# Author: Jonas Keidel -# -# This file is part of cloud-init. See LICENSE file for license information. 
- -from cloudinit.sources import DataSourceHetzner -import cloudinit.sources.helpers.hetzner as hc_helper -from cloudinit import util, settings, helpers - -from cloudinit.tests.helpers import mock, CiTestCase - -import base64 -import pytest - -METADATA = util.load_yaml(""" -hostname: cloudinit-test -instance-id: 123456 -local-ipv4: '' -network-config: - config: - - mac_address: 96:00:00:08:19:da - name: eth0 - subnets: - - dns_nameservers: - - 213.133.99.99 - - 213.133.100.100 - - 213.133.98.98 - ipv4: true - type: dhcp - type: physical - - name: eth0:0 - subnets: - - address: 2a01:4f8:beef:beef::1/64 - gateway: fe80::1 - ipv6: true - routes: - - gateway: fe80::1%eth0 - netmask: 0 - network: '::' - type: static - type: physical - version: 1 -network-sysconfig: "DEVICE='eth0'\nTYPE=Ethernet\nBOOTPROTO=dhcp\n\ - ONBOOT='yes'\nHWADDR=96:00:00:08:19:da\n\ - IPV6INIT=yes\nIPV6ADDR=2a01:4f8:beef:beef::1/64\n\ - IPV6_DEFAULTGW=fe80::1%eth0\nIPV6_AUTOCONF=no\n\ - DNS1=213.133.99.99\nDNS2=213.133.100.100\n" -public-ipv4: 192.168.0.1 -public-keys: -- ssh-ed25519 \ - AAAAC3Nzac1lZdI1NTE5AaaAIaFrcac0yVITsmRrmueq6MD0qYNKlEvW8O1Ib4nkhmWh \ - test-key@workstation -vendor_data: "test" -""") - -USERDATA = b"""#cloud-config -runcmd: -- [touch, /root/cloud-init-worked ] -""" - - -class TestDataSourceHetzner(CiTestCase): - """ - Test reading the meta-data - """ - def setUp(self): - super(TestDataSourceHetzner, self).setUp() - self.tmp = self.tmp_dir() - - def get_ds(self): - ds = DataSourceHetzner.DataSourceHetzner( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) - return ds - - @mock.patch('cloudinit.net.EphemeralIPv4Network') - @mock.patch('cloudinit.net.find_fallback_nic') - @mock.patch('cloudinit.sources.helpers.hetzner.read_metadata') - @mock.patch('cloudinit.sources.helpers.hetzner.read_userdata') - @mock.patch('cloudinit.sources.DataSourceHetzner.get_hcloud_data') - def test_read_data(self, m_get_hcloud_data, m_usermd, m_readmd, - m_fallback_nic, m_net): - m_get_hcloud_data.return_value = (True, - str(METADATA.get('instance-id'))) - m_readmd.return_value = METADATA.copy() - m_usermd.return_value = USERDATA - m_fallback_nic.return_value = 'eth0' - - ds = self.get_ds() - ret = ds.get_data() - self.assertTrue(ret) - - m_net.assert_called_once_with( - 'eth0', '169.254.0.1', - 16, '169.254.255.255' - ) - - self.assertTrue(m_readmd.called) - - self.assertEqual(METADATA.get('hostname'), ds.get_hostname()) - - self.assertEqual(METADATA.get('public-keys'), - ds.get_public_ssh_keys()) - - self.assertIsInstance(ds.get_public_ssh_keys(), list) - self.assertEqual(ds.get_userdata_raw(), USERDATA) - self.assertEqual(ds.get_vendordata_raw(), METADATA.get('vendor_data')) - - @mock.patch('cloudinit.sources.helpers.hetzner.read_metadata') - @mock.patch('cloudinit.net.find_fallback_nic') - @mock.patch('cloudinit.sources.DataSourceHetzner.get_hcloud_data') - def test_not_on_hetzner_returns_false(self, m_get_hcloud_data, - m_find_fallback, m_read_md): - """If helper 'get_hcloud_data' returns False, - return False from get_data.""" - m_get_hcloud_data.return_value = (False, None) - ds = self.get_ds() - ret = ds.get_data() - - self.assertFalse(ret) - # These are a white box attempt to ensure it did not search. 
- m_find_fallback.assert_not_called() - m_read_md.assert_not_called() - - -class TestMaybeB64Decode: - """Test the maybe_b64decode helper function.""" - - @pytest.mark.parametrize("invalid_input", (str("not bytes"), int(4))) - def test_raises_error_on_non_bytes(self, invalid_input): - """maybe_b64decode should raise error if data is not bytes.""" - with pytest.raises(TypeError): - hc_helper.maybe_b64decode(invalid_input) - - @pytest.mark.parametrize("in_data,expected", [ - # If data is not b64 encoded, then return value should be the same. - (b"this is my data", b"this is my data"), - # If data is b64 encoded, then return value should be decoded. - (base64.b64encode(b"data"), b"data"), - ]) - def test_happy_path(self, in_data, expected): - assert expected == hc_helper.maybe_b64decode(in_data) diff --git a/tests/unittests/test_datasource/test_ibmcloud.py b/tests/unittests/test_datasource/test_ibmcloud.py deleted file mode 100644 index 9013ae9f..00000000 --- a/tests/unittests/test_datasource/test_ibmcloud.py +++ /dev/null @@ -1,343 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit.helpers import Paths -from cloudinit.sources import DataSourceIBMCloud as ibm -from cloudinit.tests import helpers as test_helpers -from cloudinit import util - -import base64 -import copy -import json -from textwrap import dedent - -mock = test_helpers.mock - -D_PATH = "cloudinit.sources.DataSourceIBMCloud." - - -@mock.patch(D_PATH + "_is_xen", return_value=True) -@mock.patch(D_PATH + "_is_ibm_provisioning") -@mock.patch(D_PATH + "util.blkid") -class TestGetIBMPlatform(test_helpers.CiTestCase): - """Test the get_ibm_platform helper.""" - - blkid_base = { - "/dev/xvda1": { - "DEVNAME": "/dev/xvda1", "LABEL": "cloudimg-bootfs", - "TYPE": "ext3"}, - "/dev/xvda2": { - "DEVNAME": "/dev/xvda2", "LABEL": "cloudimg-rootfs", - "TYPE": "ext4"}, - } - - blkid_metadata_disk = { - "/dev/xvdh1": { - "DEVNAME": "/dev/xvdh1", "LABEL": "METADATA", "TYPE": "vfat", - "SEC_TYPE": "msdos", "UUID": "681B-8C5D", - "PARTUUID": "3d631e09-01"}, - } - - blkid_oscode_disk = { - "/dev/xvdh": { - "DEVNAME": "/dev/xvdh", "LABEL": "config-2", "TYPE": "vfat", - "SEC_TYPE": "msdos", "UUID": ibm.IBM_CONFIG_UUID} - } - - def setUp(self): - self.blkid_metadata = copy.deepcopy(self.blkid_base) - self.blkid_metadata.update(copy.deepcopy(self.blkid_metadata_disk)) - - self.blkid_oscode = copy.deepcopy(self.blkid_base) - self.blkid_oscode.update(copy.deepcopy(self.blkid_oscode_disk)) - - def test_id_template_live_metadata(self, m_blkid, m_is_prov, _m_xen): - """identify TEMPLATE_LIVE_METADATA.""" - m_blkid.return_value = self.blkid_metadata - m_is_prov.return_value = False - self.assertEqual( - (ibm.Platforms.TEMPLATE_LIVE_METADATA, "/dev/xvdh1"), - ibm.get_ibm_platform()) - - def test_id_template_prov_metadata(self, m_blkid, m_is_prov, _m_xen): - """identify TEMPLATE_PROVISIONING_METADATA.""" - m_blkid.return_value = self.blkid_metadata - m_is_prov.return_value = True - self.assertEqual( - (ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh1"), - ibm.get_ibm_platform()) - - def test_id_template_prov_nodata(self, m_blkid, m_is_prov, _m_xen): - """identify TEMPLATE_PROVISIONING_NODATA.""" - m_blkid.return_value = self.blkid_base - m_is_prov.return_value = True - self.assertEqual( - (ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None), - ibm.get_ibm_platform()) - - def test_id_os_code(self, m_blkid, m_is_prov, _m_xen): - """Identify OS_CODE.""" - m_blkid.return_value = self.blkid_oscode - 
m_is_prov.return_value = False - self.assertEqual((ibm.Platforms.OS_CODE, "/dev/xvdh"), - ibm.get_ibm_platform()) - - def test_id_os_code_must_match_uuid(self, m_blkid, m_is_prov, _m_xen): - """Test against false positive on openstack with non-ibm UUID.""" - blkid = self.blkid_oscode - blkid["/dev/xvdh"]["UUID"] = "9999-9999" - m_blkid.return_value = blkid - m_is_prov.return_value = False - self.assertEqual((None, None), ibm.get_ibm_platform()) - - -@mock.patch(D_PATH + "_read_system_uuid", return_value=None) -@mock.patch(D_PATH + "get_ibm_platform") -class TestReadMD(test_helpers.CiTestCase): - """Test the read_datasource helper.""" - - template_md = { - "files": [], - "network_config": {"content_path": "/content/interfaces"}, - "hostname": "ci-fond-ram", - "name": "ci-fond-ram", - "domain": "testing.ci.cloud-init.org", - "meta": {"dsmode": "net"}, - "uuid": "8e636730-9f5d-c4a5-327c-d7123c46e82f", - "public_keys": {"1091307": "ssh-rsa AAAAB3NzaC1...Hw== ci-pubkey"}, - } - - oscode_md = { - "hostname": "ci-grand-gannet.testing.ci.cloud-init.org", - "name": "ci-grand-gannet", - "uuid": "2f266908-8e6c-4818-9b5c-42e9cc66a785", - "random_seed": "bm90LXJhbmRvbQo=", - "crypt_key": "ssh-rsa AAAAB3NzaC1yc2..n6z/", - "configuration_token": "eyJhbGciOi..M3ZA", - "public_keys": {"1091307": "ssh-rsa AAAAB3N..Hw== ci-pubkey"}, - } - - content_interfaces = dedent("""\ - auto lo - iface lo inet loopback - - auto eth0 - allow-hotplug eth0 - iface eth0 inet static - address 10.82.43.5 - netmask 255.255.255.192 - """) - - userdata = b"#!/bin/sh\necho hi mom\n" - # meta.js file gets json encoded userdata as a list. - meta_js = '["#!/bin/sh\necho hi mom\n"]' - vendor_data = { - "cloud-init": "#!/bin/bash\necho 'root:$6$5ab01p1m1' | chpasswd -e"} - - network_data = { - "links": [ - {"id": "interface_29402281", "name": "eth0", "mtu": None, - "type": "phy", "ethernet_mac_address": "06:00:f1:bd:da:25"}, - {"id": "interface_29402279", "name": "eth1", "mtu": None, - "type": "phy", "ethernet_mac_address": "06:98:5e:d0:7f:86"} - ], - "networks": [ - {"id": "network_109887563", "link": "interface_29402281", - "type": "ipv4", "ip_address": "10.82.43.2", - "netmask": "255.255.255.192", - "routes": [ - {"network": "10.0.0.0", "netmask": "255.0.0.0", - "gateway": "10.82.43.1"}, - {"network": "161.26.0.0", "netmask": "255.255.0.0", - "gateway": "10.82.43.1"}]}, - {"id": "network_109887551", "link": "interface_29402279", - "type": "ipv4", "ip_address": "108.168.194.252", - "netmask": "255.255.255.248", - "routes": [ - {"network": "0.0.0.0", "netmask": "0.0.0.0", - "gateway": "108.168.194.249"}]} - ], - "services": [ - {"type": "dns", "address": "10.0.80.11"}, - {"type": "dns", "address": "10.0.80.12"} - ], - } - - sysuuid = '7f79ebf5-d791-43c3-a723-854e8389d59f' - - def _get_expected_metadata(self, os_md): - """return expected 'metadata' for data loaded from meta_data.json.""" - os_md = copy.deepcopy(os_md) - renames = ( - ('hostname', 'local-hostname'), - ('uuid', 'instance-id'), - ('public_keys', 'public-keys')) - ret = {} - for osname, mdname in renames: - if osname in os_md: - ret[mdname] = os_md[osname] - if 'random_seed' in os_md: - ret['random_seed'] = base64.b64decode(os_md['random_seed']) - - return ret - - def test_provisioning_md(self, m_platform, m_sysuuid): - """Provisioning env with a metadata disk should return None.""" - m_platform.return_value = ( - ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh") - self.assertIsNone(ibm.read_md()) - - def test_provisioning_no_metadata(self, m_platform, 
m_sysuuid): - """Provisioning env with no metadata disk should return None.""" - m_platform.return_value = ( - ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None) - self.assertIsNone(ibm.read_md()) - - def test_provisioning_not_ibm(self, m_platform, m_sysuuid): - """Provisioning env but not identified as IBM should return None.""" - m_platform.return_value = (None, None) - self.assertIsNone(ibm.read_md()) - - def test_template_live(self, m_platform, m_sysuuid): - """Template live environment should be identified.""" - tmpdir = self.tmp_dir() - m_platform.return_value = ( - ibm.Platforms.TEMPLATE_LIVE_METADATA, tmpdir) - m_sysuuid.return_value = self.sysuuid - - test_helpers.populate_dir(tmpdir, { - 'openstack/latest/meta_data.json': json.dumps(self.template_md), - 'openstack/latest/user_data': self.userdata, - 'openstack/content/interfaces': self.content_interfaces, - 'meta.js': self.meta_js}) - - ret = ibm.read_md() - self.assertEqual(ibm.Platforms.TEMPLATE_LIVE_METADATA, - ret['platform']) - self.assertEqual(tmpdir, ret['source']) - self.assertEqual(self.userdata, ret['userdata']) - self.assertEqual(self._get_expected_metadata(self.template_md), - ret['metadata']) - self.assertEqual(self.sysuuid, ret['system-uuid']) - - def test_os_code_live(self, m_platform, m_sysuuid): - """Verify an os_code metadata path.""" - tmpdir = self.tmp_dir() - m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir) - netdata = json.dumps(self.network_data) - test_helpers.populate_dir(tmpdir, { - 'openstack/latest/meta_data.json': json.dumps(self.oscode_md), - 'openstack/latest/user_data': self.userdata, - 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data), - 'openstack/latest/network_data.json': netdata, - }) - - ret = ibm.read_md() - self.assertEqual(ibm.Platforms.OS_CODE, ret['platform']) - self.assertEqual(tmpdir, ret['source']) - self.assertEqual(self.userdata, ret['userdata']) - self.assertEqual(self._get_expected_metadata(self.oscode_md), - ret['metadata']) - - def test_os_code_live_no_userdata(self, m_platform, m_sysuuid): - """Verify os_code without user-data.""" - tmpdir = self.tmp_dir() - m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir) - test_helpers.populate_dir(tmpdir, { - 'openstack/latest/meta_data.json': json.dumps(self.oscode_md), - 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data), - }) - - ret = ibm.read_md() - self.assertEqual(ibm.Platforms.OS_CODE, ret['platform']) - self.assertEqual(tmpdir, ret['source']) - self.assertIsNone(ret['userdata']) - self.assertEqual(self._get_expected_metadata(self.oscode_md), - ret['metadata']) - - -class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase): - """Test the _is_ibm_provisioning method.""" - inst_log = "/root/swinstall.log" - prov_cfg = "/root/provisioningConfiguration.cfg" - boot_ref = "/proc/1/environ" - with_logs = True - - def _call_with_root(self, rootd): - self.reRoot(rootd) - return ibm._is_ibm_provisioning() - - def test_no_config(self): - """No provisioning config means not provisioning.""" - self.assertFalse(self._call_with_root(self.tmp_dir())) - - def test_config_only(self): - """A provisioning config without a log means provisioning.""" - rootd = self.tmp_dir() - test_helpers.populate_dir(rootd, {self.prov_cfg: "key=value"}) - self.assertTrue(self._call_with_root(rootd)) - - def test_config_with_old_log(self): - """A config with a log from previous boot is not provisioning.""" - rootd = self.tmp_dir() - data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10), - self.inst_log: ("log 
data\n", -30),
-                self.boot_ref: ("PWD=/", 0)}
-        test_helpers.populate_dir_with_ts(rootd, data)
-        self.assertFalse(self._call_with_root(rootd=rootd))
-        self.assertIn("from previous boot", self.logs.getvalue())
-
-    def test_config_with_new_log(self):
-        """A config with a log from this boot is provisioning."""
-        rootd = self.tmp_dir()
-        data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10),
-                self.inst_log: ("log data\n", 30),
-                self.boot_ref: ("PWD=/", 0)}
-        test_helpers.populate_dir_with_ts(rootd, data)
-        self.assertTrue(self._call_with_root(rootd=rootd))
-        self.assertIn("from current boot", self.logs.getvalue())
-
-    def test_config_and_log_no_reference(self):
-        """A config and log with no reference file means not provisioning."""
-        rootd = self.tmp_dir()
-        test_helpers.populate_dir(
-            rootd, {self.prov_cfg: "key=value", self.inst_log: "log data\n"})
-        self.assertFalse(self._call_with_root(rootd=rootd))
-        self.assertIn("no reference file", self.logs.getvalue())
-
-
-class TestDataSourceIBMCloud(test_helpers.CiTestCase):
-
-    def setUp(self):
-        super(TestDataSourceIBMCloud, self).setUp()
-        self.tmp = self.tmp_dir()
-        self.cloud_dir = self.tmp_path('cloud', dir=self.tmp)
-        util.ensure_dir(self.cloud_dir)
-        paths = Paths({'run_dir': self.tmp, 'cloud_dir': self.cloud_dir})
-        self.ds = ibm.DataSourceIBMCloud(
-            sys_cfg={}, distro=None, paths=paths)
-
-    def test_get_data_false(self):
-        """When read_md returns None, get_data returns False."""
-        with mock.patch(D_PATH + 'read_md', return_value=None):
-            self.assertFalse(self.ds.get_data())
-
-    def test_get_data_processes_read_md(self):
-        """get_data processes and caches content returned by read_md."""
-        md = {
-            'metadata': {}, 'networkdata': 'net', 'platform': 'plat',
-            'source': 'src', 'system-uuid': 'uuid', 'userdata': 'ud',
-            'vendordata': 'vd'}
-        with mock.patch(D_PATH + 'read_md', return_value=md):
-            self.assertTrue(self.ds.get_data())
-        self.assertEqual('src', self.ds.source)
-        self.assertEqual('plat', self.ds.platform)
-        self.assertEqual({}, self.ds.metadata)
-        self.assertEqual('ud', self.ds.userdata_raw)
-        self.assertEqual('net', self.ds.network_json)
-        self.assertEqual('vd', self.ds.vendordata_pure)
-        self.assertEqual('uuid', self.ds.system_uuid)
-        self.assertEqual('ibmcloud', self.ds.cloud_name)
-        self.assertEqual('ibmcloud', self.ds.platform_type)
-        self.assertEqual('plat (src)', self.ds.subplatform)
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
deleted file mode 100644
index 41b6c27b..00000000
--- a/tests/unittests/test_datasource/test_maas.py
+++ /dev/null
@@ -1,200 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from copy import copy
-import os
-import shutil
-import tempfile
-import yaml
-from unittest import mock
-
-from cloudinit.sources import DataSourceMAAS
-from cloudinit import url_helper
-from cloudinit.tests.helpers import CiTestCase, populate_dir
-
-
-class TestMAASDataSource(CiTestCase):
-
-    def setUp(self):
-        super(TestMAASDataSource, self).setUp()
-        # Make a temp directory for tests to use.
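-        # It is removed automatically via the addCleanup below.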
-        self.tmp = tempfile.mkdtemp()
-        self.addCleanup(shutil.rmtree, self.tmp)
-
-    def test_seed_dir_valid(self):
-        """Verify a valid seeddir is read as such."""
-
-        userdata = b'valid01-userdata'
-        data = {'meta-data/instance-id': 'i-valid01',
-                'meta-data/local-hostname': 'valid01-hostname',
-                'user-data': userdata,
-                'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'}
-
-        my_d = os.path.join(self.tmp, "valid")
-        populate_dir(my_d, data)
-
-        ud, md, vd = DataSourceMAAS.read_maas_seed_dir(my_d)
-
-        self.assertEqual(userdata, ud)
-        for key in ('instance-id', 'local-hostname'):
-            self.assertEqual(data["meta-data/" + key], md[key])
-
-        # verify that 'user-data' is not returned as part of the metadata
-        self.assertFalse(('user-data' in md))
-        self.assertIsNone(vd)
-
-    def test_seed_dir_valid_extra(self):
-        """Verify extra files do not affect seed_dir validity."""
-
-        userdata = b'valid-extra-userdata'
-        data = {'meta-data/instance-id': 'i-valid-extra',
-                'meta-data/local-hostname': 'valid-extra-hostname',
-                'user-data': userdata, 'foo': 'bar'}
-
-        my_d = os.path.join(self.tmp, "valid_extra")
-        populate_dir(my_d, data)
-
-        ud, md, _vd = DataSourceMAAS.read_maas_seed_dir(my_d)
-
-        self.assertEqual(userdata, ud)
-        for key in ('instance-id', 'local-hostname'):
-            self.assertEqual(data['meta-data/' + key], md[key])
-
-        # additional files should not appear as keys in metadata at the moment
-        self.assertFalse(('foo' in md))
-
-    def test_seed_dir_invalid(self):
-        """Verify that invalid seed_dir raises MAASSeedDirMalformed."""
-
-        valid = {'instance-id': 'i-instanceid',
-                 'local-hostname': 'test-hostname', 'user-data': ''}
-
-        my_based = os.path.join(self.tmp, "valid_extra")
-
-        # missing 'local-hostname' entry
-        my_d = "%s-01" % my_based
-        invalid_data = copy(valid)
-        del invalid_data['local-hostname']
-        populate_dir(my_d, invalid_data)
-        self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed,
-                          DataSourceMAAS.read_maas_seed_dir, my_d)
-
-        # missing 'instance-id'
-        my_d = "%s-02" % my_based
-        invalid_data = copy(valid)
-        del invalid_data['instance-id']
-        populate_dir(my_d, invalid_data)
-        self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed,
-                          DataSourceMAAS.read_maas_seed_dir, my_d)
-
-    def test_seed_dir_none(self):
-        """Verify that empty seed_dir raises MAASSeedDirNone."""
-
-        my_d = os.path.join(self.tmp, "valid_empty")
-        self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
-                          DataSourceMAAS.read_maas_seed_dir, my_d)
-
-    def test_seed_dir_missing(self):
-        """Verify that missing seed_dir raises MAASSeedDirNone."""
-        self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
-                          DataSourceMAAS.read_maas_seed_dir,
-                          os.path.join(self.tmp, "nonexistantdirectory"))
-
-    def mock_read_maas_seed_url(self, data, seed, version="19991231"):
-        """Mock readurl to appear as a web server at 'seed' that has
-        provided 'data'; return what read_maas_seed_url returns."""
-        def my_readurl(*args, **kwargs):
-            if len(args):
-                url = args[0]
-            else:
-                url = kwargs['url']
-            prefix = "%s/%s/" % (seed, version)
-            if not url.startswith(prefix):
-                raise ValueError("unexpected call %s" % url)
-
-            short = url[len(prefix):]
-            if short not in data:
-                raise url_helper.UrlError("not found", code=404, url=url)
-            return url_helper.StringResponse(data[short])
-
-        # Now do the actual call of the code under test.
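-        # readurl is patched with my_readurl (defined above), so the
-        # request is served from the in-memory 'data' dict with no network.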
-        with mock.patch("cloudinit.url_helper.readurl") as mock_readurl:
-            mock_readurl.side_effect = my_readurl
-            return DataSourceMAAS.read_maas_seed_url(seed, version=version)
-
-    def test_seed_url_valid(self):
-        """Verify that valid seed_url is read as such."""
-        valid = {
-            'meta-data/instance-id': 'i-instanceid',
-            'meta-data/local-hostname': 'test-hostname',
-            'meta-data/public-keys': 'test-hostname',
-            'meta-data/vendor-data': b'my-vendordata',
-            'user-data': b'foodata',
-        }
-        my_seed = "http://example.com/xmeta"
-        my_ver = "1999-99-99"
-        ud, md, vd = self.mock_read_maas_seed_url(valid, my_seed, my_ver)
-
-        self.assertEqual(valid['meta-data/instance-id'], md['instance-id'])
-        self.assertEqual(
-            valid['meta-data/local-hostname'], md['local-hostname'])
-        self.assertEqual(valid['meta-data/public-keys'], md['public-keys'])
-        self.assertEqual(valid['user-data'], ud)
-        # vendor-data is YAML, so a bare string decodes to that string
-        self.assertEqual(valid['meta-data/vendor-data'].decode(), vd)
-
-    def test_seed_url_vendor_data_dict(self):
-        expected_vd = {'key1': 'value1'}
-        valid = {
-            'meta-data/instance-id': 'i-instanceid',
-            'meta-data/local-hostname': 'test-hostname',
-            'meta-data/vendor-data': yaml.safe_dump(expected_vd).encode(),
-        }
-        _ud, md, vd = self.mock_read_maas_seed_url(
-            valid, "http://example.com/foo")
-
-        self.assertEqual(valid['meta-data/instance-id'], md['instance-id'])
-        self.assertEqual(expected_vd, vd)
-
-
-@mock.patch("cloudinit.sources.DataSourceMAAS.url_helper.OauthUrlHelper")
-class TestGetOauthHelper(CiTestCase):
-    base_cfg = {'consumer_key': 'FAKE_CONSUMER_KEY',
-                'token_key': 'FAKE_TOKEN_KEY',
-                'token_secret': 'FAKE_TOKEN_SECRET',
-                'consumer_secret': None}
-
-    def test_all_required(self, m_helper):
-        """Valid config as expected."""
-        DataSourceMAAS.get_oauth_helper(self.base_cfg.copy())
-        m_helper.assert_has_calls([mock.call(**self.base_cfg)])
-
-    def test_other_fields_not_passed_through(self, m_helper):
-        """Only relevant fields are passed through."""
-        mycfg = self.base_cfg.copy()
-        mycfg['unrelated_field'] = 'unrelated'
-        DataSourceMAAS.get_oauth_helper(mycfg)
-        m_helper.assert_has_calls([mock.call(**self.base_cfg)])
-
-
-class TestGetIdHash(CiTestCase):
-    v1_cfg = {'consumer_key': 'CKEY', 'token_key': 'TKEY',
-              'token_secret': 'TSEC'}
-    v1_id = (
        'v1:'
-        '403ee5f19c956507f1d0e50814119c405902137ea4f8838bde167c5da8110392')
-
-    def test_v1_expected(self):
-        """Test that the v1 id is generated from config as expected."""
-        result = DataSourceMAAS.get_id_from_ds_cfg(self.v1_cfg.copy())
-        self.assertEqual(self.v1_id, result)
-
-    def test_v1_extra_fields_are_ignored(self):
-        """Test v1 id ignores unused entries in config."""
-        cfg = self.v1_cfg.copy()
-        cfg['consumer_secret'] = "BOO"
-        cfg['unrelated'] = "HI MOM"
-        result = DataSourceMAAS.get_id_from_ds_cfg(cfg)
-        self.assertEqual(self.v1_id, result)
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py
deleted file mode 100644
index 02cc9b38..00000000
--- a/tests/unittests/test_datasource/test_nocloud.py
+++ /dev/null
@@ -1,393 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
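-#
-# These tests cover the NoCloud datasource: seed directories, labeled
-# filesystems, kernel command line parsing and network-config handling.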
-
-from cloudinit import dmi
-from cloudinit import helpers
-from cloudinit.sources.DataSourceNoCloud import (
-    DataSourceNoCloud as dsNoCloud,
-    _maybe_remove_top_network,
-    parse_cmdline_data)
-from cloudinit import util
-from cloudinit.tests.helpers import CiTestCase, populate_dir, mock, ExitStack
-
-import os
-import textwrap
-import yaml
-
-
-@mock.patch('cloudinit.sources.DataSourceNoCloud.util.is_lxd')
-class TestNoCloudDataSource(CiTestCase):
-
-    def setUp(self):
-        super(TestNoCloudDataSource, self).setUp()
-        self.tmp = self.tmp_dir()
-        self.paths = helpers.Paths(
-            {'cloud_dir': self.tmp, 'run_dir': self.tmp})
-
-        self.cmdline = "root=TESTCMDLINE"
-
-        self.mocks = ExitStack()
-        self.addCleanup(self.mocks.close)
-
-        self.mocks.enter_context(
-            mock.patch.object(util, 'get_cmdline', return_value=self.cmdline))
-        self.mocks.enter_context(
-            mock.patch.object(dmi, 'read_dmi_data', return_value=None))
-
-    def _test_fs_config_is_read(self, fs_label, fs_label_to_search):
-        vfat_device = 'device-1'
-
-        def m_mount_cb(device, callback, mtype):
-            if (device == vfat_device):
-                return {'meta-data': yaml.dump({'instance-id': 'IID'})}
-            else:
-                return {}
-
-        def m_find_devs_with(query='', path=''):
-            if 'TYPE=vfat' == query:
-                return [vfat_device]
-            elif 'LABEL={}'.format(fs_label) == query:
-                return [vfat_device]
-            else:
-                return []
-
-        self.mocks.enter_context(
-            mock.patch.object(util, 'find_devs_with',
-                              side_effect=m_find_devs_with))
-        self.mocks.enter_context(
-            mock.patch.object(util, 'mount_cb',
-                              side_effect=m_mount_cb))
-        sys_cfg = {'datasource': {'NoCloud': {'fs_label': fs_label_to_search}}}
-        dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
-        ret = dsrc.get_data()
-
-        self.assertEqual(dsrc.metadata.get('instance-id'), 'IID')
-        self.assertTrue(ret)
-
-    def test_nocloud_seed_dir_on_lxd(self, m_is_lxd):
-        md = {'instance-id': 'IID', 'dsmode': 'local'}
-        ud = b"USER_DATA_HERE"
-        seed_dir = os.path.join(self.paths.seed_dir, "nocloud")
-        populate_dir(seed_dir,
-                     {'user-data': ud, 'meta-data': yaml.safe_dump(md)})
-
-        sys_cfg = {
-            'datasource': {'NoCloud': {'fs_label': None}}
-        }
-
-        dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
-        ret = dsrc.get_data()
-        self.assertEqual(dsrc.userdata_raw, ud)
-        self.assertEqual(dsrc.metadata, md)
-        self.assertEqual(dsrc.platform_type, 'lxd')
-        self.assertEqual(
-            dsrc.subplatform, 'seed-dir (%s)' % seed_dir)
-        self.assertTrue(ret)
-
-    def test_nocloud_seed_dir_non_lxd_platform_is_nocloud(self, m_is_lxd):
-        """Non-lxd environments will list nocloud as the platform."""
-        m_is_lxd.return_value = False
-        md = {'instance-id': 'IID', 'dsmode': 'local'}
-        seed_dir = os.path.join(self.paths.seed_dir, "nocloud")
-        populate_dir(seed_dir,
-                     {'user-data': '', 'meta-data': yaml.safe_dump(md)})
-
-        sys_cfg = {
-            'datasource': {'NoCloud': {'fs_label': None}}
-        }
-
-        dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
-        self.assertTrue(dsrc.get_data())
-        self.assertEqual(dsrc.platform_type, 'nocloud')
-        self.assertEqual(
-            dsrc.subplatform, 'seed-dir (%s)' % seed_dir)
-
-    def test_fs_label(self, m_is_lxd):
-        # find_devs_with should not be called if fs_label is None
-        class PseudoException(Exception):
-            pass
-
-        self.mocks.enter_context(
-            mock.patch.object(util, 'find_devs_with',
-                              side_effect=PseudoException))
-
-        # by default, NoCloud should search for filesystems by label
-        sys_cfg = {'datasource': {'NoCloud': {}}}
-        dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
-        self.assertRaises(PseudoException, 
dsrc.get_data) - - # but disabling searching should just end up with None found - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - self.assertFalse(ret) - - def test_fs_config_lowercase_label(self, m_is_lxd): - self._test_fs_config_is_read('cidata', 'cidata') - - def test_fs_config_uppercase_label(self, m_is_lxd): - self._test_fs_config_is_read('CIDATA', 'cidata') - - def test_fs_config_lowercase_label_search_uppercase(self, m_is_lxd): - self._test_fs_config_is_read('cidata', 'CIDATA') - - def test_fs_config_uppercase_label_search_uppercase(self, m_is_lxd): - self._test_fs_config_is_read('CIDATA', 'CIDATA') - - def test_no_datasource_expected(self, m_is_lxd): - # no source should be found if no cmdline, config, and fs_label=None - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - self.assertFalse(dsrc.get_data()) - - def test_seed_in_config(self, m_is_lxd): - data = { - 'fs_label': None, - 'meta-data': yaml.safe_dump({'instance-id': 'IID'}), - 'user-data': b"USER_DATA_RAW", - } - - sys_cfg = {'datasource': {'NoCloud': data}} - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - self.assertEqual(dsrc.userdata_raw, b"USER_DATA_RAW") - self.assertEqual(dsrc.metadata.get('instance-id'), 'IID') - self.assertTrue(ret) - - def test_nocloud_seed_with_vendordata(self, m_is_lxd): - md = {'instance-id': 'IID', 'dsmode': 'local'} - ud = b"USER_DATA_HERE" - vd = b"THIS IS MY VENDOR_DATA" - - populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), - {'user-data': ud, 'meta-data': yaml.safe_dump(md), - 'vendor-data': vd}) - - sys_cfg = { - 'datasource': {'NoCloud': {'fs_label': None}} - } - - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - self.assertEqual(dsrc.userdata_raw, ud) - self.assertEqual(dsrc.metadata, md) - self.assertEqual(dsrc.vendordata_raw, vd) - self.assertTrue(ret) - - def test_nocloud_no_vendordata(self, m_is_lxd): - populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), - {'user-data': b"ud", 'meta-data': "instance-id: IID\n"}) - - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - self.assertEqual(dsrc.userdata_raw, b"ud") - self.assertFalse(dsrc.vendordata) - self.assertTrue(ret) - - def test_metadata_network_interfaces(self, m_is_lxd): - gateway = "103.225.10.1" - md = { - 'instance-id': 'i-abcd', - 'local-hostname': 'hostname1', - 'network-interfaces': textwrap.dedent("""\ - auto eth0 - iface eth0 inet static - hwaddr 00:16:3e:70:e1:04 - address 103.225.10.12 - netmask 255.255.255.0 - gateway """ + gateway + """ - dns-servers 8.8.8.8""")} - - populate_dir( - os.path.join(self.paths.seed_dir, "nocloud"), - {'user-data': b"ud", - 'meta-data': yaml.dump(md) + "\n"}) - - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - self.assertTrue(ret) - # very simple check just for the strings above - self.assertIn(gateway, str(dsrc.network_config)) - - def test_metadata_network_config(self, m_is_lxd): - # network-config needs to get into network_config - netconf = {'version': 1, - 'config': [{'type': 'physical', 'name': 'interface0', - 'subnets': [{'type': 'dhcp'}]}]} - populate_dir( - os.path.join(self.paths.seed_dir, 
"nocloud"), - {'user-data': b"ud", - 'meta-data': "instance-id: IID\n", - 'network-config': yaml.dump(netconf) + "\n"}) - - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(netconf, dsrc.network_config) - - def test_metadata_network_config_with_toplevel_network(self, m_is_lxd): - """network-config may have 'network' top level key.""" - netconf = {'config': 'disabled'} - populate_dir( - os.path.join(self.paths.seed_dir, "nocloud"), - {'user-data': b"ud", - 'meta-data': "instance-id: IID\n", - 'network-config': yaml.dump({'network': netconf}) + "\n"}) - - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(netconf, dsrc.network_config) - - def test_metadata_network_config_over_interfaces(self, m_is_lxd): - # network-config should override meta-data/network-interfaces - gateway = "103.225.10.1" - md = { - 'instance-id': 'i-abcd', - 'local-hostname': 'hostname1', - 'network-interfaces': textwrap.dedent("""\ - auto eth0 - iface eth0 inet static - hwaddr 00:16:3e:70:e1:04 - address 103.225.10.12 - netmask 255.255.255.0 - gateway """ + gateway + """ - dns-servers 8.8.8.8""")} - - netconf = {'version': 1, - 'config': [{'type': 'physical', 'name': 'interface0', - 'subnets': [{'type': 'dhcp'}]}]} - populate_dir( - os.path.join(self.paths.seed_dir, "nocloud"), - {'user-data': b"ud", - 'meta-data': yaml.dump(md) + "\n", - 'network-config': yaml.dump(netconf) + "\n"}) - - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(netconf, dsrc.network_config) - self.assertNotIn(gateway, str(dsrc.network_config)) - - @mock.patch("cloudinit.util.blkid") - def test_nocloud_get_devices_freebsd(self, m_is_lxd, fake_blkid): - populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), - {'user-data': b"ud", 'meta-data': "instance-id: IID\n"}) - - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - - self.mocks.enter_context( - mock.patch.object(util, 'is_FreeBSD', return_value=True)) - - def _mfind_devs_with_freebsd( - criteria=None, oformat='device', - tag=None, no_cache=False, path=None): - if not criteria: - return ["/dev/msdosfs/foo", "/dev/iso9660/foo"] - if criteria.startswith("LABEL="): - return ["/dev/msdosfs/foo", "/dev/iso9660/foo"] - elif criteria == "TYPE=vfat": - return ["/dev/msdosfs/foo"] - elif criteria == "TYPE=iso9660": - return ["/dev/iso9660/foo"] - return [] - - self.mocks.enter_context( - mock.patch.object( - util, 'find_devs_with_freebsd', - side_effect=_mfind_devs_with_freebsd)) - - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - ret = dsrc._get_devices('foo') - self.assertEqual(['/dev/msdosfs/foo', '/dev/iso9660/foo'], ret) - fake_blkid.assert_not_called() - - -class TestParseCommandLineData(CiTestCase): - - def test_parse_cmdline_data_valid(self): - ds_id = "ds=nocloud" - pairs = ( - ("root=/dev/sda1 %(ds_id)s", {}), - ("%(ds_id)s; root=/dev/foo", {}), - ("%(ds_id)s", {}), - ("%(ds_id)s;", {}), - ("%(ds_id)s;s=SEED", {'seedfrom': 'SEED'}), - ("%(ds_id)s;seedfrom=SEED;local-hostname=xhost", - {'seedfrom': 'SEED', 'local-hostname': 'xhost'}), - ("%(ds_id)s;h=xhost", - {'local-hostname': 'xhost'}), - ("%(ds_id)s;h=xhost;i=IID", - {'local-hostname': 
'xhost', 'instance-id': 'IID'}), - ) - - for (fmt, expected) in pairs: - fill = {} - cmdline = fmt % {'ds_id': ds_id} - ret = parse_cmdline_data(ds_id=ds_id, fill=fill, cmdline=cmdline) - self.assertEqual(expected, fill) - self.assertTrue(ret) - - def test_parse_cmdline_data_none(self): - ds_id = "ds=foo" - cmdlines = ( - "root=/dev/sda1 ro", - "console=/dev/ttyS0 root=/dev/foo", - "", - "ds=foocloud", - "ds=foo-net", - "ds=nocloud;s=SEED", - ) - - for cmdline in cmdlines: - fill = {} - ret = parse_cmdline_data(ds_id=ds_id, fill=fill, cmdline=cmdline) - self.assertEqual(fill, {}) - self.assertFalse(ret) - - -class TestMaybeRemoveToplevelNetwork(CiTestCase): - """test _maybe_remove_top_network function.""" - basecfg = [{'type': 'physical', 'name': 'interface0', - 'subnets': [{'type': 'dhcp'}]}] - - def test_should_remove_safely(self): - mcfg = {'config': self.basecfg, 'version': 1} - self.assertEqual(mcfg, _maybe_remove_top_network({'network': mcfg})) - - def test_no_remove_if_other_keys(self): - """should not shift if other keys at top level.""" - mcfg = {'network': {'config': self.basecfg, 'version': 1}, - 'unknown_keyname': 'keyval'} - self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) - - def test_no_remove_if_non_dict(self): - """should not shift if not a dict.""" - mcfg = {'network': '"content here'} - self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) - - def test_no_remove_if_missing_config_or_version(self): - """should not shift unless network entry has config and version.""" - mcfg = {'network': {'config': self.basecfg}} - self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) - - mcfg = {'network': {'version': 1}} - self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) - - def test_remove_with_config_disabled(self): - """network/config=disabled should be shifted.""" - mcfg = {'config': 'disabled'} - self.assertEqual(mcfg, _maybe_remove_top_network({'network': mcfg})) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py deleted file mode 100644 index 283b65c2..00000000 --- a/tests/unittests/test_datasource/test_opennebula.py +++ /dev/null @@ -1,977 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
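-#
-# These tests cover the OpenNebula datasource: context.sh parsing and the
-# version 2 network configuration rendered from ETHx_* context variables.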
-
-from cloudinit import helpers
-from cloudinit.sources import DataSourceOpenNebula as ds
-from cloudinit import util
-from cloudinit.tests.helpers import mock, populate_dir, CiTestCase
-
-import os
-import pwd
-import unittest
-
-import pytest
-
-
-TEST_VARS = {
-    'VAR1': 'single',
-    'VAR2': 'double word',
-    'VAR3': 'multi\nline\n',
-    'VAR4': "'single'",
-    'VAR5': "'double word'",
-    'VAR6': "'multi\nline\n'",
-    'VAR7': 'single\\t',
-    'VAR8': 'double\\tword',
-    'VAR9': 'multi\\t\nline\n',
-    'VAR10': '\\',  # expect '\'
-    'VAR11': '\'',  # expect '
-    'VAR12': '$',  # expect $
-}
-
-INVALID_CONTEXT = ';'
-USER_DATA = '#cloud-config\napt_upgrade: true'
-SSH_KEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i'
-HOSTNAME = 'foo.example.com'
-PUBLIC_IP = '10.0.0.3'
-MACADDR = '02:00:0a:12:01:01'
-IP_BY_MACADDR = '10.18.1.1'
-IP4_PREFIX = '24'
-IP6_GLOBAL = '2001:db8:1:0:400:c0ff:fea8:1ba'
-IP6_ULA = 'fd01:dead:beaf:0:400:c0ff:fea8:1ba'
-IP6_GW = '2001:db8:1::ffff'
-IP6_PREFIX = '48'
-
-DS_PATH = "cloudinit.sources.DataSourceOpenNebula"
-
-
-class TestOpenNebulaDataSource(CiTestCase):
-    parsed_user = None
-    allowed_subp = ['bash']
-
-    def setUp(self):
-        super(TestOpenNebulaDataSource, self).setUp()
-        self.tmp = self.tmp_dir()
-        self.paths = helpers.Paths(
-            {'cloud_dir': self.tmp, 'run_dir': self.tmp})
-
-        # defaults for a few tests
-        self.ds = ds.DataSourceOpenNebula
-        self.seed_dir = os.path.join(self.paths.seed_dir, "opennebula")
-        self.sys_cfg = {'datasource': {'OpenNebula': {'dsmode': 'local'}}}
-
-        # We don't want 'sudo' called in tests, so we patch switch_user_cmd.
-        def my_switch_user_cmd(user):
-            self.parsed_user = user
-            return []
-
-        self.switch_user_cmd_real = ds.switch_user_cmd
-        ds.switch_user_cmd = my_switch_user_cmd
-
-    def tearDown(self):
-        ds.switch_user_cmd = self.switch_user_cmd_real
-        super(TestOpenNebulaDataSource, self).tearDown()
-
-    def test_get_data_non_contextdisk(self):
-        orig_find_devs_with = util.find_devs_with
-        try:
-            # don't try to look up CDs
-            util.find_devs_with = lambda n: []
-            dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
-            ret = dsrc.get_data()
-            self.assertFalse(ret)
-        finally:
-            util.find_devs_with = orig_find_devs_with
-
-    def test_get_data_broken_contextdisk(self):
-        orig_find_devs_with = util.find_devs_with
-        try:
-            # don't try to look up CDs
-            util.find_devs_with = lambda n: []
-            populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT})
-            dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
-            self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data)
-        finally:
-            util.find_devs_with = orig_find_devs_with
-
-    def test_get_data_invalid_identity(self):
-        orig_find_devs_with = util.find_devs_with
-        try:
-            # generate a non-existent system user name
-            sys_cfg = self.sys_cfg
-            invalid_user = 'invalid'
-            while not sys_cfg['datasource']['OpenNebula'].get('parseuser'):
-                try:
-                    pwd.getpwnam(invalid_user)
-                    invalid_user += 'X'
-                except KeyError:
-                    sys_cfg['datasource']['OpenNebula']['parseuser'] = \
-                        invalid_user
-
-            # don't try to look up CDs
-            util.find_devs_with = lambda n: []
-            populate_context_dir(self.seed_dir, {'KEY1': 'val1'})
-            dsrc = self.ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
-            self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data)
-        finally:
-            util.find_devs_with = orig_find_devs_with
-
-    def test_get_data(self):
-        orig_find_devs_with = util.find_devs_with
-        try:
-            # don't try to look up CDs
-            util.find_devs_with = lambda n: []
-            populate_context_dir(self.seed_dir, 
{'KEY1': 'val1'}) - dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - self.assertTrue(ret) - finally: - util.find_devs_with = orig_find_devs_with - self.assertEqual('opennebula', dsrc.cloud_name) - self.assertEqual('opennebula', dsrc.platform_type) - self.assertEqual( - 'seed-dir (%s/seed/opennebula)' % self.tmp, dsrc.subplatform) - - def test_seed_dir_non_contextdisk(self): - self.assertRaises(ds.NonContextDiskDir, ds.read_context_disk_dir, - self.seed_dir, mock.Mock()) - - def test_seed_dir_empty1_context(self): - populate_dir(self.seed_dir, {'context.sh': ''}) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertIsNone(results['userdata']) - self.assertEqual(results['metadata'], {}) - - def test_seed_dir_empty2_context(self): - populate_context_dir(self.seed_dir, {}) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertIsNone(results['userdata']) - self.assertEqual(results['metadata'], {}) - - def test_seed_dir_broken_context(self): - populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT}) - - self.assertRaises(ds.BrokenContextDiskDir, - ds.read_context_disk_dir, - self.seed_dir, mock.Mock()) - - def test_context_parser(self): - populate_context_dir(self.seed_dir, TEST_VARS) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertTrue('metadata' in results) - self.assertEqual(TEST_VARS, results['metadata']) - - def test_ssh_key(self): - public_keys = ['first key', 'second key'] - for c in range(4): - for k in ('SSH_KEY', 'SSH_PUBLIC_KEY'): - my_d = os.path.join(self.tmp, "%s-%i" % (k, c)) - populate_context_dir(my_d, {k: '\n'.join(public_keys)}) - results = ds.read_context_disk_dir(my_d, mock.Mock()) - - self.assertTrue('metadata' in results) - self.assertTrue('public-keys' in results['metadata']) - self.assertEqual(public_keys, - results['metadata']['public-keys']) - - public_keys.append(SSH_KEY % (c + 1,)) - - def test_user_data_plain(self): - for k in ('USER_DATA', 'USERDATA'): - my_d = os.path.join(self.tmp, k) - populate_context_dir(my_d, {k: USER_DATA, - 'USERDATA_ENCODING': ''}) - results = ds.read_context_disk_dir(my_d, mock.Mock()) - - self.assertTrue('userdata' in results) - self.assertEqual(USER_DATA, results['userdata']) - - def test_user_data_encoding_required_for_decode(self): - b64userdata = util.b64e(USER_DATA) - for k in ('USER_DATA', 'USERDATA'): - my_d = os.path.join(self.tmp, k) - populate_context_dir(my_d, {k: b64userdata}) - results = ds.read_context_disk_dir(my_d, mock.Mock()) - - self.assertTrue('userdata' in results) - self.assertEqual(b64userdata, results['userdata']) - - def test_user_data_base64_encoding(self): - for k in ('USER_DATA', 'USERDATA'): - my_d = os.path.join(self.tmp, k) - populate_context_dir(my_d, {k: util.b64e(USER_DATA), - 'USERDATA_ENCODING': 'base64'}) - results = ds.read_context_disk_dir(my_d, mock.Mock()) - - self.assertTrue('userdata' in results) - self.assertEqual(USER_DATA, results['userdata']) - - @mock.patch(DS_PATH + ".get_physical_nics_by_mac") - def test_hostname(self, m_get_phys_by_mac): - for dev in ('eth0', 'ens3'): - m_get_phys_by_mac.return_value = {MACADDR: dev} - for k in ('SET_HOSTNAME', 'HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', - 'ETH0_IP'): - my_d = os.path.join(self.tmp, k) - populate_context_dir(my_d, {k: PUBLIC_IP}) - results = ds.read_context_disk_dir(my_d, mock.Mock()) - - self.assertTrue('metadata' in results) - self.assertTrue('local-hostname' in results['metadata']) - self.assertEqual( - 
PUBLIC_IP, results['metadata']['local-hostname']) - - @mock.patch(DS_PATH + ".get_physical_nics_by_mac") - def test_network_interfaces(self, m_get_phys_by_mac): - for dev in ('eth0', 'ens3'): - m_get_phys_by_mac.return_value = {MACADDR: dev} - - # without ETH0_MAC - # for Older OpenNebula? - populate_context_dir(self.seed_dir, {'ETH0_IP': IP_BY_MACADDR}) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertTrue('network-interfaces' in results) - self.assertTrue( - IP_BY_MACADDR + '/' + IP4_PREFIX in - results['network-interfaces']['ethernets'][dev]['addresses']) - - # ETH0_IP and ETH0_MAC - populate_context_dir( - self.seed_dir, {'ETH0_IP': IP_BY_MACADDR, 'ETH0_MAC': MACADDR}) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertTrue('network-interfaces' in results) - self.assertTrue( - IP_BY_MACADDR + '/' + IP4_PREFIX in - results['network-interfaces']['ethernets'][dev]['addresses']) - - # ETH0_IP with empty string and ETH0_MAC - # in the case of using Virtual Network contains - # "AR = [ TYPE = ETHER ]" - populate_context_dir( - self.seed_dir, {'ETH0_IP': '', 'ETH0_MAC': MACADDR}) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertTrue('network-interfaces' in results) - self.assertTrue( - IP_BY_MACADDR + '/' + IP4_PREFIX in - results['network-interfaces']['ethernets'][dev]['addresses']) - - # ETH0_MASK - populate_context_dir( - self.seed_dir, { - 'ETH0_IP': IP_BY_MACADDR, - 'ETH0_MAC': MACADDR, - 'ETH0_MASK': '255.255.0.0' - }) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertTrue('network-interfaces' in results) - self.assertTrue( - IP_BY_MACADDR + '/16' in - results['network-interfaces']['ethernets'][dev]['addresses']) - - # ETH0_MASK with empty string - populate_context_dir( - self.seed_dir, { - 'ETH0_IP': IP_BY_MACADDR, - 'ETH0_MAC': MACADDR, - 'ETH0_MASK': '' - }) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertTrue('network-interfaces' in results) - self.assertTrue( - IP_BY_MACADDR + '/' + IP4_PREFIX in - results['network-interfaces']['ethernets'][dev]['addresses']) - - # ETH0_IP6 - populate_context_dir( - self.seed_dir, { - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_MAC': MACADDR, - }) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertTrue('network-interfaces' in results) - self.assertTrue( - IP6_GLOBAL + '/64' in - results['network-interfaces']['ethernets'][dev]['addresses']) - - # ETH0_IP6_ULA - populate_context_dir( - self.seed_dir, { - 'ETH0_IP6_ULA': IP6_ULA, - 'ETH0_MAC': MACADDR, - }) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertTrue('network-interfaces' in results) - self.assertTrue( - IP6_ULA + '/64' in - results['network-interfaces']['ethernets'][dev]['addresses']) - - # ETH0_IP6 and ETH0_IP6_PREFIX_LENGTH - populate_context_dir( - self.seed_dir, { - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX, - 'ETH0_MAC': MACADDR, - }) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertTrue('network-interfaces' in results) - self.assertTrue( - IP6_GLOBAL + '/' + IP6_PREFIX in - results['network-interfaces']['ethernets'][dev]['addresses']) - - # ETH0_IP6 and ETH0_IP6_PREFIX_LENGTH with empty string - populate_context_dir( - self.seed_dir, { - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_IP6_PREFIX_LENGTH': '', - 'ETH0_MAC': MACADDR, - }) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertTrue('network-interfaces' in results) - 
self.assertTrue( - IP6_GLOBAL + '/64' in - results['network-interfaces']['ethernets'][dev]['addresses']) - - def test_find_candidates(self): - def my_devs_with(criteria): - return { - "LABEL=CONTEXT": ["/dev/sdb"], - "LABEL=CDROM": ["/dev/sr0"], - "TYPE=iso9660": ["/dev/vdb"], - }.get(criteria, []) - - orig_find_devs_with = util.find_devs_with - try: - util.find_devs_with = my_devs_with - self.assertEqual(["/dev/sdb", "/dev/sr0", "/dev/vdb"], - ds.find_candidate_devs()) - finally: - util.find_devs_with = orig_find_devs_with - - -@mock.patch(DS_PATH + '.net.get_interfaces_by_mac', mock.Mock(return_value={})) -class TestOpenNebulaNetwork(unittest.TestCase): - - system_nics = ('eth0', 'ens3') - - def test_context_devname(self): - """Verify context_devname correctly returns mac and name.""" - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH1_MAC': '02:00:0a:12:0f:0f', } - expected = { - '02:00:0a:12:01:01': 'ETH0', - '02:00:0a:12:0f:0f': 'ETH1', } - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(expected, net.context_devname) - - def test_get_nameservers(self): - """ - Verify get_nameservers('device') correctly returns DNS server addresses - and search domains. - """ - context = { - 'DNS': '1.2.3.8', - 'ETH0_DNS': '1.2.3.6 1.2.3.7', - 'ETH0_SEARCH_DOMAIN': 'example.com example.org', } - expected = { - 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], - 'search': ['example.com', 'example.org']} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_nameservers('eth0') - self.assertEqual(expected, val) - - def test_get_mtu(self): - """Verify get_mtu('device') correctly returns MTU size.""" - context = {'ETH0_MTU': '1280'} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_mtu('eth0') - self.assertEqual('1280', val) - - def test_get_ip(self): - """Verify get_ip('device') correctly returns IPv4 address.""" - context = {'ETH0_IP': PUBLIC_IP} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_ip('eth0', MACADDR) - self.assertEqual(PUBLIC_IP, val) - - def test_get_ip_emptystring(self): - """ - Verify get_ip('device') correctly returns IPv4 address. - It returns IP address created by MAC address if ETH0_IP has empty - string. - """ - context = {'ETH0_IP': ''} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_ip('eth0', MACADDR) - self.assertEqual(IP_BY_MACADDR, val) - - def test_get_ip6(self): - """ - Verify get_ip6('device') correctly returns IPv6 address. - In this case, IPv6 address is Given by ETH0_IP6. - """ - context = { - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_IP6_ULA': '', } - expected = [IP6_GLOBAL] - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_ip6('eth0') - self.assertEqual(expected, val) - - def test_get_ip6_ula(self): - """ - Verify get_ip6('device') correctly returns IPv6 address. - In this case, IPv6 address is Given by ETH0_IP6_ULA. - """ - context = { - 'ETH0_IP6': '', - 'ETH0_IP6_ULA': IP6_ULA, } - expected = [IP6_ULA] - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_ip6('eth0') - self.assertEqual(expected, val) - - def test_get_ip6_dual(self): - """ - Verify get_ip6('device') correctly returns IPv6 address. - In this case, IPv6 addresses are Given by ETH0_IP6 and ETH0_IP6_ULA. 
- """ - context = { - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_IP6_ULA': IP6_ULA, } - expected = [IP6_GLOBAL, IP6_ULA] - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_ip6('eth0') - self.assertEqual(expected, val) - - def test_get_ip6_prefix(self): - """ - Verify get_ip6_prefix('device') correctly returns IPv6 prefix. - """ - context = {'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_ip6_prefix('eth0') - self.assertEqual(IP6_PREFIX, val) - - def test_get_ip6_prefix_emptystring(self): - """ - Verify get_ip6_prefix('device') correctly returns IPv6 prefix. - It returns default value '64' if ETH0_IP6_PREFIX_LENGTH has empty - string. - """ - context = {'ETH0_IP6_PREFIX_LENGTH': ''} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_ip6_prefix('eth0') - self.assertEqual('64', val) - - def test_get_gateway(self): - """ - Verify get_gateway('device') correctly returns IPv4 default gateway - address. - """ - context = {'ETH0_GATEWAY': '1.2.3.5'} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_gateway('eth0') - self.assertEqual('1.2.3.5', val) - - def test_get_gateway6(self): - """ - Verify get_gateway6('device') correctly returns IPv6 default gateway - address. - """ - for k in ('GATEWAY6', 'IP6_GATEWAY'): - context = {'ETH0_' + k: IP6_GW} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_gateway6('eth0') - self.assertEqual(IP6_GW, val) - - def test_get_mask(self): - """ - Verify get_mask('device') correctly returns IPv4 subnet mask. - """ - context = {'ETH0_MASK': '255.255.0.0'} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_mask('eth0') - self.assertEqual('255.255.0.0', val) - - def test_get_mask_emptystring(self): - """ - Verify get_mask('device') correctly returns IPv4 subnet mask. - It returns default value '255.255.255.0' if ETH0_MASK has empty string. - """ - context = {'ETH0_MASK': ''} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_mask('eth0') - self.assertEqual('255.255.255.0', val) - - def test_get_network(self): - """ - Verify get_network('device') correctly returns IPv4 network address. - """ - context = {'ETH0_NETWORK': '1.2.3.0'} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_network('eth0', MACADDR) - self.assertEqual('1.2.3.0', val) - - def test_get_network_emptystring(self): - """ - Verify get_network('device') correctly returns IPv4 network address. - It returns network address created by MAC address if ETH0_NETWORK has - empty string. - """ - context = {'ETH0_NETWORK': ''} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_network('eth0', MACADDR) - self.assertEqual('10.18.1.0', val) - - def test_get_field(self): - """ - Verify get_field('device', 'name') returns *context* value. - """ - context = {'ETH9_DUMMY': 'DUMMY_VALUE'} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_field('eth9', 'dummy') - self.assertEqual('DUMMY_VALUE', val) - - def test_get_field_withdefaultvalue(self): - """ - Verify get_field('device', 'name', 'default value') returns *context* - value. - """ - context = {'ETH9_DUMMY': 'DUMMY_VALUE'} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE') - self.assertEqual('DUMMY_VALUE', val) - - def test_get_field_withdefaultvalue_emptycontext(self): - """ - Verify get_field('device', 'name', 'default value') returns *default* - value if context value is empty string. 
- """ - context = {'ETH9_DUMMY': ''} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE') - self.assertEqual('DEFAULT_VALUE', val) - - def test_get_field_emptycontext(self): - """ - Verify get_field('device', 'name') returns None if context value is - empty string. - """ - context = {'ETH9_DUMMY': ''} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_field('eth9', 'dummy') - self.assertEqual(None, val) - - def test_get_field_nonecontext(self): - """ - Verify get_field('device', 'name') returns None if context value is - None. - """ - context = {'ETH9_DUMMY': None} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_field('eth9', 'dummy') - self.assertEqual(None, val) - - @mock.patch(DS_PATH + ".get_physical_nics_by_mac") - def test_gen_conf_gateway(self, m_get_phys_by_mac): - """Test rendering with/without IPv4 gateway""" - self.maxDiff = None - # empty ETH0_GATEWAY - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_GATEWAY': '', } - for nic in self.system_nics: - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) - - # set ETH0_GATEWAY - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_GATEWAY': '1.2.3.5', } - for nic in self.system_nics: - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'gateway4': '1.2.3.5', - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) - - @mock.patch(DS_PATH + ".get_physical_nics_by_mac") - def test_gen_conf_gateway6(self, m_get_phys_by_mac): - """Test rendering with/without IPv6 gateway""" - self.maxDiff = None - # empty ETH0_GATEWAY6 - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_GATEWAY6': '', } - for nic in self.system_nics: - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) - - # set ETH0_GATEWAY6 - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_GATEWAY6': IP6_GW, } - for nic in self.system_nics: - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'gateway6': IP6_GW, - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) - - @mock.patch(DS_PATH + ".get_physical_nics_by_mac") - def test_gen_conf_ipv6address(self, m_get_phys_by_mac): - """Test rendering with/without IPv6 address""" - self.maxDiff = None - # empty ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_IP6': '', - 'ETH0_IP6_ULA': '', - 'ETH0_IP6_PREFIX_LENGTH': '', } - for nic in self.system_nics: - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) - - # 
set ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX, - 'ETH0_IP6_ULA': IP6_ULA, } - for nic in self.system_nics: - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [ - IP_BY_MACADDR + '/' + IP4_PREFIX, - IP6_GLOBAL + '/' + IP6_PREFIX, - IP6_ULA + '/' + IP6_PREFIX]}}} - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) - - @mock.patch(DS_PATH + ".get_physical_nics_by_mac") - def test_gen_conf_dns(self, m_get_phys_by_mac): - """Test rendering with/without DNS server, search domain""" - self.maxDiff = None - # empty DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'DNS': '', - 'ETH0_DNS': '', - 'ETH0_SEARCH_DOMAIN': '', } - for nic in self.system_nics: - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) - - # set DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'DNS': '1.2.3.8', - 'ETH0_DNS': '1.2.3.6 1.2.3.7', - 'ETH0_SEARCH_DOMAIN': 'example.com example.org', } - for nic in self.system_nics: - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'nameservers': { - 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], - 'search': ['example.com', 'example.org']}, - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) - - @mock.patch(DS_PATH + ".get_physical_nics_by_mac") - def test_gen_conf_mtu(self, m_get_phys_by_mac): - """Test rendering with/without MTU""" - self.maxDiff = None - # empty ETH0_MTU - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_MTU': '', } - for nic in self.system_nics: - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) - - # set ETH0_MTU - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_MTU': '1280', } - for nic in self.system_nics: - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'mtu': '1280', - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) - - @mock.patch(DS_PATH + ".get_physical_nics_by_mac") - def test_eth0(self, m_get_phys_by_mac): - for nic in self.system_nics: - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork({}, mock.Mock()) - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} - - self.assertEqual(net.gen_conf(), expected) - - @mock.patch(DS_PATH + ".get_physical_nics_by_mac") - def test_distro_passed_through(self, m_get_physical_nics_by_mac): - ds.OpenNebulaNetwork({}, mock.sentinel.distro) - self.assertEqual( - [mock.call(mock.sentinel.distro)], - 
-
-    def test_eth0_override(self):
-        self.maxDiff = None
-        context = {
-            'DNS': '1.2.3.8',
-            'ETH0_DNS': '1.2.3.6 1.2.3.7',
-            'ETH0_GATEWAY': '1.2.3.5',
-            'ETH0_GATEWAY6': '',
-            'ETH0_IP': IP_BY_MACADDR,
-            'ETH0_IP6': '',
-            'ETH0_IP6_PREFIX_LENGTH': '',
-            'ETH0_IP6_ULA': '',
-            'ETH0_MAC': '02:00:0a:12:01:01',
-            'ETH0_MASK': '255.255.0.0',
-            'ETH0_MTU': '',
-            'ETH0_NETWORK': '10.18.0.0',
-            'ETH0_SEARCH_DOMAIN': '',
-        }
-        for nic in self.system_nics:
-            net = ds.OpenNebulaNetwork(context, mock.Mock(),
-                                       system_nics_by_mac={MACADDR: nic})
-            expected = {
-                'version': 2,
-                'ethernets': {
-                    nic: {
-                        'match': {'macaddress': MACADDR},
-                        'addresses': [IP_BY_MACADDR + '/16'],
-                        'gateway4': '1.2.3.5',
-                        'nameservers': {
-                            'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8']}}}}
-
-            self.assertEqual(expected, net.gen_conf())
-
-    def test_eth0_v4v6_override(self):
-        self.maxDiff = None
-        context = {
-            'DNS': '1.2.3.8',
-            'ETH0_DNS': '1.2.3.6 1.2.3.7',
-            'ETH0_GATEWAY': '1.2.3.5',
-            'ETH0_GATEWAY6': IP6_GW,
-            'ETH0_IP': IP_BY_MACADDR,
-            'ETH0_IP6': IP6_GLOBAL,
-            'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX,
-            'ETH0_IP6_ULA': IP6_ULA,
-            'ETH0_MAC': '02:00:0a:12:01:01',
-            'ETH0_MASK': '255.255.0.0',
-            'ETH0_MTU': '1280',
-            'ETH0_NETWORK': '10.18.0.0',
-            'ETH0_SEARCH_DOMAIN': 'example.com example.org',
-        }
-        for nic in self.system_nics:
-            net = ds.OpenNebulaNetwork(context, mock.Mock(),
-                                       system_nics_by_mac={MACADDR: nic})
-
-            expected = {
-                'version': 2,
-                'ethernets': {
-                    nic: {
-                        'match': {'macaddress': MACADDR},
-                        'addresses': [
-                            IP_BY_MACADDR + '/16',
-                            IP6_GLOBAL + '/' + IP6_PREFIX,
-                            IP6_ULA + '/' + IP6_PREFIX],
-                        'gateway4': '1.2.3.5',
-                        'gateway6': IP6_GW,
-                        'nameservers': {
-                            'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'],
-                            'search': ['example.com', 'example.org']},
-                        'mtu': '1280'}}}
-
-            self.assertEqual(expected, net.gen_conf())
-
-    def test_multiple_nics(self):
-        """Test rendering multiple nics with names that differ from context."""
-        self.maxDiff = None
-        MAC_1 = "02:00:0a:12:01:01"
-        MAC_2 = "02:00:0a:12:01:02"
-        context = {
-            'DNS': '1.2.3.8',
-            'ETH0_DNS': '1.2.3.6 1.2.3.7',
-            'ETH0_GATEWAY': '1.2.3.5',
-            'ETH0_GATEWAY6': IP6_GW,
-            'ETH0_IP': '10.18.1.1',
-            'ETH0_IP6': IP6_GLOBAL,
-            'ETH0_IP6_PREFIX_LENGTH': '',
-            'ETH0_IP6_ULA': IP6_ULA,
-            'ETH0_MAC': MAC_2,
-            'ETH0_MASK': '255.255.0.0',
-            'ETH0_MTU': '1280',
-            'ETH0_NETWORK': '10.18.0.0',
-            'ETH0_SEARCH_DOMAIN': 'example.com',
-            'ETH3_DNS': '10.3.1.2',
-            'ETH3_GATEWAY': '10.3.0.1',
-            'ETH3_GATEWAY6': '',
-            'ETH3_IP': '10.3.1.3',
-            'ETH3_IP6': '',
-            'ETH3_IP6_PREFIX_LENGTH': '',
-            'ETH3_IP6_ULA': '',
-            'ETH3_MAC': MAC_1,
-            'ETH3_MASK': '255.255.0.0',
-            'ETH3_MTU': '',
-            'ETH3_NETWORK': '10.3.0.0',
-            'ETH3_SEARCH_DOMAIN': 'third.example.com third.example.org',
-        }
-        net = ds.OpenNebulaNetwork(
-            context,
-            mock.Mock(),
-            system_nics_by_mac={MAC_1: 'enp0s25', MAC_2: 'enp1s2'}
-        )
-
-        expected = {
-            'version': 2,
-            'ethernets': {
-                'enp1s2': {
-                    'match': {'macaddress': MAC_2},
-                    'addresses': [
-                        '10.18.1.1/16',
-                        IP6_GLOBAL + '/64',
-                        IP6_ULA + '/64'],
-                    'gateway4': '1.2.3.5',
-                    'gateway6': IP6_GW,
-                    'nameservers': {
-                        'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'],
-                        'search': ['example.com']},
-                    'mtu': '1280'},
-                'enp0s25': {
-                    'match': {'macaddress': MAC_1},
-                    'addresses': ['10.3.1.3/16'],
-                    'gateway4': '10.3.0.1',
-                    'nameservers': {
-                        'addresses': ['10.3.1.2', '1.2.3.8'],
-                        'search': [
-                            'third.example.com',
-                            'third.example.org']}}}}
-
-        self.assertEqual(expected, net.gen_conf())
-
-
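test_multiple_nics shows that the ETHx context index is decoupled from the final device name; the binding happens purely through MAC addresses via system_nics_by_mac. A small sketch of that assumed mapping:

    # Context MACs bind to kernel names only through the MAC table.
    context_macs = {'ETH0_MAC': '02:00:0a:12:01:02',
                    'ETH3_MAC': '02:00:0a:12:01:01'}
    system_nics_by_mac = {'02:00:0a:12:01:01': 'enp0s25',
                          '02:00:0a:12:01:02': 'enp1s2'}
    devname_by_context_key = {
        key: system_nics_by_mac[mac] for key, mac in context_macs.items()}
    assert devname_by_context_key == {'ETH0_MAC': 'enp1s2',
                                      'ETH3_MAC': 'enp0s25'}
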
-class TestParseShellConfig:
-    @pytest.mark.allow_subp_for("bash")
-    def test_no_seconds(self):
-        cfg = '\n'.join(["foo=bar", "SECONDS=2", "xx=foo"])
-        # we could test 'sleep 2', but that would make the test run slower.
-        ret = ds.parse_shell_config(cfg)
-        assert ret == {"foo": "bar", "xx": "foo"}
-
-
-class TestGetPhysicalNicsByMac:
-    @pytest.mark.parametrize(
-        "interfaces_by_mac,physical_devs,expected_return",
-        [
-            # No interfaces => empty return
-            ({}, [], {}),
-            # Only virtual interface => empty return
-            ({"mac1": "virtual0"}, [], {}),
-            # Only physical interface => it is returned
-            ({"mac2": "physical0"}, ["physical0"], {"mac2": "physical0"}),
-            # Combination of physical and virtual => only physical returned
-            (
-                {"mac3": "physical1", "mac4": "virtual1"},
-                ["physical1"],
-                {"mac3": "physical1"},
-            ),
-        ],
-    )
-    def test(self, interfaces_by_mac, physical_devs, expected_return):
-        distro = mock.Mock()
-        distro.networking.is_physical.side_effect = (
-            lambda devname: devname in physical_devs
-        )
-        with mock.patch(
-            DS_PATH + ".net.get_interfaces_by_mac",
-            return_value=interfaces_by_mac,
-        ):
-            assert expected_return == ds.get_physical_nics_by_mac(distro)
-
-
-def populate_context_dir(path, variables):
-    data = "# Context variables generated by OpenNebula\n"
-    for k, v in variables.items():
-        data += ("%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''")))
-    populate_dir(path, {'context.sh': data})
-
-# vi: ts=4 expandtab
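populate_context_dir escapes embedded single quotes with the POSIX '\'' idiom: close the quoted string, emit a literal quote, reopen it. A self-contained sketch showing the round trip with shlex:

    import shlex

    # Same escaping rule as populate_context_dir above.
    def sh_single_quote(value):
        return "'%s'" % value.replace("'", r"'\''")

    line = "GREETING=%s" % sh_single_quote("it's contextual")
    key, _, quoted = line.partition("=")
    assert shlex.split(quoted) == ["it's contextual"]
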
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
deleted file mode 100644
index a9829c75..00000000
--- a/tests/unittests/test_datasource/test_openstack.py
+++ /dev/null
@@ -1,724 +0,0 @@
-# Copyright (C) 2014 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import copy
-import httpretty as hp
-import json
-import re
-from io import StringIO
-from urllib.parse import urlparse
-
-from cloudinit.tests import helpers as test_helpers
-
-from cloudinit import helpers
-from cloudinit import settings
-from cloudinit.sources import BrokenMetadata, convert_vendordata, UNSET
-from cloudinit.sources import DataSourceOpenStack as ds
-from cloudinit.sources.helpers import openstack
-from cloudinit import util
-
-BASE_URL = "http://169.254.169.254"
-PUBKEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
-EC2_META = {
-    'ami-id': 'ami-00000001',
-    'ami-launch-index': '0',
-    'ami-manifest-path': 'FIXME',
-    'hostname': 'sm-foo-test.novalocal',
-    'instance-action': 'none',
-    'instance-id': 'i-00000001',
-    'instance-type': 'm1.tiny',
-    'local-hostname': 'sm-foo-test.novalocal',
-    'local-ipv4': '0.0.0.0',
-    'public-hostname': 'sm-foo-test.novalocal',
-    'public-ipv4': '0.0.0.1',
-    'reservation-id': 'r-iru5qm4m',
-}
-USER_DATA = b'#!/bin/sh\necho This is user data\n'
-VENDOR_DATA = {
-    'magic': '',
-}
-VENDOR_DATA2 = {
-    'static': {}
-}
-OSTACK_META = {
-    'availability_zone': 'nova',
-    'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'},
-              {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}],
-    'hostname': 'sm-foo-test.novalocal',
-    'meta': {'dsmode': 'local', 'my-meta': 'my-value'},
-    'name': 'sm-foo-test',
-    'public_keys': {'mykey': PUBKEY},
-    'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c',
-}
-CONTENT_0 = b'This is contents of /etc/foo.cfg\n'
-CONTENT_1 = b'# this is /etc/bar/bar.cfg\n'
-OS_FILES = {
-    'openstack/content/0000': CONTENT_0,
-    'openstack/content/0001': CONTENT_1,
-    'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
-    'openstack/latest/network_data.json': json.dumps(
-        {'links': [], 'networks': [], 'services': []}),
-    'openstack/latest/user_data': USER_DATA,
-    'openstack/latest/vendor_data.json': json.dumps(VENDOR_DATA),
-    'openstack/latest/vendor_data2.json': json.dumps(VENDOR_DATA2),
-}
-EC2_FILES = {
-    'latest/user-data': USER_DATA,
-}
-EC2_VERSIONS = [
-    'latest',
-]
-
-MOCK_PATH = 'cloudinit.sources.DataSourceOpenStack.'
-
-
-# TODO _register_uris should leverage test_ec2.register_mock_metaserver.
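Every OS_FILES/EC2_FILES key above is a path served beneath the link-local metadata root; _register_uris below wires exactly that mapping into httpretty. A trivial sketch of the URL scheme assumed here:

    base_url = "http://169.254.169.254"   # mirrors BASE_URL above
    assert "%s/%s" % (base_url, 'openstack/latest/meta_data.json') == \
        "http://169.254.169.254/openstack/latest/meta_data.json"
    assert "%s/%s" % (base_url, 'latest/user-data') == \
        "http://169.254.169.254/latest/user-data"
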
-def _register_uris(version, ec2_files, ec2_meta, os_files):
-    """Registers a set of url patterns into httpretty that will mimic the
-    same data returned by the openstack metadata service (and ec2 service)."""
-
-    def match_ec2_url(uri, headers):
-        path = uri.path.strip("/")
-        if len(path) == 0:
-            return (200, headers, "\n".join(EC2_VERSIONS))
-        path = uri.path.lstrip("/")
-        if path in ec2_files:
-            return (200, headers, ec2_files.get(path))
-        if path == 'latest/meta-data/':
-            buf = StringIO()
-            for (k, v) in ec2_meta.items():
-                if isinstance(v, (list, tuple)):
-                    buf.write("%s/" % (k))
-                else:
-                    buf.write("%s" % (k))
-                buf.write("\n")
-            return (200, headers, buf.getvalue())
-        if path.startswith('latest/meta-data/'):
-            value = None
-            pieces = path.split("/")
-            if path.endswith("/"):
-                pieces = pieces[2:-1]
-                value = util.get_cfg_by_path(ec2_meta, pieces)
-            else:
-                pieces = pieces[2:]
-                value = util.get_cfg_by_path(ec2_meta, pieces)
-            if value is not None:
-                return (200, headers, str(value))
-        return (404, headers, '')
-
-    def match_os_uri(uri, headers):
-        path = uri.path.strip("/")
-        if path == 'openstack':
-            return (200, headers, "\n".join([openstack.OS_LATEST]))
-        path = uri.path.lstrip("/")
-        if path in os_files:
-            return (200, headers, os_files.get(path))
-        return (404, headers, '')
-
-    def get_request_callback(method, uri, headers):
-        uri = urlparse(uri)
-        path = uri.path.lstrip("/").split("/")
-        if path[0] == 'openstack':
-            return match_os_uri(uri, headers)
-        return match_ec2_url(uri, headers)
-
-    hp.register_uri(hp.GET, re.compile(r'http://169.254.169.254/.*'),
-                    body=get_request_callback)
-
-
-def _read_metadata_service():
-    return ds.read_metadata_service(BASE_URL, retries=0, timeout=0.1)
-
-
-class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
-
-    with_logs = True
-    VERSION = 'latest'
-
-    def setUp(self):
-        super(TestOpenStackDataSource, self).setUp()
-        self.tmp = self.tmp_dir()
-
-    def test_successful(self):
-        _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
-        f = _read_metadata_service()
-        self.assertEqual(VENDOR_DATA, f.get('vendordata'))
-        self.assertEqual(VENDOR_DATA2, f.get('vendordata2'))
-        self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
-        self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
-        self.assertEqual(2, len(f['files']))
-        self.assertEqual(USER_DATA, f.get('userdata'))
-        self.assertEqual(EC2_META, f.get('ec2-metadata'))
-        self.assertEqual(2, f.get('version'))
-        metadata = f['metadata']
-        self.assertEqual('nova', metadata.get('availability_zone'))
-        self.assertEqual('sm-foo-test.novalocal', metadata.get('hostname'))
-        self.assertEqual('sm-foo-test.novalocal',
-                         metadata.get('local-hostname'))
-        self.assertEqual('sm-foo-test', metadata.get('name'))
-        self.assertEqual('b0fa911b-69d4-4476-bbe2-1c92bff6535c',
-                         metadata.get('uuid'))
-        self.assertEqual('b0fa911b-69d4-4476-bbe2-1c92bff6535c',
-                         metadata.get('instance-id'))
-
-    def test_no_ec2(self):
-        _register_uris(self.VERSION, {}, {}, OS_FILES)
-        f = _read_metadata_service()
-        self.assertEqual(VENDOR_DATA, f.get('vendordata'))
-        self.assertEqual(VENDOR_DATA2, f.get('vendordata2'))
-        self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
-        self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
-        self.assertEqual(USER_DATA, f.get('userdata'))
-        self.assertEqual({}, f.get('ec2-metadata'))
-        self.assertEqual(2, f.get('version'))
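match_ec2_url above renders the 'latest/meta-data/' directory listing with a simple rule: container values get a trailing slash, scalars do not. A standalone sketch of that rule:

    # Same listing rule as match_ec2_url, sorted here for determinism.
    def render_listing(meta):
        lines = []
        for k, v in sorted(meta.items()):
            lines.append("%s/" % k if isinstance(v, (list, tuple)) else k)
        return "\n".join(lines)

    assert render_listing({'ami-id': 'ami-1',
                           'block-device-mapping': ['ami']}) == \
        "ami-id\nblock-device-mapping/"
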
-
-    def test_bad_metadata(self):
-        os_files = copy.deepcopy(OS_FILES)
-        for k in list(os_files.keys()):
-            if k.endswith('meta_data.json'):
-                os_files.pop(k, None)
-        _register_uris(self.VERSION, {}, {}, os_files)
-        self.assertRaises(openstack.NonReadable, _read_metadata_service)
-
-    def test_bad_uuid(self):
-        os_files = copy.deepcopy(OS_FILES)
-        os_meta = copy.deepcopy(OSTACK_META)
-        os_meta.pop('uuid')
-        for k in list(os_files.keys()):
-            if k.endswith('meta_data.json'):
-                os_files[k] = json.dumps(os_meta)
-        _register_uris(self.VERSION, {}, {}, os_files)
-        self.assertRaises(BrokenMetadata, _read_metadata_service)
-
-    def test_userdata_empty(self):
-        os_files = copy.deepcopy(OS_FILES)
-        for k in list(os_files.keys()):
-            if k.endswith('user_data'):
-                os_files.pop(k, None)
-        _register_uris(self.VERSION, {}, {}, os_files)
-        f = _read_metadata_service()
-        self.assertEqual(VENDOR_DATA, f.get('vendordata'))
-        self.assertEqual(VENDOR_DATA2, f.get('vendordata2'))
-        self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
-        self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
-        self.assertFalse(f.get('userdata'))
-
-    def test_vendordata_empty(self):
-        os_files = copy.deepcopy(OS_FILES)
-        for k in list(os_files.keys()):
-            if k.endswith('vendor_data.json'):
-                os_files.pop(k, None)
-        _register_uris(self.VERSION, {}, {}, os_files)
-        f = _read_metadata_service()
-        self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
-        self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
-        self.assertFalse(f.get('vendordata'))
-
-    def test_vendordata2_empty(self):
-        os_files = copy.deepcopy(OS_FILES)
-        for k in list(os_files.keys()):
-            if k.endswith('vendor_data2.json'):
-                os_files.pop(k, None)
-        _register_uris(self.VERSION, {}, {}, os_files)
-        f = _read_metadata_service()
-        self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
-        self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
-        self.assertFalse(f.get('vendordata2'))
-
-    def test_vendordata_invalid(self):
-        os_files = copy.deepcopy(OS_FILES)
-        for k in list(os_files.keys()):
-            if k.endswith('vendor_data.json'):
-                os_files[k] = '{'  # some invalid json
-        _register_uris(self.VERSION, {}, {}, os_files)
-        self.assertRaises(BrokenMetadata, _read_metadata_service)
-
-    def test_vendordata2_invalid(self):
-        os_files = copy.deepcopy(OS_FILES)
-        for k in list(os_files.keys()):
-            if k.endswith('vendor_data2.json'):
-                os_files[k] = '{'  # some invalid json
-        _register_uris(self.VERSION, {}, {}, os_files)
-        self.assertRaises(BrokenMetadata, _read_metadata_service)
-
-    def test_metadata_invalid(self):
-        os_files = copy.deepcopy(OS_FILES)
-        for k in list(os_files.keys()):
-            if k.endswith('meta_data.json'):
-                os_files[k] = '{'  # some invalid json
-        _register_uris(self.VERSION, {}, {}, os_files)
-        self.assertRaises(BrokenMetadata, _read_metadata_service)
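The tests above all repeat one pattern: deep-copy OS_FILES, then drop or corrupt every entry whose path ends with a given suffix. A sketch of that pattern as a hypothetical helper (not part of the original file):

    import copy

    def mutate_os_files(os_files, suffix, new_value=None):
        """Drop (new_value=None) or replace entries matching suffix."""
        files = copy.deepcopy(os_files)
        for k in list(files):
            if k.endswith(suffix):
                if new_value is None:
                    files.pop(k)
                else:
                    files[k] = new_value
        return files

    files = mutate_os_files({'openstack/latest/user_data': b'x'}, 'user_data')
    assert files == {}
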
-
-    @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
-    def test_datasource(self, m_dhcp):
-        _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
-        ds_os = ds.DataSourceOpenStack(
-            settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
-        self.assertIsNone(ds_os.version)
-        mock_path = MOCK_PATH + 'detect_openstack'
-        with test_helpers.mock.patch(mock_path) as m_detect_os:
-            m_detect_os.return_value = True
-            found = ds_os.get_data()
-        self.assertTrue(found)
-        self.assertEqual(2, ds_os.version)
-        md = dict(ds_os.metadata)
-        md.pop('instance-id', None)
-        md.pop('local-hostname', None)
-        self.assertEqual(OSTACK_META, md)
-        self.assertEqual(EC2_META, ds_os.ec2_metadata)
-        self.assertEqual(USER_DATA, ds_os.userdata_raw)
-        self.assertEqual(2, len(ds_os.files))
-        self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure)
-        self.assertEqual(VENDOR_DATA2, ds_os.vendordata2_pure)
-        self.assertIsNone(ds_os.vendordata_raw)
-        m_dhcp.assert_not_called()
-
-    @hp.activate
-    @test_helpers.mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
-    @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
-    def test_local_datasource(self, m_dhcp, m_net):
-        """OpenStackLocal calls EphemeralDHCPNetwork and gets instance data."""
-        _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
-        ds_os_local = ds.DataSourceOpenStackLocal(
-            settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
-        ds_os_local._fallback_interface = 'eth9'  # Monkey patch for dhcp
-        m_dhcp.return_value = [{
-            'interface': 'eth9', 'fixed-address': '192.168.2.9',
-            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
-            'broadcast-address': '192.168.2.255'}]
-
-        self.assertIsNone(ds_os_local.version)
-        mock_path = MOCK_PATH + 'detect_openstack'
-        with test_helpers.mock.patch(mock_path) as m_detect_os:
-            m_detect_os.return_value = True
-            found = ds_os_local.get_data()
-        self.assertTrue(found)
-        self.assertEqual(2, ds_os_local.version)
-        md = dict(ds_os_local.metadata)
-        md.pop('instance-id', None)
-        md.pop('local-hostname', None)
-        self.assertEqual(OSTACK_META, md)
-        self.assertEqual(EC2_META, ds_os_local.ec2_metadata)
-        self.assertEqual(USER_DATA, ds_os_local.userdata_raw)
-        self.assertEqual(2, len(ds_os_local.files))
-        self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure)
-        self.assertEqual(VENDOR_DATA2, ds_os_local.vendordata2_pure)
-        self.assertIsNone(ds_os_local.vendordata_raw)
-        m_dhcp.assert_called_with('eth9', None)
-
-    def test_bad_datasource_meta(self):
-        os_files = copy.deepcopy(OS_FILES)
-        for k in list(os_files.keys()):
-            if k.endswith('meta_data.json'):
-                os_files[k] = '{'  # some invalid json
-        _register_uris(self.VERSION, {}, {}, os_files)
-        ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
-                                       None,
-                                       helpers.Paths({'run_dir': self.tmp}))
-        self.assertIsNone(ds_os.version)
-        mock_path = MOCK_PATH + 'detect_openstack'
-        with test_helpers.mock.patch(mock_path) as m_detect_os:
-            m_detect_os.return_value = True
-            found = ds_os.get_data()
-        self.assertFalse(found)
-        self.assertIsNone(ds_os.version)
-        self.assertIn(
-            'InvalidMetaDataException: Broken metadata address'
-            ' http://169.254.169.25',
-            self.logs.getvalue())
-
-    def test_no_datasource(self):
-        os_files = copy.deepcopy(OS_FILES)
-        for k in list(os_files.keys()):
-            if k.endswith('meta_data.json'):
-                os_files.pop(k)
-        _register_uris(self.VERSION, {}, {}, os_files)
-        ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
-                                       None,
-                                       helpers.Paths({'run_dir': self.tmp}))
-        ds_os.ds_cfg = {
-            'max_wait': 0,
-            'timeout': 0,
-        }
-        self.assertIsNone(ds_os.version)
-        mock_path = MOCK_PATH + 'detect_openstack'
-        with test_helpers.mock.patch(mock_path) as m_detect_os:
-            m_detect_os.return_value = True
-            found = ds_os.get_data()
-        self.assertFalse(found)
-        self.assertIsNone(ds_os.version)
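test_local_datasource mocks DHCP discovery as a list of lease dicts; consumers read the first lease's string-keyed fields. A tiny sketch of the assumed shape:

    leases = [{'interface': 'eth9', 'fixed-address': '192.168.2.9',
               'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
               'broadcast-address': '192.168.2.255'}]
    lease = leases[0]
    assert (lease['interface'], lease['fixed-address']) == \
        ('eth9', '192.168.2.9')
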
-
-    def test_network_config_disabled_by_datasource_config(self):
-        """The network_config can be disabled from datasource config."""
-        mock_path = MOCK_PATH + 'openstack.convert_net_json'
-        ds_os = ds.DataSourceOpenStack(
-            settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
-        ds_os.ds_cfg = {'apply_network_config': False}
-        sample_json = {'links': [{'ethernet_mac_address': 'mymac'}],
-                       'networks': [], 'services': []}
-        ds_os.network_json = sample_json  # Ignore this content from metadata
-        with test_helpers.mock.patch(mock_path) as m_convert_json:
-            self.assertIsNone(ds_os.network_config)
-        m_convert_json.assert_not_called()
-
-    def test_network_config_from_network_json(self):
-        """The datasource gets network_config from network_data.json."""
-        mock_path = MOCK_PATH + 'openstack.convert_net_json'
-        example_cfg = {'version': 1, 'config': []}
-        ds_os = ds.DataSourceOpenStack(
-            settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
-        sample_json = {'links': [{'ethernet_mac_address': 'mymac'}],
-                       'networks': [], 'services': []}
-        ds_os.network_json = sample_json
-        with test_helpers.mock.patch(mock_path) as m_convert_json:
-            m_convert_json.return_value = example_cfg
-            self.assertEqual(example_cfg, ds_os.network_config)
-        self.assertIn(
-            'network config provided via network_json', self.logs.getvalue())
-        m_convert_json.assert_called_with(sample_json, known_macs=None)
-
-    def test_network_config_cached(self):
-        """The datasource caches the network_config property."""
-        mock_path = MOCK_PATH + 'openstack.convert_net_json'
-        example_cfg = {'version': 1, 'config': []}
-        ds_os = ds.DataSourceOpenStack(
-            settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
-        ds_os._network_config = example_cfg
-        with test_helpers.mock.patch(mock_path) as m_convert_json:
-            self.assertEqual(example_cfg, ds_os.network_config)
-        m_convert_json.assert_not_called()
-
-    def test_disabled_datasource(self):
-        os_files = copy.deepcopy(OS_FILES)
-        os_meta = copy.deepcopy(OSTACK_META)
-        os_meta['meta'] = {
-            'dsmode': 'disabled',
-        }
-        for k in list(os_files.keys()):
-            if k.endswith('meta_data.json'):
-                os_files[k] = json.dumps(os_meta)
-        _register_uris(self.VERSION, {}, {}, os_files)
-        ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
-                                       None,
-                                       helpers.Paths({'run_dir': self.tmp}))
-        ds_os.ds_cfg = {
-            'max_wait': 0,
-            'timeout': 0,
-        }
-        self.assertIsNone(ds_os.version)
-        mock_path = MOCK_PATH + 'detect_openstack'
-        with test_helpers.mock.patch(mock_path) as m_detect_os:
-            m_detect_os.return_value = True
-            found = ds_os.get_data()
-        self.assertFalse(found)
-        self.assertIsNone(ds_os.version)
-
-    @hp.activate
-    def test_wb__crawl_metadata_does_not_persist(self):
-        """_crawl_metadata returns current metadata and does not cache."""
-        _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
-        ds_os = ds.DataSourceOpenStack(
-            settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
-        crawled_data = ds_os._crawl_metadata()
-        self.assertEqual(UNSET, ds_os.ec2_metadata)
-        self.assertIsNone(ds_os.userdata_raw)
-        self.assertEqual(0, len(ds_os.files))
-        self.assertIsNone(ds_os.vendordata_raw)
-        self.assertEqual(
-            ['dsmode', 'ec2-metadata', 'files', 'metadata', 'networkdata',
-             'userdata', 'vendordata', 'vendordata2', 'version'],
-            sorted(crawled_data.keys()))
-        self.assertEqual('local', crawled_data['dsmode'])
-        self.assertEqual(EC2_META, crawled_data['ec2-metadata'])
-        self.assertEqual(2, len(crawled_data['files']))
-        md = copy.deepcopy(crawled_data['metadata'])
-        md.pop('instance-id')
-        md.pop('local-hostname')
-        self.assertEqual(OSTACK_META, md)
-        self.assertEqual(
-            json.loads(OS_FILES['openstack/latest/network_data.json']),
-            crawled_data['networkdata'])
-        self.assertEqual(USER_DATA, crawled_data['userdata'])
-        self.assertEqual(VENDOR_DATA, crawled_data['vendordata'])
-        self.assertEqual(VENDOR_DATA2, crawled_data['vendordata2'])
-        self.assertEqual(2, crawled_data['version'])
-
-
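test_network_config_cached pins a compute-once contract: once _network_config is populated, the converter is never re-invoked. A standalone sketch of that contract (the real datasource uses an UNSET sentinel rather than None):

    class CachedConfig:
        def __init__(self, convert):
            self._network_config = None   # sentinel simplified to None
            self._convert = convert

        @property
        def network_config(self):
            if self._network_config is None:
                self._network_config = self._convert()
            return self._network_config

    calls = []
    cfg = CachedConfig(lambda: calls.append(1) or {'version': 1})
    assert cfg.network_config == {'version': 1}
    assert cfg.network_config == {'version': 1}
    assert len(calls) == 1   # converter ran exactly once
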
-class TestVendorDataLoading(test_helpers.TestCase):
-    def cvj(self, data):
-        return convert_vendordata(data)
-
-    def test_vd_load_none(self):
-        # non-existent vendor-data should return None
-        self.assertIsNone(self.cvj(None))
-
-    def test_vd_load_string(self):
-        self.assertEqual(self.cvj("foobar"), "foobar")
-
-    def test_vd_load_list(self):
-        data = [{'foo': 'bar'}, 'mystring', list(['another', 'list'])]
-        self.assertEqual(self.cvj(data), data)
-
-    def test_vd_load_dict_no_ci(self):
-        self.assertIsNone(self.cvj({'foo': 'bar'}))
-
-    def test_vd_load_dict_ci_dict(self):
-        self.assertRaises(ValueError, self.cvj,
-                          {'foo': 'bar', 'cloud-init': {'x': 1}})
-
-    def test_vd_load_dict_ci_string(self):
-        data = {'foo': 'bar', 'cloud-init': 'VENDOR_DATA'}
-        self.assertEqual(self.cvj(data), data['cloud-init'])
-
-    def test_vd_load_dict_ci_list(self):
-        data = {'foo': 'bar', 'cloud-init': ['VD_1', 'VD_2']}
-        self.assertEqual(self.cvj(data), data['cloud-init'])
-
-
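The TestVendorDataLoading assertions above fully determine a small contract. A sketch of that assumed behaviour, mirroring the assertions rather than the real convert_vendordata implementation:

    def convert_vendordata_sketch(data):
        if data is None:
            return None
        if isinstance(data, (str, list)):
            return data            # strings and lists pass through
        if isinstance(data, dict):
            ci = data.get('cloud-init')
            if ci is None:
                return None        # dict without a cloud-init key
            if isinstance(ci, dict):
                raise ValueError("vendordata['cloud-init'] cannot be a dict")
            return ci
        raise ValueError("unknown vendor-data type: %s" % type(data))

    assert convert_vendordata_sketch(
        {'foo': 'bar', 'cloud-init': 'VENDOR_DATA'}) == 'VENDOR_DATA'
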
-@test_helpers.mock.patch(MOCK_PATH + 'util.is_x86')
-class TestDetectOpenStack(test_helpers.CiTestCase):
-
-    def test_detect_openstack_non_intel_x86(self, m_is_x86):
-        """Return True on non-intel platforms because dmi isn't conclusive."""
-        m_is_x86.return_value = False
-        self.assertTrue(
-            ds.detect_openstack(), 'Expected detect_openstack == True')
-
-    @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env')
-    @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
-    def test_not_detect_openstack_intel_x86_ec2(self, m_dmi, m_proc_env,
-                                                m_is_x86):
-        """Return False on EC2 platforms."""
-        m_is_x86.return_value = True
-        # No product_name in proc/1/environ
-        m_proc_env.return_value = {'HOME': '/'}
-
-        def fake_dmi_read(dmi_key):
-            if dmi_key == 'system-product-name':
-                return 'HVM domU'  # Nothing 'openstackish' on EC2
-            if dmi_key == 'chassis-asset-tag':
-                return ''  # Empty string on EC2
-            assert False, 'Unexpected dmi read of %s' % dmi_key
-
-        m_dmi.side_effect = fake_dmi_read
-        self.assertFalse(
-            ds.detect_openstack(), 'Expected detect_openstack == False on EC2')
-        m_proc_env.assert_called_with(1)
-
-    @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
-    def test_detect_openstack_intel_product_name_compute(self, m_dmi,
-                                                         m_is_x86):
-        """Return True on OpenStack compute and nova instances."""
-        m_is_x86.return_value = True
-        openstack_product_names = ['OpenStack Nova', 'OpenStack Compute']
-
-        for product_name in openstack_product_names:
-            m_dmi.return_value = product_name
-            self.assertTrue(
-                ds.detect_openstack(), 'Failed to detect_openstack')
-
-    @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
-    def test_detect_openstack_opentelekomcloud_chassis_asset_tag(self, m_dmi,
-                                                                 m_is_x86):
-        """Return True on OpenStack reporting OpenTelekomCloud asset-tag."""
-        m_is_x86.return_value = True
-
-        def fake_dmi_read(dmi_key):
-            if dmi_key == 'system-product-name':
-                return 'HVM domU'  # Nothing 'openstackish' on OpenTelekomCloud
-            if dmi_key == 'chassis-asset-tag':
-                return 'OpenTelekomCloud'
-            assert False, 'Unexpected dmi read of %s' % dmi_key
-
-        m_dmi.side_effect = fake_dmi_read
-        self.assertTrue(
-            ds.detect_openstack(),
-            'Expected detect_openstack == True on OpenTelekomCloud')
-
-    @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
-    def test_detect_openstack_sapccloud_chassis_asset_tag(self, m_dmi,
-                                                          m_is_x86):
-        """Return True on OpenStack reporting SAP CCloud VM asset-tag."""
-        m_is_x86.return_value = True
-
-        def fake_dmi_read(dmi_key):
-            if dmi_key == 'system-product-name':
-                return 'VMware Virtual Platform'  # SAP CCloud uses VMware
-            if dmi_key == 'chassis-asset-tag':
-                return 'SAP CCloud VM'
-            assert False, 'Unexpected dmi read of %s' % dmi_key
-
-        m_dmi.side_effect = fake_dmi_read
-        self.assertTrue(
-            ds.detect_openstack(),
-            'Expected detect_openstack == True on SAP CCloud VM')
-
-    @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
-    def test_detect_openstack_oraclecloud_chassis_asset_tag(self, m_dmi,
-                                                            m_is_x86):
-        """Return True on OpenStack reporting Oracle cloud asset-tag."""
-        m_is_x86.return_value = True
-
-        def fake_dmi_read(dmi_key):
-            if dmi_key == 'system-product-name':
-                return 'Standard PC (i440FX + PIIX, 1996)'  # No match
-            if dmi_key == 'chassis-asset-tag':
-                return 'OracleCloud.com'
-            assert False, 'Unexpected dmi read of %s' % dmi_key
-
-        m_dmi.side_effect = fake_dmi_read
-        self.assertTrue(
-            ds.detect_openstack(accept_oracle=True),
-            'Expected detect_openstack == True on OracleCloud.com')
-        self.assertFalse(
-            ds.detect_openstack(accept_oracle=False),
-            'Expected detect_openstack == False.')
-
-    def _test_detect_openstack_nova_compute_chassis_asset_tag(self, m_dmi,
-                                                              m_is_x86,
-                                                              chassis_tag):
-        """Return True on OpenStack reporting generic asset-tag."""
-        m_is_x86.return_value = True
-
-        def fake_dmi_read(dmi_key):
-            if dmi_key == 'system-product-name':
-                return 'Generic OpenStack Platform'
-            if dmi_key == 'chassis-asset-tag':
-                return chassis_tag
-            assert False, 'Unexpected dmi read of %s' % dmi_key
-
-        m_dmi.side_effect = fake_dmi_read
-        self.assertTrue(
-            ds.detect_openstack(),
-            'Expected detect_openstack == True on Generic OpenStack Platform')
-
-    @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
-    def test_detect_openstack_nova_chassis_asset_tag(self, m_dmi,
-                                                     m_is_x86):
-        self._test_detect_openstack_nova_compute_chassis_asset_tag(
-            m_dmi, m_is_x86, 'OpenStack Nova')
-
-    @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
-    def test_detect_openstack_compute_chassis_asset_tag(self, m_dmi,
-                                                        m_is_x86):
-        self._test_detect_openstack_nova_compute_chassis_asset_tag(
-            m_dmi, m_is_x86, 'OpenStack Compute')
-
-    @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env')
-    @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data')
-    def test_detect_openstack_by_proc_1_environ(self, m_dmi, m_proc_env,
-                                                m_is_x86):
-        """Return True when nova product_name specified in /proc/1/environ."""
-        m_is_x86.return_value = True
-        # Nova product_name in proc/1/environ
-        m_proc_env.return_value = {
-            'HOME': '/', 'product_name': 'OpenStack Nova'}
-
-        def fake_dmi_read(dmi_key):
-            if dmi_key == 'system-product-name':
-                return 'HVM domU'  # Nothing 'openstackish'
-            if dmi_key == 'chassis-asset-tag':
-                return ''  # Nothing 'openstackish'
-            assert False, 'Unexpected dmi read of %s' % dmi_key
-
-        m_dmi.side_effect = fake_dmi_read
-        self.assertTrue(
-            ds.detect_openstack(),
-            'Expected detect_openstack == True when nova product_name is'
-            ' in /proc/1/environ')
-        m_proc_env.assert_called_with(1)
-
-
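The TestDetectOpenStack cases above pin a layered decision: DMI product-name first, then chassis asset-tag, with Oracle accepted only on request (the real function also consults /proc/1/environ and short-circuits on non-x86). A simplified sketch of just the DMI layer:

    # Assumed simplification of the detection order exercised above.
    def detect_openstack_sketch(product_name, asset_tag, accept_oracle=False):
        names = ('OpenStack Nova', 'OpenStack Compute')
        tags = ('OpenTelekomCloud', 'SAP CCloud VM',
                'OpenStack Nova', 'OpenStack Compute')
        if product_name in names:
            return True
        if asset_tag in tags:
            return True
        if accept_oracle and asset_tag == 'OracleCloud.com':
            return True
        return False

    assert detect_openstack_sketch('HVM domU', 'OracleCloud.com',
                                   accept_oracle=True)
    assert not detect_openstack_sketch('HVM domU', '')
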
-class TestMetadataReader(test_helpers.HttprettyTestCase):
-    """Test the MetadataReader."""
-    burl = 'http://169.254.169.254/'
-    md_base = {
-        'availability_zone': 'myaz1',
-        'hostname': 'sm-foo-test.novalocal',
-        "keys": [{"data": PUBKEY, "name": "brickies", "type": "ssh"}],
-        'launch_index': 0,
-        'name': 'sm-foo-test',
-        'public_keys': {'mykey': PUBKEY},
-        'project_id': '6a103f813b774b9fb15a4fcd36e1c056',
-        'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'}
-
-    def register(self, path, body=None, status=200):
-        content = body if not isinstance(body, str) else body.encode('utf-8')
-        hp.register_uri(
-            hp.GET, self.burl + "openstack" + path, status=status,
-            body=content)
-
-    def register_versions(self, versions):
-        self.register("", '\n'.join(versions))
-        self.register("/", '\n'.join(versions))
-
-    def register_version(self, version, data):
-        content = '\n'.join(sorted(data.keys()))
-        self.register(version, content)
-        self.register(version + "/", content)
-        for path, content in data.items():
-            self.register("/%s/%s" % (version, path), content)
-        if 'user_data' not in data:
-            self.register("/%s/user_data" % version, "nodata", status=404)
-
-    def test__find_working_version(self):
-        """Test a working version ignores unsupported."""
-        unsup = "2016-11-09"
-        self.register_versions(
-            [openstack.OS_FOLSOM, openstack.OS_LIBERTY, unsup,
-             openstack.OS_LATEST])
-        self.assertEqual(
-            openstack.OS_LIBERTY,
-            openstack.MetadataReader(self.burl)._find_working_version())
-
-    def test__find_working_version_uses_latest(self):
-        """'latest' should be used if no supported versions."""
-        unsup1, unsup2 = ("2016-11-09", '2017-06-06')
-        self.register_versions([unsup1, unsup2, openstack.OS_LATEST])
-        self.assertEqual(
-            openstack.OS_LATEST,
-            openstack.MetadataReader(self.burl)._find_working_version())
-
-    def test_read_v2_os_ocata(self):
-        """Validate return value of read_v2 for os_ocata data."""
-        md = copy.deepcopy(self.md_base)
-        md['devices'] = []
-        network_data = {'links': [], 'networks': [], 'services': []}
-        vendor_data = {}
-        vendor_data2 = {"static": {}}
-
-        data = {
-            'meta_data.json': json.dumps(md),
-            'network_data.json': json.dumps(network_data),
-            'vendor_data.json': json.dumps(vendor_data),
-            'vendor_data2.json': json.dumps(vendor_data2),
-        }
-
-        self.register_versions([openstack.OS_OCATA, openstack.OS_LATEST])
-        self.register_version(openstack.OS_OCATA, data)
-
-        mock_read_ec2 = test_helpers.mock.MagicMock(
-            return_value={'instance-id': 'unused-ec2'})
-        expected_md = copy.deepcopy(md)
-        expected_md.update(
-            {'instance-id': md['uuid'], 'local-hostname': md['hostname']})
-        expected = {
-            'userdata': '',  # Annoying, no user-data results in empty string.
-            'version': 2,
-            'metadata': expected_md,
-            'vendordata': vendor_data,
-            'vendordata2': vendor_data2,
-            'networkdata': network_data,
-            'ec2-metadata': mock_read_ec2.return_value,
-            'files': {},
-        }
-        reader = openstack.MetadataReader(self.burl)
-        reader._read_ec2_metadata = mock_read_ec2
-        self.assertEqual(expected, reader.read_v2())
-        self.assertEqual(1, mock_read_ec2.call_count)
-
-
-# vi: ts=4 expandtab
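The two _find_working_version tests above pin the negotiation: pick the newest supported version that the service advertises, otherwise fall back to 'latest'. A sketch of that assumed selection (the date strings stand in for openstack.OS_FOLSOM and openstack.OS_LIBERTY):

    OS_SUPPORTED = ['2012-08-10', '2015-10-15']   # oldest to newest
    OS_LATEST = 'latest'

    def find_working_version(advertised):
        for version in reversed(OS_SUPPORTED):
            if version in advertised:
                return version
        return OS_LATEST

    assert find_working_version(
        ['2012-08-10', '2015-10-15', '2016-11-09', 'latest']) == '2015-10-15'
    assert find_working_version(['2016-11-09', 'latest']) == 'latest'
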
diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py
deleted file mode 100644
index ad7446f8..00000000
--- a/tests/unittests/test_datasource/test_ovf.py
+++ /dev/null
@@ -1,1046 +0,0 @@
-# Copyright (C) 2016 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import base64
-import os
-
-from collections import OrderedDict
-from textwrap import dedent
-
-from cloudinit import subp
-from cloudinit import util
-from cloudinit.tests.helpers import CiTestCase, mock, wrap_and_call
-from cloudinit.helpers import Paths
-from cloudinit.sources import DataSourceOVF as dsovf
-from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
-    CustomScriptNotFound)
-from cloudinit.safeyaml import YAMLError
-
-MPATH = 'cloudinit.sources.DataSourceOVF.'
-
-NOT_FOUND = None
-
-OVF_ENV_CONTENT = """<?xml version="1.0" encoding="UTF-8"?>
-<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
-  oe:id="WebTier">
-  <!-- Information about hypervisor platform -->
-  <oe:PlatformSection>
-    <Kind>ESX Server</Kind>
-    <Version>3.0.1</Version>
-    <Vendor>VMware, Inc.</Vendor>
-    <Locale>en_US</Locale>
-  </oe:PlatformSection>
-  <!-- Properties defined for this virtual machine -->
-  <PropertySection>
-{properties}
-  </PropertySection>
-</Environment>
-"""
-
-
-def fill_properties(props, template=OVF_ENV_CONTENT):
-    lines = []
-    prop_tmpl = '<Property oe:key="{key}" oe:value="{val}"/>'
-    for key, val in props.items():
-        lines.append(prop_tmpl.format(key=key, val=val))
-    indent = "        "
-    properties = ''.join([indent + line + "\n" for line in lines])
-    return template.format(properties=properties)
-
-
-class TestReadOvfEnv(CiTestCase):
-    def test_with_b64_userdata(self):
-        user_data = "#!/bin/sh\necho hello world\n"
-        user_data_b64 = base64.b64encode(user_data.encode()).decode()
-        props = {"user-data": user_data_b64, "password": "passw0rd",
-                 "instance-id": "inst-001"}
-        env = fill_properties(props)
-        md, ud, cfg = dsovf.read_ovf_environment(env)
-        self.assertEqual({"instance-id": "inst-001"}, md)
-        self.assertEqual(user_data.encode(), ud)
-        self.assertEqual({'password': "passw0rd"}, cfg)
-
-    def test_with_non_b64_userdata(self):
-        user_data = "my-user-data"
-        props = {"user-data": user_data, "instance-id": "inst-001"}
-        env = fill_properties(props)
-        md, ud, cfg = dsovf.read_ovf_environment(env)
-        self.assertEqual({"instance-id": "inst-001"}, md)
-        self.assertEqual(user_data.encode(), ud)
-        self.assertEqual({}, cfg)
-
-    def test_with_no_userdata(self):
-        props = {"password": "passw0rd", "instance-id": "inst-001"}
-        env = fill_properties(props)
-        md, ud, cfg = dsovf.read_ovf_environment(env)
-        self.assertEqual({"instance-id": "inst-001"}, md)
-        self.assertEqual({'password': "passw0rd"}, cfg)
-        self.assertIsNone(ud)
-
-    def test_with_b64_network_config_enable_read_network(self):
-        network_config = dedent("""\
-        network:
-          version: 2
-          ethernets:
-            nics:
-              nameservers:
-                addresses:
-                  - 127.0.0.53
-                search:
-                  - eng.vmware.com
-                  - vmware.com
-              match:
-                name: eth*
-              gateway4: 10.10.10.253
-              dhcp4: false
-              addresses:
-                - 10.10.10.1/24
-        """)
-        network_config_b64 = base64.b64encode(network_config.encode()).decode()
-        props = {"network-config": network_config_b64,
-                 "password": "passw0rd",
-                 "instance-id": "inst-001"}
-        env = fill_properties(props)
-        md, ud, cfg = dsovf.read_ovf_environment(env, True)
-        self.assertEqual("inst-001", md["instance-id"])
-        self.assertEqual({'password': "passw0rd"}, cfg)
-        self.assertEqual(
-            {'version': 2, 'ethernets':
-                {'nics':
-                    {'nameservers':
-                        {'addresses': ['127.0.0.53'],
-                         'search': ['eng.vmware.com', 'vmware.com']},
-                     'match': {'name': 'eth*'},
-                     'gateway4': '10.10.10.253',
-                     'dhcp4': False,
-                     'addresses': ['10.10.10.1/24']}}},
-            md["network-config"])
-        self.assertIsNone(ud)
-
-    def test_with_non_b64_network_config_enable_read_network(self):
-        network_config = dedent("""\
-        network:
-          version: 2
-          ethernets:
-            nics:
-              nameservers:
-                addresses:
-                  - 127.0.0.53
-                search:
-                  - eng.vmware.com
-                  - vmware.com
-              match:
-                name: eth*
-              gateway4: 10.10.10.253
-              dhcp4: false
-              addresses:
-                - 10.10.10.1/24
-        """)
-        props = {"network-config": network_config,
-                 "password": "passw0rd",
-                 "instance-id": "inst-001"}
-        env = fill_properties(props)
-        md, ud, cfg = dsovf.read_ovf_environment(env, True)
-        self.assertEqual({"instance-id": "inst-001"}, md)
-        self.assertEqual({'password': "passw0rd"}, cfg)
-        self.assertIsNone(ud)
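The b64 variants above simply base64-encode the YAML before placing it in the OVF property; the reader decodes it back. A trivial round-trip sketch:

    import base64

    network_config = "network:\n  version: 2\n"
    encoded = base64.b64encode(network_config.encode()).decode()
    assert base64.b64decode(encoded).decode() == network_config
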
{"network-config": network_config_b64, - "password": "passw0rd", - "instance-id": "inst-001"} - env = fill_properties(props) - md, ud, cfg = dsovf.read_ovf_environment(env) - self.assertEqual({"instance-id": "inst-001"}, md) - self.assertEqual({'password': "passw0rd"}, cfg) - self.assertIsNone(ud) - - -class TestMarkerFiles(CiTestCase): - - def setUp(self): - super(TestMarkerFiles, self).setUp() - self.tdir = self.tmp_dir() - - def test_false_when_markerid_none(self): - """Return False when markerid provided is None.""" - self.assertFalse( - dsovf.check_marker_exists(markerid=None, marker_dir=self.tdir)) - - def test_markerid_file_exist(self): - """Return False when markerid file path does not exist, - True otherwise.""" - self.assertFalse( - dsovf.check_marker_exists('123', self.tdir)) - - marker_file = self.tmp_path('.markerfile-123.txt', self.tdir) - util.write_file(marker_file, '') - self.assertTrue( - dsovf.check_marker_exists('123', self.tdir) - ) - - def test_marker_file_setup(self): - """Test creation of marker files.""" - markerfilepath = self.tmp_path('.markerfile-hi.txt', self.tdir) - self.assertFalse(os.path.exists(markerfilepath)) - dsovf.setup_marker_files(markerid='hi', marker_dir=self.tdir) - self.assertTrue(os.path.exists(markerfilepath)) - - -class TestDatasourceOVF(CiTestCase): - - with_logs = True - - def setUp(self): - super(TestDatasourceOVF, self).setUp() - self.datasource = dsovf.DataSourceOVF - self.tdir = self.tmp_dir() - - def test_get_data_false_on_none_dmi_data(self): - """When dmi for system-product-name is None, get_data returns False.""" - paths = Paths({'cloud_dir': self.tdir}) - ds = self.datasource(sys_cfg={}, distro={}, paths=paths) - retcode = wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': None, - 'transport_iso9660': NOT_FOUND, - 'transport_vmware_guestinfo': NOT_FOUND}, - ds.get_data) - self.assertFalse(retcode, 'Expected False return from ds.get_data') - self.assertIn( - 'DEBUG: No system-product-name found', self.logs.getvalue()) - - def test_get_data_vmware_customization_disabled(self): - """When vmware customization is disabled via sys_cfg and - allow_raw_data is disabled via ds_cfg, log a message. - """ - paths = Paths({'cloud_dir': self.tdir}) - ds = self.datasource( - sys_cfg={'disable_vmware_customization': True, - 'datasource': {'OVF': {'allow_raw_data': False}}}, - distro={}, paths=paths) - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ - [MISC] - MARKER-ID = 12345345 - """) - util.write_file(conf_file, conf_content) - retcode = wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'transport_iso9660': NOT_FOUND, - 'transport_vmware_guestinfo': NOT_FOUND, - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file}, - ds.get_data) - self.assertFalse(retcode, 'Expected False return from ds.get_data') - self.assertIn( - 'DEBUG: Customization for VMware platform is disabled.', - self.logs.getvalue()) - - def test_get_data_vmware_customization_sys_cfg_disabled(self): - """When vmware customization is disabled via sys_cfg and - no meta data is found, log a message. 
-
-    def test_get_data_vmware_customization_sys_cfg_disabled(self):
-        """When vmware customization is disabled via sys_cfg and
-        no meta data is found, log a message.
-        """
-        paths = Paths({'cloud_dir': self.tdir})
-        ds = self.datasource(
-            sys_cfg={'disable_vmware_customization': True,
-                     'datasource': {'OVF': {'allow_raw_data': True}}},
-            distro={}, paths=paths)
-        conf_file = self.tmp_path('test-cust', self.tdir)
-        conf_content = dedent("""\
-        [MISC]
-        MARKER-ID = 12345345
-        """)
-        util.write_file(conf_file, conf_content)
-        retcode = wrap_and_call(
-            'cloudinit.sources.DataSourceOVF',
-            {'dmi.read_dmi_data': 'vmware',
-             'transport_iso9660': NOT_FOUND,
-             'transport_vmware_guestinfo': NOT_FOUND,
-             'util.del_dir': True,
-             'search_file': self.tdir,
-             'wait_for_imc_cfg_file': conf_file},
-            ds.get_data)
-        self.assertFalse(retcode, 'Expected False return from ds.get_data')
-        self.assertIn(
-            'DEBUG: Customization using VMware config is disabled.',
-            self.logs.getvalue())
-
-    def test_get_data_allow_raw_data_disabled(self):
-        """When allow_raw_data is disabled via ds_cfg and
-        meta data is found, log a message.
-        """
-        paths = Paths({'cloud_dir': self.tdir})
-        ds = self.datasource(
-            sys_cfg={'disable_vmware_customization': False,
-                     'datasource': {'OVF': {'allow_raw_data': False}}},
-            distro={}, paths=paths)
-
-        # Prepare the conf file
-        conf_file = self.tmp_path('test-cust', self.tdir)
-        conf_content = dedent("""\
-        [CLOUDINIT]
-        METADATA = test-meta
-        """)
-        util.write_file(conf_file, conf_content)
-        # Prepare the meta data file
-        metadata_file = self.tmp_path('test-meta', self.tdir)
-        util.write_file(metadata_file, "This is meta data")
-        retcode = wrap_and_call(
-            'cloudinit.sources.DataSourceOVF',
-            {'dmi.read_dmi_data': 'vmware',
-             'transport_iso9660': NOT_FOUND,
-             'transport_vmware_guestinfo': NOT_FOUND,
-             'util.del_dir': True,
-             'search_file': self.tdir,
-             'wait_for_imc_cfg_file': conf_file,
-             'collect_imc_file_paths': [self.tdir + '/test-meta', '', '']},
-            ds.get_data)
-        self.assertFalse(retcode, 'Expected False return from ds.get_data')
-        self.assertIn(
-            'DEBUG: Customization using raw data is disabled.',
-            self.logs.getvalue())
-
-    def test_get_data_vmware_customization_enabled(self):
-        """When cloud-init workflow for vmware is enabled via sys_cfg log a
-        message.
-        """
-        paths = Paths({'cloud_dir': self.tdir})
-        ds = self.datasource(
-            sys_cfg={'disable_vmware_customization': False}, distro={},
-            paths=paths)
-        conf_file = self.tmp_path('test-cust', self.tdir)
-        conf_content = dedent("""\
-        [CUSTOM-SCRIPT]
-        SCRIPT-NAME = test-script
-        [MISC]
-        MARKER-ID = 12345345
-        """)
-        util.write_file(conf_file, conf_content)
-        with mock.patch(MPATH + 'get_tools_config', return_value='true'):
-            with self.assertRaises(CustomScriptNotFound) as context:
-                wrap_and_call(
-                    'cloudinit.sources.DataSourceOVF',
-                    {'dmi.read_dmi_data': 'vmware',
-                     'util.del_dir': True,
-                     'search_file': self.tdir,
-                     'wait_for_imc_cfg_file': conf_file,
-                     'get_nics_to_enable': ''},
-                    ds.get_data)
-        customscript = self.tmp_path('test-script', self.tdir)
-        self.assertIn('Script %s not found!!' % customscript,
-                      str(context.exception))
-
-    def test_get_data_cust_script_disabled(self):
-        """If custom script is disabled by VMware tools configuration,
-        raise a RuntimeError.
-        """
-        paths = Paths({'cloud_dir': self.tdir})
-        ds = self.datasource(
-            sys_cfg={'disable_vmware_customization': False}, distro={},
-            paths=paths)
-        # Prepare the conf file
-        conf_file = self.tmp_path('test-cust', self.tdir)
-        conf_content = dedent("""\
-        [CUSTOM-SCRIPT]
-        SCRIPT-NAME = test-script
-        [MISC]
-        MARKER-ID = 12345346
-        """)
-        util.write_file(conf_file, conf_content)
-        # Prepare the custom script
-        customscript = self.tmp_path('test-script', self.tdir)
-        util.write_file(customscript, "This is the post cust script")
-
-        with mock.patch(MPATH + 'get_tools_config', return_value='invalid'):
-            with mock.patch(MPATH + 'set_customization_status',
-                            return_value=('msg', b'')):
-                with self.assertRaises(RuntimeError) as context:
-                    wrap_and_call(
-                        'cloudinit.sources.DataSourceOVF',
-                        {'dmi.read_dmi_data': 'vmware',
-                         'util.del_dir': True,
-                         'search_file': self.tdir,
-                         'wait_for_imc_cfg_file': conf_file,
-                         'get_nics_to_enable': ''},
-                        ds.get_data)
-        self.assertIn('Custom script is disabled by VM Administrator',
-                      str(context.exception))
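The tests around this point all hinge on one gate: the enable-custom-scripts value read from the VMware tools configuration. A sketch of the assumed gating, simplified from what the mocks exercise:

    # Assumed simplification: anything but a true-ish value blocks the script.
    def custom_script_enabled(tools_value, default="false"):
        value = tools_value if tools_value is not None else default
        return value.lower() in ("true", "yes", "1")

    assert custom_script_enabled("true")
    assert not custom_script_enabled("invalid")
    assert custom_script_enabled(None, default="yes")  # DEFAULT-RUN-... = yes
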
-
-    def test_get_data_cust_script_enabled(self):
-        """If custom script is enabled by VMware tools configuration,
-        execute the script.
-        """
-        paths = Paths({'cloud_dir': self.tdir})
-        ds = self.datasource(
-            sys_cfg={'disable_vmware_customization': False}, distro={},
-            paths=paths)
-        # Prepare the conf file
-        conf_file = self.tmp_path('test-cust', self.tdir)
-        conf_content = dedent("""\
-        [CUSTOM-SCRIPT]
-        SCRIPT-NAME = test-script
-        [MISC]
-        MARKER-ID = 12345346
-        """)
-        util.write_file(conf_file, conf_content)
-
-        # Mock custom script is enabled by return true when calling
-        # get_tools_config
-        with mock.patch(MPATH + 'get_tools_config', return_value="true"):
-            with mock.patch(MPATH + 'set_customization_status',
-                            return_value=('msg', b'')):
-                with self.assertRaises(CustomScriptNotFound) as context:
-                    wrap_and_call(
-                        'cloudinit.sources.DataSourceOVF',
-                        {'dmi.read_dmi_data': 'vmware',
-                         'util.del_dir': True,
-                         'search_file': self.tdir,
-                         'wait_for_imc_cfg_file': conf_file,
-                         'get_nics_to_enable': ''},
-                        ds.get_data)
-        # Verify custom script is trying to be executed
-        customscript = self.tmp_path('test-script', self.tdir)
-        self.assertIn('Script %s not found!!' % customscript,
-                      str(context.exception))
-
-    def test_get_data_force_run_post_script_is_yes(self):
-        """If DEFAULT-RUN-POST-CUST-SCRIPT is yes, custom script could run if
-        enable-custom-scripts is not defined in VM Tools configuration
-        """
-        paths = Paths({'cloud_dir': self.tdir})
-        ds = self.datasource(
-            sys_cfg={'disable_vmware_customization': False}, distro={},
-            paths=paths)
-        # Prepare the conf file
-        conf_file = self.tmp_path('test-cust', self.tdir)
-        # set DEFAULT-RUN-POST-CUST-SCRIPT = yes so that enable-custom-scripts
-        # default value is TRUE
-        conf_content = dedent("""\
-        [CUSTOM-SCRIPT]
-        SCRIPT-NAME = test-script
-        [MISC]
-        MARKER-ID = 12345346
-        DEFAULT-RUN-POST-CUST-SCRIPT = yes
-        """)
-        util.write_file(conf_file, conf_content)
-
-        # Mock get_tools_config(section, key, defaultVal) to return
-        # defaultVal
-        def my_get_tools_config(*args, **kwargs):
-            return args[2]
-
-        with mock.patch(MPATH + 'get_tools_config',
-                        side_effect=my_get_tools_config):
-            with mock.patch(MPATH + 'set_customization_status',
-                            return_value=('msg', b'')):
-                with self.assertRaises(CustomScriptNotFound) as context:
-                    wrap_and_call(
-                        'cloudinit.sources.DataSourceOVF',
-                        {'dmi.read_dmi_data': 'vmware',
-                         'util.del_dir': True,
-                         'search_file': self.tdir,
-                         'wait_for_imc_cfg_file': conf_file,
-                         'get_nics_to_enable': ''},
-                        ds.get_data)
-        # Verify custom script still runs although it is
-        # disabled by VMware Tools
-        customscript = self.tmp_path('test-script', self.tdir)
-        self.assertIn('Script %s not found!!' % customscript,
-                      str(context.exception))
-
-    def test_get_data_non_vmware_seed_platform_info(self):
-        """Platform info properly reports when on non-vmware platforms."""
-        paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir})
-        # Write ovf-env.xml seed file
-        seed_dir = self.tmp_path('seed', dir=self.tdir)
-        ovf_env = self.tmp_path('ovf-env.xml', dir=seed_dir)
-        util.write_file(ovf_env, OVF_ENV_CONTENT)
-        ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
-
-        self.assertEqual('ovf', ds.cloud_name)
-        self.assertEqual('ovf', ds.platform_type)
-        with mock.patch(MPATH + 'dmi.read_dmi_data', return_value='!VMware'):
-            with mock.patch(MPATH + 'transport_vmware_guestinfo') as m_guestd:
-                with mock.patch(MPATH + 'transport_iso9660') as m_iso9660:
-                    m_iso9660.return_value = NOT_FOUND
-                    m_guestd.return_value = NOT_FOUND
-                    self.assertTrue(ds.get_data())
-                    self.assertEqual(
-                        'ovf (%s/seed/ovf-env.xml)' % self.tdir,
-                        ds.subplatform)
-
-    def test_get_data_vmware_seed_platform_info(self):
-        """Platform info properly reports when on VMware platform."""
-        paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir})
-        # Write ovf-env.xml seed file
-        seed_dir = self.tmp_path('seed', dir=self.tdir)
-        ovf_env = self.tmp_path('ovf-env.xml', dir=seed_dir)
-        util.write_file(ovf_env, OVF_ENV_CONTENT)
-        ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
-
-        self.assertEqual('ovf', ds.cloud_name)
-        self.assertEqual('ovf', ds.platform_type)
-        with mock.patch(MPATH + 'dmi.read_dmi_data', return_value='VMWare'):
-            with mock.patch(MPATH + 'transport_vmware_guestinfo') as m_guestd:
-                with mock.patch(MPATH + 'transport_iso9660') as m_iso9660:
-                    m_iso9660.return_value = NOT_FOUND
-                    m_guestd.return_value = NOT_FOUND
-                    self.assertTrue(ds.get_data())
-                    self.assertEqual(
-                        'vmware (%s/seed/ovf-env.xml)' % self.tdir,
-                        ds.subplatform)
-
-    @mock.patch('cloudinit.subp.subp')
-    @mock.patch('cloudinit.sources.DataSource.persist_instance_data')
-    def test_get_data_vmware_guestinfo_with_network_config(
-        self, m_persist, m_subp
-    ):
-        self._test_get_data_with_network_config(guestinfo=True, iso=False)
-
-    @mock.patch('cloudinit.subp.subp')
-    @mock.patch('cloudinit.sources.DataSource.persist_instance_data')
-    def test_get_data_iso9660_with_network_config(self, m_persist, m_subp):
-        self._test_get_data_with_network_config(guestinfo=False, iso=True)
-
-    def _test_get_data_with_network_config(self, guestinfo, iso):
-        network_config = dedent("""\
-        network:
-          version: 2
-          ethernets:
-            nics:
-              nameservers:
-                addresses:
-                  - 127.0.0.53
-                search:
-                  - vmware.com
-              match:
-                name: eth*
-              gateway4: 10.10.10.253
-              dhcp4: false
-              addresses:
-                - 10.10.10.1/24
-        """)
-        network_config_b64 = base64.b64encode(network_config.encode()).decode()
-        props = {"network-config": network_config_b64,
-                 "password": "passw0rd",
-                 "instance-id": "inst-001"}
-        env = fill_properties(props)
-        paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir})
-        ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
-        with mock.patch(MPATH + 'transport_vmware_guestinfo',
-                        return_value=env if guestinfo else NOT_FOUND):
-            with mock.patch(MPATH + 'transport_iso9660',
-                            return_value=env if iso else NOT_FOUND):
-                self.assertTrue(ds.get_data())
-                self.assertEqual('inst-001', ds.metadata['instance-id'])
-                self.assertEqual(
-                    {'version': 2, 'ethernets':
-                        {'nics':
-                            {'nameservers':
-                                {'addresses': ['127.0.0.53'],
-                                 'search': ['vmware.com']},
-                             'match': {'name': 'eth*'},
-                             'gateway4': '10.10.10.253',
-                             'dhcp4': False,
-                             'addresses': ['10.10.10.1/24']}}},
-                    ds.network_config)
-
-    def test_get_data_cloudinit_metadata_json(self):
-        """Test metadata can be loaded to cloud-init metadata and network.
-        The metadata format is json.
-        """
-        paths = Paths({'cloud_dir': self.tdir})
-        ds = self.datasource(
-            sys_cfg={'disable_vmware_customization': True}, distro={},
-            paths=paths)
-        # Prepare the conf file
-        conf_file = self.tmp_path('test-cust', self.tdir)
-        conf_content = dedent("""\
-        [CLOUDINIT]
-        METADATA = test-meta
-        """)
-        util.write_file(conf_file, conf_content)
-        # Prepare the meta data file
-        metadata_file = self.tmp_path('test-meta', self.tdir)
-        metadata_content = dedent("""\
-        {
-            "instance-id": "cloud-vm",
-            "local-hostname": "my-host.domain.com",
-            "network": {
-                "version": 2,
-                "ethernets": {
-                    "eths": {
-                        "match": {
-                            "name": "ens*"
-                        },
-                        "dhcp4": true
-                    }
-                }
-            }
-        }
-        """)
-        util.write_file(metadata_file, metadata_content)
-
-        with mock.patch(MPATH + 'set_customization_status',
-                        return_value=('msg', b'')):
-            result = wrap_and_call(
-                'cloudinit.sources.DataSourceOVF',
-                {'dmi.read_dmi_data': 'vmware',
-                 'util.del_dir': True,
-                 'search_file': self.tdir,
-                 'wait_for_imc_cfg_file': conf_file,
-                 'collect_imc_file_paths': [self.tdir + '/test-meta', '', ''],
-                 'get_nics_to_enable': ''},
-                ds._get_data)
-
-        self.assertTrue(result)
-        self.assertEqual("cloud-vm", ds.metadata['instance-id'])
-        self.assertEqual("my-host.domain.com", ds.metadata['local-hostname'])
-        self.assertEqual(2, ds.network_config['version'])
-        self.assertTrue(ds.network_config['ethernets']['eths']['dhcp4'])
-
-    def test_get_data_cloudinit_metadata_yaml(self):
-        """Test metadata can be loaded to cloud-init metadata and network.
-        The metadata format is yaml.
-        """
-        paths = Paths({'cloud_dir': self.tdir})
-        ds = self.datasource(
-            sys_cfg={'disable_vmware_customization': True}, distro={},
-            paths=paths)
-        # Prepare the conf file
-        conf_file = self.tmp_path('test-cust', self.tdir)
-        conf_content = dedent("""\
-        [CLOUDINIT]
-        METADATA = test-meta
-        """)
-        util.write_file(conf_file, conf_content)
-        # Prepare the meta data file
-        metadata_file = self.tmp_path('test-meta', self.tdir)
-        metadata_content = dedent("""\
-        instance-id: cloud-vm
-        local-hostname: my-host.domain.com
-        network:
-          version: 2
-          ethernets:
-            nics:
-              match:
-                name: ens*
-              dhcp4: yes
-        """)
-        util.write_file(metadata_file, metadata_content)
-
-        with mock.patch(MPATH + 'set_customization_status',
-                        return_value=('msg', b'')):
-            result = wrap_and_call(
-                'cloudinit.sources.DataSourceOVF',
-                {'dmi.read_dmi_data': 'vmware',
-                 'util.del_dir': True,
-                 'search_file': self.tdir,
-                 'wait_for_imc_cfg_file': conf_file,
-                 'collect_imc_file_paths': [self.tdir + '/test-meta', '', ''],
-                 'get_nics_to_enable': ''},
-                ds._get_data)
-
-        self.assertTrue(result)
-        self.assertEqual("cloud-vm", ds.metadata['instance-id'])
-        self.assertEqual("my-host.domain.com", ds.metadata['local-hostname'])
-        self.assertEqual(2, ds.network_config['version'])
-        self.assertTrue(ds.network_config['ethernets']['nics']['dhcp4'])
-
-    def test_get_data_cloudinit_metadata_not_valid(self):
-        """Test metadata is not JSON or YAML format.
-        """
-        paths = Paths({'cloud_dir': self.tdir})
-        ds = self.datasource(
-            sys_cfg={'disable_vmware_customization': True}, distro={},
-            paths=paths)
-
-        # Prepare the conf file
-        conf_file = self.tmp_path('test-cust', self.tdir)
-        conf_content = dedent("""\
-        [CLOUDINIT]
-        METADATA = test-meta
-        """)
-        util.write_file(conf_file, conf_content)
-
-        # Prepare the meta data file
-        metadata_file = self.tmp_path('test-meta', self.tdir)
-        metadata_content = "[This is not json or yaml format]a=b"
-        util.write_file(metadata_file, metadata_content)
-
-        with mock.patch(MPATH + 'set_customization_status',
-                        return_value=('msg', b'')):
-            with self.assertRaises(YAMLError) as context:
-                wrap_and_call(
-                    'cloudinit.sources.DataSourceOVF',
-                    {'dmi.read_dmi_data': 'vmware',
-                     'util.del_dir': True,
-                     'search_file': self.tdir,
-                     'wait_for_imc_cfg_file': conf_file,
-                     'collect_imc_file_paths': [
-                         self.tdir + '/test-meta', '', ''
-                     ],
-                     'get_nics_to_enable': ''},
-                    ds.get_data)
-
-        self.assertIn("expected '<document start>', but found '<scalar>'",
-                      str(context.exception))
-
-    def test_get_data_cloudinit_metadata_not_found(self):
-        """Test metadata file can't be found.
-        """
-        paths = Paths({'cloud_dir': self.tdir})
-        ds = self.datasource(
-            sys_cfg={'disable_vmware_customization': True}, distro={},
-            paths=paths)
-        # Prepare the conf file
-        conf_file = self.tmp_path('test-cust', self.tdir)
-        conf_content = dedent("""\
-        [CLOUDINIT]
-        METADATA = test-meta
-        """)
-        util.write_file(conf_file, conf_content)
-        # Don't prepare the meta data file
-
-        with mock.patch(MPATH + 'set_customization_status',
-                        return_value=('msg', b'')):
-            with self.assertRaises(FileNotFoundError) as context:
-                wrap_and_call(
-                    'cloudinit.sources.DataSourceOVF',
-                    {'dmi.read_dmi_data': 'vmware',
-                     'util.del_dir': True,
-                     'search_file': self.tdir,
-                     'wait_for_imc_cfg_file': conf_file,
-                     'get_nics_to_enable': ''},
-                    ds.get_data)
-
-        self.assertIn('is not found', str(context.exception))
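The JSON and YAML metadata tests above can share one loader because JSON is a subset of YAML. A trivial sketch of that equivalence:

    import yaml

    json_doc = '{"instance-id": "cloud-vm"}'
    yaml_doc = 'instance-id: cloud-vm\n'
    assert yaml.safe_load(json_doc) == yaml.safe_load(yaml_doc)
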
-
-    def test_get_data_cloudinit_userdata(self):
-        """Test user data can be loaded to cloud-init user data.
-        """
-        paths = Paths({'cloud_dir': self.tdir})
-        ds = self.datasource(
-            sys_cfg={'disable_vmware_customization': False}, distro={},
-            paths=paths)
-
-        # Prepare the conf file
-        conf_file = self.tmp_path('test-cust', self.tdir)
-        conf_content = dedent("""\
-        [CLOUDINIT]
-        METADATA = test-meta
-        USERDATA = test-user
-        """)
-        util.write_file(conf_file, conf_content)
-
-        # Prepare the meta data file
-        metadata_file = self.tmp_path('test-meta', self.tdir)
-        metadata_content = dedent("""\
-        instance-id: cloud-vm
-        local-hostname: my-host.domain.com
-        network:
-          version: 2
-          ethernets:
-            nics:
-              match:
-                name: ens*
-              dhcp4: yes
-        """)
-        util.write_file(metadata_file, metadata_content)
-
-        # Prepare the user data file
-        userdata_file = self.tmp_path('test-user', self.tdir)
-        userdata_content = "This is the user data"
-        util.write_file(userdata_file, userdata_content)
-
-        with mock.patch(MPATH + 'set_customization_status',
-                        return_value=('msg', b'')):
-            result = wrap_and_call(
-                'cloudinit.sources.DataSourceOVF',
-                {'dmi.read_dmi_data': 'vmware',
-                 'util.del_dir': True,
-                 'search_file': self.tdir,
-                 'wait_for_imc_cfg_file': conf_file,
-                 'collect_imc_file_paths': [self.tdir + '/test-meta',
-                                            self.tdir + '/test-user', ''],
-                 'get_nics_to_enable': ''},
-                ds._get_data)
-
-        self.assertTrue(result)
-        self.assertEqual("cloud-vm", ds.metadata['instance-id'])
-        self.assertEqual(userdata_content, ds.userdata_raw)
-
-    def test_get_data_cloudinit_userdata_not_found(self):
-        """Test userdata file can't be found.
-        """
-        paths = Paths({'cloud_dir': self.tdir})
-        ds = self.datasource(
-            sys_cfg={'disable_vmware_customization': True}, distro={},
-            paths=paths)
-
-        # Prepare the conf file
-        conf_file = self.tmp_path('test-cust', self.tdir)
-        conf_content = dedent("""\
-        [CLOUDINIT]
-        METADATA = test-meta
-        USERDATA = test-user
-        """)
-        util.write_file(conf_file, conf_content)
-
-        # Prepare the meta data file
-        metadata_file = self.tmp_path('test-meta', self.tdir)
-        metadata_content = dedent("""\
-        instance-id: cloud-vm
-        local-hostname: my-host.domain.com
-        network:
-          version: 2
-          ethernets:
-            nics:
-              match:
-                name: ens*
-              dhcp4: yes
-        """)
-        util.write_file(metadata_file, metadata_content)
-
-        # Don't prepare the user data file
-
-        with mock.patch(MPATH + 'set_customization_status',
-                        return_value=('msg', b'')):
-            with self.assertRaises(FileNotFoundError) as context:
-                wrap_and_call(
-                    'cloudinit.sources.DataSourceOVF',
-                    {'dmi.read_dmi_data': 'vmware',
-                     'util.del_dir': True,
-                     'search_file': self.tdir,
-                     'wait_for_imc_cfg_file': conf_file,
-                     'get_nics_to_enable': ''},
-                    ds.get_data)
-
-        self.assertIn('is not found', str(context.exception))
-
-
-class TestTransportIso9660(CiTestCase):
-
-    def setUp(self):
-        super(TestTransportIso9660, self).setUp()
-        self.add_patch('cloudinit.util.find_devs_with',
-                       'm_find_devs_with')
-        self.add_patch('cloudinit.util.mounts', 'm_mounts')
-        self.add_patch('cloudinit.util.mount_cb', 'm_mount_cb')
-        self.add_patch('cloudinit.sources.DataSourceOVF.get_ovf_env',
-                       'm_get_ovf_env')
-        self.m_get_ovf_env.return_value = ('myfile', 'mycontent')
-
-    def test_find_already_mounted(self):
-        """Check we call get_ovf_env on matching mounted devices"""
-        mounts = {
-            '/dev/sr9': {
-                'fstype': 'iso9660',
-                'mountpoint': 'wark/media/sr9',
-                'opts': 'ro',
-            }
-        }
-        self.m_mounts.return_value = mounts
-
-        self.assertEqual("mycontent", dsovf.transport_iso9660())
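The mounted-device tests apply a simple rule: only filesystems already mounted as iso9660 are candidates. A standalone sketch of that filter over the same mounts shape:

    # Assumed filter over util.mounts()-style data, as mocked above.
    mounts = {'/dev/sr9': {'fstype': 'iso9660',
                           'mountpoint': 'wark/media/sr9', 'opts': 'ro'},
              '/dev/xvdb': {'fstype': 'vfat',
                            'mountpoint': 'wark/foobar',
                            'opts': 'defaults,noatime'}}
    iso_devs = [dev for dev, info in mounts.items()
                if info['fstype'] == 'iso9660']
    assert iso_devs == ['/dev/sr9']
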
'/dev/xvdb': {
- 'fstype': 'vfat',
- 'mountpoint': 'wark/foobar',
- 'opts': 'defaults,noatime',
- },
- '/dev/xvdc': {
- 'fstype': 'iso9660',
- 'mountpoint': 'wark/media/sr9',
- 'opts': 'ro',
- }
- }
- # We use an OrderedDict here to ensure we check xvdb before xvdc,
- # as we're not mocking the regex matching. If we place an entry
- # in the results then we can be reasonably sure that we're
- # skipping an entry which fails to match.
- self.m_mounts.return_value = (
- OrderedDict(sorted(mounts.items(), key=lambda t: t[0])))
-
- self.assertEqual("mycontent", dsovf.transport_iso9660())
-
- def test_find_already_mounted_matches_kname(self):
- """Check we don't regex match on the basename of the device"""
- mounts = {
- '/dev/foo/bar/xvdc': {
- 'fstype': 'iso9660',
- 'mountpoint': 'wark/media/sr9',
- 'opts': 'ro',
- }
- }
- self.m_mounts.return_value = mounts
-
- self.assertEqual(NOT_FOUND, dsovf.transport_iso9660())
-
- def test_mount_cb_called_on_blkdevs_with_iso9660(self):
- """Check we call mount_cb on blockdevs with iso9660 only"""
- self.m_mounts.return_value = {}
- self.m_find_devs_with.return_value = ['/dev/sr0']
- self.m_mount_cb.return_value = ("myfile", "mycontent")
-
- self.assertEqual("mycontent", dsovf.transport_iso9660())
- self.m_mount_cb.assert_called_with(
- "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660")
-
- def test_mount_cb_called_on_blkdevs_with_iso9660_check_regex(self):
- """Check we call mount_cb on blockdevs with iso9660 and match regex"""
- self.m_mounts.return_value = {}
- self.m_find_devs_with.return_value = [
- '/dev/abc', '/dev/my-cdrom', '/dev/sr0']
- self.m_mount_cb.return_value = ("myfile", "mycontent")
-
- self.assertEqual("mycontent", dsovf.transport_iso9660())
- self.m_mount_cb.assert_called_with(
- "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660")
-
- def test_mount_cb_not_called_no_matches(self):
- """Check we don't call mount_cb if nothing matches"""
- self.m_mounts.return_value = {}
- self.m_find_devs_with.return_value = ['/dev/vg/myovf']
-
- self.assertEqual(NOT_FOUND, dsovf.transport_iso9660())
- self.assertEqual(0, self.m_mount_cb.call_count)
-
- def test_mount_cb_called_require_iso_false(self):
- """Check we call mount_cb on blockdevs with require_iso=False"""
- self.m_mounts.return_value = {}
- self.m_find_devs_with.return_value = ['/dev/xvdz']
- self.m_mount_cb.return_value = ("myfile", "mycontent")
-
- self.assertEqual(
- "mycontent", dsovf.transport_iso9660(require_iso=False))
-
- self.m_mount_cb.assert_called_with(
- "/dev/xvdz", dsovf.get_ovf_env, mtype=None)
-
- def test_maybe_cdrom_device_none(self):
- """Test maybe_cdrom_device returns False for none/empty input"""
- self.assertFalse(dsovf.maybe_cdrom_device(None))
- self.assertFalse(dsovf.maybe_cdrom_device(''))
-
- def test_maybe_cdrom_device_non_string_exception(self):
- """Test maybe_cdrom_device raises ValueError on non-string types"""
- with self.assertRaises(ValueError):
- dsovf.maybe_cdrom_device({'a': 'eleven'})
-
- def test_maybe_cdrom_device_false_on_multi_dir_paths(self):
- """Test maybe_cdrom_device is false on /dev[/.*]/* paths"""
- self.assertFalse(dsovf.maybe_cdrom_device('/dev/foo/sr0'))
- self.assertFalse(dsovf.maybe_cdrom_device('foo/sr0'))
- self.assertFalse(dsovf.maybe_cdrom_device('../foo/sr0'))
-
- def test_maybe_cdrom_device_true_on_hd_partitions(self):
- """Test maybe_cdrom_device is true on /dev/hd[a-z][0-9]+ paths"""
-
self.assertTrue(dsovf.maybe_cdrom_device('/dev/hda1'))
- self.assertTrue(dsovf.maybe_cdrom_device('hdz9'))
-
- def test_maybe_cdrom_device_true_on_valid_relative_paths(self):
- """Test maybe_cdrom_device normalizes paths"""
- self.assertTrue(dsovf.maybe_cdrom_device('/dev/wark/../sr9'))
- self.assertTrue(dsovf.maybe_cdrom_device('///sr0'))
- self.assertTrue(dsovf.maybe_cdrom_device('/sr0'))
- self.assertTrue(dsovf.maybe_cdrom_device('//dev//hda'))
-
- def test_maybe_cdrom_device_true_on_xvd_partitions(self):
- """Test maybe_cdrom_device returns true on xvd*"""
- self.assertTrue(dsovf.maybe_cdrom_device('/dev/xvda'))
- self.assertTrue(dsovf.maybe_cdrom_device('/dev/xvda1'))
- self.assertTrue(dsovf.maybe_cdrom_device('xvdza1'))
-
-
-@mock.patch(MPATH + "subp.which")
-@mock.patch(MPATH + "subp.subp")
-class TestTransportVmwareGuestinfo(CiTestCase):
- """Test the com.vmware.guestInfo transport implemented in
- transport_vmware_guestinfo."""
-
- rpctool = 'vmware-rpctool'
- with_logs = True
- rpctool_path = '/not/important/vmware-rpctool'
-
- def test_without_vmware_rpctool_returns_notfound(self, m_subp, m_which):
- m_which.return_value = None
- self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
- self.assertEqual(0, m_subp.call_count,
- "subp should not be called if no rpctool in path.")
-
- def test_notfound_on_exit_code_1(self, m_subp, m_which):
- """If vmware-rpctool exits 1, it must return not found."""
- m_which.return_value = self.rpctool_path
- m_subp.side_effect = subp.ProcessExecutionError(
- stdout="", stderr="No value found", exit_code=1, cmd=["unused"])
- self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo())
- self.assertEqual(1, m_subp.call_count)
- self.assertNotIn("WARNING", self.logs.getvalue(),
- "exit code of 1 by rpctool should not cause warning.")
-
- def test_notfound_if_no_content_but_exit_zero(self, m_subp, m_which):
- """vmware-rpctool exiting 0 with no stdout is a normal not-found.
-
- This isn't actually a case I've seen. Normally, on "not found",
- rpctool would exit 1 with 'No value found' on stderr. But cover
- the case where it exited 0 and just wrote nothing to stdout.
- """ - m_which.return_value = self.rpctool_path - m_subp.return_value = ('', '') - self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) - self.assertEqual(1, m_subp.call_count) - - def test_notfound_and_warns_on_unexpected_exit_code(self, m_subp, m_which): - """If vmware-rpctool exits non zero or 1, warnings should be logged.""" - m_which.return_value = self.rpctool_path - m_subp.side_effect = subp.ProcessExecutionError( - stdout=None, stderr="No value found", exit_code=2, cmd=["unused"]) - self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) - self.assertEqual(1, m_subp.call_count) - self.assertIn("WARNING", self.logs.getvalue(), - "exit code of 2 by rpctool should log WARNING.") - - def test_found_when_guestinfo_present(self, m_subp, m_which): - """When there is a ovf info, transport should return it.""" - m_which.return_value = self.rpctool_path - content = fill_properties({}) - m_subp.return_value = (content, '') - self.assertEqual(content, dsovf.transport_vmware_guestinfo()) - self.assertEqual(1, m_subp.call_count) - -# -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_rbx.py b/tests/unittests/test_datasource/test_rbx.py deleted file mode 100644 index d017510e..00000000 --- a/tests/unittests/test_datasource/test_rbx.py +++ /dev/null @@ -1,238 +0,0 @@ -import json - -from cloudinit import helpers -from cloudinit import distros -from cloudinit.sources import DataSourceRbxCloud as ds -from cloudinit.tests.helpers import mock, CiTestCase, populate_dir -from cloudinit import subp - -DS_PATH = "cloudinit.sources.DataSourceRbxCloud" - -CRYPTO_PASS = "$6$uktth46t$FvpDzFD2iL9YNZIG1Epz7957hJqbH0f" \ - "QKhnzcfBcUhEodGAWRqTy7tYG4nEW7SUOYBjxOSFIQW5" \ - "tToyGP41.s1" - -CLOUD_METADATA = { - "vm": { - "memory": 4, - "cpu": 2, - "name": "vm-image-builder", - "_id": "5beab44f680cffd11f0e60fc" - }, - "additionalMetadata": { - "username": "guru", - "sshKeys": ["ssh-rsa ..."], - "password": { - "sha512": CRYPTO_PASS - } - }, - "disk": [ - {"size": 10, "type": "ssd", - "name": "vm-image-builder-os", - "_id": "5beab450680cffd11f0e60fe"}, - {"size": 2, "type": "ssd", - "name": "ubuntu-1804-bionic", - "_id": "5bef002c680cffd11f107590"} - ], - "netadp": [ - { - "ip": [{"address": "62.181.8.174"}], - "network": { - "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]}, - "routing": [], - "gateway": "62.181.8.1", - "netmask": "255.255.248.0", - "name": "public", - "type": "public", - "_id": "5784e97be2627505227b578c" - }, - "speed": 1000, - "type": "hv", - "macaddress": "00:15:5D:FF:0F:03", - "_id": "5beab450680cffd11f0e6102" - }, - { - "ip": [{"address": "10.209.78.11"}], - "network": { - "dns": {"nameservers": ["9.9.9.9", "8.8.8.8"]}, - "routing": [], - "gateway": "10.209.78.1", - "netmask": "255.255.255.0", - "name": "network-determined-bardeen", - "type": "private", - "_id": "5beaec64680cffd11f0e7c31" - }, - "speed": 1000, - "type": "hv", - "macaddress": "00:15:5D:FF:0F:24", - "_id": "5bec18c6680cffd11f0f0d8b" - } - ], - "dvddrive": [{"iso": {}}] -} - - -class TestRbxDataSource(CiTestCase): - parsed_user = None - allowed_subp = ['bash'] - - def _fetch_distro(self, kind): - cls = distros.fetch(kind) - paths = helpers.Paths({}) - return cls(kind, {}, paths) - - def setUp(self): - super(TestRbxDataSource, self).setUp() - self.tmp = self.tmp_dir() - self.paths = helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp} - ) - - # defaults for few tests - self.ds = ds.DataSourceRbxCloud - self.seed_dir = self.paths.seed_dir - self.sys_cfg = {'datasource': {'RbxCloud': 
{'dsmode': 'local'}}} - - def test_seed_read_user_data_callback_empty_file(self): - populate_user_metadata(self.seed_dir, '') - populate_cloud_metadata(self.seed_dir, {}) - results = ds.read_user_data_callback(self.seed_dir) - - self.assertIsNone(results) - - def test_seed_read_user_data_callback_valid_disk(self): - populate_user_metadata(self.seed_dir, '') - populate_cloud_metadata(self.seed_dir, CLOUD_METADATA) - results = ds.read_user_data_callback(self.seed_dir) - - self.assertNotEqual(results, None) - self.assertTrue('userdata' in results) - self.assertTrue('metadata' in results) - self.assertTrue('cfg' in results) - - def test_seed_read_user_data_callback_userdata(self): - userdata = "#!/bin/sh\nexit 1" - populate_user_metadata(self.seed_dir, userdata) - populate_cloud_metadata(self.seed_dir, CLOUD_METADATA) - - results = ds.read_user_data_callback(self.seed_dir) - - self.assertNotEqual(results, None) - self.assertTrue('userdata' in results) - self.assertEqual(results['userdata'], userdata) - - def test_generate_network_config(self): - expected = { - 'version': 1, - 'config': [ - { - 'subnets': [ - {'control': 'auto', - 'dns_nameservers': ['8.8.8.8', '8.8.4.4'], - 'netmask': '255.255.248.0', - 'address': '62.181.8.174', - 'type': 'static', 'gateway': '62.181.8.1'} - ], - 'type': 'physical', - 'name': 'eth0', - 'mac_address': '00:15:5d:ff:0f:03' - }, - { - 'subnets': [ - {'control': 'auto', - 'dns_nameservers': ['9.9.9.9', '8.8.8.8'], - 'netmask': '255.255.255.0', - 'address': '10.209.78.11', - 'type': 'static', - 'gateway': '10.209.78.1'} - ], - 'type': 'physical', - 'name': 'eth1', - 'mac_address': '00:15:5d:ff:0f:24' - } - ] - } - self.assertTrue( - ds.generate_network_config(CLOUD_METADATA['netadp']), - expected - ) - - @mock.patch(DS_PATH + '.subp.subp') - def test_gratuitous_arp_run_standard_arping(self, m_subp): - """Test handle run arping & parameters.""" - items = [ - { - 'destination': '172.17.0.2', - 'source': '172.16.6.104' - }, - { - 'destination': '172.17.0.2', - 'source': '172.16.6.104', - }, - ] - ds.gratuitous_arp(items, self._fetch_distro('ubuntu')) - self.assertEqual([ - mock.call([ - 'arping', '-c', '2', '-S', - '172.16.6.104', '172.17.0.2' - ]), - mock.call([ - 'arping', '-c', '2', '-S', - '172.16.6.104', '172.17.0.2' - ]) - ], m_subp.call_args_list - ) - - @mock.patch(DS_PATH + '.subp.subp') - def test_handle_rhel_like_arping(self, m_subp): - """Test handle on RHEL-like distros.""" - items = [ - { - 'source': '172.16.6.104', - 'destination': '172.17.0.2', - } - ] - ds.gratuitous_arp(items, self._fetch_distro('fedora')) - self.assertEqual([ - mock.call( - ['arping', '-c', '2', '-s', '172.16.6.104', '172.17.0.2'] - )], - m_subp.call_args_list - ) - - @mock.patch( - DS_PATH + '.subp.subp', - side_effect=subp.ProcessExecutionError() - ) - def test_continue_on_arping_error(self, m_subp): - """Continue when command error""" - items = [ - { - 'destination': '172.17.0.2', - 'source': '172.16.6.104' - }, - { - 'destination': '172.17.0.2', - 'source': '172.16.6.104', - }, - ] - ds.gratuitous_arp(items, self._fetch_distro('ubuntu')) - self.assertEqual([ - mock.call([ - 'arping', '-c', '2', '-S', - '172.16.6.104', '172.17.0.2' - ]), - mock.call([ - 'arping', '-c', '2', '-S', - '172.16.6.104', '172.17.0.2' - ]) - ], m_subp.call_args_list - ) - - -def populate_cloud_metadata(path, data): - populate_dir(path, {'cloud.json': json.dumps(data)}) - - -def populate_user_metadata(path, data): - populate_dir(path, {'user.data': data}) diff --git 
a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py deleted file mode 100644 index f9e968c5..00000000 --- a/tests/unittests/test_datasource/test_scaleway.py +++ /dev/null @@ -1,473 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import json - -import httpretty -import requests - -from cloudinit import helpers -from cloudinit import settings -from cloudinit import sources -from cloudinit.sources import DataSourceScaleway - -from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase - - -class DataResponses(object): - """ - Possible responses of the API endpoint - 169.254.42.42/user_data/cloud-init and - 169.254.42.42/vendor_data/cloud-init. - """ - - FAKE_USER_DATA = '#!/bin/bash\necho "user-data"' - - @staticmethod - def rate_limited(method, uri, headers): - return 429, headers, '' - - @staticmethod - def api_error(method, uri, headers): - return 500, headers, '' - - @classmethod - def get_ok(cls, method, uri, headers): - return 200, headers, cls.FAKE_USER_DATA - - @staticmethod - def empty(method, uri, headers): - """ - No user data for this server. - """ - return 404, headers, '' - - -class MetadataResponses(object): - """ - Possible responses of the metadata API. - """ - - FAKE_METADATA = { - 'id': '00000000-0000-0000-0000-000000000000', - 'hostname': 'scaleway.host', - 'tags': [ - "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", - ], - 'ssh_public_keys': [{ - 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', - 'fingerprint': '2048 06:ae:... login (RSA)' - }, { - 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - 'fingerprint': '2048 06:ff:... login2 (RSA)' - }] - } - - @classmethod - def get_ok(cls, method, uri, headers): - return 200, headers, json.dumps(cls.FAKE_METADATA) - - -class TestOnScaleway(CiTestCase): - - def setUp(self): - super(TestOnScaleway, self).setUp() - self.tmp = self.tmp_dir() - - def install_mocks(self, fake_dmi, fake_file_exists, fake_cmdline): - mock, faked = fake_dmi - mock.return_value = 'Scaleway' if faked else 'Whatever' - - mock, faked = fake_file_exists - mock.return_value = faked - - mock, faked = fake_cmdline - mock.return_value = \ - 'initrd=initrd showopts scaleway nousb' if faked \ - else 'BOOT_IMAGE=/vmlinuz-3.11.0-26-generic' - - @mock.patch('cloudinit.util.get_cmdline') - @mock.patch('os.path.exists') - @mock.patch('cloudinit.dmi.read_dmi_data') - def test_not_on_scaleway(self, m_read_dmi_data, m_file_exists, - m_get_cmdline): - self.install_mocks( - fake_dmi=(m_read_dmi_data, False), - fake_file_exists=(m_file_exists, False), - fake_cmdline=(m_get_cmdline, False) - ) - self.assertFalse(DataSourceScaleway.on_scaleway()) - - # When not on Scaleway, get_data() returns False. - datasource = DataSourceScaleway.DataSourceScaleway( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}) - ) - self.assertFalse(datasource.get_data()) - - @mock.patch('cloudinit.util.get_cmdline') - @mock.patch('os.path.exists') - @mock.patch('cloudinit.dmi.read_dmi_data') - def test_on_scaleway_dmi(self, m_read_dmi_data, m_file_exists, - m_get_cmdline): - """ - dmidecode returns "Scaleway". 
- """ - # dmidecode returns "Scaleway" - self.install_mocks( - fake_dmi=(m_read_dmi_data, True), - fake_file_exists=(m_file_exists, False), - fake_cmdline=(m_get_cmdline, False) - ) - self.assertTrue(DataSourceScaleway.on_scaleway()) - - @mock.patch('cloudinit.util.get_cmdline') - @mock.patch('os.path.exists') - @mock.patch('cloudinit.dmi.read_dmi_data') - def test_on_scaleway_var_run_scaleway(self, m_read_dmi_data, m_file_exists, - m_get_cmdline): - """ - /var/run/scaleway exists. - """ - self.install_mocks( - fake_dmi=(m_read_dmi_data, False), - fake_file_exists=(m_file_exists, True), - fake_cmdline=(m_get_cmdline, False) - ) - self.assertTrue(DataSourceScaleway.on_scaleway()) - - @mock.patch('cloudinit.util.get_cmdline') - @mock.patch('os.path.exists') - @mock.patch('cloudinit.dmi.read_dmi_data') - def test_on_scaleway_cmdline(self, m_read_dmi_data, m_file_exists, - m_get_cmdline): - """ - "scaleway" in /proc/cmdline. - """ - self.install_mocks( - fake_dmi=(m_read_dmi_data, False), - fake_file_exists=(m_file_exists, False), - fake_cmdline=(m_get_cmdline, True) - ) - self.assertTrue(DataSourceScaleway.on_scaleway()) - - -def get_source_address_adapter(*args, **kwargs): - """ - Scaleway user/vendor data API requires to be called with a privileged port. - - If the unittests are run as non-root, the user doesn't have the permission - to bind on ports below 1024. - - This function removes the bind on a privileged address, since anyway the - HTTP call is mocked by httpretty. - """ - kwargs.pop('source_address') - return requests.adapters.HTTPAdapter(*args, **kwargs) - - -class TestDataSourceScaleway(HttprettyTestCase): - - def setUp(self): - tmp = self.tmp_dir() - self.datasource = DataSourceScaleway.DataSourceScaleway( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': tmp}) - ) - super(TestDataSourceScaleway, self).setUp() - - self.metadata_url = \ - DataSourceScaleway.BUILTIN_DS_CONFIG['metadata_url'] - self.userdata_url = \ - DataSourceScaleway.BUILTIN_DS_CONFIG['userdata_url'] - self.vendordata_url = \ - DataSourceScaleway.BUILTIN_DS_CONFIG['vendordata_url'] - - self.add_patch('cloudinit.sources.DataSourceScaleway.on_scaleway', - '_m_on_scaleway', return_value=True) - self.add_patch( - 'cloudinit.sources.DataSourceScaleway.net.find_fallback_nic', - '_m_find_fallback_nic', return_value='scalewaynic0') - - @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') - @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', - get_source_address_adapter) - @mock.patch('cloudinit.util.get_cmdline') - @mock.patch('time.sleep', return_value=None) - def test_metadata_ok(self, sleep, m_get_cmdline, dhcpv4): - """ - get_data() returns metadata, user data and vendor data. 
- """ - m_get_cmdline.return_value = 'scaleway' - - # Make user data API return a valid response - httpretty.register_uri(httpretty.GET, self.metadata_url, - body=MetadataResponses.get_ok) - httpretty.register_uri(httpretty.GET, self.userdata_url, - body=DataResponses.get_ok) - httpretty.register_uri(httpretty.GET, self.vendordata_url, - body=DataResponses.get_ok) - self.datasource.get_data() - - self.assertEqual(self.datasource.get_instance_id(), - MetadataResponses.FAKE_METADATA['id']) - self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', - ].sort()) - self.assertEqual(self.datasource.get_hostname(), - MetadataResponses.FAKE_METADATA['hostname']) - self.assertEqual(self.datasource.get_userdata_raw(), - DataResponses.FAKE_USER_DATA) - self.assertEqual(self.datasource.get_vendordata_raw(), - DataResponses.FAKE_USER_DATA) - self.assertIsNone(self.datasource.availability_zone) - self.assertIsNone(self.datasource.region) - self.assertEqual(sleep.call_count, 0) - - def test_ssh_keys_empty(self): - """ - get_public_ssh_keys() should return empty list if no ssh key are - available - """ - self.datasource.metadata['tags'] = [] - self.datasource.metadata['ssh_public_keys'] = [] - self.assertEqual(self.datasource.get_public_ssh_keys(), []) - - def test_ssh_keys_only_tags(self): - """ - get_public_ssh_keys() should return list of keys available in tags - """ - self.datasource.metadata['tags'] = [ - "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", - "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABCCCCC", - ] - self.datasource.metadata['ssh_public_keys'] = [] - self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - ].sort()) - - def test_ssh_keys_only_conf(self): - """ - get_public_ssh_keys() should return list of keys available in - ssh_public_keys field - """ - self.datasource.metadata['tags'] = [] - self.datasource.metadata['ssh_public_keys'] = [{ - 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', - 'fingerprint': '2048 06:ae:... login (RSA)' - }, { - 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - 'fingerprint': '2048 06:ff:... login2 (RSA)' - }] - self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', - ].sort()) - - def test_ssh_keys_both(self): - """ - get_public_ssh_keys() should return a merge of keys available - in ssh_public_keys and tags - """ - self.datasource.metadata['tags'] = [ - "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", - ] - - self.datasource.metadata['ssh_public_keys'] = [{ - 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', - 'fingerprint': '2048 06:ae:... login (RSA)' - }, { - 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - 'fingerprint': '2048 06:ff:... 
login2 (RSA)' - }] - self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', - ].sort()) - - @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') - @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', - get_source_address_adapter) - @mock.patch('cloudinit.util.get_cmdline') - @mock.patch('time.sleep', return_value=None) - def test_metadata_404(self, sleep, m_get_cmdline, dhcpv4): - """ - get_data() returns metadata, but no user data nor vendor data. - """ - m_get_cmdline.return_value = 'scaleway' - - # Make user and vendor data APIs return HTTP/404, which means there is - # no user / vendor data for the server. - httpretty.register_uri(httpretty.GET, self.metadata_url, - body=MetadataResponses.get_ok) - httpretty.register_uri(httpretty.GET, self.userdata_url, - body=DataResponses.empty) - httpretty.register_uri(httpretty.GET, self.vendordata_url, - body=DataResponses.empty) - self.datasource.get_data() - self.assertIsNone(self.datasource.get_userdata_raw()) - self.assertIsNone(self.datasource.get_vendordata_raw()) - self.assertEqual(sleep.call_count, 0) - - @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') - @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', - get_source_address_adapter) - @mock.patch('cloudinit.util.get_cmdline') - @mock.patch('time.sleep', return_value=None) - def test_metadata_rate_limit(self, sleep, m_get_cmdline, dhcpv4): - """ - get_data() is rate limited two times by the metadata API when fetching - user data. - """ - m_get_cmdline.return_value = 'scaleway' - - httpretty.register_uri(httpretty.GET, self.metadata_url, - body=MetadataResponses.get_ok) - httpretty.register_uri(httpretty.GET, self.vendordata_url, - body=DataResponses.empty) - - httpretty.register_uri( - httpretty.GET, self.userdata_url, - responses=[ - httpretty.Response(body=DataResponses.rate_limited), - httpretty.Response(body=DataResponses.rate_limited), - httpretty.Response(body=DataResponses.get_ok), - ] - ) - self.datasource.get_data() - self.assertEqual(self.datasource.get_userdata_raw(), - DataResponses.FAKE_USER_DATA) - self.assertEqual(sleep.call_count, 2) - - @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') - @mock.patch('cloudinit.util.get_cmdline') - def test_network_config_ok(self, m_get_cmdline, fallback_nic): - """ - network_config will only generate IPv4 config if no ipv6 data is - available in the metadata - """ - m_get_cmdline.return_value = 'scaleway' - fallback_nic.return_value = 'ens2' - self.datasource.metadata['ipv6'] = None - - netcfg = self.datasource.network_config - resp = { - 'version': 1, - 'config': [ - { - 'type': 'physical', - 'name': 'ens2', - 'subnets': [{'type': 'dhcp4'}] - } - ] - } - self.assertEqual(netcfg, resp) - - @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') - @mock.patch('cloudinit.util.get_cmdline') - def test_network_config_ipv6_ok(self, m_get_cmdline, fallback_nic): - """ - network_config will only generate IPv4/v6 configs if ipv6 data is - available in the metadata - """ - m_get_cmdline.return_value = 'scaleway' - fallback_nic.return_value = 'ens2' - self.datasource.metadata['ipv6'] = { - 'address': '2000:abc:4444:9876::42:999', - 'gateway': '2000:abc:4444:9876::42:000', - 'netmask': '127', - } - - netcfg = self.datasource.network_config - resp = { - 'version': 1, - 'config': [ - { - 'type': 
'physical', - 'name': 'ens2', - 'subnets': [ - { - 'type': 'dhcp4' - }, - { - 'type': 'static', - 'address': '2000:abc:4444:9876::42:999', - 'gateway': '2000:abc:4444:9876::42:000', - 'netmask': '127', - } - ] - } - ] - } - self.assertEqual(netcfg, resp) - - @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') - @mock.patch('cloudinit.util.get_cmdline') - def test_network_config_existing(self, m_get_cmdline, fallback_nic): - """ - network_config() should return the same data if a network config - already exists - """ - m_get_cmdline.return_value = 'scaleway' - self.datasource._network_config = '0xdeadbeef' - - netcfg = self.datasource.network_config - self.assertEqual(netcfg, '0xdeadbeef') - - @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') - @mock.patch('cloudinit.util.get_cmdline') - def test_network_config_unset(self, m_get_cmdline, fallback_nic): - """ - _network_config will be set to sources.UNSET after the first boot. - Make sure it behave correctly. - """ - m_get_cmdline.return_value = 'scaleway' - fallback_nic.return_value = 'ens2' - self.datasource.metadata['ipv6'] = None - self.datasource._network_config = sources.UNSET - - resp = { - 'version': 1, - 'config': [ - { - 'type': 'physical', - 'name': 'ens2', - 'subnets': [{'type': 'dhcp4'}] - } - ] - } - - netcfg = self.datasource.network_config - self.assertEqual(netcfg, resp) - - @mock.patch('cloudinit.sources.DataSourceScaleway.LOG.warning') - @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') - @mock.patch('cloudinit.util.get_cmdline') - def test_network_config_cached_none(self, m_get_cmdline, fallback_nic, - logwarning): - """ - network_config() should return config data if cached data is None - rather than sources.UNSET - """ - m_get_cmdline.return_value = 'scaleway' - fallback_nic.return_value = 'ens2' - self.datasource.metadata['ipv6'] = None - self.datasource._network_config = None - - resp = { - 'version': 1, - 'config': [ - { - 'type': 'physical', - 'name': 'ens2', - 'subnets': [{'type': 'dhcp4'}] - } - ] - } - - netcfg = self.datasource.network_config - self.assertEqual(netcfg, resp) - logwarning.assert_called_with('Found None as cached _network_config. ' - 'Resetting to %s', sources.UNSET) diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py deleted file mode 100644 index 9c499672..00000000 --- a/tests/unittests/test_datasource/test_smartos.py +++ /dev/null @@ -1,1163 +0,0 @@ -# Copyright (C) 2013 Canonical Ltd. -# Copyright 2019 Joyent, Inc. -# -# Author: Ben Howard -# -# This file is part of cloud-init. See LICENSE file for license information. - -'''This is a testcase for the SmartOS datasource. - -It replicates a serial console and acts like the SmartOS console does in -order to validate return responses. 
- -''' - -from binascii import crc32 -import json -import multiprocessing -import os -import os.path -import re -import signal -import stat -import unittest -import uuid - -from cloudinit import serial -from cloudinit.sources import DataSourceSmartOS -from cloudinit.sources.DataSourceSmartOS import ( - convert_smartos_network_data as convert_net, - SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ, - identify_file) -from cloudinit.event import EventScope, EventType - -from cloudinit import helpers as c_helpers -from cloudinit.util import (b64e, write_file) -from cloudinit.subp import (subp, ProcessExecutionError, which) - -from cloudinit.tests.helpers import ( - CiTestCase, mock, FilesystemMockingTestCase, skipIf) - - -try: - import serial as _pyserial - assert _pyserial # avoid pyflakes error F401: import unused - HAS_PYSERIAL = True -except ImportError: - HAS_PYSERIAL = False - -DSMOS = 'cloudinit.sources.DataSourceSmartOS' -SDC_NICS = json.loads(""" -[ - { - "nic_tag": "external", - "primary": true, - "mtu": 1500, - "model": "virtio", - "gateway": "8.12.42.1", - "netmask": "255.255.255.0", - "ip": "8.12.42.102", - "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", - "gateways": [ - "8.12.42.1" - ], - "vlan_id": 324, - "mac": "90:b8:d0:f5:e4:f5", - "interface": "net0", - "ips": [ - "8.12.42.102/24" - ] - }, - { - "nic_tag": "sdc_overlay/16187209", - "gateway": "192.168.128.1", - "model": "virtio", - "mac": "90:b8:d0:a5:ff:cd", - "netmask": "255.255.252.0", - "ip": "192.168.128.93", - "network_uuid": "4cad71da-09bc-452b-986d-03562a03a0a9", - "gateways": [ - "192.168.128.1" - ], - "vlan_id": 2, - "mtu": 8500, - "interface": "net1", - "ips": [ - "192.168.128.93/22" - ] - } -] -""") - - -SDC_NICS_ALT = json.loads(""" -[ - { - "interface": "net0", - "mac": "90:b8:d0:ae:64:51", - "vlan_id": 324, - "nic_tag": "external", - "gateway": "8.12.42.1", - "gateways": [ - "8.12.42.1" - ], - "netmask": "255.255.255.0", - "ip": "8.12.42.51", - "ips": [ - "8.12.42.51/24" - ], - "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", - "model": "virtio", - "mtu": 1500, - "primary": true - }, - { - "interface": "net1", - "mac": "90:b8:d0:bd:4f:9c", - "vlan_id": 600, - "nic_tag": "internal", - "netmask": "255.255.255.0", - "ip": "10.210.1.217", - "ips": [ - "10.210.1.217/24" - ], - "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", - "model": "virtio", - "mtu": 1500 - } -] -""") - -SDC_NICS_DHCP = json.loads(""" -[ - { - "interface": "net0", - "mac": "90:b8:d0:ae:64:51", - "vlan_id": 324, - "nic_tag": "external", - "gateway": "8.12.42.1", - "gateways": [ - "8.12.42.1" - ], - "netmask": "255.255.255.0", - "ip": "8.12.42.51", - "ips": [ - "8.12.42.51/24" - ], - "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", - "model": "virtio", - "mtu": 1500, - "primary": true - }, - { - "interface": "net1", - "mac": "90:b8:d0:bd:4f:9c", - "vlan_id": 600, - "nic_tag": "internal", - "netmask": "255.255.255.0", - "ip": "10.210.1.217", - "ips": [ - "dhcp" - ], - "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", - "model": "virtio", - "mtu": 1500 - } -] -""") - -SDC_NICS_MIP = json.loads(""" -[ - { - "interface": "net0", - "mac": "90:b8:d0:ae:64:51", - "vlan_id": 324, - "nic_tag": "external", - "gateway": "8.12.42.1", - "gateways": [ - "8.12.42.1" - ], - "netmask": "255.255.255.0", - "ip": "8.12.42.51", - "ips": [ - "8.12.42.51/24", - "8.12.42.52/24" - ], - "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", - "model": "virtio", - "mtu": 1500, - "primary": true - }, - { - "interface": "net1", - 
"mac": "90:b8:d0:bd:4f:9c", - "vlan_id": 600, - "nic_tag": "internal", - "netmask": "255.255.255.0", - "ip": "10.210.1.217", - "ips": [ - "10.210.1.217/24", - "10.210.1.151/24" - ], - "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", - "model": "virtio", - "mtu": 1500 - } -] -""") - -SDC_NICS_MIP_IPV6 = json.loads(""" -[ - { - "interface": "net0", - "mac": "90:b8:d0:ae:64:51", - "vlan_id": 324, - "nic_tag": "external", - "gateway": "8.12.42.1", - "gateways": [ - "8.12.42.1" - ], - "netmask": "255.255.255.0", - "ip": "8.12.42.51", - "ips": [ - "2001:4800:78ff:1b:be76:4eff:fe06:96b3/64", - "8.12.42.51/24" - ], - "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", - "model": "virtio", - "mtu": 1500, - "primary": true - }, - { - "interface": "net1", - "mac": "90:b8:d0:bd:4f:9c", - "vlan_id": 600, - "nic_tag": "internal", - "netmask": "255.255.255.0", - "ip": "10.210.1.217", - "ips": [ - "10.210.1.217/24" - ], - "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", - "model": "virtio", - "mtu": 1500 - } -] -""") - -SDC_NICS_IPV4_IPV6 = json.loads(""" -[ - { - "interface": "net0", - "mac": "90:b8:d0:ae:64:51", - "vlan_id": 324, - "nic_tag": "external", - "gateway": "8.12.42.1", - "gateways": ["8.12.42.1", "2001::1", "2001::2"], - "netmask": "255.255.255.0", - "ip": "8.12.42.51", - "ips": ["2001::10/64", "8.12.42.51/24", "2001::11/64", - "8.12.42.52/32"], - "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", - "model": "virtio", - "mtu": 1500, - "primary": true - }, - { - "interface": "net1", - "mac": "90:b8:d0:bd:4f:9c", - "vlan_id": 600, - "nic_tag": "internal", - "netmask": "255.255.255.0", - "ip": "10.210.1.217", - "ips": ["10.210.1.217/24"], - "gateways": ["10.210.1.210"], - "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", - "model": "virtio", - "mtu": 1500 - } -] -""") - -SDC_NICS_SINGLE_GATEWAY = json.loads(""" -[ - { - "interface":"net0", - "mac":"90:b8:d0:d8:82:b4", - "vlan_id":324, - "nic_tag":"external", - "gateway":"8.12.42.1", - "gateways":["8.12.42.1"], - "netmask":"255.255.255.0", - "ip":"8.12.42.26", - "ips":["8.12.42.26/24"], - "network_uuid":"992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", - "model":"virtio", - "mtu":1500, - "primary":true - }, - { - "interface":"net1", - "mac":"90:b8:d0:0a:51:31", - "vlan_id":600, - "nic_tag":"internal", - "netmask":"255.255.255.0", - "ip":"10.210.1.27", - "ips":["10.210.1.27/24"], - "network_uuid":"98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", - "model":"virtio", - "mtu":1500 - } -] -""") - - -MOCK_RETURNS = { - 'hostname': 'test-host', - 'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname', - 'disable_iptables_flag': None, - 'enable_motd_sys_info': None, - 'test-var1': 'some data', - 'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']), - 'sdc:datacenter_name': 'somewhere2', - 'sdc:operator-script': '\n'.join(['bin/true', '']), - 'sdc:uuid': str(uuid.uuid4()), - 'sdc:vendor-data': '\n'.join(['VENDOR_DATA', '']), - 'user-data': '\n'.join(['something', '']), - 'user-script': '\n'.join(['/bin/true', '']), - 'sdc:nics': json.dumps(SDC_NICS), -} - -DMI_DATA_RETURN = 'smartdc' - -# Useful for calculating the length of a frame body. A SUCCESS body will be -# followed by more characters or be one character less if SUCCESS with no -# payload. See Section 4.3 of https://eng.joyent.com/mdata/protocol.html. 
-SUCCESS_LEN = len('0123abcd SUCCESS ') -NOTFOUND_LEN = len('0123abcd NOTFOUND') - - -class PsuedoJoyentClient(object): - def __init__(self, data=None): - if data is None: - data = MOCK_RETURNS.copy() - self.data = data - self._is_open = False - return - - def get(self, key, default=None, strip=False): - if key in self.data: - r = self.data[key] - if strip: - r = r.strip() - else: - r = default - return r - - def get_json(self, key, default=None): - result = self.get(key, default=default) - if result is None: - return default - return json.loads(result) - - def exists(self): - return True - - def open_transport(self): - assert(not self._is_open) - self._is_open = True - - def close_transport(self): - assert(self._is_open) - self._is_open = False - - -class TestSmartOSDataSource(FilesystemMockingTestCase): - jmc_cfact = None - get_smartos_environ = None - - def setUp(self): - super(TestSmartOSDataSource, self).setUp() - - self.add_patch(DSMOS + ".get_smartos_environ", "get_smartos_environ") - self.add_patch(DSMOS + ".jmc_client_factory", "jmc_cfact") - self.legacy_user_d = self.tmp_path('legacy_user_tmp') - os.mkdir(self.legacy_user_d) - self.add_patch(DSMOS + ".LEGACY_USER_D", "m_legacy_user_d", - autospec=False, new=self.legacy_user_d) - self.add_patch(DSMOS + ".identify_file", "m_identify_file", - return_value="text/plain") - - def _get_ds(self, mockdata=None, mode=DataSourceSmartOS.SMARTOS_ENV_KVM, - sys_cfg=None, ds_cfg=None): - self.jmc_cfact.return_value = PsuedoJoyentClient(mockdata) - self.get_smartos_environ.return_value = mode - - tmpd = self.tmp_dir() - dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd), - 'run_dir': self.tmp_path('run_dir')} - for d in dirs.values(): - os.mkdir(d) - paths = c_helpers.Paths(dirs) - - if sys_cfg is None: - sys_cfg = {} - - if ds_cfg is not None: - sys_cfg['datasource'] = sys_cfg.get('datasource', {}) - sys_cfg['datasource']['SmartOS'] = ds_cfg - - return DataSourceSmartOS.DataSourceSmartOS( - sys_cfg, distro=None, paths=paths) - - def test_no_base64(self): - ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True} - dsrc = self._get_ds(ds_cfg=ds_cfg) - ret = dsrc.get_data() - self.assertTrue(ret) - - def test_uuid(self): - dsrc = self._get_ds(mockdata=MOCK_RETURNS) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['sdc:uuid'], - dsrc.metadata['instance-id']) - - def test_platform_info(self): - """All platform-related attributes are properly set.""" - dsrc = self._get_ds(mockdata=MOCK_RETURNS) - self.assertEqual('joyent', dsrc.cloud_name) - self.assertEqual('joyent', dsrc.platform_type) - self.assertEqual('serial (/dev/ttyS1)', dsrc.subplatform) - - def test_root_keys(self): - dsrc = self._get_ds(mockdata=MOCK_RETURNS) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['root_authorized_keys'], - dsrc.metadata['public-keys']) - - def test_hostname_b64(self): - dsrc = self._get_ds(mockdata=MOCK_RETURNS) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['hostname'], - dsrc.metadata['local-hostname']) - - def test_hostname(self): - dsrc = self._get_ds(mockdata=MOCK_RETURNS) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['hostname'], - dsrc.metadata['local-hostname']) - - def test_hostname_if_no_sdc_hostname(self): - my_returns = MOCK_RETURNS.copy() - my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname'] - dsrc = self._get_ds(mockdata=my_returns) - ret = dsrc.get_data() - self.assertTrue(ret) - 
self.assertEqual(my_returns['hostname'], - dsrc.metadata['local-hostname']) - - def test_sdc_hostname_if_no_hostname(self): - my_returns = MOCK_RETURNS.copy() - my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname'] - del my_returns['hostname'] - dsrc = self._get_ds(mockdata=my_returns) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(my_returns['sdc:hostname'], - dsrc.metadata['local-hostname']) - - def test_sdc_uuid_if_no_hostname_or_sdc_hostname(self): - my_returns = MOCK_RETURNS.copy() - del my_returns['hostname'] - dsrc = self._get_ds(mockdata=my_returns) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(my_returns['sdc:uuid'], - dsrc.metadata['local-hostname']) - - def test_userdata(self): - dsrc = self._get_ds(mockdata=MOCK_RETURNS) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['user-data'], - dsrc.metadata['legacy-user-data']) - self.assertEqual(MOCK_RETURNS['cloud-init:user-data'], - dsrc.userdata_raw) - - def test_sdc_nics(self): - dsrc = self._get_ds(mockdata=MOCK_RETURNS) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(json.loads(MOCK_RETURNS['sdc:nics']), - dsrc.metadata['network-data']) - - def test_sdc_scripts(self): - dsrc = self._get_ds(mockdata=MOCK_RETURNS) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['user-script'], - dsrc.metadata['user-script']) - - legacy_script_f = "%s/user-script" % self.legacy_user_d - print("legacy_script_f=%s" % legacy_script_f) - self.assertTrue(os.path.exists(legacy_script_f)) - self.assertTrue(os.path.islink(legacy_script_f)) - user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:] - self.assertEqual(user_script_perm, '700') - - def test_scripts_shebanged(self): - dsrc = self._get_ds(mockdata=MOCK_RETURNS) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['user-script'], - dsrc.metadata['user-script']) - - legacy_script_f = "%s/user-script" % self.legacy_user_d - self.assertTrue(os.path.exists(legacy_script_f)) - self.assertTrue(os.path.islink(legacy_script_f)) - shebang = None - with open(legacy_script_f, 'r') as f: - shebang = f.readlines()[0].strip() - self.assertEqual(shebang, "#!/bin/bash") - user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:] - self.assertEqual(user_script_perm, '700') - - def test_scripts_shebang_not_added(self): - """ - Test that the SmartOS requirement that plain text scripts - are executable. This test makes sure that plain texts scripts - with out file magic have it added appropriately by cloud-init. - """ - - my_returns = MOCK_RETURNS.copy() - my_returns['user-script'] = '\n'.join(['#!/usr/bin/perl', - 'print("hi")', '']) - - dsrc = self._get_ds(mockdata=my_returns) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(my_returns['user-script'], - dsrc.metadata['user-script']) - - legacy_script_f = "%s/user-script" % self.legacy_user_d - self.assertTrue(os.path.exists(legacy_script_f)) - self.assertTrue(os.path.islink(legacy_script_f)) - shebang = None - with open(legacy_script_f, 'r') as f: - shebang = f.readlines()[0].strip() - self.assertEqual(shebang, "#!/usr/bin/perl") - - def test_userdata_removed(self): - """ - User-data in the SmartOS world is supposed to be written to a file - each and every boot. This tests to make sure that in the event the - legacy user-data is removed, the existing user-data is backed-up - and there is no /var/db/user-data left. 
- """ - - user_data_f = "%s/mdata-user-data" % self.legacy_user_d - with open(user_data_f, 'w') as f: - f.write("PREVIOUS") - - my_returns = MOCK_RETURNS.copy() - del my_returns['user-data'] - - dsrc = self._get_ds(mockdata=my_returns) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertFalse(dsrc.metadata.get('legacy-user-data')) - - found_new = False - for root, _dirs, files in os.walk(self.legacy_user_d): - for name in files: - name_f = os.path.join(root, name) - permissions = oct(os.stat(name_f)[stat.ST_MODE])[-3:] - if re.match(r'.*\/mdata-user-data$', name_f): - found_new = True - print(name_f) - self.assertEqual(permissions, '400') - - self.assertFalse(found_new) - - def test_vendor_data_not_default(self): - dsrc = self._get_ds(mockdata=MOCK_RETURNS) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['sdc:vendor-data'], - dsrc.metadata['vendor-data']) - - def test_default_vendor_data(self): - my_returns = MOCK_RETURNS.copy() - def_op_script = my_returns['sdc:vendor-data'] - del my_returns['sdc:vendor-data'] - dsrc = self._get_ds(mockdata=my_returns) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertNotEqual(def_op_script, dsrc.metadata['vendor-data']) - - # we expect default vendor-data is a boothook - self.assertTrue(dsrc.vendordata_raw.startswith("#cloud-boothook")) - - def test_disable_iptables_flag(self): - dsrc = self._get_ds(mockdata=MOCK_RETURNS) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['disable_iptables_flag'], - dsrc.metadata['iptables_disable']) - - def test_motd_sys_info(self): - dsrc = self._get_ds(mockdata=MOCK_RETURNS) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['enable_motd_sys_info'], - dsrc.metadata['motd_sys_info']) - - def test_default_ephemeral(self): - # Test to make sure that the builtin config has the ephemeral - # configuration. 
- dsrc = self._get_ds()
- cfg = dsrc.get_config_obj()
-
- ret = dsrc.get_data()
- self.assertTrue(ret)
-
- assert 'disk_setup' in cfg
- assert 'fs_setup' in cfg
- self.assertIsInstance(cfg['disk_setup'], dict)
- self.assertIsInstance(cfg['fs_setup'], list)
-
- def test_override_disk_aliases(self):
- # Test to make sure that the built-in DS is overridden
- builtin = DataSourceSmartOS.BUILTIN_DS_CONFIG
-
- mydscfg = {'disk_aliases': {'FOO': '/dev/bar'}}
-
- # expect that these values are in builtin, or this is pointless
- for k in mydscfg:
- self.assertIn(k, builtin)
-
- dsrc = self._get_ds(ds_cfg=mydscfg)
- ret = dsrc.get_data()
- self.assertTrue(ret)
-
- self.assertEqual(mydscfg['disk_aliases']['FOO'],
- dsrc.ds_cfg['disk_aliases']['FOO'])
-
- self.assertEqual(dsrc.device_name_to_device('FOO'),
- mydscfg['disk_aliases']['FOO'])
-
- def test_reconfig_network_on_boot(self):
- # Test to ensure that network is configured from metadata on each boot
- dsrc = self._get_ds(mockdata=MOCK_RETURNS)
- self.assertSetEqual(
- {EventType.BOOT_NEW_INSTANCE,
- EventType.BOOT,
- EventType.BOOT_LEGACY},
- dsrc.default_update_events[EventScope.NETWORK]
- )
-
-
-class TestIdentifyFile(CiTestCase):
- """Test the 'identify_file' utility."""
- @skipIf(not which("file"), "command 'file' not available.")
- def test_file_happy_path(self):
- """Test file is available and functional on plain text."""
- fname = self.tmp_path("myfile")
- write_file(fname, "plain text content here\n")
- with self.allow_subp(["file"]):
- self.assertEqual("text/plain", identify_file(fname))
-
- @mock.patch(DSMOS + ".subp.subp")
- def test_returns_none_on_error(self, m_subp):
- """On 'file' execution error, None should be returned."""
- m_subp.side_effect = ProcessExecutionError("FILE_FAILED", exit_code=99)
- fname = self.tmp_path("myfile")
- write_file(fname, "plain text content here\n")
- self.assertEqual(None, identify_file(fname))
- self.assertEqual(
- [mock.call(["file", "--brief", "--mime-type", fname])],
- m_subp.call_args_list)
-
-
-class ShortReader(object):
- """Implements a 'read' interface for the bytes provided,
- much like io.BytesIO, but the configured 'endbyte' acts as EOF.
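- For example, ShortReader(b'ab\0cd').read(16) returns b'ab' on the first
- call, consuming the b'\0' endbyte along the way.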
- When it is reached a short will be returned.""" - def __init__(self, initial_bytes, endbyte=b'\0'): - self.data = initial_bytes - self.index = 0 - self.len = len(self.data) - self.endbyte = endbyte - - @property - def emptied(self): - return self.index >= self.len - - def read(self, size=-1): - """Read size bytes but not past a null.""" - if size == 0 or self.index >= self.len: - return b'' - - rsize = size - if size < 0 or size + self.index > self.len: - rsize = self.len - self.index - - next_null = self.data.find(self.endbyte, self.index, rsize) - if next_null >= 0: - rsize = next_null - self.index + 1 - i = self.index - self.index += rsize - ret = self.data[i:i + rsize] - if len(ret) and ret[-1:] == self.endbyte: - ret = ret[:-1] - return ret - - -class TestJoyentMetadataClient(FilesystemMockingTestCase): - - invalid = b'invalid command\n' - failure = b'FAILURE\n' - v2_ok = b'V2_OK\n' - - def setUp(self): - super(TestJoyentMetadataClient, self).setUp() - - self.serial = mock.MagicMock(spec=serial.Serial) - self.request_id = 0xabcdef12 - self.metadata_value = 'value' - self.response_parts = { - 'command': 'SUCCESS', - 'crc': 'b5a9ff00', - 'length': SUCCESS_LEN + len(b64e(self.metadata_value)), - 'payload': b64e(self.metadata_value), - 'request_id': '{0:08x}'.format(self.request_id), - } - - def make_response(): - payloadstr = '' - if 'payload' in self.response_parts: - payloadstr = ' {0}'.format(self.response_parts['payload']) - return ('V2 {length} {crc} {request_id} ' - '{command}{payloadstr}\n'.format( - payloadstr=payloadstr, - **self.response_parts).encode('ascii')) - - self.metasource_data = None - - def read_response(length): - if not self.metasource_data: - self.metasource_data = make_response() - self.metasource_data_len = len(self.metasource_data) - resp = self.metasource_data[:length] - self.metasource_data = self.metasource_data[length:] - return resp - - self.serial.read.side_effect = read_response - self.patched_funcs.enter_context( - mock.patch('cloudinit.sources.DataSourceSmartOS.random.randint', - mock.Mock(return_value=self.request_id))) - - def _get_client(self): - return DataSourceSmartOS.JoyentMetadataClient( - fp=self.serial, smartos_type=DataSourceSmartOS.SMARTOS_ENV_KVM) - - def _get_serial_client(self): - self.serial.timeout = 1 - return DataSourceSmartOS.JoyentMetadataSerialClient(None, - fp=self.serial) - - def assertEndsWith(self, haystack, prefix): - self.assertTrue(haystack.endswith(prefix), - "{0} does not end with '{1}'".format( - repr(haystack), prefix)) - - def assertStartsWith(self, haystack, prefix): - self.assertTrue(haystack.startswith(prefix), - "{0} does not start with '{1}'".format( - repr(haystack), prefix)) - - def assertNoMoreSideEffects(self, obj): - self.assertRaises(StopIteration, obj) - - def test_get_metadata_writes_a_single_line(self): - client = self._get_client() - client.get('some_key') - self.assertEqual(1, self.serial.write.call_count) - written_line = self.serial.write.call_args[0][0] - self.assertEndsWith(written_line.decode('ascii'), - b'\n'.decode('ascii')) - self.assertEqual(1, written_line.count(b'\n')) - - def _get_written_line(self, key='some_key'): - client = self._get_client() - client.get(key) - return self.serial.write.call_args[0][0] - - def test_get_metadata_writes_bytes(self): - self.assertIsInstance(self._get_written_line(), bytes) - - def test_get_metadata_line_starts_with_v2(self): - foo = self._get_written_line() - self.assertStartsWith(foo.decode('ascii'), b'V2'.decode('ascii')) - - def 
test_get_metadata_uses_get_command(self): - parts = self._get_written_line().decode('ascii').strip().split(' ') - self.assertEqual('GET', parts[4]) - - def test_get_metadata_base64_encodes_argument(self): - key = 'my_key' - parts = self._get_written_line(key).decode('ascii').strip().split(' ') - self.assertEqual(b64e(key), parts[5]) - - def test_get_metadata_calculates_length_correctly(self): - parts = self._get_written_line().decode('ascii').strip().split(' ') - expected_length = len(' '.join(parts[3:])) - self.assertEqual(expected_length, int(parts[1])) - - def test_get_metadata_uses_appropriate_request_id(self): - parts = self._get_written_line().decode('ascii').strip().split(' ') - request_id = parts[3] - self.assertEqual(8, len(request_id)) - self.assertEqual(request_id, request_id.lower()) - - def test_get_metadata_uses_random_number_for_request_id(self): - line = self._get_written_line() - request_id = line.decode('ascii').strip().split(' ')[3] - self.assertEqual('{0:08x}'.format(self.request_id), request_id) - - def test_get_metadata_checksums_correctly(self): - parts = self._get_written_line().decode('ascii').strip().split(' ') - expected_checksum = '{0:08x}'.format( - crc32(' '.join(parts[3:]).encode('utf-8')) & 0xffffffff) - checksum = parts[2] - self.assertEqual(expected_checksum, checksum) - - def test_get_metadata_reads_a_line(self): - client = self._get_client() - client.get('some_key') - self.assertEqual(self.metasource_data_len, self.serial.read.call_count) - - def test_get_metadata_returns_valid_value(self): - client = self._get_client() - value = client.get('some_key') - self.assertEqual(self.metadata_value, value) - - def test_get_metadata_throws_exception_for_incorrect_length(self): - self.response_parts['length'] = 0 - client = self._get_client() - self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, - client.get, 'some_key') - - def test_get_metadata_throws_exception_for_incorrect_crc(self): - self.response_parts['crc'] = 'deadbeef' - client = self._get_client() - self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, - client.get, 'some_key') - - def test_get_metadata_throws_exception_for_request_id_mismatch(self): - self.response_parts['request_id'] = 'deadbeef' - client = self._get_client() - client._checksum = lambda _: self.response_parts['crc'] - self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, - client.get, 'some_key') - - def test_get_metadata_returns_None_if_value_not_found(self): - self.response_parts['payload'] = '' - self.response_parts['command'] = 'NOTFOUND' - self.response_parts['length'] = NOTFOUND_LEN - client = self._get_client() - client._checksum = lambda _: self.response_parts['crc'] - self.assertIsNone(client.get('some_key')) - - def test_negotiate(self): - client = self._get_client() - reader = ShortReader(self.v2_ok) - client.fp.read.side_effect = reader.read - client._negotiate() - self.assertTrue(reader.emptied) - - def test_negotiate_short_response(self): - client = self._get_client() - # chopped '\n' from v2_ok. 
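- # The trailing b'\0' makes ShortReader return 'V2_OK' without its
- # newline terminator; the client never sees a complete line, which
- # should surface as the timeout exception asserted below.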
- reader = ShortReader(self.v2_ok[:-1] + b'\0') - client.fp.read.side_effect = reader.read - self.assertRaises(DataSourceSmartOS.JoyentMetadataTimeoutException, - client._negotiate) - self.assertTrue(reader.emptied) - - def test_negotiate_bad_response(self): - client = self._get_client() - reader = ShortReader(b'garbage\n' + self.v2_ok) - client.fp.read.side_effect = reader.read - self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, - client._negotiate) - self.assertEqual(self.v2_ok, client.fp.read()) - - def test_serial_open_transport(self): - client = self._get_serial_client() - reader = ShortReader(b'garbage\0' + self.invalid + self.v2_ok) - client.fp.read.side_effect = reader.read - client.open_transport() - self.assertTrue(reader.emptied) - - def test_flush_failure(self): - client = self._get_serial_client() - reader = ShortReader(b'garbage' + b'\0' + self.failure + - self.invalid + self.v2_ok) - client.fp.read.side_effect = reader.read - client.open_transport() - self.assertTrue(reader.emptied) - - def test_flush_many_timeouts(self): - client = self._get_serial_client() - reader = ShortReader(b'\0' * 100 + self.invalid + self.v2_ok) - client.fp.read.side_effect = reader.read - client.open_transport() - self.assertTrue(reader.emptied) - - def test_list_metadata_returns_list(self): - parts = ['foo', 'bar'] - value = b64e('\n'.join(parts)) - self.response_parts['payload'] = value - self.response_parts['crc'] = '40873553' - self.response_parts['length'] = SUCCESS_LEN + len(value) - client = self._get_client() - self.assertEqual(client.list(), parts) - - def test_list_metadata_returns_empty_list_if_no_customer_metadata(self): - del self.response_parts['payload'] - self.response_parts['length'] = SUCCESS_LEN - 1 - self.response_parts['crc'] = '14e563ba' - client = self._get_client() - self.assertEqual(client.list(), []) - - -class TestNetworkConversion(CiTestCase): - def test_convert_simple(self): - expected = { - 'version': 1, - 'config': [ - {'name': 'net0', 'type': 'physical', - 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', - 'address': '8.12.42.102/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:f5:e4:f5'}, - {'name': 'net1', 'type': 'physical', - 'subnets': [{'type': 'static', - 'address': '192.168.128.93/22'}], - 'mtu': 8500, 'mac_address': '90:b8:d0:a5:ff:cd'}]} - found = convert_net(SDC_NICS) - self.assertEqual(expected, found) - - def test_convert_simple_alt(self): - expected = { - 'version': 1, - 'config': [ - {'name': 'net0', 'type': 'physical', - 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', - 'address': '8.12.42.51/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, - {'name': 'net1', 'type': 'physical', - 'subnets': [{'type': 'static', - 'address': '10.210.1.217/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} - found = convert_net(SDC_NICS_ALT) - self.assertEqual(expected, found) - - def test_convert_simple_dhcp(self): - expected = { - 'version': 1, - 'config': [ - {'name': 'net0', 'type': 'physical', - 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', - 'address': '8.12.42.51/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, - {'name': 'net1', 'type': 'physical', - 'subnets': [{'type': 'dhcp4'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} - found = convert_net(SDC_NICS_DHCP) - self.assertEqual(expected, found) - - def test_convert_simple_multi_ip(self): - expected = { - 'version': 1, - 'config': [ - {'name': 'net0', 'type': 'physical', - 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', - 
'address': '8.12.42.51/24'}, - {'type': 'static', - 'address': '8.12.42.52/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, - {'name': 'net1', 'type': 'physical', - 'subnets': [{'type': 'static', - 'address': '10.210.1.217/24'}, - {'type': 'static', - 'address': '10.210.1.151/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} - found = convert_net(SDC_NICS_MIP) - self.assertEqual(expected, found) - - def test_convert_with_dns(self): - expected = { - 'version': 1, - 'config': [ - {'name': 'net0', 'type': 'physical', - 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', - 'address': '8.12.42.51/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, - {'name': 'net1', 'type': 'physical', - 'subnets': [{'type': 'dhcp4'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}, - {'type': 'nameserver', - 'address': ['8.8.8.8', '8.8.8.1'], 'search': ["local"]}]} - found = convert_net( - network_data=SDC_NICS_DHCP, dns_servers=['8.8.8.8', '8.8.8.1'], - dns_domain="local") - self.assertEqual(expected, found) - - def test_convert_simple_multi_ipv6(self): - expected = { - 'version': 1, - 'config': [ - {'name': 'net0', 'type': 'physical', - 'subnets': [{'type': 'static', 'address': - '2001:4800:78ff:1b:be76:4eff:fe06:96b3/64'}, - {'type': 'static', 'gateway': '8.12.42.1', - 'address': '8.12.42.51/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, - {'name': 'net1', 'type': 'physical', - 'subnets': [{'type': 'static', - 'address': '10.210.1.217/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} - found = convert_net(SDC_NICS_MIP_IPV6) - self.assertEqual(expected, found) - - def test_convert_simple_both_ipv4_ipv6(self): - expected = { - 'version': 1, - 'config': [ - {'mac_address': '90:b8:d0:ae:64:51', 'mtu': 1500, - 'name': 'net0', 'type': 'physical', - 'subnets': [{'address': '2001::10/64', 'gateway': '2001::1', - 'type': 'static'}, - {'address': '8.12.42.51/24', - 'gateway': '8.12.42.1', - 'type': 'static'}, - {'address': '2001::11/64', 'type': 'static'}, - {'address': '8.12.42.52/32', 'type': 'static'}]}, - {'mac_address': '90:b8:d0:bd:4f:9c', 'mtu': 1500, - 'name': 'net1', 'type': 'physical', - 'subnets': [{'address': '10.210.1.217/24', - 'type': 'static'}]}]} - found = convert_net(SDC_NICS_IPV4_IPV6) - self.assertEqual(expected, found) - - def test_gateways_not_on_all_nics(self): - expected = { - 'version': 1, - 'config': [ - {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500, - 'name': 'net0', 'type': 'physical', - 'subnets': [{'address': '8.12.42.26/24', - 'gateway': '8.12.42.1', 'type': 'static'}]}, - {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500, - 'name': 'net1', 'type': 'physical', - 'subnets': [{'address': '10.210.1.27/24', - 'type': 'static'}]}]} - found = convert_net(SDC_NICS_SINGLE_GATEWAY) - self.assertEqual(expected, found) - - def test_routes_on_all_nics(self): - routes = [ - {'linklocal': False, 'dst': '3.0.0.0/8', 'gateway': '8.12.42.3'}, - {'linklocal': False, 'dst': '4.0.0.0/8', 'gateway': '10.210.1.4'}] - expected = { - 'version': 1, - 'config': [ - {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500, - 'name': 'net0', 'type': 'physical', - 'subnets': [{'address': '8.12.42.26/24', - 'gateway': '8.12.42.1', 'type': 'static', - 'routes': [{'network': '3.0.0.0/8', - 'gateway': '8.12.42.3'}, - {'network': '4.0.0.0/8', - 'gateway': '10.210.1.4'}]}]}, - {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500, - 'name': 'net1', 'type': 'physical', - 'subnets': [{'address': '10.210.1.27/24', 'type': 'static', - 'routes': [{'network': '3.0.0.0/8', - 
'gateway': '8.12.42.3'},
-                                         {'network': '4.0.0.0/8',
-                                          'gateway': '10.210.1.4'}]}]}]}
-        found = convert_net(SDC_NICS_SINGLE_GATEWAY, routes=routes)
-        self.maxDiff = None
-        self.assertEqual(expected, found)
-
-
-@unittest.skipUnless(get_smartos_environ() == SMARTOS_ENV_KVM,
-                     "Only supported on KVM and bhyve guests under SmartOS")
-@unittest.skipUnless(os.access(SERIAL_DEVICE, os.W_OK),
-                     "Requires write access to " + SERIAL_DEVICE)
-@unittest.skipUnless(HAS_PYSERIAL is True, "pyserial not available")
-class TestSerialConcurrency(CiTestCase):
-    """
-    This class tests locking on an actual serial port, and as such can only
-    be run in a kvm or bhyve guest running on a SmartOS host. A test run on
-    a metadata socket will not be valid because a metadata socket ensures
-    there is only one session over a connection. In contrast, in the
-    absence of proper locking multiple processes opening the same serial
-    port can corrupt each other's exchanges with the metadata server.
-
-    This takes on the order of 2 to 3 minutes to run.
-    """
-    allowed_subp = ['mdata-get']
-
-    def setUp(self):
-        self.mdata_proc = multiprocessing.Process(target=self.start_mdata_loop)
-        self.mdata_proc.start()
-        super(TestSerialConcurrency, self).setUp()
-
-    def tearDown(self):
-        # os.kill() rather than mdata_proc.terminate() to avoid console spam.
-        os.kill(self.mdata_proc.pid, signal.SIGKILL)
-        self.mdata_proc.join()
-        super(TestSerialConcurrency, self).tearDown()
-
-    def start_mdata_loop(self):
-        """
-        The mdata-get command is repeatedly run in a separate process so
-        that it may try to race with metadata operations performed in the
-        main test process. Use of mdata-get is better than two processes
-        using the protocol implementation in DataSourceSmartOS because we
-        are testing to be sure that cloud-init and mdata-get respect each
-        other's locks.
-        """
-        rcs = list(range(0, 256))
-        while True:
-            subp(['mdata-get', 'sdc:routes'], rcs=rcs)
-
-    def test_all_keys(self):
-        self.assertIsNotNone(self.mdata_proc.pid)
-        ds = DataSourceSmartOS
-        keys = [tup[0] for tup in ds.SMARTOS_ATTRIB_MAP.values()]
-        keys.extend(ds.SMARTOS_ATTRIB_JSON.values())
-
-        client = ds.jmc_client_factory(smartos_type=SMARTOS_ENV_KVM)
-        self.assertIsNotNone(client)
-
-        # The behavior that we are testing for was observed with mdata-get
-        # running 10 times at roughly the same time as cloud-init fetched
-        # each key once. cloud-init would regularly see failures before
-        # making it through all keys once.
-        for _ in range(0, 3):
-            for key in keys:
-                # We don't care about the return value, just that it doesn't
-                # throw any exceptions.
-                client.get(key)
-
-        self.assertIsNone(self.mdata_proc.exitcode)
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_upcloud.py b/tests/unittests/test_datasource/test_upcloud.py
deleted file mode 100644
index cec48b4b..00000000
--- a/tests/unittests/test_datasource/test_upcloud.py
+++ /dev/null
@@ -1,314 +0,0 @@
-# Author: Antti Myyrä
-#
-# This file is part of cloud-init. See LICENSE file for license information.
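The UpCloud tests that follow assert an exact mapping from a dhclient lease dict onto EphemeralIPv4Network keyword arguments. A sketch of that mapping, inferred from the assertions in TestUpCloudNetworkSetup rather than from the datasource module itself:

    lease = {
        'interface': 'eth1', 'fixed-address': '10.6.3.27',
        'routers': '10.6.0.1', 'subnet-mask': '22',
        'broadcast-address': '10.6.3.255',
    }

    # Hypothetical translation implied by m_net.assert_called_once_with(...)
    ephemeral_kwargs = {
        'interface': lease['interface'],
        'ip': lease['fixed-address'],
        'prefix_or_mask': lease['subnet-mask'],
        'broadcast': lease['broadcast-address'],
        'router': lease['routers'],
        'static_routes': None,
    }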
- -import json - -from cloudinit import helpers -from cloudinit import settings -from cloudinit import sources -from cloudinit.sources.DataSourceUpCloud import DataSourceUpCloud, \ - DataSourceUpCloudLocal - -from cloudinit.tests.helpers import mock, CiTestCase - -UC_METADATA = json.loads(""" -{ - "cloud_name": "upcloud", - "instance_id": "00322b68-0096-4042-9406-faad61922128", - "hostname": "test.example.com", - "platform": "servers", - "subplatform": "metadata (http://169.254.169.254)", - "public_keys": [ - "ssh-rsa AAAAB.... test1@example.com", - "ssh-rsa AAAAB.... test2@example.com" - ], - "region": "fi-hel2", - "network": { - "interfaces": [ - { - "index": 1, - "ip_addresses": [ - { - "address": "94.237.105.53", - "dhcp": true, - "dns": [ - "94.237.127.9", - "94.237.40.9" - ], - "family": "IPv4", - "floating": false, - "gateway": "94.237.104.1", - "network": "94.237.104.0/22" - }, - { - "address": "94.237.105.50", - "dhcp": false, - "dns": null, - "family": "IPv4", - "floating": true, - "gateway": "", - "network": "94.237.105.50/32" - } - ], - "mac": "3a:d6:ba:4a:36:e7", - "network_id": "031457f4-0f8c-483c-96f2-eccede02909c", - "type": "public" - }, - { - "index": 2, - "ip_addresses": [ - { - "address": "10.6.3.27", - "dhcp": true, - "dns": null, - "family": "IPv4", - "floating": false, - "gateway": "10.6.0.1", - "network": "10.6.0.0/22" - } - ], - "mac": "3a:d6:ba:4a:84:cc", - "network_id": "03d82553-5bea-4132-b29a-e1cf67ec2dd1", - "type": "utility" - }, - { - "index": 3, - "ip_addresses": [ - { - "address": "2a04:3545:1000:720:38d6:baff:fe4a:63e7", - "dhcp": true, - "dns": [ - "2a04:3540:53::1", - "2a04:3544:53::1" - ], - "family": "IPv6", - "floating": false, - "gateway": "2a04:3545:1000:720::1", - "network": "2a04:3545:1000:720::/64" - } - ], - "mac": "3a:d6:ba:4a:63:e7", - "network_id": "03000000-0000-4000-8046-000000000000", - "type": "public" - }, - { - "index": 4, - "ip_addresses": [ - { - "address": "172.30.1.10", - "dhcp": true, - "dns": null, - "family": "IPv4", - "floating": false, - "gateway": "172.30.1.1", - "network": "172.30.1.0/24" - } - ], - "mac": "3a:d6:ba:4a:8a:e1", - "network_id": "035a0a4a-7704-4de5-820d-189fc8132714", - "type": "private" - } - ], - "dns": [ - "94.237.127.9", - "94.237.40.9" - ] - }, - "storage": { - "disks": [ - { - "id": "014efb65-223b-4d44-8f0a-c29535b88dcf", - "serial": "014efb65223b4d448f0a", - "size": 10240, - "type": "disk", - "tier": "maxiops" - } - ] - }, - "tags": [], - "user_data": "", - "vendor_data": "" -} -""") - -UC_METADATA["user_data"] = b"""#cloud-config -runcmd: -- [touch, /root/cloud-init-worked ] -""" - -MD_URL = 'http://169.254.169.254/metadata/v1.json' - - -def _mock_dmi(): - return True, "00322b68-0096-4042-9406-faad61922128" - - -class TestUpCloudMetadata(CiTestCase): - """ - Test reading the meta-data - """ - def setUp(self): - super(TestUpCloudMetadata, self).setUp() - self.tmp = self.tmp_dir() - - def get_ds(self, get_sysinfo=_mock_dmi): - ds = DataSourceUpCloud( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) - if get_sysinfo: - ds._get_sysinfo = get_sysinfo - return ds - - @mock.patch('cloudinit.sources.helpers.upcloud.read_sysinfo') - def test_returns_false_not_on_upcloud(self, m_read_sysinfo): - m_read_sysinfo.return_value = (False, None) - ds = self.get_ds(get_sysinfo=None) - self.assertEqual(False, ds.get_data()) - self.assertTrue(m_read_sysinfo.called) - - @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata') - def test_metadata(self, mock_readmd): - mock_readmd.return_value = 
UC_METADATA.copy()
-
-        ds = self.get_ds()
-        ds.perform_dhcp_setup = False
-
-        ret = ds.get_data()
-        self.assertTrue(ret)
-
-        self.assertTrue(mock_readmd.called)
-
-        self.assertEqual(UC_METADATA.get('user_data'), ds.get_userdata_raw())
-        self.assertEqual(UC_METADATA.get('vendor_data'),
-                         ds.get_vendordata_raw())
-        self.assertEqual(UC_METADATA.get('region'), ds.availability_zone)
-        self.assertEqual(UC_METADATA.get('instance_id'), ds.get_instance_id())
-        self.assertEqual(UC_METADATA.get('cloud_name'), ds.cloud_name)
-
-        self.assertEqual(UC_METADATA.get('public_keys'),
-                         ds.get_public_ssh_keys())
-        self.assertIsInstance(ds.get_public_ssh_keys(), list)
-
-
-class TestUpCloudNetworkSetup(CiTestCase):
-    """
-    Test reading the meta-data in a networked context
-    """
-
-    def setUp(self):
-        super(TestUpCloudNetworkSetup, self).setUp()
-        self.tmp = self.tmp_dir()
-
-    def get_ds(self, get_sysinfo=_mock_dmi):
-        ds = DataSourceUpCloudLocal(
-            settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
-        if get_sysinfo:
-            ds._get_sysinfo = get_sysinfo
-        return ds
-
-    @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata')
-    @mock.patch('cloudinit.net.find_fallback_nic')
-    @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
-    @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
-    def test_network_configured_metadata(self, m_net, m_dhcp,
-                                         m_fallback_nic, mock_readmd):
-        mock_readmd.return_value = UC_METADATA.copy()
-
-        m_fallback_nic.return_value = 'eth1'
-        m_dhcp.return_value = [{
-            'interface': 'eth1', 'fixed-address': '10.6.3.27',
-            'routers': '10.6.0.1', 'subnet-mask': '22',
-            'broadcast-address': '10.6.3.255'}
-        ]
-
-        ds = self.get_ds()
-
-        ret = ds.get_data()
-        self.assertTrue(ret)
-
-        self.assertTrue(m_dhcp.called)
-        m_dhcp.assert_called_with('eth1', None)
-
-        m_net.assert_called_once_with(
-            broadcast='10.6.3.255', interface='eth1',
-            ip='10.6.3.27', prefix_or_mask='22',
-            router='10.6.0.1', static_routes=None
-        )
-
-        self.assertTrue(mock_readmd.called)
-
-        self.assertEqual(UC_METADATA.get('region'), ds.availability_zone)
-        self.assertEqual(UC_METADATA.get('instance_id'), ds.get_instance_id())
-        self.assertEqual(UC_METADATA.get('cloud_name'), ds.cloud_name)
-
-    @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata')
-    @mock.patch('cloudinit.net.get_interfaces_by_mac')
-    def test_network_configuration(self, m_get_by_mac, mock_readmd):
-        mock_readmd.return_value = UC_METADATA.copy()
-
-        raw_ifaces = UC_METADATA.get('network').get('interfaces')
-        self.assertEqual(4, len(raw_ifaces))
-
-        m_get_by_mac.return_value = {
-            raw_ifaces[0].get('mac'): 'eth0',
-            raw_ifaces[1].get('mac'): 'eth1',
-            raw_ifaces[2].get('mac'): 'eth2',
-            raw_ifaces[3].get('mac'): 'eth3',
-        }
-
-        ds = self.get_ds()
-        ds.perform_dhcp_setup = False
-
-        ret = ds.get_data()
-        self.assertTrue(ret)
-
-        self.assertTrue(mock_readmd.called)
-
-        netcfg = ds.network_config
-
-        self.assertEqual(1, netcfg.get('version'))
-
-        config = netcfg.get('config')
-        self.assertIsInstance(config, list)
-        self.assertEqual(5, len(config))
-        self.assertEqual('physical', config[3].get('type'))
-
-        self.assertEqual(raw_ifaces[2].get('mac'), config[2]
-                         .get('mac_address'))
-        self.assertEqual(1, len(config[2].get('subnets')))
-        self.assertEqual('ipv6_dhcpv6-stateless', config[2].get('subnets')[0]
-                         .get('type'))
-
-        self.assertEqual(2, len(config[0].get('subnets')))
-        self.assertEqual('static', config[0].get('subnets')[1].get('type'))
-
-        dns = config[4]
-        self.assertEqual('nameserver', dns.get('type'))
-        self.assertEqual(2,
len(dns.get('address'))) - self.assertEqual( - UC_METADATA.get('network').get('dns')[1], - dns.get('address')[1] - ) - - -class TestUpCloudDatasourceLoading(CiTestCase): - def test_get_datasource_list_returns_in_local(self): - deps = (sources.DEP_FILESYSTEM, ) - ds_list = sources.DataSourceUpCloud.get_datasource_list(deps) - self.assertEqual(ds_list, - [DataSourceUpCloudLocal]) - - def test_get_datasource_list_returns_in_normal(self): - deps = (sources.DEP_FILESYSTEM, sources.DEP_NETWORK) - ds_list = sources.DataSourceUpCloud.get_datasource_list(deps) - self.assertEqual(ds_list, - [DataSourceUpCloud]) - - def test_list_sources_finds_ds(self): - found = sources.list_sources( - ['UpCloud'], (sources.DEP_FILESYSTEM, sources.DEP_NETWORK), - ['cloudinit.sources']) - self.assertEqual([DataSourceUpCloud], - found) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_vmware.py b/tests/unittests/test_datasource/test_vmware.py deleted file mode 100644 index 52f910b5..00000000 --- a/tests/unittests/test_datasource/test_vmware.py +++ /dev/null @@ -1,391 +0,0 @@ -# Copyright (c) 2021 VMware, Inc. All Rights Reserved. -# -# Authors: Andrew Kutz -# -# This file is part of cloud-init. See LICENSE file for license information. - -import base64 -import gzip -import os - -import pytest - -from cloudinit import dmi, helpers, safeyaml -from cloudinit import settings -from cloudinit.sources import DataSourceVMware -from cloudinit.tests.helpers import ( - mock, - CiTestCase, - FilesystemMockingTestCase, - populate_dir, -) - - -PRODUCT_NAME_FILE_PATH = "/sys/class/dmi/id/product_name" -PRODUCT_NAME = "VMware7,1" -PRODUCT_UUID = "82343CED-E4C7-423B-8F6B-0D34D19067AB" -REROOT_FILES = { - DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, - PRODUCT_NAME_FILE_PATH: PRODUCT_NAME, -} - -VMW_MULTIPLE_KEYS = [ - "ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@vmw.com", - "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@vmw.com", -] -VMW_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@vmw.com" - -VMW_METADATA_YAML = """instance-id: cloud-vm -local-hostname: cloud-vm -network: - version: 2 - ethernets: - nics: - match: - name: ens* - dhcp4: yes -""" - -VMW_USERDATA_YAML = """## template: jinja -#cloud-config -users: -- default -""" - -VMW_VENDORDATA_YAML = """## template: jinja -#cloud-config -runcmd: -- echo "Hello, world." -""" - - -@pytest.yield_fixture(autouse=True) -def common_patches(): - with mock.patch('cloudinit.util.platform.platform', return_value='Linux'): - with mock.patch.multiple( - 'cloudinit.dmi', - is_container=mock.Mock(return_value=False), - is_FreeBSD=mock.Mock(return_value=False) - ): - yield - - -class TestDataSourceVMware(CiTestCase): - """ - Test common functionality that is not transport specific. - """ - - def setUp(self): - super(TestDataSourceVMware, self).setUp() - self.tmp = self.tmp_dir() - - def test_no_data_access_method(self): - ds = get_ds(self.tmp) - ds.vmware_rpctool = None - ret = ds.get_data() - self.assertFalse(ret) - - def test_get_host_info(self): - host_info = DataSourceVMware.get_host_info() - self.assertTrue(host_info) - self.assertTrue(host_info["hostname"]) - self.assertTrue(host_info["local-hostname"]) - self.assertTrue(host_info["local_hostname"]) - self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV4]) - - -class TestDataSourceVMwareEnvVars(FilesystemMockingTestCase): - """ - Test the envvar transport. 
- """ - - def setUp(self): - super(TestDataSourceVMwareEnvVars, self).setUp() - self.tmp = self.tmp_dir() - os.environ[DataSourceVMware.VMX_GUESTINFO] = "1" - self.create_system_files() - - def tearDown(self): - del os.environ[DataSourceVMware.VMX_GUESTINFO] - return super(TestDataSourceVMwareEnvVars, self).tearDown() - - def create_system_files(self): - rootd = self.tmp_dir() - populate_dir( - rootd, - { - DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, - }, - ) - self.assertTrue(self.reRoot(rootd)) - - def assert_get_data_ok(self, m_fn, m_fn_call_count=6): - ds = get_ds(self.tmp) - ds.vmware_rpctool = None - ret = ds.get_data() - self.assertTrue(ret) - self.assertEqual(m_fn_call_count, m_fn.call_count) - self.assertEqual( - ds.data_access_method, DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR - ) - return ds - - def assert_metadata(self, metadata, m_fn, m_fn_call_count=6): - ds = self.assert_get_data_ok(m_fn, m_fn_call_count) - assert_metadata(self, ds, metadata) - - @mock.patch( - "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" - ) - def test_get_subplatform(self, m_fn): - m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] - ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4) - self.assertEqual( - ds.subplatform, - "%s (%s)" - % ( - DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR, - DataSourceVMware.get_guestinfo_envvar_key_name("metadata"), - ), - ) - - @mock.patch( - "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" - ) - def test_get_data_metadata_only(self, m_fn): - m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch( - "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" - ) - def test_get_data_userdata_only(self, m_fn): - m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch( - "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" - ) - def test_get_data_vendordata_only(self, m_fn): - m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch( - "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" - ) - def test_get_data_metadata_base64(self, m_fn): - data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) - m_fn.side_effect = [data, "base64", "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch( - "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" - ) - def test_get_data_metadata_b64(self, m_fn): - data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) - m_fn.side_effect = [data, "b64", "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch( - "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" - ) - def test_get_data_metadata_gzip_base64(self, m_fn): - data = VMW_METADATA_YAML.encode("utf-8") - data = gzip.compress(data) - data = base64.b64encode(data) - m_fn.side_effect = [data, "gzip+base64", "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch( - "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" - ) - def test_get_data_metadata_gz_b64(self, m_fn): - data = VMW_METADATA_YAML.encode("utf-8") - data = gzip.compress(data) - data = base64.b64encode(data) - m_fn.side_effect = [data, "gz+b64", "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch( - "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" - ) - def test_metadata_single_ssh_key(self, m_fn): - metadata = 
DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) - metadata["public_keys"] = VMW_SINGLE_KEY - metadata_yaml = safeyaml.dumps(metadata) - m_fn.side_effect = [metadata_yaml, "", "", ""] - self.assert_metadata(metadata, m_fn, m_fn_call_count=4) - - @mock.patch( - "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" - ) - def test_metadata_multiple_ssh_keys(self, m_fn): - metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) - metadata["public_keys"] = VMW_MULTIPLE_KEYS - metadata_yaml = safeyaml.dumps(metadata) - m_fn.side_effect = [metadata_yaml, "", "", ""] - self.assert_metadata(metadata, m_fn, m_fn_call_count=4) - - -class TestDataSourceVMwareGuestInfo(FilesystemMockingTestCase): - """ - Test the guestinfo transport on a VMware platform. - """ - - def setUp(self): - super(TestDataSourceVMwareGuestInfo, self).setUp() - self.tmp = self.tmp_dir() - self.create_system_files() - - def create_system_files(self): - rootd = self.tmp_dir() - populate_dir( - rootd, - { - DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, - PRODUCT_NAME_FILE_PATH: PRODUCT_NAME, - }, - ) - self.assertTrue(self.reRoot(rootd)) - - def assert_get_data_ok(self, m_fn, m_fn_call_count=6): - ds = get_ds(self.tmp) - ds.vmware_rpctool = "vmware-rpctool" - ret = ds.get_data() - self.assertTrue(ret) - self.assertEqual(m_fn_call_count, m_fn.call_count) - self.assertEqual( - ds.data_access_method, - DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO, - ) - return ds - - def assert_metadata(self, metadata, m_fn, m_fn_call_count=6): - ds = self.assert_get_data_ok(m_fn, m_fn_call_count) - assert_metadata(self, ds, metadata) - - def test_ds_valid_on_vmware_platform(self): - system_type = dmi.read_dmi_data("system-product-name") - self.assertEqual(system_type, PRODUCT_NAME) - - @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") - def test_get_subplatform(self, m_fn): - m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] - ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4) - self.assertEqual( - ds.subplatform, - "%s (%s)" - % ( - DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO, - DataSourceVMware.get_guestinfo_key_name("metadata"), - ), - ) - - @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") - def test_get_data_userdata_only(self, m_fn): - m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") - def test_get_data_vendordata_only(self, m_fn): - m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") - def test_metadata_single_ssh_key(self, m_fn): - metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) - metadata["public_keys"] = VMW_SINGLE_KEY - metadata_yaml = safeyaml.dumps(metadata) - m_fn.side_effect = [metadata_yaml, "", "", ""] - self.assert_metadata(metadata, m_fn, m_fn_call_count=4) - - @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") - def test_metadata_multiple_ssh_keys(self, m_fn): - metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) - metadata["public_keys"] = VMW_MULTIPLE_KEYS - metadata_yaml = safeyaml.dumps(metadata) - m_fn.side_effect = [metadata_yaml, "", "", ""] - self.assert_metadata(metadata, m_fn, m_fn_call_count=4) - - @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") - def test_get_data_metadata_base64(self, m_fn): - data = 
base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) - m_fn.side_effect = [data, "base64", "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") - def test_get_data_metadata_b64(self, m_fn): - data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) - m_fn.side_effect = [data, "b64", "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") - def test_get_data_metadata_gzip_base64(self, m_fn): - data = VMW_METADATA_YAML.encode("utf-8") - data = gzip.compress(data) - data = base64.b64encode(data) - m_fn.side_effect = [data, "gzip+base64", "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") - def test_get_data_metadata_gz_b64(self, m_fn): - data = VMW_METADATA_YAML.encode("utf-8") - data = gzip.compress(data) - data = base64.b64encode(data) - m_fn.side_effect = [data, "gz+b64", "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - -class TestDataSourceVMwareGuestInfo_InvalidPlatform(FilesystemMockingTestCase): - """ - Test the guestinfo transport on a non-VMware platform. - """ - - def setUp(self): - super(TestDataSourceVMwareGuestInfo_InvalidPlatform, self).setUp() - self.tmp = self.tmp_dir() - self.create_system_files() - - def create_system_files(self): - rootd = self.tmp_dir() - populate_dir( - rootd, - { - DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, - }, - ) - self.assertTrue(self.reRoot(rootd)) - - @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") - def test_ds_invalid_on_non_vmware_platform(self, m_fn): - system_type = dmi.read_dmi_data("system-product-name") - self.assertEqual(system_type, None) - - m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] - ds = get_ds(self.tmp) - ds.vmware_rpctool = "vmware-rpctool" - ret = ds.get_data() - self.assertFalse(ret) - - -def assert_metadata(test_obj, ds, metadata): - test_obj.assertEqual(metadata.get("instance-id"), ds.get_instance_id()) - test_obj.assertEqual(metadata.get("local-hostname"), ds.get_hostname()) - - expected_public_keys = metadata.get("public_keys") - if not isinstance(expected_public_keys, list): - expected_public_keys = [expected_public_keys] - - test_obj.assertEqual(expected_public_keys, ds.get_public_ssh_keys()) - test_obj.assertIsInstance(ds.get_public_ssh_keys(), list) - - -def get_ds(temp_dir): - ds = DataSourceVMware.DataSourceVMware( - settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": temp_dir}) - ) - ds.vmware_rpctool = "vmware-rpctool" - return ds - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_vultr.py b/tests/unittests/test_datasource/test_vultr.py deleted file mode 100644 index 63235009..00000000 --- a/tests/unittests/test_datasource/test_vultr.py +++ /dev/null @@ -1,337 +0,0 @@ -# Author: Eric Benner -# -# This file is part of cloud-init. See LICENSE file for license information. 
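The VMware tests above build guestinfo payloads by gzip-compressing and then base64-encoding the metadata YAML, tagging the value with markers such as "gzip+base64" or "gz+b64". A minimal round-trip sketch of that encoding; only the encode half appears verbatim in the tests, the decode half is an assumption about how the datasource reverses it:

    import base64
    import gzip

    metadata_yaml = "instance-id: cloud-vm\nlocal-hostname: cloud-vm\n"

    # Encode the way the "gzip+base64" / "gz+b64" test cases do.
    encoded = base64.b64encode(gzip.compress(metadata_yaml.encode("utf-8")))

    # Hypothetical decode step, simply mirroring the encoding in reverse.
    decoded = gzip.decompress(base64.b64decode(encoded)).decode("utf-8")
    assert decoded == metadata_yaml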
- -# Vultr Metadata API: -# https://www.vultr.com/metadata/ - -import json - -from cloudinit import helpers -from cloudinit import settings -from cloudinit.sources import DataSourceVultr -from cloudinit.sources.helpers import vultr - -from cloudinit.tests.helpers import mock, CiTestCase - -# Vultr metadata test data -VULTR_V1_1 = { - 'bgp': { - 'ipv4': { - 'my-address': '', - 'my-asn': '', - 'peer-address': '', - 'peer-asn': '' - }, - 'ipv6': { - 'my-address': '', - 'my-asn': '', - 'peer-address': '', - 'peer-asn': '' - } - }, - 'hostname': 'CLOUDINIT_1', - 'instanceid': '42506325', - 'interfaces': [ - { - 'ipv4': { - 'additional': [ - ], - 'address': '108.61.89.242', - 'gateway': '108.61.89.1', - 'netmask': '255.255.255.0' - }, - 'ipv6': { - 'additional': [ - ], - 'address': '2001:19f0:5:56c2:5400:03ff:fe15:c465', - 'network': '2001:19f0:5:56c2::', - 'prefix': '64' - }, - 'mac': '56:00:03:15:c4:65', - 'network-type': 'public' - } - ], - 'public-keys': [ - 'ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key' - ], - 'region': { - 'regioncode': 'EWR' - }, - 'user-defined': [ - ], - 'startup-script': 'echo No configured startup script', - 'raid1-script': '', - 'user-data': [ - ], - 'vendor-data': [ - { - 'package_upgrade': 'true', - 'disable_root': 0, - 'ssh_pwauth': 1, - 'chpasswd': { - 'expire': False, - 'list': [ - 'root:$6$S2Smuj.../VqxmIR9Urw0jPZ88i4yvB/' - ] - }, - 'system_info': { - 'default_user': { - 'name': 'root' - } - } - } - ] -} - -VULTR_V1_2 = { - 'bgp': { - 'ipv4': { - 'my-address': '', - 'my-asn': '', - 'peer-address': '', - 'peer-asn': '' - }, - 'ipv6': { - 'my-address': '', - 'my-asn': '', - 'peer-address': '', - 'peer-asn': '' - } - }, - 'hostname': 'CLOUDINIT_2', - 'instance-v2-id': '29bea708-2e6e-480a-90ad-0e6b5d5ad62f', - 'instanceid': '42872224', - 'interfaces': [ - { - 'ipv4': { - 'additional': [ - ], - 'address':'45.76.7.171', - 'gateway':'45.76.6.1', - 'netmask':'255.255.254.0' - }, - 'ipv6':{ - 'additional': [ - ], - 'address':'2001:19f0:5:28a7:5400:03ff:fe1b:4eca', - 'network':'2001:19f0:5:28a7::', - 'prefix':'64' - }, - 'mac':'56:00:03:1b:4e:ca', - 'network-type':'public' - }, - { - 'ipv4': { - 'additional': [ - ], - 'address':'10.1.112.3', - 'gateway':'', - 'netmask':'255.255.240.0' - }, - 'ipv6':{ - 'additional': [ - ], - 'network':'', - 'prefix':'' - }, - 'mac':'5a:00:03:1b:4e:ca', - 'network-type':'private', - 'network-v2-id':'fbbe2b5b-b986-4396-87f5-7246660ccb64', - 'networkid':'net5e7155329d730' - } - ], - 'public-keys': [ - 'ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key' - ], - 'region': { - 'regioncode': 'EWR' - }, - 'user-defined': [ - ], - 'startup-script': 'echo No configured startup script', - 'user-data': [ - ], - - 'vendor-data': [ - { - 'package_upgrade': 'true', - 'disable_root': 0, - 'ssh_pwauth': 1, - 'chpasswd': { - 'expire': False, - 'list': [ - 'root:$6$SxXx...k2mJNIzZB5vMCDBlYT1' - ] - }, - 'system_info': { - 'default_user': { - 'name': 'root' - } - } - } - ] -} - -SSH_KEYS_1 = [ - "ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key" -] - -# Expected generated objects - -# Expected config -EXPECTED_VULTR_CONFIG = { - 'package_upgrade': 'true', - 'disable_root': 0, - 'ssh_pwauth': 1, - 'chpasswd': { - 'expire': False, - 'list': [ - 'root:$6$SxXx...k2mJNIzZB5vMCDBlYT1' - ] - }, - 'system_info': { - 'default_user': { - 'name': 'root' - } - } -} - -# Expected network config object from generator -EXPECTED_VULTR_NETWORK_1 = { - 'version': 1, - 'config': [ - { - 'type': 'nameserver', - 'address': ['108.61.10.10'] - }, - { - 'name': 
'eth0', - 'type': 'physical', - 'mac_address': '56:00:03:15:c4:65', - 'accept-ra': 1, - 'subnets': [ - {'type': 'dhcp', 'control': 'auto'}, - {'type': 'ipv6_slaac', 'control': 'auto'} - ], - } - ] -} - -EXPECTED_VULTR_NETWORK_2 = { - 'version': 1, - 'config': [ - { - 'type': 'nameserver', - 'address': ['108.61.10.10'] - }, - { - 'name': 'eth0', - 'type': 'physical', - 'mac_address': '56:00:03:1b:4e:ca', - 'accept-ra': 1, - 'subnets': [ - {'type': 'dhcp', 'control': 'auto'}, - {'type': 'ipv6_slaac', 'control': 'auto'} - ], - }, - { - 'name': 'eth1', - 'type': 'physical', - 'mac_address': '5a:00:03:1b:4e:ca', - 'subnets': [ - { - "type": "static", - "control": "auto", - "address": "10.1.112.3", - "netmask": "255.255.240.0" - } - ], - } - ] -} - - -INTERFACE_MAP = { - '56:00:03:15:c4:65': 'eth0', - '56:00:03:1b:4e:ca': 'eth0', - '5a:00:03:1b:4e:ca': 'eth1' -} - - -class TestDataSourceVultr(CiTestCase): - def setUp(self): - super(TestDataSourceVultr, self).setUp() - - # Stored as a dict to make it easier to maintain - raw1 = json.dumps(VULTR_V1_1['vendor-data'][0]) - raw2 = json.dumps(VULTR_V1_2['vendor-data'][0]) - - # Make expected format - VULTR_V1_1['vendor-data'] = [raw1] - VULTR_V1_2['vendor-data'] = [raw2] - - self.tmp = self.tmp_dir() - - # Test the datasource itself - @mock.patch('cloudinit.net.get_interfaces_by_mac') - @mock.patch('cloudinit.sources.helpers.vultr.is_vultr') - @mock.patch('cloudinit.sources.helpers.vultr.get_metadata') - def test_datasource(self, - mock_getmeta, - mock_isvultr, - mock_netmap): - mock_getmeta.return_value = VULTR_V1_2 - mock_isvultr.return_value = True - mock_netmap.return_value = INTERFACE_MAP - - source = DataSourceVultr.DataSourceVultr( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) - - # Test for failure - self.assertEqual(True, source._get_data()) - - # Test instance id - self.assertEqual("42872224", source.metadata['instanceid']) - - # Test hostname - self.assertEqual("CLOUDINIT_2", source.metadata['local-hostname']) - - # Test ssh keys - self.assertEqual(SSH_KEYS_1, source.metadata['public-keys']) - - # Test vendor data generation - orig_val = self.maxDiff - self.maxDiff = None - - vendordata = source.vendordata_raw - - # Test vendor config - self.assertEqual( - EXPECTED_VULTR_CONFIG, - json.loads(vendordata[0].replace("#cloud-config", ""))) - - self.maxDiff = orig_val - - # Test network config generation - self.assertEqual(EXPECTED_VULTR_NETWORK_2, source.network_config) - - # Test network config generation - @mock.patch('cloudinit.net.get_interfaces_by_mac') - def test_network_config(self, mock_netmap): - mock_netmap.return_value = INTERFACE_MAP - interf = VULTR_V1_1['interfaces'] - - self.assertEqual(EXPECTED_VULTR_NETWORK_1, - vultr.generate_network_config(interf)) - - # Test Private Networking config generation - @mock.patch('cloudinit.net.get_interfaces_by_mac') - def test_private_network_config(self, mock_netmap): - mock_netmap.return_value = INTERFACE_MAP - interf = VULTR_V1_2['interfaces'] - - self.assertEqual(EXPECTED_VULTR_NETWORK_2, - vultr.generate_network_config(interf)) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_dhclient_hook.py b/tests/unittests/test_dhclient_hook.py new file mode 100644 index 00000000..14549111 --- /dev/null +++ b/tests/unittests/test_dhclient_hook.py @@ -0,0 +1,105 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
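The new test_dhclient_hook.py below drives run_hook with both modern new_*-prefixed environment variables and the older upper-case DHCP4_* scheme, and expects the same filtered JSON either way. A rough sketch of the normalization those expectations imply, inferred from the ex_env/expected fixtures rather than copied from cloudinit/dhclient_hook.py:

    def filter_dhcp_env(env):
        # Keep only the lease fields, normalizing either naming scheme
        # ("new_ip_address" or "DHCP4_ip_address") to a bare lower-case key.
        data = {}
        for prefix in ("new_", "DHCP4_"):
            for key, value in env.items():
                if key.startswith(prefix):
                    data[key[len(prefix):].lower()] = value
        return data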
+ +"""Tests for cloudinit.dhclient_hook.""" + +from cloudinit import dhclient_hook as dhc +from tests.unittests.helpers import CiTestCase, dir2dict, populate_dir + +import argparse +import json +import os +from unittest import mock + + +class TestDhclientHook(CiTestCase): + + ex_env = { + 'interface': 'eth0', + 'new_dhcp_lease_time': '3600', + 'new_host_name': 'x1', + 'new_ip_address': '10.145.210.163', + 'new_subnet_mask': '255.255.255.0', + 'old_host_name': 'x1', + 'PATH': '/usr/sbin:/usr/bin:/sbin:/bin', + 'pid': '614', + 'reason': 'BOUND', + } + + # some older versions of dhclient put the same content, + # but in upper case with DHCP4_ instead of new_ + ex_env_dhcp4 = { + 'REASON': 'BOUND', + 'DHCP4_dhcp_lease_time': '3600', + 'DHCP4_host_name': 'x1', + 'DHCP4_ip_address': '10.145.210.163', + 'DHCP4_subnet_mask': '255.255.255.0', + 'INTERFACE': 'eth0', + 'PATH': '/usr/sbin:/usr/bin:/sbin:/bin', + 'pid': '614', + } + + expected = { + 'dhcp_lease_time': '3600', + 'host_name': 'x1', + 'ip_address': '10.145.210.163', + 'subnet_mask': '255.255.255.0'} + + def setUp(self): + super(TestDhclientHook, self).setUp() + self.tmp = self.tmp_dir() + + def test_handle_args(self): + """quick test of call to handle_args.""" + nic = 'eth0' + args = argparse.Namespace(event=dhc.UP, interface=nic) + with mock.patch.dict("os.environ", clear=True, values=self.ex_env): + dhc.handle_args(dhc.NAME, args, data_d=self.tmp) + found = dir2dict(self.tmp + os.path.sep) + self.assertEqual([nic + ".json"], list(found.keys())) + self.assertEqual(self.expected, json.loads(found[nic + ".json"])) + + def test_run_hook_up_creates_dir(self): + """If dir does not exist, run_hook should create it.""" + subd = self.tmp_path("subdir", self.tmp) + nic = 'eth1' + dhc.run_hook(nic, 'up', data_d=subd, env=self.ex_env) + self.assertEqual( + set([nic + ".json"]), set(dir2dict(subd + os.path.sep))) + + def test_run_hook_up(self): + """Test expected use of run_hook_up.""" + nic = 'eth0' + dhc.run_hook(nic, 'up', data_d=self.tmp, env=self.ex_env) + found = dir2dict(self.tmp + os.path.sep) + self.assertEqual([nic + ".json"], list(found.keys())) + self.assertEqual(self.expected, json.loads(found[nic + ".json"])) + + def test_run_hook_up_dhcp4_prefix(self): + """Test run_hook filters correctly with older DHCP4_ data.""" + nic = 'eth0' + dhc.run_hook(nic, 'up', data_d=self.tmp, env=self.ex_env_dhcp4) + found = dir2dict(self.tmp + os.path.sep) + self.assertEqual([nic + ".json"], list(found.keys())) + self.assertEqual(self.expected, json.loads(found[nic + ".json"])) + + def test_run_hook_down_deletes(self): + """down should delete the created json file.""" + nic = 'eth1' + populate_dir( + self.tmp, {nic + ".json": "{'abcd'}", 'myfile.txt': 'text'}) + dhc.run_hook(nic, 'down', data_d=self.tmp, env={'old_host_name': 'x1'}) + self.assertEqual( + set(['myfile.txt']), + set(dir2dict(self.tmp + os.path.sep))) + + def test_get_parser(self): + """Smoke test creation of get_parser.""" + # cloud-init main uses 'action'. + event, interface = (dhc.UP, 'mynic0') + self.assertEqual( + argparse.Namespace(event=event, interface=interface, + action=(dhc.NAME, dhc.handle_args)), + dhc.get_parser().parse_args([event, interface])) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/__init__.py b/tests/unittests/test_distros/__init__.py deleted file mode 100644 index 5394aa56..00000000 --- a/tests/unittests/test_distros/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
-import copy - -from cloudinit import distros -from cloudinit import helpers -from cloudinit import settings - - -def _get_distro(dtype, system_info=None): - """Return a Distro class of distro 'dtype'. - - cfg is format of CFG_BUILTIN['system_info']. - - example: _get_distro("debian") - """ - if system_info is None: - system_info = copy.deepcopy(settings.CFG_BUILTIN['system_info']) - system_info['distro'] = dtype - paths = helpers.Paths(system_info['paths']) - distro_cls = distros.fetch(dtype) - return distro_cls(dtype, system_info, paths) diff --git a/tests/unittests/test_distros/test_arch.py b/tests/unittests/test_distros/test_arch.py deleted file mode 100644 index a95ba3b5..00000000 --- a/tests/unittests/test_distros/test_arch.py +++ /dev/null @@ -1,45 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit.distros.arch import _render_network -from cloudinit import util - -from cloudinit.tests.helpers import (CiTestCase, dir2dict) - -from . import _get_distro - - -class TestArch(CiTestCase): - - def test_get_distro(self): - distro = _get_distro("arch") - hostname = "myhostname" - hostfile = self.tmp_path("hostfile") - distro._write_hostname(hostname, hostfile) - self.assertEqual(hostname + "\n", util.load_file(hostfile)) - - -class TestRenderNetwork(CiTestCase): - def test_basic_static(self): - """Just the most basic static config. - - note 'lo' should not be rendered as an interface.""" - entries = {'eth0': {'auto': True, - 'dns-nameservers': ['8.8.8.8'], - 'bootproto': 'static', - 'address': '10.0.0.2', - 'gateway': '10.0.0.1', - 'netmask': '255.255.255.0'}, - 'lo': {'auto': True}} - target = self.tmp_dir() - devs = _render_network(entries, target=target) - files = dir2dict(target, prefix=target) - self.assertEqual(['eth0'], devs) - self.assertEqual( - {'/etc/netctl/eth0': '\n'.join([ - "Address=10.0.0.2/255.255.255.0", - "Connection=ethernet", - "DNS=('8.8.8.8')", - "Gateway=10.0.0.1", - "IP=static", - "Interface=eth0", ""]), - '/etc/resolv.conf': 'nameserver 8.8.8.8\n'}, files) diff --git a/tests/unittests/test_distros/test_bsd_utils.py b/tests/unittests/test_distros/test_bsd_utils.py deleted file mode 100644 index 3a68f2a9..00000000 --- a/tests/unittests/test_distros/test_bsd_utils.py +++ /dev/null @@ -1,67 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
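The test_bsd_utils.py removal below pins down how get_rc_config_value should treat quoting in /etc/rc.conf: a matching pair of single or double quotes is stripped, while mismatched quotes are returned untouched. An illustrative re-implementation consistent with those cases (not the shipped bsd_utils code):

    def get_rc_config_value(key, rc_text):
        # Find `key=value` and strip one layer of *matching* quotes.
        for line in rc_text.splitlines():
            name, sep, value = line.partition('=')
            if not sep or name.strip() != key:
                continue
            value = value.strip()
            for quote in ('"', "'"):
                if (len(value) >= 2 and value.startswith(quote)
                        and value.endswith(quote)):
                    return value[1:-1]
            return value
        return None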
- -import cloudinit.distros.bsd_utils as bsd_utils - -from cloudinit.tests.helpers import (CiTestCase, ExitStack, mock) - -RC_FILE = """ -if something; then - do something here -fi -hostname={hostname} -""" - - -class TestBsdUtils(CiTestCase): - - def setUp(self): - super().setUp() - patches = ExitStack() - self.addCleanup(patches.close) - - self.load_file = patches.enter_context( - mock.patch.object(bsd_utils.util, 'load_file')) - - self.write_file = patches.enter_context( - mock.patch.object(bsd_utils.util, 'write_file')) - - def test_get_rc_config_value(self): - self.load_file.return_value = 'hostname=foo\n' - self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo') - self.load_file.assert_called_with('/etc/rc.conf') - - self.load_file.return_value = 'hostname=foo' - self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo') - - self.load_file.return_value = 'hostname="foo"' - self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo') - - self.load_file.return_value = "hostname='foo'" - self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo') - - self.load_file.return_value = 'hostname=\'foo"' - self.assertEqual(bsd_utils.get_rc_config_value('hostname'), "'foo\"") - - self.load_file.return_value = '' - self.assertEqual(bsd_utils.get_rc_config_value('hostname'), None) - - self.load_file.return_value = RC_FILE.format(hostname='foo') - self.assertEqual(bsd_utils.get_rc_config_value('hostname'), "foo") - - def test_set_rc_config_value_unchanged(self): - # bsd_utils.set_rc_config_value('hostname', 'foo') - # self.write_file.assert_called_with('/etc/rc.conf', 'hostname=foo\n') - - self.load_file.return_value = RC_FILE.format(hostname='foo') - self.write_file.assert_not_called() - - def test_set_rc_config_value(self): - bsd_utils.set_rc_config_value('hostname', 'foo') - self.write_file.assert_called_with('/etc/rc.conf', 'hostname=foo\n') - - self.load_file.return_value = RC_FILE.format(hostname='foo') - bsd_utils.set_rc_config_value('hostname', 'bar') - self.write_file.assert_called_with( - '/etc/rc.conf', - RC_FILE.format(hostname='bar') - ) diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py deleted file mode 100644 index 685f08ba..00000000 --- a/tests/unittests/test_distros/test_create_users.py +++ /dev/null @@ -1,236 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import re - -from cloudinit import distros -from cloudinit import ssh_util -from cloudinit.tests.helpers import (CiTestCase, mock) -from tests.unittests.util import abstract_to_concrete - - -@mock.patch("cloudinit.distros.util.system_is_snappy", return_value=False) -@mock.patch("cloudinit.distros.subp.subp") -class TestCreateUser(CiTestCase): - - with_logs = True - - def setUp(self): - super(TestCreateUser, self).setUp() - self.dist = abstract_to_concrete(distros.Distro)( - name='test', cfg=None, paths=None - ) - - def _useradd2call(self, args): - # return a mock call for the useradd command in args - # with expected 'logstring'. 
-        args = ['useradd'] + args
-        logcmd = [a for a in args]
-        for i in range(len(args)):
-            if args[i] in ('--password',):
-                logcmd[i + 1] = 'REDACTED'
-        return mock.call(args, logstring=logcmd)
-
-    def test_basic(self, m_subp, m_is_snappy):
-        user = 'foouser'
-        self.dist.create_user(user)
-        self.assertEqual(
-            m_subp.call_args_list,
-            [self._useradd2call([user, '-m']),
-             mock.call(['passwd', '-l', user])])
-
-    def test_no_home(self, m_subp, m_is_snappy):
-        user = 'foouser'
-        self.dist.create_user(user, no_create_home=True)
-        self.assertEqual(
-            m_subp.call_args_list,
-            [self._useradd2call([user, '-M']),
-             mock.call(['passwd', '-l', user])])
-
-    def test_system_user(self, m_subp, m_is_snappy):
-        # system user should have no home and get --system
-        user = 'foouser'
-        self.dist.create_user(user, system=True)
-        self.assertEqual(
-            m_subp.call_args_list,
-            [self._useradd2call([user, '--system', '-M']),
-             mock.call(['passwd', '-l', user])])
-
-    def test_explicit_no_home_false(self, m_subp, m_is_snappy):
-        user = 'foouser'
-        self.dist.create_user(user, no_create_home=False)
-        self.assertEqual(
-            m_subp.call_args_list,
-            [self._useradd2call([user, '-m']),
-             mock.call(['passwd', '-l', user])])
-
-    def test_unlocked(self, m_subp, m_is_snappy):
-        user = 'foouser'
-        self.dist.create_user(user, lock_passwd=False)
-        self.assertEqual(
-            m_subp.call_args_list,
-            [self._useradd2call([user, '-m'])])
-
-    def test_set_password(self, m_subp, m_is_snappy):
-        user = 'foouser'
-        password = 'passfoo'
-        self.dist.create_user(user, passwd=password)
-        self.assertEqual(
-            m_subp.call_args_list,
-            [self._useradd2call([user, '--password', password, '-m']),
-             mock.call(['passwd', '-l', user])])
-
-    @mock.patch("cloudinit.distros.util.is_group")
-    def test_group_added(self, m_is_group, m_subp, m_is_snappy):
-        m_is_group.return_value = False
-        user = 'foouser'
-        self.dist.create_user(user, groups=['group1'])
-        expected = [
-            mock.call(['groupadd', 'group1']),
-            self._useradd2call([user, '--groups', 'group1', '-m']),
-            mock.call(['passwd', '-l', user])]
-        self.assertEqual(m_subp.call_args_list, expected)
-
-    @mock.patch("cloudinit.distros.util.is_group")
-    def test_only_new_group_added(self, m_is_group, m_subp, m_is_snappy):
-        ex_groups = ['existing_group']
-        groups = ['group1', ex_groups[0]]
-        m_is_group.side_effect = lambda m: m in ex_groups
-        user = 'foouser'
-        self.dist.create_user(user, groups=groups)
-        expected = [
-            mock.call(['groupadd', 'group1']),
-            self._useradd2call([user, '--groups', ','.join(groups), '-m']),
-            mock.call(['passwd', '-l', user])]
-        self.assertEqual(m_subp.call_args_list, expected)
-
-    @mock.patch("cloudinit.distros.util.is_group")
-    def test_create_groups_with_whitespace_string(
-            self, m_is_group, m_subp, m_is_snappy):
-        # groups supported as a comma delimited string even with white space
-        m_is_group.return_value = False
-        user = 'foouser'
-        self.dist.create_user(user, groups='group1, group2')
-        expected = [
-            mock.call(['groupadd', 'group1']),
-            mock.call(['groupadd', 'group2']),
-            self._useradd2call([user, '--groups', 'group1,group2', '-m']),
-            mock.call(['passwd', '-l', user])]
-        self.assertEqual(m_subp.call_args_list, expected)
-
-    def test_explicit_sudo_false(self, m_subp, m_is_snappy):
-        user = 'foouser'
-        self.dist.create_user(user, sudo=False)
-        self.assertEqual(
-            m_subp.call_args_list,
-            [self._useradd2call([user, '-m']),
-             mock.call(['passwd', '-l', user])])
-
-    @mock.patch('cloudinit.ssh_util.setup_user_keys')
-    def test_setup_ssh_authorized_keys_with_string(
-            self, m_setup_user_keys, m_subp, m_is_snappy):
-        """ssh_authorized_keys allows string and calls setup_user_keys."""
-        user = 'foouser'
-        self.dist.create_user(user, ssh_authorized_keys='mykey')
-        self.assertEqual(
-            m_subp.call_args_list,
-            [self._useradd2call([user, '-m']),
-             mock.call(['passwd', '-l', user])])
-        m_setup_user_keys.assert_called_once_with(set(['mykey']), user)
-
-    @mock.patch('cloudinit.ssh_util.setup_user_keys')
-    def test_setup_ssh_authorized_keys_with_list(
-            self, m_setup_user_keys, m_subp, m_is_snappy):
-        """ssh_authorized_keys allows lists and calls setup_user_keys."""
-        user = 'foouser'
-        self.dist.create_user(user, ssh_authorized_keys=['key1', 'key2'])
-        self.assertEqual(
-            m_subp.call_args_list,
-            [self._useradd2call([user, '-m']),
-             mock.call(['passwd', '-l', user])])
-        m_setup_user_keys.assert_called_once_with(set(['key1', 'key2']), user)
-
-    @mock.patch('cloudinit.ssh_util.setup_user_keys')
-    def test_setup_ssh_authorized_keys_with_integer(
-            self, m_setup_user_keys, m_subp, m_is_snappy):
-        """ssh_authorized_keys warns on non-iterable/string type."""
-        user = 'foouser'
-        self.dist.create_user(user, ssh_authorized_keys=-1)
-        m_setup_user_keys.assert_called_once_with(set([]), user)
-        match = re.match(
-            r'.*WARNING: Invalid type \'<(type|class) \'int\'>\' detected for'
-            ' \'ssh_authorized_keys\'.*',
-            self.logs.getvalue(),
-            re.DOTALL)
-        self.assertIsNotNone(
-            match, 'Missing ssh_authorized_keys invalid type warning')
-
-    @mock.patch('cloudinit.ssh_util.setup_user_keys')
-    def test_create_user_with_ssh_redirect_user_no_cloud_keys(
-            self, m_setup_user_keys, m_subp, m_is_snappy):
-        """Log a warning when redirecting a user with no cloud ssh keys."""
-        user = 'foouser'
-        self.dist.create_user(user, ssh_redirect_user='someuser')
-        self.assertIn(
-            'WARNING: Unable to disable SSH logins for foouser given '
-            'ssh_redirect_user: someuser. 
No cloud public-keys present.\n', - self.logs.getvalue()) - m_setup_user_keys.assert_not_called() - - @mock.patch('cloudinit.ssh_util.setup_user_keys') - def test_create_user_with_ssh_redirect_user_with_cloud_keys( - self, m_setup_user_keys, m_subp, m_is_snappy): - """Disable ssh when ssh_redirect_user and cloud ssh keys are set.""" - user = 'foouser' - self.dist.create_user( - user, ssh_redirect_user='someuser', cloud_public_ssh_keys=['key1']) - disable_prefix = ssh_util.DISABLE_USER_OPTS - disable_prefix = disable_prefix.replace('$USER', 'someuser') - disable_prefix = disable_prefix.replace('$DISABLE_USER', user) - m_setup_user_keys.assert_called_once_with( - set(['key1']), 'foouser', options=disable_prefix) - - @mock.patch('cloudinit.ssh_util.setup_user_keys') - def test_create_user_with_ssh_redirect_user_does_not_disable_auth_keys( - self, m_setup_user_keys, m_subp, m_is_snappy): - """Do not disable ssh_authorized_keys when ssh_redirect_user is set.""" - user = 'foouser' - self.dist.create_user( - user, ssh_authorized_keys='auth1', ssh_redirect_user='someuser', - cloud_public_ssh_keys=['key1']) - disable_prefix = ssh_util.DISABLE_USER_OPTS - disable_prefix = disable_prefix.replace('$USER', 'someuser') - disable_prefix = disable_prefix.replace('$DISABLE_USER', user) - self.assertEqual( - m_setup_user_keys.call_args_list, - [mock.call(set(['auth1']), user), # not disabled - mock.call(set(['key1']), 'foouser', options=disable_prefix)]) - - @mock.patch("cloudinit.distros.subp.which") - def test_lock_with_usermod_if_no_passwd(self, m_which, m_subp, - m_is_snappy): - """Lock uses usermod --lock if no 'passwd' cmd available.""" - m_which.side_effect = lambda m: m in ('usermod',) - self.dist.lock_passwd("bob") - self.assertEqual( - [mock.call(['usermod', '--lock', 'bob'])], - m_subp.call_args_list) - - @mock.patch("cloudinit.distros.subp.which") - def test_lock_with_passwd_if_available(self, m_which, m_subp, - m_is_snappy): - """Lock with only passwd will use passwd.""" - m_which.side_effect = lambda m: m in ('passwd',) - self.dist.lock_passwd("bob") - self.assertEqual( - [mock.call(['passwd', '-l', 'bob'])], - m_subp.call_args_list) - - @mock.patch("cloudinit.distros.subp.which") - def test_lock_raises_runtime_if_no_commands(self, m_which, m_subp, - m_is_snappy): - """Lock with no commands available raises RuntimeError.""" - m_which.return_value = None - with self.assertRaises(RuntimeError): - self.dist.lock_passwd("bob") - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_debian.py b/tests/unittests/test_distros/test_debian.py deleted file mode 100644 index a88c2686..00000000 --- a/tests/unittests/test_distros/test_debian.py +++ /dev/null @@ -1,174 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
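The TestDebianApplyLocale cases below amount to a small decision table for apply_locale: do nothing when the system locale already matches, skip locale-gen for C.UTF-8, and otherwise run locale-gen followed by update-locale. A hypothetical helper mirroring those expectations (a paraphrase of the asserted command lists, not the distro code itself):

    def plan_locale_commands(wanted, sys_locale, out_fn, keyname='LANG'):
        # Return the commands the tests expect apply_locale to run.
        if not wanted:
            raise ValueError('Failed to provide locale value.')
        if sys_locale == wanted:
            return []  # already configured: no re-run expected
        cmds = []
        if wanted != 'C.UTF-8':
            cmds.append(['locale-gen', wanted])
        cmds.append(['update-locale', '--locale-file=' + out_fn,
                     '%s=%s' % (keyname, wanted)])
        return cmds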
-from itertools import count, cycle -from unittest import mock - -import pytest - -from cloudinit import distros, util -from cloudinit.distros.debian import ( - APT_GET_COMMAND, - APT_GET_WRAPPER, -) -from cloudinit.tests.helpers import FilesystemMockingTestCase -from cloudinit import subp - - -@mock.patch("cloudinit.distros.debian.subp.subp") -class TestDebianApplyLocale(FilesystemMockingTestCase): - - def setUp(self): - super(TestDebianApplyLocale, self).setUp() - self.new_root = self.tmp_dir() - self.patchOS(self.new_root) - self.patchUtils(self.new_root) - self.spath = self.tmp_path('etc/default/locale', self.new_root) - cls = distros.fetch("debian") - self.distro = cls("debian", {}, None) - - def test_no_rerun(self, m_subp): - """If system has defined locale, no re-run is expected.""" - m_subp.return_value = (None, None) - locale = 'en_US.UTF-8' - util.write_file(self.spath, 'LANG=%s\n' % locale, omode="w") - self.distro.apply_locale(locale, out_fn=self.spath) - m_subp.assert_not_called() - - def test_no_regen_on_c_utf8(self, m_subp): - """If locale is set to C.UTF8, do not attempt to call locale-gen""" - m_subp.return_value = (None, None) - locale = 'C.UTF-8' - util.write_file(self.spath, 'LANG=%s\n' % 'en_US.UTF-8', omode="w") - self.distro.apply_locale(locale, out_fn=self.spath) - self.assertEqual( - [['update-locale', '--locale-file=' + self.spath, - 'LANG=%s' % locale]], - [p[0][0] for p in m_subp.call_args_list]) - - def test_rerun_if_different(self, m_subp): - """If system has different locale, locale-gen should be called.""" - m_subp.return_value = (None, None) - locale = 'en_US.UTF-8' - util.write_file(self.spath, 'LANG=fr_FR.UTF-8', omode="w") - self.distro.apply_locale(locale, out_fn=self.spath) - self.assertEqual( - [['locale-gen', locale], - ['update-locale', '--locale-file=' + self.spath, - 'LANG=%s' % locale]], - [p[0][0] for p in m_subp.call_args_list]) - - def test_rerun_if_no_file(self, m_subp): - """If system has no locale file, locale-gen should be called.""" - m_subp.return_value = (None, None) - locale = 'en_US.UTF-8' - self.distro.apply_locale(locale, out_fn=self.spath) - self.assertEqual( - [['locale-gen', locale], - ['update-locale', '--locale-file=' + self.spath, - 'LANG=%s' % locale]], - [p[0][0] for p in m_subp.call_args_list]) - - def test_rerun_on_unset_system_locale(self, m_subp): - """If system has unset locale, locale-gen should be called.""" - m_subp.return_value = (None, None) - locale = 'en_US.UTF-8' - util.write_file(self.spath, 'LANG=', omode="w") - self.distro.apply_locale(locale, out_fn=self.spath) - self.assertEqual( - [['locale-gen', locale], - ['update-locale', '--locale-file=' + self.spath, - 'LANG=%s' % locale]], - [p[0][0] for p in m_subp.call_args_list]) - - def test_rerun_on_mismatched_keys(self, m_subp): - """If key is LC_ALL and system has only LANG, rerun is expected.""" - m_subp.return_value = (None, None) - locale = 'en_US.UTF-8' - util.write_file(self.spath, 'LANG=', omode="w") - self.distro.apply_locale(locale, out_fn=self.spath, keyname='LC_ALL') - self.assertEqual( - [['locale-gen', locale], - ['update-locale', '--locale-file=' + self.spath, - 'LC_ALL=%s' % locale]], - [p[0][0] for p in m_subp.call_args_list]) - - def test_falseish_locale_raises_valueerror(self, m_subp): - """locale as None or "" is invalid and should raise ValueError.""" - - with self.assertRaises(ValueError) as ctext_m: - self.distro.apply_locale(None) - m_subp.assert_not_called() - - self.assertEqual( - 'Failed to provide locale value.', 
str(ctext_m.exception)) - - with self.assertRaises(ValueError) as ctext_m: - self.distro.apply_locale("") - m_subp.assert_not_called() - self.assertEqual( - 'Failed to provide locale value.', str(ctext_m.exception)) - - -@mock.patch.dict('os.environ', {}, clear=True) -@mock.patch("cloudinit.distros.debian.subp.which", return_value=True) -@mock.patch("cloudinit.distros.debian.subp.subp") -class TestPackageCommand: - distro = distros.fetch("debian")("debian", {}, None) - - @mock.patch("cloudinit.distros.debian.Distro._apt_lock_available", - return_value=True) - def test_simple_command(self, m_apt_avail, m_subp, m_which): - self.distro.package_command('update') - apt_args = [APT_GET_WRAPPER['command']] - apt_args.extend(APT_GET_COMMAND) - apt_args.append('update') - expected_call = { - 'args': apt_args, - 'capture': False, - 'env': {'DEBIAN_FRONTEND': 'noninteractive'}, - } - assert m_subp.call_args == mock.call(**expected_call) - - @mock.patch("cloudinit.distros.debian.Distro._apt_lock_available", - side_effect=[False, False, True]) - @mock.patch("cloudinit.distros.debian.time.sleep") - def test_wait_for_lock(self, m_sleep, m_apt_avail, m_subp, m_which): - self.distro._wait_for_apt_command("stub", {"args": "stub2"}) - assert m_sleep.call_args_list == [mock.call(1), mock.call(1)] - assert m_subp.call_args_list == [mock.call(args='stub2')] - - @mock.patch("cloudinit.distros.debian.Distro._apt_lock_available", - return_value=False) - @mock.patch("cloudinit.distros.debian.time.sleep") - @mock.patch("cloudinit.distros.debian.time.time", side_effect=count()) - def test_lock_wait_timeout( - self, m_time, m_sleep, m_apt_avail, m_subp, m_which - ): - with pytest.raises(TimeoutError): - self.distro._wait_for_apt_command("stub", "stub2", timeout=5) - assert m_subp.call_args_list == [] - - @mock.patch("cloudinit.distros.debian.Distro._apt_lock_available", - side_effect=cycle([True, False])) - @mock.patch("cloudinit.distros.debian.time.sleep") - def test_lock_exception_wait(self, m_sleep, m_apt_avail, m_subp, m_which): - exception = subp.ProcessExecutionError( - exit_code=100, stderr="Could not get apt lock" - ) - m_subp.side_effect = [exception, exception, "return_thing"] - ret = self.distro._wait_for_apt_command("stub", {"args": "stub2"}) - assert ret == "return_thing" - - @mock.patch("cloudinit.distros.debian.Distro._apt_lock_available", - side_effect=cycle([True, False])) - @mock.patch("cloudinit.distros.debian.time.sleep") - @mock.patch("cloudinit.distros.debian.time.time", side_effect=count()) - def test_lock_exception_timeout( - self, m_time, m_sleep, m_apt_avail, m_subp, m_which - ): - m_subp.side_effect = subp.ProcessExecutionError( - exit_code=100, stderr="Could not get apt lock" - ) - with pytest.raises(TimeoutError): - self.distro._wait_for_apt_command( - "stub", {"args": "stub2"}, timeout=5 - ) diff --git a/tests/unittests/test_distros/test_dragonflybsd.py b/tests/unittests/test_distros/test_dragonflybsd.py deleted file mode 100644 index df2c00f4..00000000 --- a/tests/unittests/test_distros/test_dragonflybsd.py +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python3 - - -import cloudinit.util -from cloudinit.tests.helpers import mock - - -def test_find_dragonflybsd_part(): - assert cloudinit.util.find_dragonflybsd_part("/dev/vbd0s3") == "vbd0s3" - - -@mock.patch("cloudinit.util.is_DragonFlyBSD") -@mock.patch("cloudinit.subp.subp") -def test_parse_mount(mock_subp, m_is_DragonFlyBSD): - mount_out = """ -vbd0s3 on / (hammer2, local) -devfs on /dev (devfs, nosymfollow, local) -/dev/vbd0s0a on 
/boot (ufs, local)
-procfs on /proc (procfs, local)
-tmpfs on /var/run/shm (tmpfs, local)
-"""
-
-    mock_subp.return_value = (mount_out, "")
-    m_is_DragonFlyBSD.return_value = True
-    assert cloudinit.util.parse_mount("/") == ("vbd0s3", "hammer2", "/")
diff --git a/tests/unittests/test_distros/test_freebsd.py b/tests/unittests/test_distros/test_freebsd.py
deleted file mode 100644
index be565b04..00000000
--- a/tests/unittests/test_distros/test_freebsd.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.util import (find_freebsd_part, get_path_dev_freebsd)
-from cloudinit.tests.helpers import (CiTestCase, mock)
-
-import os
-
-
-class TestDeviceLookUp(CiTestCase):
-
-    @mock.patch('cloudinit.subp.subp')
-    def test_find_freebsd_part_label(self, mock_subp):
-        glabel_out = '''
-gptid/fa52d426-c337-11e6-8911-00155d4c5e47  N/A  da0p1
-                              label/rootfs  N/A  da0p2
-                                label/swap  N/A  da0p3
-'''
-        mock_subp.return_value = (glabel_out, "")
-        res = find_freebsd_part("/dev/label/rootfs")
-        self.assertEqual("da0p2", res)
-
-    @mock.patch('cloudinit.subp.subp')
-    def test_find_freebsd_part_gpt(self, mock_subp):
-        glabel_out = '''
-                                gpt/bootfs  N/A  vtbd0p1
-gptid/3f4cbe26-75da-11e8-a8f2-002590ec6166  N/A  vtbd0p1
-                                gpt/swapfs  N/A  vtbd0p2
-                                gpt/rootfs  N/A  vtbd0p3
-                            iso9660/cidata  N/A  vtbd2
-'''
-        mock_subp.return_value = (glabel_out, "")
-        res = find_freebsd_part("/dev/gpt/rootfs")
-        self.assertEqual("vtbd0p3", res)
-
-    def test_get_path_dev_freebsd_label(self):
-        mnt_list = '''
-/dev/label/rootfs / ufs rw 1 1
-devfs /dev devfs rw,multilabel 0 0
-fdescfs /dev/fd fdescfs rw 0 0
-/dev/da1s1 /mnt/resource ufs rw 2 2
-'''
-        with mock.patch.object(os.path, 'exists',
-                               return_value=True):
-            res = get_path_dev_freebsd('/etc', mnt_list)
-            self.assertIsNotNone(res)
diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py
deleted file mode 100644
index 336150bc..00000000
--- a/tests/unittests/test_distros/test_generic.py
+++ /dev/null
@@ -1,315 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit import distros
-from cloudinit import util
-
-from cloudinit.tests import helpers
-
-import os
-import pytest
-import shutil
-import tempfile
-from unittest import mock
-
-unknown_arch_info = {
-    'arches': ['default'],
-    'failsafe': {'primary': 'http://fs-primary-default',
-                 'security': 'http://fs-security-default'}
-}
-
-package_mirrors = [
-    {'arches': ['i386', 'amd64'],
-     'failsafe': {'primary': 'http://fs-primary-intel',
-                  'security': 'http://fs-security-intel'},
-     'search': {
-         'primary': ['http://%(ec2_region)s.ec2/',
-                     'http://%(availability_zone)s.clouds/'],
-         'security': ['http://security-mirror1-intel',
-                      'http://security-mirror2-intel']}},
-    {'arches': ['armhf', 'armel'],
-     'failsafe': {'primary': 'http://fs-primary-arm',
-                  'security': 'http://fs-security-arm'}},
-    unknown_arch_info
-]
-
-gpmi = distros._get_package_mirror_info
-gapmi = distros._get_arch_package_mirror_info
-
-
-class TestGenericDistro(helpers.FilesystemMockingTestCase):
-
-    def setUp(self):
-        super(TestGenericDistro, self).setUp()
-        # Make a temp directory for tests to use.
- self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - - def _write_load_sudoers(self, _user, rules): - cls = distros.fetch("ubuntu") - d = cls("ubuntu", {}, None) - os.makedirs(os.path.join(self.tmp, "etc")) - os.makedirs(os.path.join(self.tmp, "etc", 'sudoers.d')) - self.patchOS(self.tmp) - self.patchUtils(self.tmp) - d.write_sudo_rules("harlowja", rules) - contents = util.load_file(d.ci_sudoers_fn) - return contents - - def _count_in(self, lines_look_for, text_content): - found_amount = 0 - for e in lines_look_for: - for line in text_content.splitlines(): - line = line.strip() - if line == e: - found_amount += 1 - return found_amount - - def test_sudoers_ensure_rules(self): - rules = 'ALL=(ALL:ALL) ALL' - contents = self._write_load_sudoers('harlowja', rules) - expected = ['harlowja ALL=(ALL:ALL) ALL'] - self.assertEqual(len(expected), self._count_in(expected, contents)) - not_expected = [ - 'harlowja A', - 'harlowja L', - 'harlowja L', - ] - self.assertEqual(0, self._count_in(not_expected, contents)) - - def test_sudoers_ensure_rules_list(self): - rules = [ - 'ALL=(ALL:ALL) ALL', - 'B-ALL=(ALL:ALL) ALL', - 'C-ALL=(ALL:ALL) ALL', - ] - contents = self._write_load_sudoers('harlowja', rules) - expected = [ - 'harlowja ALL=(ALL:ALL) ALL', - 'harlowja B-ALL=(ALL:ALL) ALL', - 'harlowja C-ALL=(ALL:ALL) ALL', - ] - self.assertEqual(len(expected), self._count_in(expected, contents)) - not_expected = [ - 'harlowja A', - 'harlowja L', - 'harlowja L', - ] - self.assertEqual(0, self._count_in(not_expected, contents)) - - def test_sudoers_ensure_new(self): - cls = distros.fetch("ubuntu") - d = cls("ubuntu", {}, None) - self.patchOS(self.tmp) - self.patchUtils(self.tmp) - d.ensure_sudo_dir("/b") - contents = util.load_file("/etc/sudoers") - self.assertIn("includedir /b", contents) - self.assertTrue(os.path.isdir("/b")) - - def test_sudoers_ensure_append(self): - cls = distros.fetch("ubuntu") - d = cls("ubuntu", {}, None) - self.patchOS(self.tmp) - self.patchUtils(self.tmp) - util.write_file("/etc/sudoers", "josh, josh\n") - d.ensure_sudo_dir("/b") - contents = util.load_file("/etc/sudoers") - self.assertIn("includedir /b", contents) - self.assertTrue(os.path.isdir("/b")) - self.assertIn("josh", contents) - self.assertEqual(2, contents.count("josh")) - - def test_sudoers_ensure_only_one_includedir(self): - cls = distros.fetch("ubuntu") - d = cls("ubuntu", {}, None) - self.patchOS(self.tmp) - self.patchUtils(self.tmp) - for char in ['#', '@']: - util.write_file("/etc/sudoers", "{}includedir /b".format(char)) - d.ensure_sudo_dir("/b") - contents = util.load_file("/etc/sudoers") - self.assertIn("includedir /b", contents) - self.assertTrue(os.path.isdir("/b")) - self.assertEqual(1, contents.count("includedir /b")) - - def test_arch_package_mirror_info_unknown(self): - """for an unknown arch, we should get back that with arch 'default'.""" - arch_mirrors = gapmi(package_mirrors, arch="unknown") - self.assertEqual(unknown_arch_info, arch_mirrors) - - def test_arch_package_mirror_info_known(self): - arch_mirrors = gapmi(package_mirrors, arch="amd64") - self.assertEqual(package_mirrors[0], arch_mirrors) - - def test_systemd_in_use(self): - cls = distros.fetch("ubuntu") - d = cls("ubuntu", {}, None) - self.patchOS(self.tmp) - self.patchUtils(self.tmp) - os.makedirs('/run/systemd/system') - self.assertTrue(d.uses_systemd()) - - def test_systemd_not_in_use(self): - cls = distros.fetch("ubuntu") - d = cls("ubuntu", {}, None) - self.patchOS(self.tmp) - self.patchUtils(self.tmp) - 
self.assertFalse(d.uses_systemd()) - - def test_systemd_symlink(self): - cls = distros.fetch("ubuntu") - d = cls("ubuntu", {}, None) - self.patchOS(self.tmp) - self.patchUtils(self.tmp) - os.makedirs('/run/systemd') - os.symlink('/', '/run/systemd/system') - self.assertFalse(d.uses_systemd()) - - @mock.patch('cloudinit.distros.debian.read_system_locale') - def test_get_locale_ubuntu(self, m_locale): - """Test ubuntu distro returns locale set to C.UTF-8""" - m_locale.return_value = 'C.UTF-8' - cls = distros.fetch("ubuntu") - d = cls("ubuntu", {}, None) - locale = d.get_locale() - self.assertEqual('C.UTF-8', locale) - - def test_get_locale_rhel(self): - """Test rhel distro returns NotImplementedError exception""" - cls = distros.fetch("rhel") - d = cls("rhel", {}, None) - with self.assertRaises(NotImplementedError): - d.get_locale() - - def test_expire_passwd_uses_chpasswd(self): - """Test ubuntu.expire_passwd uses the passwd command.""" - for d_name in ("ubuntu", "rhel"): - cls = distros.fetch(d_name) - d = cls(d_name, {}, None) - with mock.patch("cloudinit.subp.subp") as m_subp: - d.expire_passwd("myuser") - m_subp.assert_called_once_with(["passwd", "--expire", "myuser"]) - - def test_expire_passwd_freebsd_uses_pw_command(self): - """Test FreeBSD.expire_passwd uses the pw command.""" - cls = distros.fetch("freebsd") - d = cls("freebsd", {}, None) - with mock.patch("cloudinit.subp.subp") as m_subp: - d.expire_passwd("myuser") - m_subp.assert_called_once_with( - ["pw", "usermod", "myuser", "-p", "01-Jan-1970"]) - - -class TestGetPackageMirrors: - - def return_first(self, mlist): - if not mlist: - return None - return mlist[0] - - def return_second(self, mlist): - if not mlist: - return None - - return mlist[1] if len(mlist) > 1 else None - - def return_none(self, _mlist): - return None - - def return_last(self, mlist): - if not mlist: - return None - return(mlist[-1]) - - @pytest.mark.parametrize( - "allow_ec2_mirror, platform_type, mirrors", - [ - (True, "ec2", [ - {'primary': 'http://us-east-1.ec2/', - 'security': 'http://security-mirror1-intel'}, - {'primary': 'http://us-east-1a.clouds/', - 'security': 'http://security-mirror2-intel'} - ]), - (True, "other", [ - {'primary': 'http://us-east-1.ec2/', - 'security': 'http://security-mirror1-intel'}, - {'primary': 'http://us-east-1a.clouds/', - 'security': 'http://security-mirror2-intel'} - ]), - (False, "ec2", [ - {'primary': 'http://us-east-1.ec2/', - 'security': 'http://security-mirror1-intel'}, - {'primary': 'http://us-east-1a.clouds/', - 'security': 'http://security-mirror2-intel'} - ]), - (False, "other", [ - {'primary': 'http://us-east-1a.clouds/', - 'security': 'http://security-mirror1-intel'}, - {'primary': 'http://fs-primary-intel', - 'security': 'http://security-mirror2-intel'} - ]) - ]) - def test_get_package_mirror_info_az_ec2(self, - allow_ec2_mirror, - platform_type, - mirrors): - flag_path = "cloudinit.distros." 
\ - "ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES" - with mock.patch(flag_path, allow_ec2_mirror): - arch_mirrors = gapmi(package_mirrors, arch="amd64") - data_source_mock = mock.Mock( - availability_zone="us-east-1a", - platform_type=platform_type) - - results = gpmi(arch_mirrors, data_source=data_source_mock, - mirror_filter=self.return_first) - assert(results == mirrors[0]) - - results = gpmi(arch_mirrors, data_source=data_source_mock, - mirror_filter=self.return_second) - assert(results == mirrors[1]) - - results = gpmi(arch_mirrors, data_source=data_source_mock, - mirror_filter=self.return_none) - assert(results == package_mirrors[0]['failsafe']) - - def test_get_package_mirror_info_az_non_ec2(self): - arch_mirrors = gapmi(package_mirrors, arch="amd64") - data_source_mock = mock.Mock(availability_zone="nova.cloudvendor") - - results = gpmi(arch_mirrors, data_source=data_source_mock, - mirror_filter=self.return_first) - assert(results == { - 'primary': 'http://nova.cloudvendor.clouds/', - 'security': 'http://security-mirror1-intel'} - ) - - results = gpmi(arch_mirrors, data_source=data_source_mock, - mirror_filter=self.return_last) - assert(results == { - 'primary': 'http://nova.cloudvendor.clouds/', - 'security': 'http://security-mirror2-intel'} - ) - - def test_get_package_mirror_info_none(self): - arch_mirrors = gapmi(package_mirrors, arch="amd64") - data_source_mock = mock.Mock(availability_zone=None) - - # because both search entries here replacement based on - # availability-zone, the filter will be called with an empty list and - # failsafe should be taken. - results = gpmi(arch_mirrors, data_source=data_source_mock, - mirror_filter=self.return_first) - assert(results == { - 'primary': 'http://fs-primary-intel', - 'security': 'http://security-mirror1-intel'} - ) - - results = gpmi(arch_mirrors, data_source=data_source_mock, - mirror_filter=self.return_last) - assert(results == { - 'primary': 'http://fs-primary-intel', - 'security': 'http://security-mirror2-intel'} - ) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_gentoo.py b/tests/unittests/test_distros/test_gentoo.py deleted file mode 100644 index 37a4f51f..00000000 --- a/tests/unittests/test_distros/test_gentoo.py +++ /dev/null @@ -1,26 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit import util -from cloudinit import atomic_helper -from cloudinit.tests.helpers import CiTestCase -from . import _get_distro - - -class TestGentoo(CiTestCase): - - def test_write_hostname(self): - distro = _get_distro("gentoo") - hostname = "myhostname" - hostfile = self.tmp_path("hostfile") - distro._write_hostname(hostname, hostfile) - self.assertEqual('hostname="myhostname"\n', util.load_file(hostfile)) - - def test_write_existing_hostname_with_comments(self): - distro = _get_distro("gentoo") - hostname = "myhostname" - contents = '#This is the hostname\nhostname="localhost"' - hostfile = self.tmp_path("hostfile") - atomic_helper.write_file(hostfile, contents, omode="w") - distro._write_hostname(hostname, hostfile) - self.assertEqual('#This is the hostname\nhostname="myhostname"\n', - util.load_file(hostfile)) diff --git a/tests/unittests/test_distros/test_hostname.py b/tests/unittests/test_distros/test_hostname.py deleted file mode 100644 index f6d4dbe5..00000000 --- a/tests/unittests/test_distros/test_hostname.py +++ /dev/null @@ -1,42 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
- -import unittest - -from cloudinit.distros.parsers import hostname - - -BASE_HOSTNAME = ''' -# My super-duper-hostname - -blahblah - -''' -BASE_HOSTNAME = BASE_HOSTNAME.strip() - - -class TestHostnameHelper(unittest.TestCase): - def test_parse_same(self): - hn = hostname.HostnameConf(BASE_HOSTNAME) - self.assertEqual(str(hn).strip(), BASE_HOSTNAME) - self.assertEqual(hn.hostname, 'blahblah') - - def test_no_adjust_hostname(self): - hn = hostname.HostnameConf(BASE_HOSTNAME) - prev_name = hn.hostname - hn.set_hostname("") - self.assertEqual(hn.hostname, prev_name) - - def test_adjust_hostname(self): - hn = hostname.HostnameConf(BASE_HOSTNAME) - prev_name = hn.hostname - self.assertEqual(prev_name, 'blahblah') - hn.set_hostname("bbbbd") - self.assertEqual(hn.hostname, 'bbbbd') - expected_out = ''' -# My super-duper-hostname - -bbbbd -''' - self.assertEqual(str(hn).strip(), expected_out.strip()) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_hosts.py b/tests/unittests/test_distros/test_hosts.py deleted file mode 100644 index 8aaa6e48..00000000 --- a/tests/unittests/test_distros/test_hosts.py +++ /dev/null @@ -1,45 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import unittest - -from cloudinit.distros.parsers import hosts - - -BASE_ETC = ''' -# Example -127.0.0.1 localhost -192.168.1.10 foo.mydomain.org foo -192.168.1.10 bar.mydomain.org bar -146.82.138.7 master.debian.org master -209.237.226.90 www.opensource.org -''' -BASE_ETC = BASE_ETC.strip() - - -class TestHostsHelper(unittest.TestCase): - def test_parse(self): - eh = hosts.HostsConf(BASE_ETC) - self.assertEqual(eh.get_entry('127.0.0.1'), [['localhost']]) - self.assertEqual(eh.get_entry('192.168.1.10'), - [['foo.mydomain.org', 'foo'], - ['bar.mydomain.org', 'bar']]) - eh = str(eh) - self.assertTrue(eh.startswith('# Example')) - - def test_add(self): - eh = hosts.HostsConf(BASE_ETC) - eh.add_entry('127.0.0.0', 'blah') - self.assertEqual(eh.get_entry('127.0.0.0'), [['blah']]) - eh.add_entry('127.0.0.3', 'blah', 'blah2', 'blah3') - self.assertEqual(eh.get_entry('127.0.0.3'), - [['blah', 'blah2', 'blah3']]) - - def test_del(self): - eh = hosts.HostsConf(BASE_ETC) - eh.add_entry('127.0.0.0', 'blah') - self.assertEqual(eh.get_entry('127.0.0.0'), [['blah']]) - - eh.del_entries('127.0.0.0') - self.assertEqual(eh.get_entry('127.0.0.0'), []) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_manage_service.py b/tests/unittests/test_distros/test_manage_service.py deleted file mode 100644 index 47e7cfb0..00000000 --- a/tests/unittests/test_distros/test_manage_service.py +++ /dev/null @@ -1,38 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
- -from cloudinit.tests.helpers import (CiTestCase, mock) -from tests.unittests.util import TestingDistro - - -class TestManageService(CiTestCase): - - with_logs = True - - def setUp(self): - super(TestManageService, self).setUp() - self.dist = TestingDistro() - - @mock.patch.object(TestingDistro, 'uses_systemd', return_value=False) - @mock.patch("cloudinit.distros.subp.subp") - def test_manage_service_systemctl_initcmd(self, m_subp, m_sysd): - self.dist.init_cmd = ['systemctl'] - self.dist.manage_service('start', 'myssh') - m_subp.assert_called_with(['systemctl', 'start', 'myssh'], - capture=True) - - @mock.patch.object(TestingDistro, 'uses_systemd', return_value=False) - @mock.patch("cloudinit.distros.subp.subp") - def test_manage_service_service_initcmd(self, m_subp, m_sysd): - self.dist.init_cmd = ['service'] - self.dist.manage_service('start', 'myssh') - m_subp.assert_called_with(['service', 'myssh', 'start'], capture=True) - - @mock.patch.object(TestingDistro, 'uses_systemd', return_value=True) - @mock.patch("cloudinit.distros.subp.subp") - def test_manage_service_systemctl(self, m_subp, m_sysd): - self.dist.init_cmd = ['ignore'] - self.dist.manage_service('start', 'myssh') - m_subp.assert_called_with(['systemctl', 'start', 'myssh'], - capture=True) - -# vi: ts=4 sw=4 expandtab diff --git a/tests/unittests/test_distros/test_netbsd.py b/tests/unittests/test_distros/test_netbsd.py deleted file mode 100644 index 11a68d2a..00000000 --- a/tests/unittests/test_distros/test_netbsd.py +++ /dev/null @@ -1,17 +0,0 @@ -import cloudinit.distros.netbsd - -import pytest -import unittest.mock as mock - - -@pytest.mark.parametrize('with_pkgin', (True, False)) -@mock.patch("cloudinit.distros.netbsd.os") -def test_init(m_os, with_pkgin): - print(with_pkgin) - m_os.path.exists.return_value = with_pkgin - cfg = {} - - distro = cloudinit.distros.netbsd.NetBSD("netbsd", cfg, None) - expectation = ['pkgin', '-y', 'full-upgrade'] if with_pkgin else None - assert distro.pkg_cmd_upgrade_prefix == expectation - assert [mock.call('/usr/pkg/bin/pkgin')] == m_os.path.exists.call_args_list diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py deleted file mode 100644 index e4eba179..00000000 --- a/tests/unittests/test_distros/test_netconfig.py +++ /dev/null @@ -1,916 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
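Worth noting about TestManageService above: the two init commands take their arguments in opposite order, and that ordering is the whole point of the assertions. A condensed sketch of the dispatch being pinned down (simplified, names invented for illustration):

    import subprocess

    def manage_service(init_cmd, action, name):
        # 'service' expects "service <name> <action>"; systemctl expects
        # "systemctl <action> <name>".
        if init_cmd[0] == 'service':
            cmd = init_cmd + [name, action]
        else:
            cmd = init_cmd + [action, name]
        return subprocess.run(cmd, capture_output=True, check=True)

    # manage_service(['service'], 'start', 'myssh')    -> service myssh start
    # manage_service(['systemctl'], 'start', 'myssh')  -> systemctl start myssh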
- -import copy -import os -import re -from io import StringIO -from textwrap import dedent -from unittest import mock - -from cloudinit import distros -from cloudinit.distros.parsers.sys_conf import SysConf -from cloudinit import helpers -from cloudinit import settings -from cloudinit.tests.helpers import ( - FilesystemMockingTestCase, dir2dict) -from cloudinit import subp -from cloudinit import util -from cloudinit import safeyaml - -BASE_NET_CFG = ''' -auto lo -iface lo inet loopback - -auto eth0 -iface eth0 inet static - address 192.168.1.5 - broadcast 192.168.1.0 - gateway 192.168.1.254 - netmask 255.255.255.0 - network 192.168.0.0 - -auto eth1 -iface eth1 inet dhcp -''' - -BASE_NET_CFG_FROM_V2 = ''' -auto lo -iface lo inet loopback - -auto eth0 -iface eth0 inet static - address 192.168.1.5/24 - gateway 192.168.1.254 - -auto eth1 -iface eth1 inet dhcp -''' - -BASE_NET_CFG_IPV6 = ''' -auto lo -iface lo inet loopback - -auto eth0 -iface eth0 inet static - address 192.168.1.5 - netmask 255.255.255.0 - network 192.168.0.0 - broadcast 192.168.1.0 - gateway 192.168.1.254 - -iface eth0 inet6 static - address 2607:f0d0:1002:0011::2 - netmask 64 - gateway 2607:f0d0:1002:0011::1 - -iface eth1 inet static - address 192.168.1.6 - netmask 255.255.255.0 - network 192.168.0.0 - broadcast 192.168.1.0 - gateway 192.168.1.254 - -iface eth1 inet6 static - address 2607:f0d0:1002:0011::3 - netmask 64 - gateway 2607:f0d0:1002:0011::1 -''' - -V1_NET_CFG = {'config': [{'name': 'eth0', - - 'subnets': [{'address': '192.168.1.5', - 'broadcast': '192.168.1.0', - 'gateway': '192.168.1.254', - 'netmask': '255.255.255.0', - 'type': 'static'}], - 'type': 'physical'}, - {'name': 'eth1', - 'subnets': [{'control': 'auto', 'type': 'dhcp4'}], - 'type': 'physical'}], - 'version': 1} - -V1_NET_CFG_WITH_DUPS = """\ -# same value in interface specific dns and global dns -# should produce single entry in network file -version: 1 -config: - - type: physical - name: eth0 - subnets: - - type: static - address: 192.168.0.102/24 - dns_nameservers: [1.2.3.4] - dns_search: [test.com] - interface: eth0 - - type: nameserver - address: [1.2.3.4] - search: [test.com] -""" - -V1_NET_CFG_OUTPUT = """\ -# This file is generated from information provided by the datasource. Changes -# to it will not persist across an instance reboot. To disable cloud-init's -# network configuration capabilities, write a file -# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: -# network: {config: disabled} -auto lo -iface lo inet loopback - -auto eth0 -iface eth0 inet static - address 192.168.1.5/24 - broadcast 192.168.1.0 - gateway 192.168.1.254 - -auto eth1 -iface eth1 inet dhcp -""" - -V1_NET_CFG_IPV6_OUTPUT = """\ -# This file is generated from information provided by the datasource. Changes -# to it will not persist across an instance reboot. 
To disable cloud-init's -# network configuration capabilities, write a file -# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: -# network: {config: disabled} -auto lo -iface lo inet loopback - -auto eth0 -iface eth0 inet6 static - address 2607:f0d0:1002:0011::2/64 - gateway 2607:f0d0:1002:0011::1 - -auto eth1 -iface eth1 inet dhcp -""" - -V1_NET_CFG_IPV6 = {'config': [{'name': 'eth0', - 'subnets': [{'address': - '2607:f0d0:1002:0011::2', - 'gateway': - '2607:f0d0:1002:0011::1', - 'netmask': '64', - 'type': 'static6'}], - 'type': 'physical'}, - {'name': 'eth1', - 'subnets': [{'control': 'auto', - 'type': 'dhcp4'}], - 'type': 'physical'}], - 'version': 1} - - -V1_TO_V2_NET_CFG_OUTPUT = """\ -# This file is generated from information provided by the datasource. Changes -# to it will not persist across an instance reboot. To disable cloud-init's -# network configuration capabilities, write a file -# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: -# network: {config: disabled} -network: - version: 2 - ethernets: - eth0: - addresses: - - 192.168.1.5/24 - gateway4: 192.168.1.254 - eth1: - dhcp4: true -""" - -V1_TO_V2_NET_CFG_IPV6_OUTPUT = """\ -# This file is generated from information provided by the datasource. Changes -# to it will not persist across an instance reboot. To disable cloud-init's -# network configuration capabilities, write a file -# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: -# network: {config: disabled} -network: - version: 2 - ethernets: - eth0: - addresses: - - 2607:f0d0:1002:0011::2/64 - gateway6: 2607:f0d0:1002:0011::1 - eth1: - dhcp4: true -""" - -V2_NET_CFG = { - 'ethernets': { - 'eth7': { - 'addresses': ['192.168.1.5/24'], - 'gateway4': '192.168.1.254'}, - 'eth9': { - 'dhcp4': True} - }, - 'version': 2 -} - - -V2_TO_V2_NET_CFG_OUTPUT = """\ -# This file is generated from information provided by the datasource. Changes -# to it will not persist across an instance reboot. 
To disable cloud-init's -# network configuration capabilities, write a file -# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: -# network: {config: disabled} -network: - ethernets: - eth7: - addresses: - - 192.168.1.5/24 - gateway4: 192.168.1.254 - eth9: - dhcp4: true - version: 2 -""" - - -class WriteBuffer(object): - def __init__(self): - self.buffer = StringIO() - self.mode = None - self.omode = None - - def write(self, text): - self.buffer.write(text) - - def __str__(self): - return self.buffer.getvalue() - - -class TestNetCfgDistroBase(FilesystemMockingTestCase): - - def setUp(self): - super(TestNetCfgDistroBase, self).setUp() - self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy') - - def _get_distro(self, dname, renderers=None): - cls = distros.fetch(dname) - cfg = settings.CFG_BUILTIN - cfg['system_info']['distro'] = dname - if renderers: - cfg['system_info']['network'] = {'renderers': renderers} - paths = helpers.Paths({}) - return cls(dname, cfg.get('system_info'), paths) - - def assertCfgEquals(self, blob1, blob2): - b1 = dict(SysConf(blob1.strip().splitlines())) - b2 = dict(SysConf(blob2.strip().splitlines())) - self.assertEqual(b1, b2) - for (k, v) in b1.items(): - self.assertIn(k, b2) - for (k, v) in b2.items(): - self.assertIn(k, b1) - for (k, v) in b1.items(): - self.assertEqual(v, b2[k]) - - -class TestNetCfgDistroFreeBSD(TestNetCfgDistroBase): - - def setUp(self): - super(TestNetCfgDistroFreeBSD, self).setUp() - self.distro = self._get_distro('freebsd', renderers=['freebsd']) - - def _apply_and_verify_freebsd(self, apply_fn, config, expected_cfgs=None, - bringup=False): - if not expected_cfgs: - raise ValueError('expected_cfg must not be None') - - tmpd = None - with mock.patch('cloudinit.net.freebsd.available') as m_avail: - m_avail.return_value = True - with self.reRooted(tmpd) as tmpd: - util.ensure_dir('/etc') - util.ensure_file('/etc/rc.conf') - util.ensure_file('/etc/resolv.conf') - apply_fn(config, bringup) - - results = dir2dict(tmpd) - for cfgpath, expected in expected_cfgs.items(): - print("----------") - print(expected) - print("^^^^ expected | rendered VVVVVVV") - print(results[cfgpath]) - print("----------") - self.assertEqual( - set(expected.split('\n')), - set(results[cfgpath].split('\n'))) - self.assertEqual(0o644, get_mode(cfgpath, tmpd)) - - @mock.patch('cloudinit.net.get_interfaces_by_mac') - def test_apply_network_config_freebsd_standard(self, ifaces_mac): - ifaces_mac.return_value = { - '00:15:5d:4c:73:00': 'eth0', - } - rc_conf_expected = """\ -defaultrouter=192.168.1.254 -ifconfig_eth0='192.168.1.5 netmask 255.255.255.0' -ifconfig_eth1=DHCP -""" - - expected_cfgs = { - '/etc/rc.conf': rc_conf_expected, - '/etc/resolv.conf': '' - } - self._apply_and_verify_freebsd(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy()) - - @mock.patch('cloudinit.net.get_interfaces_by_mac') - def test_apply_network_config_freebsd_ifrename(self, ifaces_mac): - ifaces_mac.return_value = { - '00:15:5d:4c:73:00': 'vtnet0', - } - rc_conf_expected = """\ -ifconfig_vtnet0_name=eth0 -defaultrouter=192.168.1.254 -ifconfig_eth0='192.168.1.5 netmask 255.255.255.0' -ifconfig_eth1=DHCP -""" - - V1_NET_CFG_RENAME = copy.deepcopy(V1_NET_CFG) - V1_NET_CFG_RENAME['config'][0]['mac_address'] = '00:15:5d:4c:73:00' - - expected_cfgs = { - '/etc/rc.conf': rc_conf_expected, - '/etc/resolv.conf': '' - } - self._apply_and_verify_freebsd(self.distro.apply_network_config, - V1_NET_CFG_RENAME, - expected_cfgs=expected_cfgs.copy()) - 
- @mock.patch('cloudinit.net.get_interfaces_by_mac') - def test_apply_network_config_freebsd_nameserver(self, ifaces_mac): - ifaces_mac.return_value = { - '00:15:5d:4c:73:00': 'eth0', - } - - V1_NET_CFG_DNS = copy.deepcopy(V1_NET_CFG) - ns = ['1.2.3.4'] - V1_NET_CFG_DNS['config'][0]['subnets'][0]['dns_nameservers'] = ns - expected_cfgs = { - '/etc/resolv.conf': 'nameserver 1.2.3.4\n' - } - self._apply_and_verify_freebsd(self.distro.apply_network_config, - V1_NET_CFG_DNS, - expected_cfgs=expected_cfgs.copy()) - - -class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase): - - def setUp(self): - super(TestNetCfgDistroUbuntuEni, self).setUp() - self.distro = self._get_distro('ubuntu', renderers=['eni']) - - def eni_path(self): - return '/etc/network/interfaces.d/50-cloud-init.cfg' - - def _apply_and_verify_eni(self, apply_fn, config, expected_cfgs=None, - bringup=False): - if not expected_cfgs: - raise ValueError('expected_cfg must not be None') - - tmpd = None - with mock.patch('cloudinit.net.eni.available') as m_avail: - m_avail.return_value = True - with self.reRooted(tmpd) as tmpd: - apply_fn(config, bringup) - - results = dir2dict(tmpd) - for cfgpath, expected in expected_cfgs.items(): - print("----------") - print(expected) - print("^^^^ expected | rendered VVVVVVV") - print(results[cfgpath]) - print("----------") - self.assertEqual(expected, results[cfgpath]) - self.assertEqual(0o644, get_mode(cfgpath, tmpd)) - - def test_apply_network_config_eni_ub(self): - expected_cfgs = { - self.eni_path(): V1_NET_CFG_OUTPUT, - } - # ub_distro.apply_network_config(V1_NET_CFG, False) - self._apply_and_verify_eni(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy()) - - def test_apply_network_config_ipv6_ub(self): - expected_cfgs = { - self.eni_path(): V1_NET_CFG_IPV6_OUTPUT - } - self._apply_and_verify_eni(self.distro.apply_network_config, - V1_NET_CFG_IPV6, - expected_cfgs=expected_cfgs.copy()) - - -class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): - def setUp(self): - super(TestNetCfgDistroUbuntuNetplan, self).setUp() - self.distro = self._get_distro('ubuntu', renderers=['netplan']) - self.devlist = ['eth0', 'lo'] - - def _apply_and_verify_netplan(self, apply_fn, config, expected_cfgs=None, - bringup=False): - if not expected_cfgs: - raise ValueError('expected_cfg must not be None') - - tmpd = None - with mock.patch('cloudinit.net.netplan.available', - return_value=True): - with mock.patch("cloudinit.net.netplan.get_devicelist", - return_value=self.devlist): - with self.reRooted(tmpd) as tmpd: - apply_fn(config, bringup) - - results = dir2dict(tmpd) - for cfgpath, expected in expected_cfgs.items(): - print("----------") - print(expected) - print("^^^^ expected | rendered VVVVVVV") - print(results[cfgpath]) - print("----------") - self.assertEqual(expected, results[cfgpath]) - self.assertEqual(0o644, get_mode(cfgpath, tmpd)) - - def netplan_path(self): - return '/etc/netplan/50-cloud-init.yaml' - - def test_apply_network_config_v1_to_netplan_ub(self): - expected_cfgs = { - self.netplan_path(): V1_TO_V2_NET_CFG_OUTPUT, - } - - # ub_distro.apply_network_config(V1_NET_CFG, False) - self._apply_and_verify_netplan(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy()) - - def test_apply_network_config_v1_ipv6_to_netplan_ub(self): - expected_cfgs = { - self.netplan_path(): V1_TO_V2_NET_CFG_IPV6_OUTPUT, - } - - # ub_distro.apply_network_config(V1_NET_CFG_IPV6, False) - self._apply_and_verify_netplan(self.distro.apply_network_config, - 
V1_NET_CFG_IPV6, - expected_cfgs=expected_cfgs.copy()) - - def test_apply_network_config_v2_passthrough_ub(self): - expected_cfgs = { - self.netplan_path(): V2_TO_V2_NET_CFG_OUTPUT, - } - # ub_distro.apply_network_config(V2_NET_CFG, False) - self._apply_and_verify_netplan(self.distro.apply_network_config, - V2_NET_CFG, - expected_cfgs=expected_cfgs.copy()) - - -class TestNetCfgDistroRedhat(TestNetCfgDistroBase): - - def setUp(self): - super(TestNetCfgDistroRedhat, self).setUp() - self.distro = self._get_distro('rhel', renderers=['sysconfig']) - - def ifcfg_path(self, ifname): - return '/etc/sysconfig/network-scripts/ifcfg-%s' % ifname - - def control_path(self): - return '/etc/sysconfig/network' - - def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, - bringup=False): - if not expected_cfgs: - raise ValueError('expected_cfg must not be None') - - tmpd = None - with mock.patch('cloudinit.net.sysconfig.available') as m_avail: - m_avail.return_value = True - with self.reRooted(tmpd) as tmpd: - apply_fn(config, bringup) - - results = dir2dict(tmpd) - for cfgpath, expected in expected_cfgs.items(): - self.assertCfgEquals(expected, results[cfgpath]) - self.assertEqual(0o644, get_mode(cfgpath, tmpd)) - - def test_apply_network_config_rh(self): - expected_cfgs = { - self.ifcfg_path('eth0'): dedent("""\ - BOOTPROTO=none - DEFROUTE=yes - DEVICE=eth0 - GATEWAY=192.168.1.254 - IPADDR=192.168.1.5 - NETMASK=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """), - self.ifcfg_path('eth1'): dedent("""\ - BOOTPROTO=dhcp - DEVICE=eth1 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """), - self.control_path(): dedent("""\ - NETWORKING=yes - """), - } - # rh_distro.apply_network_config(V1_NET_CFG, False) - self._apply_and_verify(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy()) - - def test_apply_network_config_ipv6_rh(self): - expected_cfgs = { - self.ifcfg_path('eth0'): dedent("""\ - BOOTPROTO=none - DEFROUTE=yes - DEVICE=eth0 - IPV6ADDR=2607:f0d0:1002:0011::2/64 - IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 - IPV6_FORCE_ACCEPT_RA=no - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """), - self.ifcfg_path('eth1'): dedent("""\ - BOOTPROTO=dhcp - DEVICE=eth1 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """), - self.control_path(): dedent("""\ - NETWORKING=yes - NETWORKING_IPV6=yes - IPV6_AUTOCONF=no - """), - } - # rh_distro.apply_network_config(V1_NET_CFG_IPV6, False) - self._apply_and_verify(self.distro.apply_network_config, - V1_NET_CFG_IPV6, - expected_cfgs=expected_cfgs.copy()) - - def test_vlan_render_unsupported(self): - """Render officially unsupported vlan names.""" - cfg = { - 'version': 2, - 'ethernets': { - 'eth0': {'addresses': ["192.10.1.2/24"], - 'match': {'macaddress': "00:16:3e:60:7c:df"}}}, - 'vlans': { - 'infra0': {'addresses': ["10.0.1.2/16"], - 'id': 1001, 'link': 'eth0'}}, - } - expected_cfgs = { - self.ifcfg_path('eth0'): dedent("""\ - BOOTPROTO=none - DEVICE=eth0 - HWADDR=00:16:3e:60:7c:df - IPADDR=192.10.1.2 - NETMASK=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """), - self.ifcfg_path('infra0'): dedent("""\ - BOOTPROTO=none - DEVICE=infra0 - IPADDR=10.0.1.2 - NETMASK=255.255.0.0 - NM_CONTROLLED=no - ONBOOT=yes - PHYSDEV=eth0 - USERCTL=no - VLAN=yes - """), - self.control_path(): dedent("""\ - NETWORKING=yes - """), - } - self._apply_and_verify( - self.distro.apply_network_config, cfg, - 
expected_cfgs=expected_cfgs) - - def test_vlan_render(self): - cfg = { - 'version': 2, - 'ethernets': { - 'eth0': {'addresses': ["192.10.1.2/24"]}}, - 'vlans': { - 'eth0.1001': {'addresses': ["10.0.1.2/16"], - 'id': 1001, 'link': 'eth0'}}, - } - expected_cfgs = { - self.ifcfg_path('eth0'): dedent("""\ - BOOTPROTO=none - DEVICE=eth0 - IPADDR=192.10.1.2 - NETMASK=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """), - self.ifcfg_path('eth0.1001'): dedent("""\ - BOOTPROTO=none - DEVICE=eth0.1001 - IPADDR=10.0.1.2 - NETMASK=255.255.0.0 - NM_CONTROLLED=no - ONBOOT=yes - PHYSDEV=eth0 - USERCTL=no - VLAN=yes - """), - self.control_path(): dedent("""\ - NETWORKING=yes - """), - } - self._apply_and_verify( - self.distro.apply_network_config, cfg, - expected_cfgs=expected_cfgs) - - -class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): - - def setUp(self): - super(TestNetCfgDistroOpensuse, self).setUp() - self.distro = self._get_distro('opensuse', renderers=['sysconfig']) - - def ifcfg_path(self, ifname): - return '/etc/sysconfig/network/ifcfg-%s' % ifname - - def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, - bringup=False): - if not expected_cfgs: - raise ValueError('expected_cfg must not be None') - - tmpd = None - with mock.patch('cloudinit.net.sysconfig.available') as m_avail: - m_avail.return_value = True - with self.reRooted(tmpd) as tmpd: - apply_fn(config, bringup) - - results = dir2dict(tmpd) - for cfgpath, expected in expected_cfgs.items(): - self.assertCfgEquals(expected, results[cfgpath]) - self.assertEqual(0o644, get_mode(cfgpath, tmpd)) - - def test_apply_network_config_opensuse(self): - """Opensuse uses apply_network_config and renders sysconfig""" - expected_cfgs = { - self.ifcfg_path('eth0'): dedent("""\ - BOOTPROTO=static - IPADDR=192.168.1.5 - NETMASK=255.255.255.0 - STARTMODE=auto - """), - self.ifcfg_path('eth1'): dedent("""\ - BOOTPROTO=dhcp4 - STARTMODE=auto - """), - } - self._apply_and_verify(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy()) - - def test_apply_network_config_ipv6_opensuse(self): - """Opensuse uses apply_network_config and renders sysconfig w/ipv6""" - expected_cfgs = { - self.ifcfg_path('eth0'): dedent("""\ - BOOTPROTO=static - IPADDR6=2607:f0d0:1002:0011::2/64 - STARTMODE=auto - """), - self.ifcfg_path('eth1'): dedent("""\ - BOOTPROTO=dhcp4 - STARTMODE=auto - """), - } - self._apply_and_verify(self.distro.apply_network_config, - V1_NET_CFG_IPV6, - expected_cfgs=expected_cfgs.copy()) - - -class TestNetCfgDistroArch(TestNetCfgDistroBase): - def setUp(self): - super(TestNetCfgDistroArch, self).setUp() - self.distro = self._get_distro('arch', renderers=['netplan']) - - def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, - bringup=False, with_netplan=False): - if not expected_cfgs: - raise ValueError('expected_cfg must not be None') - - tmpd = None - with mock.patch('cloudinit.net.netplan.available', - return_value=with_netplan): - with self.reRooted(tmpd) as tmpd: - apply_fn(config, bringup) - - results = dir2dict(tmpd) - for cfgpath, expected in expected_cfgs.items(): - print("----------") - print(expected) - print("^^^^ expected | rendered VVVVVVV") - print(results[cfgpath]) - print("----------") - self.assertEqual(expected, results[cfgpath]) - self.assertEqual(0o644, get_mode(cfgpath, tmpd)) - - def netctl_path(self, iface): - return '/etc/netctl/%s' % iface - - def netplan_path(self): - return '/etc/netplan/50-cloud-init.yaml' - - def 
test_apply_network_config_v1_without_netplan(self): - # Note that this is in fact an invalid netctl config: - # "Address=None/None" - # But this is what the renderer has been writing out for a long time, - # and the test's purpose is to assert that the netctl renderer is - # still being used in absence of netplan, not the correctness of the - # rendered netctl config. - expected_cfgs = { - self.netctl_path('eth0'): dedent("""\ - Address=192.168.1.5/255.255.255.0 - Connection=ethernet - DNS=() - Gateway=192.168.1.254 - IP=static - Interface=eth0 - """), - self.netctl_path('eth1'): dedent("""\ - Address=None/None - Connection=ethernet - DNS=() - Gateway= - IP=dhcp - Interface=eth1 - """), - } - - # ub_distro.apply_network_config(V1_NET_CFG, False) - self._apply_and_verify(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy(), - with_netplan=False) - - def test_apply_network_config_v1_with_netplan(self): - expected_cfgs = { - self.netplan_path(): dedent("""\ - # generated by cloud-init - network: - version: 2 - ethernets: - eth0: - addresses: - - 192.168.1.5/24 - gateway4: 192.168.1.254 - eth1: - dhcp4: true - """), - } - - with mock.patch( - 'cloudinit.net.netplan.get_devicelist', - return_value=[] - ): - self._apply_and_verify(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy(), - with_netplan=True) - - -class TestNetCfgDistroPhoton(TestNetCfgDistroBase): - - def setUp(self): - super(TestNetCfgDistroPhoton, self).setUp() - self.distro = self._get_distro('photon', renderers=['networkd']) - - def create_conf_dict(self, contents): - content_dict = {} - for line in contents: - if line: - line = line.strip() - if line and re.search(r'^\[(.+)\]$', line): - content_dict[line] = [] - key = line - elif line: - assert key - content_dict[key].append(line) - - return content_dict - - def compare_dicts(self, actual, expected): - for k, v in actual.items(): - self.assertEqual(sorted(expected[k]), sorted(v)) - - def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, - bringup=False): - if not expected_cfgs: - raise ValueError('expected_cfg must not be None') - - tmpd = None - with mock.patch('cloudinit.net.networkd.available') as m_avail: - m_avail.return_value = True - with self.reRooted(tmpd) as tmpd: - apply_fn(config, bringup) - - results = dir2dict(tmpd) - for cfgpath, expected in expected_cfgs.items(): - actual = self.create_conf_dict(results[cfgpath].splitlines()) - self.compare_dicts(actual, expected) - self.assertEqual(0o644, get_mode(cfgpath, tmpd)) - - def nwk_file_path(self, ifname): - return '/etc/systemd/network/10-cloud-init-%s.network' % ifname - - def net_cfg_1(self, ifname): - ret = """\ - [Match] - Name=%s - [Network] - DHCP=no - [Address] - Address=192.168.1.5/24 - [Route] - Gateway=192.168.1.254""" % ifname - return ret - - def net_cfg_2(self, ifname): - ret = """\ - [Match] - Name=%s - [Network] - DHCP=ipv4""" % ifname - return ret - - def test_photon_network_config_v1(self): - tmp = self.net_cfg_1('eth0').splitlines() - expected_eth0 = self.create_conf_dict(tmp) - - tmp = self.net_cfg_2('eth1').splitlines() - expected_eth1 = self.create_conf_dict(tmp) - - expected_cfgs = { - self.nwk_file_path('eth0'): expected_eth0, - self.nwk_file_path('eth1'): expected_eth1, - } - - self._apply_and_verify(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs.copy()) - - def test_photon_network_config_v2(self): - tmp = self.net_cfg_1('eth7').splitlines() - expected_eth7 = self.create_conf_dict(tmp) - - tmp = 
self.net_cfg_2('eth9').splitlines() - expected_eth9 = self.create_conf_dict(tmp) - - expected_cfgs = { - self.nwk_file_path('eth7'): expected_eth7, - self.nwk_file_path('eth9'): expected_eth9, - } - - self._apply_and_verify(self.distro.apply_network_config, - V2_NET_CFG, - expected_cfgs.copy()) - - def test_photon_network_config_v1_with_duplicates(self): - expected = """\ - [Match] - Name=eth0 - [Network] - DHCP=no - DNS=1.2.3.4 - Domains=test.com - [Address] - Address=192.168.0.102/24""" - - net_cfg = safeyaml.load(V1_NET_CFG_WITH_DUPS) - - expected = self.create_conf_dict(expected.splitlines()) - expected_cfgs = { - self.nwk_file_path('eth0'): expected, - } - - self._apply_and_verify(self.distro.apply_network_config, - net_cfg, - expected_cfgs.copy()) - - -def get_mode(path, target=None): - return os.stat(subp.target_path(target, path)).st_mode & 0o777 - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_opensuse.py b/tests/unittests/test_distros/test_opensuse.py deleted file mode 100644 index b9bb9b3e..00000000 --- a/tests/unittests/test_distros/test_opensuse.py +++ /dev/null @@ -1,12 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit.tests.helpers import CiTestCase - -from . import _get_distro - - -class TestopenSUSE(CiTestCase): - - def test_get_distro(self): - distro = _get_distro("opensuse") - self.assertEqual(distro.osfamily, 'suse') diff --git a/tests/unittests/test_distros/test_photon.py b/tests/unittests/test_distros/test_photon.py deleted file mode 100644 index 1c3145ca..00000000 --- a/tests/unittests/test_distros/test_photon.py +++ /dev/null @@ -1,68 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from . import _get_distro -from cloudinit import util -from cloudinit.tests.helpers import mock -from cloudinit.tests.helpers import CiTestCase - -SYSTEM_INFO = { - 'paths': { - 'cloud_dir': '/var/lib/cloud/', - 'templates_dir': '/etc/cloud/templates/', - }, - 'network': {'renderers': 'networkd'}, -} - - -class TestPhoton(CiTestCase): - with_logs = True - distro = _get_distro('photon', SYSTEM_INFO) - expected_log_line = 'Rely on PhotonOS default network config' - - def test_network_renderer(self): - self.assertEqual(self.distro._cfg['network']['renderers'], 'networkd') - - def test_get_distro(self): - self.assertEqual(self.distro.osfamily, 'photon') - - @mock.patch("cloudinit.distros.photon.subp.subp") - def test_write_hostname(self, m_subp): - hostname = 'myhostname' - hostfile = self.tmp_path('previous-hostname') - self.distro._write_hostname(hostname, hostfile) - self.assertEqual(hostname, util.load_file(hostfile)) - - ret = self.distro._read_hostname(hostfile) - self.assertEqual(ret, hostname) - - m_subp.return_value = (None, None) - hostfile += 'hostfile' - self.distro._write_hostname(hostname, hostfile) - - m_subp.return_value = (hostname, None) - ret = self.distro._read_hostname(hostfile) - self.assertEqual(ret, hostname) - - self.logs.truncate(0) - m_subp.return_value = (None, 'bla') - self.distro._write_hostname(hostname, None) - self.assertIn('Error while setting hostname', self.logs.getvalue()) - - @mock.patch('cloudinit.net.generate_fallback_config') - def test_fallback_netcfg(self, m_fallback_cfg): - - key = 'disable_fallback_netcfg' - # Don't use fallback if no setting given - self.logs.truncate(0) - assert(self.distro.generate_fallback_config() is None) - self.assertIn(self.expected_log_line, self.logs.getvalue()) - - self.logs.truncate(0) - 
self.distro._cfg[key] = True - assert(self.distro.generate_fallback_config() is None) - self.assertIn(self.expected_log_line, self.logs.getvalue()) - - self.logs.truncate(0) - self.distro._cfg[key] = False - assert(self.distro.generate_fallback_config() is not None) - self.assertNotIn(self.expected_log_line, self.logs.getvalue()) diff --git a/tests/unittests/test_distros/test_resolv.py b/tests/unittests/test_distros/test_resolv.py deleted file mode 100644 index 7d940750..00000000 --- a/tests/unittests/test_distros/test_resolv.py +++ /dev/null @@ -1,65 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit.distros.parsers import resolv_conf - -from cloudinit.tests.helpers import TestCase - -import re - - -BASE_RESOLVE = ''' -; generated by /sbin/dhclient-script -search blah.yahoo.com yahoo.com -nameserver 10.15.44.14 -nameserver 10.15.30.92 -''' -BASE_RESOLVE = BASE_RESOLVE.strip() - - -class TestResolvHelper(TestCase): - def test_parse_same(self): - rp = resolv_conf.ResolvConf(BASE_RESOLVE) - rp_r = str(rp).strip() - self.assertEqual(BASE_RESOLVE, rp_r) - - def test_local_domain(self): - rp = resolv_conf.ResolvConf(BASE_RESOLVE) - self.assertIsNone(rp.local_domain) - - rp.local_domain = "bob" - self.assertEqual('bob', rp.local_domain) - self.assertIn('domain bob', str(rp)) - - def test_nameservers(self): - rp = resolv_conf.ResolvConf(BASE_RESOLVE) - self.assertIn('10.15.44.14', rp.nameservers) - self.assertIn('10.15.30.92', rp.nameservers) - rp.add_nameserver('10.2') - self.assertIn('10.2', rp.nameservers) - self.assertIn('nameserver 10.2', str(rp)) - self.assertNotIn('10.3', rp.nameservers) - self.assertEqual(len(rp.nameservers), 3) - rp.add_nameserver('10.2') - rp.add_nameserver('10.3') - self.assertNotIn('10.3', rp.nameservers) - - def test_search_domains(self): - rp = resolv_conf.ResolvConf(BASE_RESOLVE) - self.assertIn('yahoo.com', rp.search_domains) - self.assertIn('blah.yahoo.com', rp.search_domains) - rp.add_search_domain('bbb.y.com') - self.assertIn('bbb.y.com', rp.search_domains) - self.assertTrue(re.search(r'search(.*)bbb.y.com(.*)', str(rp))) - self.assertIn('bbb.y.com', rp.search_domains) - rp.add_search_domain('bbb.y.com') - self.assertEqual(len(rp.search_domains), 3) - rp.add_search_domain('bbb2.y.com') - self.assertEqual(len(rp.search_domains), 4) - rp.add_search_domain('bbb3.y.com') - self.assertEqual(len(rp.search_domains), 5) - rp.add_search_domain('bbb4.y.com') - self.assertEqual(len(rp.search_domains), 6) - self.assertRaises(ValueError, rp.add_search_domain, 'bbb5.y.com') - self.assertEqual(len(rp.search_domains), 6) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_sles.py b/tests/unittests/test_distros/test_sles.py deleted file mode 100644 index 33e3c457..00000000 --- a/tests/unittests/test_distros/test_sles.py +++ /dev/null @@ -1,12 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit.tests.helpers import CiTestCase - -from . import _get_distro - - -class TestSLES(CiTestCase): - - def test_get_distro(self): - distro = _get_distro("sles") - self.assertEqual(distro.osfamily, 'suse') diff --git a/tests/unittests/test_distros/test_sysconfig.py b/tests/unittests/test_distros/test_sysconfig.py deleted file mode 100644 index c1d5b693..00000000 --- a/tests/unittests/test_distros/test_sysconfig.py +++ /dev/null @@ -1,86 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
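TestResolvHelper above encodes real resolver limits: classic libc resolvers honour at most three nameservers (MAXNS) and six search domains (MAXDNSRCH), so ResolvConf drops a fourth nameserver silently but raises on a seventh search domain. A rough standalone rendering of those rules, limits hard-coded for illustration:

    def render_resolv_conf(nameservers, search_domains):
        # A seventh search domain is an error; nameservers beyond three
        # are silently ignored, mirroring the assertions above.
        if len(search_domains) > 6:
            raise ValueError('too many search domains: %d' % len(search_domains))
        lines = []
        if search_domains:
            lines.append('search ' + ' '.join(search_domains))
        lines.extend('nameserver %s' % ns for ns in nameservers[:3])
        return '\n'.join(lines) + '\n'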
- -import re - -from cloudinit.distros.parsers.sys_conf import SysConf - -from cloudinit.tests.helpers import TestCase - - -# Lots of good examples @ -# http://content.hccfl.edu/pollock/AUnix1/SysconfigFilesDesc.txt - -class TestSysConfHelper(TestCase): - # This function was added in 2.7, make it work for 2.6 - def assertRegMatches(self, text, regexp): - regexp = re.compile(regexp) - self.assertTrue(regexp.search(text), - msg="%s must match %s!" % (text, regexp.pattern)) - - def test_parse_no_change(self): - contents = '''# A comment -USESMBAUTH=no -KEYTABLE=/usr/lib/kbd/keytables/us.map -SHORTDATE=$(date +%y:%m:%d:%H:%M) -HOSTNAME=blahblah -NETMASK0=255.255.255.0 -# Inline comment -LIST=$LOGROOT/incremental-list -IPV6TO4_ROUTING='eth0-:0004::1/64 eth1-:0005::1/64' -ETHTOOL_OPTS="-K ${DEVICE} tso on; -G ${DEVICE} rx 256 tx 256" -USEMD5=no''' - conf = SysConf(contents.splitlines()) - self.assertEqual(conf['HOSTNAME'], 'blahblah') - self.assertEqual(conf['SHORTDATE'], '$(date +%y:%m:%d:%H:%M)') - # Should be unquoted - self.assertEqual(conf['ETHTOOL_OPTS'], ('-K ${DEVICE} tso on; ' - '-G ${DEVICE} rx 256 tx 256')) - self.assertEqual(contents, str(conf)) - - def test_parse_shell_vars(self): - contents = 'USESMBAUTH=$XYZ' - conf = SysConf(contents.splitlines()) - self.assertEqual(contents, str(conf)) - conf = SysConf('') - conf['B'] = '${ZZ}d apples' - # Should be quoted - self.assertEqual('B="${ZZ}d apples"', str(conf)) - conf = SysConf('') - conf['B'] = '$? d apples' - self.assertEqual('B="$? d apples"', str(conf)) - contents = 'IPMI_WATCHDOG_OPTIONS="timeout=60"' - conf = SysConf(contents.splitlines()) - self.assertEqual('IPMI_WATCHDOG_OPTIONS=timeout=60', str(conf)) - - def test_parse_adjust(self): - contents = 'IPV6TO4_ROUTING="eth0-:0004::1/64 eth1-:0005::1/64"' - conf = SysConf(contents.splitlines()) - # Should be unquoted - self.assertEqual('eth0-:0004::1/64 eth1-:0005::1/64', - conf['IPV6TO4_ROUTING']) - conf['IPV6TO4_ROUTING'] = "blah \tblah" - contents2 = str(conf).strip() - # Should be requoted due to whitespace - self.assertRegMatches(contents2, - r'IPV6TO4_ROUTING=[\']blah\s+blah[\']') - - def test_parse_no_adjust_shell(self): - conf = SysConf(''.splitlines()) - conf['B'] = ' $(time)' - contents = str(conf) - self.assertEqual('B= $(time)', contents) - - def test_parse_empty(self): - contents = '' - conf = SysConf(contents.splitlines()) - self.assertEqual('', str(conf).strip()) - - def test_parse_add_new(self): - contents = 'BLAH=b' - conf = SysConf(contents.splitlines()) - conf['Z'] = 'd' - contents = str(conf) - self.assertIn("Z=d", contents) - self.assertIn("BLAH=b", contents) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py deleted file mode 100644 index 50c86942..00000000 --- a/tests/unittests/test_distros/test_user_data_normalize.py +++ /dev/null @@ -1,372 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
- -from unittest import mock - -from cloudinit import distros -from cloudinit.distros import ug_util -from cloudinit import helpers -from cloudinit import settings - -from cloudinit.tests.helpers import TestCase - - -bcfg = { - 'name': 'bob', - 'plain_text_passwd': 'ubuntu', - 'home': "/home/ubuntu", - 'shell': "/bin/bash", - 'lock_passwd': True, - 'gecos': "Ubuntu", - 'groups': ["foo"] -} - - -class TestUGNormalize(TestCase): - - def setUp(self): - super(TestUGNormalize, self).setUp() - self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy') - - def _make_distro(self, dtype, def_user=None): - cfg = dict(settings.CFG_BUILTIN) - cfg['system_info']['distro'] = dtype - paths = helpers.Paths(cfg['system_info']['paths']) - distro_cls = distros.fetch(dtype) - if def_user: - cfg['system_info']['default_user'] = def_user.copy() - distro = distro_cls(dtype, cfg['system_info'], paths) - return distro - - def _norm(self, cfg, distro): - return ug_util.normalize_users_groups(cfg, distro) - - def test_group_dict(self): - distro = self._make_distro('ubuntu') - g = {'groups': - [{'ubuntu': ['foo', 'bar'], - 'bob': 'users'}, - 'cloud-users', - {'bob': 'users2'}]} - (_users, groups) = self._norm(g, distro) - self.assertIn('ubuntu', groups) - ub_members = groups['ubuntu'] - self.assertEqual(sorted(['foo', 'bar']), sorted(ub_members)) - self.assertIn('bob', groups) - b_members = groups['bob'] - self.assertEqual(sorted(['users', 'users2']), - sorted(b_members)) - - def test_basic_groups(self): - distro = self._make_distro('ubuntu') - ug_cfg = { - 'groups': ['bob'], - } - (users, groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', groups) - self.assertEqual({}, users) - - def test_csv_groups(self): - distro = self._make_distro('ubuntu') - ug_cfg = { - 'groups': 'bob,joe,steve', - } - (users, groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', groups) - self.assertIn('joe', groups) - self.assertIn('steve', groups) - self.assertEqual({}, users) - - def test_more_groups(self): - distro = self._make_distro('ubuntu') - ug_cfg = { - 'groups': ['bob', 'joe', 'steve'] - } - (users, groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', groups) - self.assertIn('joe', groups) - self.assertIn('steve', groups) - self.assertEqual({}, users) - - def test_member_groups(self): - distro = self._make_distro('ubuntu') - ug_cfg = { - 'groups': { - 'bob': ['s'], - 'joe': [], - 'steve': [], - } - } - (users, groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', groups) - self.assertEqual(['s'], groups['bob']) - self.assertEqual([], groups['joe']) - self.assertIn('joe', groups) - self.assertIn('steve', groups) - self.assertEqual({}, users) - - def test_users_simple_dict(self): - distro = self._make_distro('ubuntu', bcfg) - ug_cfg = { - 'users': { - 'default': True, - } - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) - ug_cfg = { - 'users': { - 'default': 'yes', - } - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) - ug_cfg = { - 'users': { - 'default': '1', - } - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) - - def test_users_simple_dict_no(self): - distro = self._make_distro('ubuntu', bcfg) - ug_cfg = { - 'users': { - 'default': False, - } - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertEqual({}, users) - ug_cfg = { - 'users': { - 'default': 'no', - } - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertEqual({}, users) - - def test_users_simple_csv(self): - distro = 
self._make_distro('ubuntu') - ug_cfg = { - 'users': 'joe,bob', - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('joe', users) - self.assertIn('bob', users) - self.assertEqual({'default': False}, users['joe']) - self.assertEqual({'default': False}, users['bob']) - - def test_users_simple(self): - distro = self._make_distro('ubuntu') - ug_cfg = { - 'users': [ - 'joe', - 'bob' - ], - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('joe', users) - self.assertIn('bob', users) - self.assertEqual({'default': False}, users['joe']) - self.assertEqual({'default': False}, users['bob']) - - def test_users_old_user(self): - distro = self._make_distro('ubuntu', bcfg) - ug_cfg = { - 'user': 'zetta', - 'users': 'default' - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertNotIn('bob', users) # Bob is not the default now, zetta is - self.assertIn('zetta', users) - self.assertTrue(users['zetta']['default']) - self.assertNotIn('default', users) - ug_cfg = { - 'user': 'zetta', - 'users': 'default, joe' - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertNotIn('bob', users) # Bob is not the default now, zetta is - self.assertIn('joe', users) - self.assertIn('zetta', users) - self.assertTrue(users['zetta']['default']) - self.assertNotIn('default', users) - ug_cfg = { - 'user': 'zetta', - 'users': ['bob', 'joe'] - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) - self.assertIn('joe', users) - self.assertIn('zetta', users) - self.assertTrue(users['zetta']['default']) - ug_cfg = { - 'user': 'zetta', - 'users': { - 'bob': True, - 'joe': True, - } - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) - self.assertIn('joe', users) - self.assertIn('zetta', users) - self.assertTrue(users['zetta']['default']) - ug_cfg = { - 'user': 'zetta', - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('zetta', users) - ug_cfg = {} - (users, groups) = self._norm(ug_cfg, distro) - self.assertEqual({}, users) - self.assertEqual({}, groups) - - def test_users_dict_default_additional(self): - distro = self._make_distro('ubuntu', bcfg) - ug_cfg = { - 'users': [ - {'name': 'default', 'blah': True} - ], - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) - self.assertEqual(",".join(distro.get_default_user()['groups']), - users['bob']['groups']) - self.assertEqual(True, users['bob']['blah']) - self.assertEqual(True, users['bob']['default']) - - def test_users_dict_extract(self): - distro = self._make_distro('ubuntu', bcfg) - ug_cfg = { - 'users': [ - 'default', - ], - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) - (name, config) = ug_util.extract_default(users) - self.assertEqual(name, 'bob') - expected_config = {} - def_config = None - try: - def_config = distro.get_default_user() - except NotImplementedError: - pass - if not def_config: - def_config = {} - expected_config.update(def_config) - - # Ignore these for now - expected_config.pop('name', None) - expected_config.pop('groups', None) - config.pop('groups', None) - self.assertEqual(config, expected_config) - - def test_users_dict_default(self): - distro = self._make_distro('ubuntu', bcfg) - ug_cfg = { - 'users': [ - 'default', - ], - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) - self.assertEqual(",".join(distro.get_default_user()['groups']), - users['bob']['groups']) - self.assertEqual(True, users['bob']['default']) - - def 
test_users_dict_trans(self): - distro = self._make_distro('ubuntu') - ug_cfg = { - 'users': [ - {'name': 'joe', - 'tr-me': True}, - {'name': 'bob'}, - ], - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('joe', users) - self.assertIn('bob', users) - self.assertEqual({'tr_me': True, 'default': False}, users['joe']) - self.assertEqual({'default': False}, users['bob']) - - def test_users_dict(self): - distro = self._make_distro('ubuntu') - ug_cfg = { - 'users': [ - {'name': 'joe'}, - {'name': 'bob'}, - ], - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('joe', users) - self.assertIn('bob', users) - self.assertEqual({'default': False}, users['joe']) - self.assertEqual({'default': False}, users['bob']) - - @mock.patch('cloudinit.subp.subp') - def test_create_snap_user(self, mock_subp): - mock_subp.side_effect = [('{"username": "joe", "ssh-key-count": 1}\n', - '')] - distro = self._make_distro('ubuntu') - ug_cfg = { - 'users': [ - {'name': 'joe', 'snapuser': 'joe@joe.com'}, - ], - } - (users, _groups) = self._norm(ug_cfg, distro) - for (user, config) in users.items(): - print('user=%s config=%s' % (user, config)) - username = distro.create_user(user, **config) - - snapcmd = ['snap', 'create-user', '--sudoer', '--json', 'joe@joe.com'] - mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd) - self.assertEqual(username, 'joe') - - @mock.patch('cloudinit.subp.subp') - def test_create_snap_user_known(self, mock_subp): - mock_subp.side_effect = [('{"username": "joe", "ssh-key-count": 1}\n', - '')] - distro = self._make_distro('ubuntu') - ug_cfg = { - 'users': [ - {'name': 'joe', 'snapuser': 'joe@joe.com', 'known': True}, - ], - } - (users, _groups) = self._norm(ug_cfg, distro) - for (user, config) in users.items(): - print('user=%s config=%s' % (user, config)) - username = distro.create_user(user, **config) - - snapcmd = ['snap', 'create-user', '--sudoer', '--json', '--known', - 'joe@joe.com'] - mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd) - self.assertEqual(username, 'joe') - - @mock.patch('cloudinit.util.system_is_snappy') - @mock.patch('cloudinit.util.is_group') - @mock.patch('cloudinit.subp.subp') - def test_add_user_on_snappy_system(self, mock_subp, mock_isgrp, - mock_snappy): - mock_isgrp.return_value = False - mock_subp.return_value = True - mock_snappy.return_value = True - distro = self._make_distro('ubuntu') - ug_cfg = { - 'users': [ - {'name': 'joe', 'groups': 'users', 'create_groups': True}, - ], - } - (users, _groups) = self._norm(ug_cfg, distro) - for (user, config) in users.items(): - print('user=%s config=%s' % (user, config)) - distro.add_user(user, **config) - - groupcmd = ['groupadd', 'users', '--extrausers'] - addcmd = ['useradd', 'joe', '--extrausers', '--groups', 'users', '-m'] - - mock_subp.assert_any_call(groupcmd) - mock_subp.assert_any_call(addcmd, logstring=addcmd) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_dmi.py b/tests/unittests/test_dmi.py new file mode 100644 index 00000000..674e7b98 --- /dev/null +++ b/tests/unittests/test_dmi.py @@ -0,0 +1,154 @@ +from tests.unittests import helpers +from cloudinit import dmi +from cloudinit import util +from cloudinit import subp + +import os +import tempfile +import shutil +from unittest import mock + + +class TestReadDMIData(helpers.FilesystemMockingTestCase): + + def setUp(self): + super(TestReadDMIData, self).setUp() + self.new_root = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, self.new_root) + self.reRoot(self.new_root) + p = 
diff --git a/tests/unittests/test_dmi.py b/tests/unittests/test_dmi.py
new file mode 100644
index 00000000..674e7b98
--- /dev/null
+++ b/tests/unittests/test_dmi.py
@@ -0,0 +1,154 @@
+from tests.unittests import helpers
+from cloudinit import dmi
+from cloudinit import util
+from cloudinit import subp
+
+import os
+import tempfile
+import shutil
+from unittest import mock
+
+
+class TestReadDMIData(helpers.FilesystemMockingTestCase):
+
+    def setUp(self):
+        super(TestReadDMIData, self).setUp()
+        self.new_root = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, self.new_root)
+        self.reRoot(self.new_root)
+        p = mock.patch("cloudinit.dmi.is_container", return_value=False)
+        self.addCleanup(p.stop)
+        self._m_is_container = p.start()
+        p = mock.patch("cloudinit.dmi.is_FreeBSD", return_value=False)
+        self.addCleanup(p.stop)
+        self._m_is_FreeBSD = p.start()
+
+    def _create_sysfs_parent_directory(self):
+        util.ensure_dir(os.path.join('sys', 'class', 'dmi', 'id'))
+
+    def _create_sysfs_file(self, key, content):
+        """Mocks the sys path found on Linux systems."""
+        self._create_sysfs_parent_directory()
+        dmi_key = "/sys/class/dmi/id/{0}".format(key)
+        util.write_file(dmi_key, content)
+
+    def _configure_dmidecode_return(self, key, content, error=None):
+        """
+        In order to test a missing sys path and call outs to dmidecode, this
+        function fakes the results of dmidecode to test the results.
+        """
+        def _dmidecode_subp(cmd):
+            if cmd[-1] != key:
+                raise subp.ProcessExecutionError()
+            return (content, error)
+
+        self.patched_funcs.enter_context(
+            mock.patch("cloudinit.dmi.subp.which", side_effect=lambda _: True))
+        self.patched_funcs.enter_context(
+            mock.patch("cloudinit.dmi.subp.subp", side_effect=_dmidecode_subp))
+
+    def _configure_kenv_return(self, key, content, error=None):
+        """
+        In order to test a FreeBSD system call outs to kenv, this
+        function fakes the results of kenv to test the results.
+        """
+        def _kenv_subp(cmd):
+            if cmd[-1] != dmi.DMIDECODE_TO_KERNEL[key].freebsd:
+                raise subp.ProcessExecutionError()
+            return (content, error)
+
+        self.patched_funcs.enter_context(
+            mock.patch("cloudinit.dmi.subp.subp", side_effect=_kenv_subp))
+
+    def patch_mapping(self, new_mapping):
+        self.patched_funcs.enter_context(
+            mock.patch('cloudinit.dmi.DMIDECODE_TO_KERNEL',
+                       new_mapping))
+
+    def test_sysfs_used_with_key_in_mapping_and_file_on_disk(self):
+        self.patch_mapping({'mapped-key': dmi.kdmi('mapped-value', None)})
+        expected_dmi_value = 'sys-used-correctly'
+        self._create_sysfs_file('mapped-value', expected_dmi_value)
+        self._configure_dmidecode_return('mapped-key', 'wrong-wrong-wrong')
+        self.assertEqual(expected_dmi_value, dmi.read_dmi_data('mapped-key'))
+
+    def test_dmidecode_used_if_no_sysfs_file_on_disk(self):
+        self.patch_mapping({})
+        self._create_sysfs_parent_directory()
+        expected_dmi_value = 'dmidecode-used'
+        self._configure_dmidecode_return('use-dmidecode', expected_dmi_value)
+        with mock.patch("cloudinit.util.os.uname") as m_uname:
+            m_uname.return_value = ('x-sysname', 'x-nodename',
+                                    'x-release', 'x-version', 'x86_64')
+            self.assertEqual(expected_dmi_value,
+                             dmi.read_dmi_data('use-dmidecode'))
+
+    def test_dmidecode_not_used_on_arm(self):
+        self.patch_mapping({})
+        print("current =%s", subp)
+        self._create_sysfs_parent_directory()
+        dmi_val = 'from-dmidecode'
+        dmi_name = 'use-dmidecode'
+        self._configure_dmidecode_return(dmi_name, dmi_val)
+        print("now =%s", subp)
+
+        expected = {'armel': None, 'aarch64': dmi_val, 'x86_64': dmi_val}
+        found = {}
+        # we do not run the 'dmi-decode' binary on some arches
+        # verify that anything requested that is not in the sysfs dir
+        # will return None on those arches.
+        with mock.patch("cloudinit.util.os.uname") as m_uname:
+            for arch in expected:
+                m_uname.return_value = ('x-sysname', 'x-nodename',
+                                        'x-release', 'x-version', arch)
+                print("now2 =%s", subp)
+                found[arch] = dmi.read_dmi_data(dmi_name)
+        self.assertEqual(expected, found)
+
+    def test_none_returned_if_neither_source_has_data(self):
+        self.patch_mapping({})
+        self._configure_dmidecode_return('key', 'value')
+        self.assertIsNone(dmi.read_dmi_data('expect-fail'))
+
+    def test_none_returned_if_dmidecode_not_in_path(self):
+        self.patched_funcs.enter_context(
+            mock.patch.object(subp, 'which', lambda _: False))
+        self.patch_mapping({})
+        self.assertIsNone(dmi.read_dmi_data('expect-fail'))
+
+    def test_empty_string_returned_instead_of_foxfox(self):
+        # uninitialized dmi values show as \xff, return empty string
+        my_len = 32
+        dmi_value = b'\xff' * my_len + b'\n'
+        expected = ""
+        dmi_key = 'system-product-name'
+        sysfs_key = 'product_name'
+        self._create_sysfs_file(sysfs_key, dmi_value)
+        self.assertEqual(expected, dmi.read_dmi_data(dmi_key))
+
+    def test_container_returns_none(self):
+        """In a container read_dmi_data should always return None."""
+
+        # first verify we get the value if not in container
+        self._m_is_container.return_value = False
+        key, val = ("system-product-name", "my_product")
+        self._create_sysfs_file('product_name', val)
+        self.assertEqual(val, dmi.read_dmi_data(key))
+
+        # then verify in container returns None
+        self._m_is_container.return_value = True
+        self.assertIsNone(dmi.read_dmi_data(key))
+
+    def test_container_returns_none_on_unknown(self):
+        """In a container even bogus keys return None."""
+        self._m_is_container.return_value = True
+        self._create_sysfs_file('product_name', "should-be-ignored")
+        self.assertIsNone(dmi.read_dmi_data("bogus"))
+        self.assertIsNone(dmi.read_dmi_data("system-product-name"))
+
+    def test_freebsd_uses_kenv(self):
+        """On a FreeBSD system, kenv is called."""
+        self._m_is_FreeBSD.return_value = True
+        key, val = ("system-product-name", "my_product")
+        self._configure_kenv_return(key, val)
+        self.assertEqual(dmi.read_dmi_data(key), val)
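read_dmi_data itself is not shown in this patch; as a mental model of the resolution order the TestReadDMIData cases above assert (container check first, then sysfs, then the dmidecode fallback), here is a minimal sketch with invented parameter names -- the real logic lives in cloudinit/dmi.py:

    import os


    def read_dmi_data_sketch(key, *, in_container, sysfs_dir, run_dmidecode):
        """Illustrative only; not cloud-init's API."""
        if in_container:
            return None  # containers never expose host DMI data
        path = os.path.join(sysfs_dir, key)
        if os.path.isfile(path):
            raw = open(path, 'rb').read().strip()
            if raw and all(b == 0xFF for b in raw):
                return ''  # uninitialized fields read back as 0xff bytes
            return raw.decode(errors='replace')
        return run_dmidecode(key)  # may itself return None, e.g. on ARM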
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 43603ea5..62c3e403 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -8,7 +8,7 @@ from uuid import uuid4
 from cloudinit import safeyaml
 from cloudinit import subp
 from cloudinit import util
-from cloudinit.tests.helpers import (
+from tests.unittests.helpers import (
     CiTestCase, dir2dict, populate_dir, populate_dir_with_ts)
 
 from cloudinit.sources import DataSourceIBMCloud as ds_ibm
diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py
index 3f50f57d..e8e0b5b1 100644
--- a/tests/unittests/test_ec2_util.py
+++ b/tests/unittests/test_ec2_util.py
@@ -2,7 +2,7 @@
 
 import httpretty as hp
 
-from cloudinit.tests import helpers
+from tests.unittests import helpers
 
 from cloudinit import ec2_utils as eu
 from cloudinit import url_helper as uh
diff --git a/tests/unittests/test_event.py b/tests/unittests/test_event.py
new file mode 100644
index 00000000..3da4c70c
--- /dev/null
+++ b/tests/unittests/test_event.py
@@ -0,0 +1,26 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Tests related to cloudinit.event module."""
+from cloudinit.event import EventType, EventScope, userdata_to_events
+
+
+class TestEvent:
+    def test_userdata_to_events(self):
+        userdata = {'network': {'when': ['boot']}}
+        expected = {EventScope.NETWORK: {EventType.BOOT}}
+        assert expected == userdata_to_events(userdata)
+
+    def test_invalid_scope(self, caplog):
+        userdata = {'networkasdfasdf': {'when': ['boot']}}
+        userdata_to_events(userdata)
+        assert (
+            "'networkasdfasdf' is not a valid EventScope! Update data "
+            "will be ignored for 'networkasdfasdf' scope"
+        ) in caplog.text
+
+    def test_invalid_event(self, caplog):
+        userdata = {'network': {'when': ['bootasdfasdf']}}
+        userdata_to_events(userdata)
+        assert (
+            "'bootasdfasdf' is not a valid EventType! Update data "
+            "will be ignored for 'network' scope"
+        ) in caplog.text
diff --git a/tests/unittests/test_features.py b/tests/unittests/test_features.py
new file mode 100644
index 00000000..d7a7226d
--- /dev/null
+++ b/tests/unittests/test_features.py
@@ -0,0 +1,60 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+# pylint: disable=no-member,no-name-in-module
+"""
+This file is for testing the feature flag functionality itself,
+NOT for testing any individual feature flag
+"""
+import pytest
+import sys
+from pathlib import Path
+
+import cloudinit
+
+
+@pytest.yield_fixture()
+def create_override(request):
+    """
+    Create a feature overrides file and do some module wizardry to make
+    it seem like we're importing the features file for the first time.
+
+    After creating the override file with the values passed by the test,
+    we need to reload cloudinit.features
+    to get all of the current features (including the overridden ones).
+    Once the test is complete, we remove the file we created and set
+    features and feature_overrides modules to how they were before
+    the test started
+    """
+    override_path = Path(cloudinit.__file__).parent / 'feature_overrides.py'
+    if override_path.exists():
+        raise Exception("feature_overrides.py unexpectedly exists! "
+                        "Remove it to run this test.")
+    with override_path.open('w') as f:
+        for key, value in request.param.items():
+            f.write('{} = {}\n'.format(key, value))
+
+    sys.modules.pop('cloudinit.features', None)
+
+    yield
+
+    override_path.unlink()
+    sys.modules.pop('cloudinit.feature_overrides', None)
+
+
+class TestFeatures:
+    def test_feature_without_override(self):
+        from cloudinit.features import ERROR_ON_USER_DATA_FAILURE
+        assert ERROR_ON_USER_DATA_FAILURE is True
+
+    @pytest.mark.parametrize('create_override',
+                             [{'ERROR_ON_USER_DATA_FAILURE': False}],
+                             indirect=True)
+    def test_feature_with_override(self, create_override):
+        from cloudinit.features import ERROR_ON_USER_DATA_FAILURE
+        assert ERROR_ON_USER_DATA_FAILURE is False
+
+    @pytest.mark.parametrize('create_override',
+                             [{'SPAM': True}],
+                             indirect=True)
+    def test_feature_only_in_override(self, create_override):
+        from cloudinit.features import SPAM
+        assert SPAM is True
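The "module wizardry" in create_override leans on one CPython fact: imports are cached in sys.modules, so popping an entry forces the next import statement to re-execute the module and pick up a freshly written override file. A self-contained illustration (toy code, not cloud-init's API):

    import importlib
    import sys


    def reimport_fresh(name):
        """Return a freshly executed copy of module `name`."""
        sys.modules.pop(name, None)           # drop the cached module object
        return importlib.import_module(name)  # re-runs module top-level code


    json_a = reimport_fresh('json')
    json_b = reimport_fresh('json')
    assert json_a is not json_b  # two distinct module objects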
diff --git a/tests/unittests/test_filters/__init__.py b/tests/unittests/test_filters/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/unittests/test_filters/test_launch_index.py b/tests/unittests/test_filters/test_launch_index.py
deleted file mode 100644
index 1492361e..00000000
--- a/tests/unittests/test_filters/test_launch_index.py
+++ /dev/null
@@ -1,135 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import copy
-from itertools import filterfalse
-
-from cloudinit.tests import helpers
-
-from cloudinit.filters import launch_index
-from cloudinit import user_data as ud
-from cloudinit import util
-
-
-def count_messages(root):
-    am = 0
-    for m in root.walk():
-        if ud.is_skippable(m):
-            continue
-        am += 1
-    return am
-
-
-class TestLaunchFilter(helpers.ResourceUsingTestCase):
-
-    def assertCounts(self, message, expected_counts):
-        orig_message = copy.deepcopy(message)
-        for (index, count) in expected_counts.items():
-            index = util.safe_int(index)
-            filtered_message = launch_index.Filter(index).apply(message)
-            self.assertEqual(count_messages(filtered_message), count)
-            # Ensure original message still ok/not modified
-            self.assertTrue(self.equivalentMessage(message, orig_message))
-
-    def equivalentMessage(self, msg1, msg2):
-        msg1_count = count_messages(msg1)
-        msg2_count = count_messages(msg2)
-        if msg1_count != msg2_count:
-            return False
-        # Do some basic payload checking
-        msg1_msgs = [m for m in msg1.walk()]
-        msg1_msgs = [m for m in filterfalse(ud.is_skippable, msg1_msgs)]
-        msg2_msgs = [m for m in msg2.walk()]
-        msg2_msgs = [m for m in filterfalse(ud.is_skippable, msg2_msgs)]
-        for i in range(0, len(msg2_msgs)):
-            m1_msg = msg1_msgs[i]
-            m2_msg = msg2_msgs[i]
-            if m1_msg.get_charset() != m2_msg.get_charset():
-                return False
-            if m1_msg.is_multipart() != m2_msg.is_multipart():
-                return False
-            m1_py = m1_msg.get_payload(decode=True)
-            m2_py = m2_msg.get_payload(decode=True)
-            if m1_py != m2_py:
-                return False
-        return True
-
-    def testMultiEmailIndex(self):
-        test_data = helpers.readResource('filter_cloud_multipart_2.email')
-        ud_proc = ud.UserDataProcessor(self.getCloudPaths())
-        message = ud_proc.process(test_data)
-        self.assertTrue(count_messages(message) > 0)
-        # This file should have the following
-        # indexes -> amount mapping in it
-        expected_counts = {
-            3: 1,
-            2: 2,
-            None: 3,
-            -1: 0,
-        }
-        self.assertCounts(message, expected_counts)
-
-    def testHeaderEmailIndex(self):
-        test_data = helpers.readResource('filter_cloud_multipart_header.email')
-        ud_proc = ud.UserDataProcessor(self.getCloudPaths())
-        message = ud_proc.process(test_data)
-        self.assertTrue(count_messages(message) > 0)
-        # This file should have the following
-        # indexes -> amount mapping in it
-        expected_counts = {
-            5: 1,
-            -1: 0,
-            'c': 1,
-            None: 1,
-        }
-        self.assertCounts(message, expected_counts)
-
-    def testConfigEmailIndex(self):
-        test_data = helpers.readResource('filter_cloud_multipart_1.email')
-        ud_proc = ud.UserDataProcessor(self.getCloudPaths())
-        message = ud_proc.process(test_data)
-        self.assertTrue(count_messages(message) > 0)
-        # This file should have the following
-        # indexes -> amount mapping in it
-        expected_counts = {
-            2: 1,
-            -1: 0,
-            None: 1,
-        }
-        self.assertCounts(message, expected_counts)
-
-    def testNoneIndex(self):
-        test_data = helpers.readResource('filter_cloud_multipart.yaml')
-        ud_proc = ud.UserDataProcessor(self.getCloudPaths())
-        message = ud_proc.process(test_data)
-        start_count = count_messages(message)
-        self.assertTrue(start_count > 0)
-        filtered_message = launch_index.Filter(None).apply(message)
-        self.assertTrue(self.equivalentMessage(message, filtered_message))
-
-    def testIndexes(self):
-        test_data = helpers.readResource('filter_cloud_multipart.yaml')
-        ud_proc = ud.UserDataProcessor(self.getCloudPaths())
-        message = ud_proc.process(test_data)
-        start_count = count_messages(message)
-        self.assertTrue(start_count > 0)
-        # This file should have the following
-        # indexes -> amount mapping in it
-        expected_counts = {
-            2: 2,
-            3: 2,
-            1: 2,
-            0: 1,
-            4: 1,
-            7: 0,
-            -1: 0,
-            100: 0,
-            # None should just give all back
-            None: start_count,
-            # Non ints should be ignored
-            'c': start_count,
-            # Strings should be converted
-            '1': 2,
-        }
-        self.assertCounts(message, expected_counts)
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_gpg.py b/tests/unittests/test_gpg.py
index 451ffa91..ceada49a 100644
--- a/tests/unittests/test_gpg.py
+++ b/tests/unittests/test_gpg.py
@@ -4,6 +4,8 @@ from unittest import mock
 from cloudinit import gpg
 from cloudinit import subp
 
+from tests.unittests.helpers import CiTestCase
+
 TEST_KEY_HUMAN = '''
 /etc/apt/cloud-init.gpg.d/my_key.gpg
 --------------------------------------------
@@ -79,3 +81,50 @@ class TestGPGCommands:
         test_call = mock.call(
             ["gpg", "--dearmor"], data='key', decode=False)
         assert test_call == m_subp.call_args
+
+
+@mock.patch("cloudinit.gpg.time.sleep")
+@mock.patch("cloudinit.gpg.subp.subp")
+class TestReceiveKeys(CiTestCase):
+    """Test the recv_key method."""
+
+    def test_retries_on_subp_exc(self, m_subp, m_sleep):
+        """retry should be done on gpg receive keys failure."""
+        retries = (1, 2, 4)
+        my_exc = subp.ProcessExecutionError(
+            stdout='', stderr='', exit_code=2, cmd=['mycmd'])
+        m_subp.side_effect = (my_exc, my_exc, ('', ''))
+        gpg.recv_key("ABCD", "keyserver.example.com", retries=retries)
+        self.assertEqual(
+            [mock.call(1), mock.call(2)], m_sleep.call_args_list)
+
+    def test_raises_error_after_retries(self, m_subp, m_sleep):
+        """If the final run fails, error should be raised."""
+        naplen = 1
+        keyid, keyserver = ("ABCD", "keyserver.example.com")
+        m_subp.side_effect = subp.ProcessExecutionError(
+            stdout='', stderr='', exit_code=2, cmd=['mycmd'])
+        with self.assertRaises(ValueError) as rcm:
+            gpg.recv_key(keyid, keyserver, retries=(naplen,))
+        self.assertIn(keyid, str(rcm.exception))
+        self.assertIn(keyserver, str(rcm.exception))
+        m_sleep.assert_called_with(naplen)
+
+    def test_no_retries_on_none(self, m_subp, m_sleep):
+        """retry should not be done if retries is None."""
+        m_subp.side_effect = subp.ProcessExecutionError(
+            stdout='', stderr='', exit_code=2, cmd=['mycmd'])
+        with self.assertRaises(ValueError):
+            gpg.recv_key("ABCD", "keyserver.example.com", retries=None)
+        m_sleep.assert_not_called()
+
+    def test_expected_gpg_command(self, m_subp, m_sleep):
+        """Verify gpg is called with expected args."""
+        key, keyserver = ("DEADBEEF", "keyserver.example.com")
+        retries = (1, 2, 4)
+        m_subp.return_value = ('', '')
+        gpg.recv_key(key, keyserver, retries=retries)
+        m_subp.assert_called_once_with(
+            ['gpg', '--no-tty',
+             '--keyserver=%s' % keyserver, '--recv-keys', key],
+            capture=True)
+        m_sleep.assert_not_called()
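The retry semantics these four tests fix in place: one attempt per entry in retries plus the final try, sleeping retries[i] between failures, raising ValueError once the schedule is exhausted, and retries=None meaning a single attempt. A toy equivalent with invented names (run_once, naplens) -- the real implementation is gpg.recv_key:

    import time


    def retry_with_naps(run_once, naplens):
        """Call run_once; on failure sleep naplens[i] and retry."""
        attempts = 1 + len(naplens or ())
        for i in range(attempts):
            try:
                return run_once()
            except Exception as e:
                if i + 1 == attempts:
                    raise ValueError('gave up after %d attempts: %s'
                                     % (attempts, e)) from e
                time.sleep(naplens[i])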
diff --git a/tests/unittests/test_handler/__init__.py b/tests/unittests/test_handler/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/unittests/test_handler/test_handler_apk_configure.py b/tests/unittests/test_handler/test_handler_apk_configure.py
deleted file mode 100644
index 8acc0b33..00000000
--- a/tests/unittests/test_handler/test_handler_apk_configure.py
+++ /dev/null
@@ -1,299 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-""" test_apk_configure
-Test creation of repositories file
-"""
-
-import logging
-import os
-import textwrap
-
-from cloudinit import (cloud, helpers, util)
-
-from cloudinit.config import cc_apk_configure
-from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock)
-
-REPO_FILE = "/etc/apk/repositories"
-DEFAULT_MIRROR_URL = "https://alpine.global.ssl.fastly.net/alpine"
-CC_APK = 'cloudinit.config.cc_apk_configure'
-
-
-class TestNoConfig(FilesystemMockingTestCase):
-    def setUp(self):
-        super(TestNoConfig, self).setUp()
-        self.add_patch(CC_APK + '._write_repositories_file', 'm_write_repos')
-        self.name = "apk-configure"
-        self.cloud_init = None
-        self.log = logging.getLogger("TestNoConfig")
-        self.args = []
-
-    def test_no_config(self):
-        """
-        Test that nothing is done if no apk-configure
-        configuration is provided.
-        """
-        config = util.get_builtin_cfg()
-
-        cc_apk_configure.handle(self.name, config, self.cloud_init,
-                                self.log, self.args)
-
-        self.assertEqual(0, self.m_write_repos.call_count)
-
-
-class TestConfig(FilesystemMockingTestCase):
-    def setUp(self):
-        super(TestConfig, self).setUp()
-        self.new_root = self.tmp_dir()
-        self.new_root = self.reRoot(root=self.new_root)
-        for dirname in ['tmp', 'etc/apk']:
-            util.ensure_dir(os.path.join(self.new_root, dirname))
-        self.paths = helpers.Paths({'templates_dir': self.new_root})
-        self.name = "apk-configure"
-        self.cloud = cloud.Cloud(None, self.paths, None, None, None)
-        self.log = logging.getLogger("TestNoConfig")
-        self.args = []
-
-    @mock.patch(CC_APK + '._write_repositories_file')
-    def test_no_repo_settings(self, m_write_repos):
-        """
-        Test that nothing is written if the 'alpine-repo' key
-        is not present.
-        """
-        config = {"apk_repos": {}}
-
-        cc_apk_configure.handle(self.name, config, self.cloud, self.log,
-                                self.args)
-
-        self.assertEqual(0, m_write_repos.call_count)
-
-    @mock.patch(CC_APK + '._write_repositories_file')
-    def test_empty_repo_settings(self, m_write_repos):
-        """
-        Test that nothing is written if 'alpine_repo' list is empty.
-        """
-        config = {"apk_repos": {"alpine_repo": []}}
-
-        cc_apk_configure.handle(self.name, config, self.cloud, self.log,
-                                self.args)
-
-        self.assertEqual(0, m_write_repos.call_count)
-
-    def test_only_main_repo(self):
-        """
-        Test when only details of main repo is written to file.
-        """
-        alpine_version = 'v3.12'
-        config = {
-            "apk_repos": {
-                "alpine_repo": {
-                    "version": alpine_version
-                }
-            }
-        }
-
-        cc_apk_configure.handle(self.name, config, self.cloud, self.log,
-                                self.args)
-
-        expected_content = textwrap.dedent("""\
-            #
-            # Created by cloud-init
-            #
-            # This file is written on first boot of an instance
-            #
-
-            {0}/{1}/main
-
-            """.format(DEFAULT_MIRROR_URL, alpine_version))
-
-        self.assertEqual(expected_content, util.load_file(REPO_FILE))
-
-    def test_main_and_community_repos(self):
-        """
-        Test when only details of main and community repos are
-        written to file.
-        """
-        alpine_version = 'edge'
-        config = {
-            "apk_repos": {
-                "alpine_repo": {
-                    "version": alpine_version,
-                    "community_enabled": True
-                }
-            }
-        }
-
-        cc_apk_configure.handle(self.name, config, self.cloud, self.log,
-                                self.args)
-
-        expected_content = textwrap.dedent("""\
-            #
-            # Created by cloud-init
-            #
-            # This file is written on first boot of an instance
-            #
-
-            {0}/{1}/main
-            {0}/{1}/community
-
-            """.format(DEFAULT_MIRROR_URL, alpine_version))
-
-        self.assertEqual(expected_content, util.load_file(REPO_FILE))
-
-    def test_main_community_testing_repos(self):
-        """
-        Test when details of main, community and testing repos
-        are written to file.
-        """
-        alpine_version = 'v3.12'
-        config = {
-            "apk_repos": {
-                "alpine_repo": {
-                    "version": alpine_version,
-                    "community_enabled": True,
-                    "testing_enabled": True
-                }
-            }
-        }
-
-        cc_apk_configure.handle(self.name, config, self.cloud, self.log,
-                                self.args)
-
-        expected_content = textwrap.dedent("""\
-            #
-            # Created by cloud-init
-            #
-            # This file is written on first boot of an instance
-            #
-
-            {0}/{1}/main
-            {0}/{1}/community
-            #
-            # Testing - using with non-Edge installation may cause problems!
-            #
-            {0}/edge/testing
-
-            """.format(DEFAULT_MIRROR_URL, alpine_version))
-
-        self.assertEqual(expected_content, util.load_file(REPO_FILE))
-
-    def test_edge_main_community_testing_repos(self):
-        """
-        Test when details of main, community and testing repos
-        for Edge version of Alpine are written to file.
-        """
-        alpine_version = 'edge'
-        config = {
-            "apk_repos": {
-                "alpine_repo": {
-                    "version": alpine_version,
-                    "community_enabled": True,
-                    "testing_enabled": True
-                }
-            }
-        }
-
-        cc_apk_configure.handle(self.name, config, self.cloud, self.log,
-                                self.args)
-
-        expected_content = textwrap.dedent("""\
-            #
-            # Created by cloud-init
-            #
-            # This file is written on first boot of an instance
-            #
-
-            {0}/{1}/main
-            {0}/{1}/community
-            {0}/{1}/testing
-
-            """.format(DEFAULT_MIRROR_URL, alpine_version))
-
-        self.assertEqual(expected_content, util.load_file(REPO_FILE))
-
-    def test_main_community_testing_local_repos(self):
-        """
-        Test when details of main, community, testing and
-        local repos are written to file.
-        """
-        alpine_version = 'v3.12'
-        local_repo_url = 'http://some.mirror/whereever'
-        config = {
-            "apk_repos": {
-                "alpine_repo": {
-                    "version": alpine_version,
-                    "community_enabled": True,
-                    "testing_enabled": True
-                },
-                "local_repo_base_url": local_repo_url
-            }
-        }
-
-        cc_apk_configure.handle(self.name, config, self.cloud, self.log,
-                                self.args)
-
-        expected_content = textwrap.dedent("""\
-            #
-            # Created by cloud-init
-            #
-            # This file is written on first boot of an instance
-            #
-
-            {0}/{1}/main
-            {0}/{1}/community
-            #
-            # Testing - using with non-Edge installation may cause problems!
-            #
-            {0}/edge/testing
-
-            #
-            # Local repo
-            #
-            {2}/{1}
-
-            """.format(DEFAULT_MIRROR_URL, alpine_version, local_repo_url))
-
-        self.assertEqual(expected_content, util.load_file(REPO_FILE))
-
-    def test_edge_main_community_testing_local_repos(self):
-        """
-        Test when details of main, community, testing and local repos
-        for Edge version of Alpine are written to file.
-        """
-        alpine_version = 'edge'
-        local_repo_url = 'http://some.mirror/whereever'
-        config = {
-            "apk_repos": {
-                "alpine_repo": {
-                    "version": alpine_version,
-                    "community_enabled": True,
-                    "testing_enabled": True
-                },
-                "local_repo_base_url": local_repo_url
-            }
-        }
-
-        cc_apk_configure.handle(self.name, config, self.cloud, self.log,
-                                self.args)
-
-        expected_content = textwrap.dedent("""\
-            #
-            # Created by cloud-init
-            #
-            # This file is written on first boot of an instance
-            #
-
-            {0}/{1}/main
-            {0}/{1}/community
-            {0}/edge/testing
-
-            #
-            # Local repo
-            #
-            {2}/{1}
-
-            """.format(DEFAULT_MIRROR_URL, alpine_version, local_repo_url))
-
-        self.assertEqual(expected_content, util.load_file(REPO_FILE))
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_apt_conf_v1.py b/tests/unittests/test_handler/test_handler_apt_conf_v1.py
deleted file mode 100644
index 6a4b03ee..00000000
--- a/tests/unittests/test_handler/test_handler_apt_conf_v1.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_apt_configure
-from cloudinit import util
-
-from cloudinit.tests.helpers import TestCase
-
-import copy
-import os
-import re
-import shutil
-import tempfile
-
-
-class TestAptProxyConfig(TestCase):
-    def setUp(self):
-        super(TestAptProxyConfig, self).setUp()
-        self.tmp = tempfile.mkdtemp()
-        self.addCleanup(shutil.rmtree, self.tmp)
-        self.pfile = os.path.join(self.tmp, "proxy.cfg")
-        self.cfile = os.path.join(self.tmp, "config.cfg")
-
-    def _search_apt_config(self, contents, ptype, value):
-        return re.search(
-            r"acquire::%s::proxy\s+[\"']%s[\"'];\n" % (ptype, value),
-            contents, flags=re.IGNORECASE)
-
-    def test_apt_proxy_written(self):
-        cfg = {'proxy': 'myproxy'}
-        cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
-
-        self.assertTrue(os.path.isfile(self.pfile))
-        self.assertFalse(os.path.isfile(self.cfile))
-
-        contents = util.load_file(self.pfile)
-        self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
-
-    def test_apt_http_proxy_written(self):
-        cfg = {'http_proxy': 'myproxy'}
-        cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
-
-        self.assertTrue(os.path.isfile(self.pfile))
-        self.assertFalse(os.path.isfile(self.cfile))
-
-        contents = util.load_file(self.pfile)
-        self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
-
-    def test_apt_all_proxy_written(self):
-        cfg = {'http_proxy': 'myproxy_http_proxy',
-               'https_proxy': 'myproxy_https_proxy',
-               'ftp_proxy': 'myproxy_ftp_proxy'}
-
-        values = {'http': cfg['http_proxy'],
-                  'https': cfg['https_proxy'],
-                  'ftp': cfg['ftp_proxy'],
-                  }
-
-        cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
-
-        self.assertTrue(os.path.isfile(self.pfile))
-        self.assertFalse(os.path.isfile(self.cfile))
-
-        contents = util.load_file(self.pfile)
-
-        for ptype, pval in values.items():
-            self.assertTrue(self._search_apt_config(contents, ptype, pval))
-
-    def test_proxy_deleted(self):
-        util.write_file(self.cfile, "content doesnt matter")
-        cc_apt_configure.apply_apt_config({}, self.pfile, self.cfile)
-        self.assertFalse(os.path.isfile(self.pfile))
-        self.assertFalse(os.path.isfile(self.cfile))
-
-    def test_proxy_replaced(self):
-        util.write_file(self.cfile, "content doesnt matter")
-        cc_apt_configure.apply_apt_config({'proxy': "foo"},
-                                          self.pfile, self.cfile)
-        self.assertTrue(os.path.isfile(self.pfile))
-        contents = util.load_file(self.pfile)
-        self.assertTrue(self._search_apt_config(contents, "http", "foo"))
-
-    def test_config_written(self):
-        payload = 'this is my apt config'
-        cfg = {'conf': payload}
-
-        cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
-
-        self.assertTrue(os.path.isfile(self.cfile))
-        self.assertFalse(os.path.isfile(self.pfile))
-
-        self.assertEqual(util.load_file(self.cfile), payload)
-
-    def test_config_replaced(self):
-        util.write_file(self.pfile, "content doesnt matter")
-        cc_apt_configure.apply_apt_config({'conf': "foo"},
-                                          self.pfile, self.cfile)
-        self.assertTrue(os.path.isfile(self.cfile))
-        self.assertEqual(util.load_file(self.cfile), "foo")
-
-    def test_config_deleted(self):
-        # if no 'conf' is provided, delete any previously written file
-        util.write_file(self.pfile, "content doesnt matter")
-        cc_apt_configure.apply_apt_config({}, self.pfile, self.cfile)
-        self.assertFalse(os.path.isfile(self.pfile))
-        self.assertFalse(os.path.isfile(self.cfile))
-
-
-class TestConversion(TestCase):
-    def test_convert_with_apt_mirror_as_empty_string(self):
-        # an empty apt_mirror is the same as no apt_mirror
-        empty_m_found = cc_apt_configure.convert_to_v3_apt_format(
-            {'apt_mirror': ''})
-        default_found = cc_apt_configure.convert_to_v3_apt_format({})
-        self.assertEqual(default_found, empty_m_found)
-
-    def test_convert_with_apt_mirror(self):
-        mirror = 'http://my.mirror/ubuntu'
-        f = cc_apt_configure.convert_to_v3_apt_format({'apt_mirror': mirror})
-        self.assertIn(mirror, set(m['uri'] for m in f['apt']['primary']))
-
-    def test_no_old_content(self):
-        mirror = 'http://my.mirror/ubuntu'
-        mydata = {'apt': {'primary': {'arches': ['default'], 'uri': mirror}}}
-        expected = copy.deepcopy(mydata)
-        self.assertEqual(expected,
-                         cc_apt_configure.convert_to_v3_apt_format(mydata))
-
-
-# vi: ts=4 expandtab
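The _search_apt_config regex above implies the file layout apply_apt_config produces: one 'Acquire::<type>::Proxy "<url>";' line per proxy type. A toy renderer for that format (not cloud-init's implementation):

    def render_apt_proxy(proxies):
        """proxies: e.g. {'http': 'myproxy'} -> apt proxy file body."""
        return ''.join(
            'Acquire::%s::Proxy "%s";\n' % (ptype, url)
            for ptype, url in sorted(proxies.items()))


    assert render_apt_proxy({'http': 'myproxy'}) == (
        'Acquire::http::Proxy "myproxy";\n')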
diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py
deleted file mode 100644
index d69916f9..00000000
--- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py
+++ /dev/null
@@ -1,181 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-""" test_handler_apt_configure_sources_list
-Test templating of sources list
-"""
-import logging
-import os
-import shutil
-import tempfile
-from unittest import mock
-
-from cloudinit import templater
-from cloudinit import subp
-from cloudinit import util
-
-from cloudinit.config import cc_apt_configure
-
-from cloudinit.distros.debian import Distro
-
-from cloudinit.tests import helpers as t_help
-from tests.unittests.util import get_cloud
-
-LOG = logging.getLogger(__name__)
-
-YAML_TEXT_CUSTOM_SL = """
-apt_mirror: http://archive.ubuntu.com/ubuntu/
-apt_custom_sources_list: |
-    ## template:jinja
-    ## Note, this file is written by cloud-init on first boot of an instance
-    ## modifications made here will not survive a re-bundle.
-    ## if you wish to make changes you can:
-    ## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg
-    ##     or do the same in user-data
-    ## b.) add sources in /etc/apt/sources.list.d
-    ## c.) make changes to template file /etc/cloud/templates/sources.list.tmpl
-
-    # See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
-    # newer versions of the distribution.
-    deb {{mirror}} {{codename}} main restricted
-    deb-src {{mirror}} {{codename}} main restricted
-    # FIND_SOMETHING_SPECIAL
-"""
-
-EXPECTED_CONVERTED_CONTENT = (
-    """## Note, this file is written by cloud-init on first boot of an instance
-## modifications made here will not survive a re-bundle.
-## if you wish to make changes you can:
-## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg
-##     or do the same in user-data
-## b.) add sources in /etc/apt/sources.list.d
-## c.) make changes to template file /etc/cloud/templates/sources.list.tmpl
-
-# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
-# newer versions of the distribution.
-deb http://archive.ubuntu.com/ubuntu/ fakerelease main restricted
-deb-src http://archive.ubuntu.com/ubuntu/ fakerelease main restricted
-# FIND_SOMETHING_SPECIAL
-""")
-
-
-class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
-    """TestAptSourceConfigSourceList
-    Main Class to test sources list rendering
-    """
-    def setUp(self):
-        super(TestAptSourceConfigSourceList, self).setUp()
-        self.subp = subp.subp
-        self.new_root = tempfile.mkdtemp()
-        self.addCleanup(shutil.rmtree, self.new_root)
-
-        rpatcher = mock.patch("cloudinit.util.lsb_release")
-        get_rel = rpatcher.start()
-        get_rel.return_value = {'codename': "fakerelease"}
-        self.addCleanup(rpatcher.stop)
-        apatcher = mock.patch("cloudinit.util.get_dpkg_architecture")
-        get_arch = apatcher.start()
-        get_arch.return_value = 'amd64'
-        self.addCleanup(apatcher.stop)
-
-    def apt_source_list(self, distro, mirror, mirrorcheck=None):
-        """apt_source_list
-        Test rendering of a source.list from template for a given distro
-        """
-        if mirrorcheck is None:
-            mirrorcheck = mirror
-
-        if isinstance(mirror, list):
-            cfg = {'apt_mirror_search': mirror}
-        else:
-            cfg = {'apt_mirror': mirror}
-
-        mycloud = get_cloud(distro)
-
-        with mock.patch.object(util, 'write_file') as mockwf:
-            with mock.patch.object(util, 'load_file',
-                                   return_value="faketmpl") as mocklf:
-                with mock.patch.object(os.path, 'isfile',
-                                       return_value=True) as mockisfile:
-                    with mock.patch.object(
-                            templater, 'render_string',
-                            return_value='fake') as mockrnd:
-                        with mock.patch.object(util, 'rename'):
-                            cc_apt_configure.handle("test", cfg, mycloud,
-                                                    LOG, None)
-
-        mockisfile.assert_any_call(
-            ('/etc/cloud/templates/sources.list.%s.tmpl' % distro))
-        mocklf.assert_any_call(
-            ('/etc/cloud/templates/sources.list.%s.tmpl' % distro))
-        mockrnd.assert_called_once_with('faketmpl',
-                                        {'RELEASE': 'fakerelease',
-                                         'PRIMARY': mirrorcheck,
-                                         'MIRROR': mirrorcheck,
-                                         'SECURITY': mirrorcheck,
-                                         'codename': 'fakerelease',
-                                         'primary': mirrorcheck,
-                                         'mirror': mirrorcheck,
-                                         'security': mirrorcheck})
-        mockwf.assert_called_once_with('/etc/apt/sources.list', 'fake',
-                                       mode=0o644)
-
-    def test_apt_v1_source_list_debian(self):
-        """Test rendering of a source.list from template for debian"""
-        self.apt_source_list('debian', 'http://httpredir.debian.org/debian')
-
-    def test_apt_v1_source_list_ubuntu(self):
-        """Test rendering of a source.list from template for ubuntu"""
-        self.apt_source_list('ubuntu', 'http://archive.ubuntu.com/ubuntu/')
-
-    @staticmethod
-    def myresolve(name):
-        """Fake util.is_resolvable for mirrorfail tests"""
-        if name == "does.not.exist":
-            print("Faking FAIL for '%s'" % name)
-            return False
-        else:
-            print("Faking SUCCESS for '%s'" % name)
-            return True
-
-    def test_apt_v1_srcl_debian_mirrorfail(self):
-        """Test rendering of a source.list from template for debian"""
-        with mock.patch.object(util, 'is_resolvable',
-                               side_effect=self.myresolve) as mockresolve:
-            self.apt_source_list('debian',
-                                 ['http://does.not.exist',
-                                  'http://httpredir.debian.org/debian'],
-                                 'http://httpredir.debian.org/debian')
-        mockresolve.assert_any_call("does.not.exist")
-        mockresolve.assert_any_call("httpredir.debian.org")
-
-    def test_apt_v1_srcl_ubuntu_mirrorfail(self):
-        """Test rendering of a source.list from template for ubuntu"""
-        with mock.patch.object(util, 'is_resolvable',
-                               side_effect=self.myresolve) as mockresolve:
-            self.apt_source_list('ubuntu',
-                                 ['http://does.not.exist',
-                                  'http://archive.ubuntu.com/ubuntu/'],
-                                 'http://archive.ubuntu.com/ubuntu/')
-        mockresolve.assert_any_call("does.not.exist")
-        mockresolve.assert_any_call("archive.ubuntu.com")
-
-    def test_apt_v1_srcl_custom(self):
-        """Test rendering from a custom source.list template"""
-        cfg = util.load_yaml(YAML_TEXT_CUSTOM_SL)
-        mycloud = get_cloud()
-
-        # the second mock restores the original subp
-        with mock.patch.object(util, 'write_file') as mockwrite:
-            with mock.patch.object(subp, 'subp', self.subp):
-                with mock.patch.object(Distro, 'get_primary_arch',
-                                       return_value='amd64'):
-                    cc_apt_configure.handle("notimportant", cfg, mycloud,
-                                            LOG, None)
-
-        mockwrite.assert_called_once_with(
-            '/etc/apt/sources.list',
-            EXPECTED_CONVERTED_CONTENT,
-            mode=420)
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py
deleted file mode 100644
index cd6f9239..00000000
--- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py
+++ /dev/null
@@ -1,226 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-""" test_apt_custom_sources_list
-Test templating of custom sources list
-"""
-from contextlib import ExitStack
-import logging
-import os
-import shutil
-import tempfile
-from unittest import mock
-from unittest.mock import call
-
-from cloudinit import subp
-from cloudinit import util
-from cloudinit.config import cc_apt_configure
-from cloudinit.distros.debian import Distro
-from cloudinit.tests import helpers as t_help
-
-from tests.unittests.util import get_cloud
-
-LOG = logging.getLogger(__name__)
-
-TARGET = "/"
-
-# Input and expected output for the custom template
-YAML_TEXT_CUSTOM_SL = """
-apt:
-  primary:
-    - arches: [default]
-      uri: http://test.ubuntu.com/ubuntu/
-  security:
-    - arches: [default]
-      uri: http://testsec.ubuntu.com/ubuntu/
-  sources_list: |
-
-      # Note, this file is written by cloud-init at install time. It should not
-      # end up on the installed system itself.
-      # See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
-      # newer versions of the distribution.
-      deb $MIRROR $RELEASE main restricted
-      deb-src $MIRROR $RELEASE main restricted
-      deb $PRIMARY $RELEASE universe restricted
-      deb $SECURITY $RELEASE-security multiverse
-      # FIND_SOMETHING_SPECIAL
-"""
-
-EXPECTED_CONVERTED_CONTENT = """
-# Note, this file is written by cloud-init at install time. It should not
-# end up on the installed system itself.
-# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
-# newer versions of the distribution.
-deb http://test.ubuntu.com/ubuntu/ fakerel main restricted
-deb-src http://test.ubuntu.com/ubuntu/ fakerel main restricted
-deb http://test.ubuntu.com/ubuntu/ fakerel universe restricted
-deb http://testsec.ubuntu.com/ubuntu/ fakerel-security multiverse
-# FIND_SOMETHING_SPECIAL
-"""
-
-# mocked to be independent to the unittest system
-MOCKED_APT_SRC_LIST = """
-deb http://test.ubuntu.com/ubuntu/ notouched main restricted
-deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
-deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
-deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted
-"""
-
-EXPECTED_BASE_CONTENT = ("""
-deb http://test.ubuntu.com/ubuntu/ notouched main restricted
-deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
-deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
-deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted
-""")
-
-EXPECTED_MIRROR_CONTENT = ("""
-deb http://test.ubuntu.com/ubuntu/ notouched main restricted
-deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
-deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
-deb http://test.ubuntu.com/ubuntu/ notouched-security main restricted
-""")
-
-EXPECTED_PRIMSEC_CONTENT = ("""
-deb http://test.ubuntu.com/ubuntu/ notouched main restricted
-deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted
-deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted
-deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted
-""")
-
-
-class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
-    """TestAptSourceConfigSourceList
-    Class to test sources list rendering"""
-    def setUp(self):
-        super(TestAptSourceConfigSourceList, self).setUp()
-        self.subp = subp.subp
-        self.new_root = tempfile.mkdtemp()
-        self.addCleanup(shutil.rmtree, self.new_root)
-
-        rpatcher = mock.patch("cloudinit.util.lsb_release")
-        get_rel = rpatcher.start()
-        get_rel.return_value = {'codename': "fakerel"}
-        self.addCleanup(rpatcher.stop)
-        apatcher = mock.patch("cloudinit.util.get_dpkg_architecture")
-        get_arch = apatcher.start()
-        get_arch.return_value = 'amd64'
-        self.addCleanup(apatcher.stop)
-
-    def _apt_source_list(self, distro, cfg, cfg_on_empty=False):
-        """_apt_source_list - Test rendering from template (generic)"""
-        # entry at top level now, wrap in 'apt' key
-        cfg = {'apt': cfg}
-        mycloud = get_cloud(distro)
-
-        with ExitStack() as stack:
-            mock_writefile = stack.enter_context(mock.patch.object(
-                util, 'write_file'))
-            mock_loadfile = stack.enter_context(mock.patch.object(
-                util, 'load_file', return_value=MOCKED_APT_SRC_LIST))
-            mock_isfile = stack.enter_context(mock.patch.object(
-                os.path, 'isfile', return_value=True))
-            stack.enter_context(mock.patch.object(
-                util, 'del_file'))
-            cfg_func = ('cloudinit.config.cc_apt_configure.'
-                        '_should_configure_on_empty_apt')
-            mock_shouldcfg = stack.enter_context(mock.patch(
-                cfg_func, return_value=(cfg_on_empty, 'test')
-            ))
-            cc_apt_configure.handle("test", cfg, mycloud, LOG, None)
-
-        return mock_writefile, mock_loadfile, mock_isfile, mock_shouldcfg
-
-    def test_apt_v3_source_list_debian(self):
-        """test_apt_v3_source_list_debian - without custom sources or parms"""
-        cfg = {}
-        distro = 'debian'
-        expected = EXPECTED_BASE_CONTENT
-
-        mock_writefile, mock_load_file, mock_isfile, mock_shouldcfg = (
-            self._apt_source_list(distro, cfg, cfg_on_empty=True))
-
-        template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro
-        mock_writefile.assert_called_once_with('/etc/apt/sources.list',
-                                               expected, mode=0o644)
-        mock_load_file.assert_called_with(template)
-        mock_isfile.assert_any_call(template)
-        self.assertEqual(1, mock_shouldcfg.call_count)
-
-    def test_apt_v3_source_list_ubuntu(self):
-        """test_apt_v3_source_list_ubuntu - without custom sources or parms"""
-        cfg = {}
-        distro = 'ubuntu'
-        expected = EXPECTED_BASE_CONTENT
-
-        mock_writefile, mock_load_file, mock_isfile, mock_shouldcfg = (
-            self._apt_source_list(distro, cfg, cfg_on_empty=True))
-
-        template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro
-        mock_writefile.assert_called_once_with('/etc/apt/sources.list',
-                                               expected, mode=0o644)
-        mock_load_file.assert_called_with(template)
-        mock_isfile.assert_any_call(template)
-        self.assertEqual(1, mock_shouldcfg.call_count)
-
-    def test_apt_v3_source_list_ubuntu_snappy(self):
-        """test_apt_v3_source_list_ubuntu_snappy - without custom sources or
-        parms"""
-        cfg = {'apt': {}}
-        mycloud = get_cloud()
-
-        with mock.patch.object(util, 'write_file') as mock_writefile:
-            with mock.patch.object(util, 'system_is_snappy',
-                                   return_value=True) as mock_issnappy:
-                cc_apt_configure.handle("test", cfg, mycloud, LOG, None)
-
-        self.assertEqual(0, mock_writefile.call_count)
-        self.assertEqual(1, mock_issnappy.call_count)
-
-    def test_apt_v3_source_list_centos(self):
-        """test_apt_v3_source_list_centos - without custom sources or parms"""
-        cfg = {}
-        distro = 'rhel'
-
-        mock_writefile, _, _, _ = self._apt_source_list(distro, cfg)
-
-        self.assertEqual(0, mock_writefile.call_count)
-
-    def test_apt_v3_source_list_psm(self):
-        """test_apt_v3_source_list_psm - Test specifying prim+sec mirrors"""
-        pm = 'http://test.ubuntu.com/ubuntu/'
-        sm = 'http://testsec.ubuntu.com/ubuntu/'
-        cfg = {'preserve_sources_list': False,
-               'primary': [{'arches': ["default"],
-                            'uri': pm}],
-               'security': [{'arches': ["default"],
-                             'uri': sm}]}
-        distro = 'ubuntu'
-        expected = EXPECTED_PRIMSEC_CONTENT
-
-        mock_writefile, mock_load_file, mock_isfile, _ = (
-            self._apt_source_list(distro, cfg, cfg_on_empty=True))
-
-        template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro
-        mock_writefile.assert_called_once_with('/etc/apt/sources.list',
-                                               expected, mode=0o644)
-        mock_load_file.assert_called_with(template)
-        mock_isfile.assert_any_call(template)
-
-    def test_apt_v3_srcl_custom(self):
-        """test_apt_v3_srcl_custom - Test rendering a custom source template"""
-        cfg = util.load_yaml(YAML_TEXT_CUSTOM_SL)
-        mycloud = get_cloud()
-
-        # the second mock restores the original subp
-        with mock.patch.object(util, 'write_file') as mockwrite:
-            with mock.patch.object(subp, 'subp', self.subp):
-                with mock.patch.object(Distro, 'get_primary_arch',
-                                       return_value='amd64'):
-                    cc_apt_configure.handle("notimportant", cfg, mycloud,
-                                            LOG, None)
-
-        calls = [call('/etc/apt/sources.list',
-                      EXPECTED_CONVERTED_CONTENT,
-                      mode=0o644)]
-        mockwrite.assert_has_calls(calls)
-
-
-# vi: ts=4 expandtab
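Both the v1 and v3 fixtures above rely on $MIRROR/$RELEASE (and $PRIMARY/$SECURITY) placeholders being expanded before sources.list is written. Illustrated here with string.Template, which happens to use the same $NAME syntax; cloud-init performs the substitution in its own templater/apt code:

    from string import Template

    line = Template('deb $MIRROR $RELEASE multiverse').substitute(
        MIRROR='http://archive.ubuntu.com/ubuntu', RELEASE='fakerel')
    assert line == 'deb http://archive.ubuntu.com/ubuntu fakerel multiverse'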
TEST_KEY_FINGERPRINT_HUMAN, - TEST_KEY_HUMAN) - - def test_apt_key_list_success_machine(self): - """Verify expected key output, machine - """ - self._apt_key_list_success_helper( - TEST_KEY_FINGERPRINT_MACHINE, - TEST_KEY_MACHINE, human_output=False) - - @mock.patch.object(os, 'listdir', return_value=()) - @mock.patch.object(subp, 'subp', return_value=('', '')) - def test_apt_key_list_fail_no_keys(self, *args): - """Ensure falsy output for no keys - """ - keys = cc_apt_configure.apt_key('list') - assert not keys - - @mock.patch.object(os, 'listdir', return_value=('file_not_gpg_key.txt')) - @mock.patch.object(subp, 'subp', return_value=('', '')) - def test_apt_key_list_fail_no_keys_file(self, *args): - """Ensure non-gpg file is not returned. - - apt-key used file extensions for this, so we do too - """ - assert not cc_apt_configure.apt_key('list') - - @mock.patch.object(subp, 'subp', side_effect=subp.ProcessExecutionError) - @mock.patch.object(os, 'listdir', return_value=('bad_gpg_key.gpg')) - def test_apt_key_list_fail_bad_key_file(self, *args): - """Ensure bad gpg key doesn't throw exeption. - """ - assert not cc_apt_configure.apt_key('list') diff --git a/tests/unittests/test_handler/test_handler_apt_source_v1.py b/tests/unittests/test_handler/test_handler_apt_source_v1.py deleted file mode 100644 index 2357d699..00000000 --- a/tests/unittests/test_handler/test_handler_apt_source_v1.py +++ /dev/null @@ -1,651 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -""" test_handler_apt_source_v1 -Testing various config variations of the apt_source config -This calls all things with v1 format to stress the conversion code on top of -the actually tested code. -""" -import os -import re -import shutil -import tempfile -import pathlib -from unittest import mock -from unittest.mock import call - -from cloudinit.config import cc_apt_configure -from cloudinit import gpg -from cloudinit import subp -from cloudinit import util - -from cloudinit.tests.helpers import TestCase - -EXPECTEDKEY = """-----BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1 - -mI0ESuZLUgEEAKkqq3idtFP7g9hzOu1a8+v8ImawQN4TrvlygfScMU1TIS1eC7UQ -NUA8Qqgr9iUaGnejb0VciqftLrU9D6WYHSKz+EITefgdyJ6SoQxjoJdsCpJ7o9Jy -8PQnpRttiFm4qHu6BVnKnBNxw/z3ST9YMqW5kbMQpfxbGe+obRox59NpABEBAAG0 -HUxhdW5jaHBhZCBQUEEgZm9yIFNjb3R0IE1vc2VyiLYEEwECACAFAkrmS1ICGwMG -CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAGILvPA2g/d3aEA/9tVjc10HOZwV29 -OatVuTeERjjrIbxflO586GLA8cp0C9RQCwgod/R+cKYdQcHjbqVcP0HqxveLg0RZ -FJpWLmWKamwkABErwQLGlM/Hwhjfade8VvEQutH5/0JgKHmzRsoqfR+LMO6OS+Sm -S0ORP6HXET3+jC8BMG4tBWCTK/XEZw== -=ACB2 ------END PGP PUBLIC KEY BLOCK-----""" - -ADD_APT_REPO_MATCH = r"^[\w-]+:\w" - - -class FakeDistro(object): - """Fake Distro helper object""" - def update_package_sources(self): - """Fake update_package_sources helper method""" - return - - -class FakeDatasource: - """Fake Datasource helper object""" - def __init__(self): - self.region = 'region' - - -class FakeCloud(object): - """Fake Cloud helper object""" - def __init__(self): - self.distro = FakeDistro() - self.datasource = FakeDatasource() - - -class TestAptSourceConfig(TestCase): - """TestAptSourceConfig - Main Class to test apt_source configs - """ - release = "fantastic" - - def setUp(self): - super(TestAptSourceConfig, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - self.aptlistfile = os.path.join(self.tmp, "single-deb.list") - self.aptlistfile2 = os.path.join(self.tmp, "single-deb2.list") - self.aptlistfile3 = 
os.path.join(self.tmp, "single-deb3.list") - self.join = os.path.join - self.matcher = re.compile(ADD_APT_REPO_MATCH).search - # mock fallback filename into writable tmp dir - self.fallbackfn = os.path.join(self.tmp, "etc/apt/sources.list.d/", - "cloud_config_sources.list") - - self.fakecloud = FakeCloud() - - rpatcher = mock.patch("cloudinit.util.lsb_release") - get_rel = rpatcher.start() - get_rel.return_value = {'codename': self.release} - self.addCleanup(rpatcher.stop) - apatcher = mock.patch("cloudinit.util.get_dpkg_architecture") - get_arch = apatcher.start() - get_arch.return_value = 'amd64' - self.addCleanup(apatcher.stop) - - def _get_default_params(self): - """get_default_params - Get the most basic default mrror and release info to be used in tests - """ - params = {} - params['RELEASE'] = self.release - params['MIRROR'] = "http://archive.ubuntu.com/ubuntu" - return params - - def wrapv1conf(self, cfg): - params = self._get_default_params() - # old v1 list format under old keys, but callabe to main handler - # disable source.list rendering and set mirror to avoid other code - return {'apt_preserve_sources_list': True, - 'apt_mirror': params['MIRROR'], - 'apt_sources': cfg} - - def myjoin(self, *args, **kwargs): - """myjoin - redir into writable tmpdir""" - if (args[0] == "/etc/apt/sources.list.d/" and - args[1] == "cloud_config_sources.list" and - len(args) == 2): - return self.join(self.tmp, args[0].lstrip("/"), args[1]) - else: - return self.join(*args, **kwargs) - - def apt_src_basic(self, filename, cfg): - """apt_src_basic - Test Fix deb source string, has to overwrite mirror conf in params - """ - cfg = self.wrapv1conf(cfg) - - cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - - self.assertTrue(os.path.isfile(filename)) - - contents = util.load_file(filename) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", "http://archive.ubuntu.com/ubuntu", - "karmic-backports", - "main universe multiverse restricted"), - contents, flags=re.IGNORECASE)) - - def test_apt_src_basic(self): - """Test deb source string, overwrite mirror and filename""" - cfg = {'source': ('deb http://archive.ubuntu.com/ubuntu' - ' karmic-backports' - ' main universe multiverse restricted'), - 'filename': self.aptlistfile} - self.apt_src_basic(self.aptlistfile, [cfg]) - - def test_apt_src_basic_dict(self): - """Test deb source string, overwrite mirror and filename (dict)""" - cfg = {self.aptlistfile: {'source': - ('deb http://archive.ubuntu.com/ubuntu' - ' karmic-backports' - ' main universe multiverse restricted')}} - self.apt_src_basic(self.aptlistfile, cfg) - - def apt_src_basic_tri(self, cfg): - """apt_src_basic_tri - Test Fix three deb source string, has to overwrite mirror conf in - params. Test with filenames provided in config. 
- generic part to check three files with different content - """ - self.apt_src_basic(self.aptlistfile, cfg) - - # extra verify on two extra files of this test - contents = util.load_file(self.aptlistfile2) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", "http://archive.ubuntu.com/ubuntu", - "precise-backports", - "main universe multiverse restricted"), - contents, flags=re.IGNORECASE)) - contents = util.load_file(self.aptlistfile3) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", "http://archive.ubuntu.com/ubuntu", - "lucid-backports", - "main universe multiverse restricted"), - contents, flags=re.IGNORECASE)) - - def test_apt_src_basic_tri(self): - """Test Fix three deb source string with filenames""" - cfg1 = {'source': ('deb http://archive.ubuntu.com/ubuntu' - ' karmic-backports' - ' main universe multiverse restricted'), - 'filename': self.aptlistfile} - cfg2 = {'source': ('deb http://archive.ubuntu.com/ubuntu' - ' precise-backports' - ' main universe multiverse restricted'), - 'filename': self.aptlistfile2} - cfg3 = {'source': ('deb http://archive.ubuntu.com/ubuntu' - ' lucid-backports' - ' main universe multiverse restricted'), - 'filename': self.aptlistfile3} - self.apt_src_basic_tri([cfg1, cfg2, cfg3]) - - def test_apt_src_basic_dict_tri(self): - """Test Fix three deb source string with filenames (dict)""" - cfg = {self.aptlistfile: {'source': - ('deb http://archive.ubuntu.com/ubuntu' - ' karmic-backports' - ' main universe multiverse restricted')}, - self.aptlistfile2: {'source': - ('deb http://archive.ubuntu.com/ubuntu' - ' precise-backports' - ' main universe multiverse restricted')}, - self.aptlistfile3: {'source': - ('deb http://archive.ubuntu.com/ubuntu' - ' lucid-backports' - ' main universe multiverse restricted')}} - self.apt_src_basic_tri(cfg) - - def test_apt_src_basic_nofn(self): - """Test Fix three deb source string without filenames (dict)""" - cfg = {'source': ('deb http://archive.ubuntu.com/ubuntu' - ' karmic-backports' - ' main universe multiverse restricted')} - with mock.patch.object(os.path, 'join', side_effect=self.myjoin): - self.apt_src_basic(self.fallbackfn, [cfg]) - - def apt_src_replacement(self, filename, cfg): - """apt_src_replace - Test Autoreplacement of MIRROR and RELEASE in source specs - """ - cfg = self.wrapv1conf(cfg) - params = self._get_default_params() - cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - - self.assertTrue(os.path.isfile(filename)) - - contents = util.load_file(filename) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", params['MIRROR'], params['RELEASE'], - "multiverse"), - contents, flags=re.IGNORECASE)) - - def test_apt_src_replace(self): - """Test Autoreplacement of MIRROR and RELEASE in source specs""" - cfg = {'source': 'deb $MIRROR $RELEASE multiverse', - 'filename': self.aptlistfile} - self.apt_src_replacement(self.aptlistfile, [cfg]) - - def apt_src_replace_tri(self, cfg): - """apt_src_replace_tri - Test three autoreplacements of MIRROR and RELEASE in source specs with - generic part - """ - self.apt_src_replacement(self.aptlistfile, cfg) - - # extra verify on two extra files of this test - params = self._get_default_params() - contents = util.load_file(self.aptlistfile2) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", params['MIRROR'], params['RELEASE'], - "main"), - contents, flags=re.IGNORECASE)) - contents = util.load_file(self.aptlistfile3) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", params['MIRROR'], params['RELEASE'], - "universe"), - contents, 
flags=re.IGNORECASE)) - - def test_apt_src_replace_tri(self): - """Test triple Autoreplacement of MIRROR and RELEASE in source specs""" - cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse', - 'filename': self.aptlistfile} - cfg2 = {'source': 'deb $MIRROR $RELEASE main', - 'filename': self.aptlistfile2} - cfg3 = {'source': 'deb $MIRROR $RELEASE universe', - 'filename': self.aptlistfile3} - self.apt_src_replace_tri([cfg1, cfg2, cfg3]) - - def test_apt_src_replace_dict_tri(self): - """Test triple Autoreplacement in source specs (dict)""" - cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'}, - 'notused': {'source': 'deb $MIRROR $RELEASE main', - 'filename': self.aptlistfile2}, - self.aptlistfile3: {'source': 'deb $MIRROR $RELEASE universe'}} - self.apt_src_replace_tri(cfg) - - def test_apt_src_replace_nofn(self): - """Test Autoreplacement of MIRROR and RELEASE in source specs nofile""" - cfg = {'source': 'deb $MIRROR $RELEASE multiverse'} - with mock.patch.object(os.path, 'join', side_effect=self.myjoin): - self.apt_src_replacement(self.fallbackfn, [cfg]) - - def apt_src_keyid(self, filename, cfg, keynum): - """apt_src_keyid - Test specification of a source + keyid - """ - cfg = self.wrapv1conf(cfg) - - with mock.patch.object(cc_apt_configure, 'add_apt_key') as mockobj: - cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - - # check if it added the right number of keys - calls = [] - sources = cfg['apt']['sources'] - for src in sources: - print(sources[src]) - calls.append(call(sources[src], None)) - - mockobj.assert_has_calls(calls, any_order=True) - - self.assertTrue(os.path.isfile(filename)) - - contents = util.load_file(filename) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "main"), - contents, flags=re.IGNORECASE)) - - def test_apt_src_keyid(self): - """Test specification of a source + keyid with filename being set""" - cfg = {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'keyid': "03683F77", - 'filename': self.aptlistfile} - self.apt_src_keyid(self.aptlistfile, [cfg], 1) - - def test_apt_src_keyid_tri(self): - """Test 3x specification of a source + keyid with filename being set""" - cfg1 = {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'keyid': "03683F77", - 'filename': self.aptlistfile} - cfg2 = {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial universe'), - 'keyid': "03683F77", - 'filename': self.aptlistfile2} - cfg3 = {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial multiverse'), - 'keyid': "03683F77", - 'filename': self.aptlistfile3} - - self.apt_src_keyid(self.aptlistfile, [cfg1, cfg2, cfg3], 3) - contents = util.load_file(self.aptlistfile2) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "universe"), - contents, flags=re.IGNORECASE)) - contents = util.load_file(self.aptlistfile3) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "multiverse"), - contents, flags=re.IGNORECASE)) - - def test_apt_src_keyid_nofn(self): - """Test specification of a source + keyid without filename being set""" - cfg = {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial 
main'), - 'keyid': "03683F77"} - with mock.patch.object(os.path, 'join', side_effect=self.myjoin): - self.apt_src_keyid(self.fallbackfn, [cfg], 1) - - def apt_src_key(self, filename, cfg): - """apt_src_key - Test specification of a source + key - """ - cfg = self.wrapv1conf([cfg]) - - with mock.patch.object(cc_apt_configure, 'add_apt_key') as mockobj: - cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - - # check if it added the right amount of keys - sources = cfg['apt']['sources'] - calls = [] - for src in sources: - print(sources[src]) - calls.append(call(sources[src], None)) - - mockobj.assert_has_calls(calls, any_order=True) - - self.assertTrue(os.path.isfile(filename)) - - contents = util.load_file(filename) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "main"), - contents, flags=re.IGNORECASE)) - - def test_apt_src_key(self): - """Test specification of a source + key with filename being set""" - cfg = {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'key': "fakekey 4321", - 'filename': self.aptlistfile} - self.apt_src_key(self.aptlistfile, cfg) - - def test_apt_src_key_nofn(self): - """Test specification of a source + key without filename being set""" - cfg = {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'key': "fakekey 4321"} - with mock.patch.object(os.path, 'join', side_effect=self.myjoin): - self.apt_src_key(self.fallbackfn, cfg) - - def test_apt_src_keyonly(self): - """Test specifying key without source""" - cfg = {'key': "fakekey 4242", - 'filename': self.aptlistfile} - cfg = self.wrapv1conf([cfg]) - with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: - cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - - calls = (call( - 'add', - output_file=pathlib.Path(self.aptlistfile).stem, - data='fakekey 4242', - hardened=False),) - mockobj.assert_has_calls(calls, any_order=True) - - # filename should be ignored on key only - self.assertFalse(os.path.isfile(self.aptlistfile)) - - def test_apt_src_keyidonly(self): - """Test specification of a keyid without source""" - cfg = {'keyid': "03683F77", - 'filename': self.aptlistfile} - cfg = self.wrapv1conf([cfg]) - - with mock.patch.object(subp, 'subp', - return_value=('fakekey 1212', '')): - with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: - cc_apt_configure.handle( - "test", - cfg, - self.fakecloud, - None, - None) - - calls = (call( - 'add', - output_file=pathlib.Path(self.aptlistfile).stem, - data='fakekey 1212', - hardened=False),) - mockobj.assert_has_calls(calls, any_order=True) - - # filename should be ignored on key only - self.assertFalse(os.path.isfile(self.aptlistfile)) - - def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None): - """apt_src_keyid_real - Test specification of a keyid without source including - up to addition of the key (add_apt_key_raw mocked to keep the - environment as is) - """ - key = cfg['keyid'] - keyserver = cfg.get('keyserver', 'keyserver.ubuntu.com') - cfg = self.wrapv1conf([cfg]) - - with mock.patch.object(cc_apt_configure, 'add_apt_key_raw') as mockkey: - with mock.patch.object(gpg, 'getkeybyid', - return_value=expectedkey) as mockgetkey: - cc_apt_configure.handle("test", cfg, self.fakecloud, - None, None) - if is_hardened is not None: - mockkey.assert_called_with( - expectedkey, - self.aptlistfile, - hardened=is_hardened) - else: - 
mockkey.assert_called_with(expectedkey, self.aptlistfile) - mockgetkey.assert_called_with(key, keyserver) - - # filename should be ignored on key only - self.assertFalse(os.path.isfile(self.aptlistfile)) - - def test_apt_src_keyid_real(self): - """test_apt_src_keyid_real - Test keyid including key add""" - keyid = "03683F77" - cfg = {'keyid': keyid, - 'filename': self.aptlistfile} - - self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) - - def test_apt_src_longkeyid_real(self): - """test_apt_src_longkeyid_real - Test long keyid including key add""" - keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" - cfg = {'keyid': keyid, - 'filename': self.aptlistfile} - - self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) - - def test_apt_src_longkeyid_ks_real(self): - """test_apt_src_longkeyid_ks_real - Test long keyid from other ks""" - keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" - cfg = {'keyid': keyid, - 'keyserver': 'keys.gnupg.net', - 'filename': self.aptlistfile} - - self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) - - def test_apt_src_ppa(self): - """Test adding a ppa""" - cfg = {'source': 'ppa:smoser/cloud-init-test', - 'filename': self.aptlistfile} - cfg = self.wrapv1conf([cfg]) - - with mock.patch.object(subp, 'subp') as mockobj: - cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - mockobj.assert_called_once_with(['add-apt-repository', - 'ppa:smoser/cloud-init-test'], - target=None) - - # adding ppa should ignore filename (uses add-apt-repository) - self.assertFalse(os.path.isfile(self.aptlistfile)) - - def test_apt_src_ppa_tri(self): - """Test adding three ppa's""" - cfg1 = {'source': 'ppa:smoser/cloud-init-test', - 'filename': self.aptlistfile} - cfg2 = {'source': 'ppa:smoser/cloud-init-test2', - 'filename': self.aptlistfile2} - cfg3 = {'source': 'ppa:smoser/cloud-init-test3', - 'filename': self.aptlistfile3} - cfg = self.wrapv1conf([cfg1, cfg2, cfg3]) - - with mock.patch.object(subp, 'subp') as mockobj: - cc_apt_configure.handle("test", cfg, self.fakecloud, - None, None) - calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test'], - target=None), - call(['add-apt-repository', 'ppa:smoser/cloud-init-test2'], - target=None), - call(['add-apt-repository', 'ppa:smoser/cloud-init-test3'], - target=None)] - mockobj.assert_has_calls(calls, any_order=True) - - # adding ppa should ignore all filenames (uses add-apt-repository) - self.assertFalse(os.path.isfile(self.aptlistfile)) - self.assertFalse(os.path.isfile(self.aptlistfile2)) - self.assertFalse(os.path.isfile(self.aptlistfile3)) - - def test_convert_to_new_format(self): - """Test the conversion of old to new format""" - cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse', - 'filename': self.aptlistfile} - cfg2 = {'source': 'deb $MIRROR $RELEASE main', - 'filename': self.aptlistfile2} - cfg3 = {'source': 'deb $MIRROR $RELEASE universe', - 'filename': self.aptlistfile3} - cfg = {'apt_sources': [cfg1, cfg2, cfg3]} - checkcfg = {self.aptlistfile: {'filename': self.aptlistfile, - 'source': 'deb $MIRROR $RELEASE ' - 'multiverse'}, - self.aptlistfile2: {'filename': self.aptlistfile2, - 'source': 'deb $MIRROR $RELEASE main'}, - self.aptlistfile3: {'filename': self.aptlistfile3, - 'source': 'deb $MIRROR $RELEASE ' - 'universe'}} - - newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg) - self.assertEqual(newcfg['apt']['sources'], checkcfg) - - # convert again, should stay the same - newcfg2 = cc_apt_configure.convert_to_v3_apt_format(newcfg) - 
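# Round-trip note (a sketch using only the public convert_to_v3_apt_format()
# exercised in this test; the filename below is illustrative): a legacy
# top-level 'apt_sources' list such as
#     {'apt_sources': [{'source': 'deb $MIRROR $RELEASE multiverse',
#                       'filename': '/etc/apt/sources.list.d/mine.list'}]}
# is lifted into a dict keyed by filename under cfg['apt']['sources'];
# converting an already-converted config is a no-op, which the assertion
# below verifies against the same checkcfg.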
self.assertEqual(newcfg2['apt']['sources'], checkcfg) - - # should work without raising an exception - cc_apt_configure.convert_to_v3_apt_format({}) - - with self.assertRaises(ValueError): - cc_apt_configure.convert_to_v3_apt_format({'apt_sources': 5}) - - def test_convert_to_new_format_collision(self): - """Test the conversion of old to new format with collisions - That matches e.g. the MAAS case specifying old and new config""" - cfg_1_and_3 = {'apt': {'proxy': 'http://192.168.122.1:8000/'}, - 'apt_proxy': 'http://192.168.122.1:8000/'} - cfg_3_only = {'apt': {'proxy': 'http://192.168.122.1:8000/'}} - cfgconflict = {'apt': {'proxy': 'http://192.168.122.1:8000/'}, - 'apt_proxy': 'ftp://192.168.122.1:8000/'} - - # collision (equal) - newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3) - self.assertEqual(newcfg, cfg_3_only) - # collision (equal, so ok to remove) - newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_3_only) - self.assertEqual(newcfg, cfg_3_only) - # collision (unequal) - match = "Old and New.*unequal.*apt_proxy" - with self.assertRaisesRegex(ValueError, match): - cc_apt_configure.convert_to_v3_apt_format(cfgconflict) - - def test_convert_to_new_format_dict_collision(self): - cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse', - 'filename': self.aptlistfile} - cfg2 = {'source': 'deb $MIRROR $RELEASE main', - 'filename': self.aptlistfile2} - cfg3 = {'source': 'deb $MIRROR $RELEASE universe', - 'filename': self.aptlistfile3} - fullv3 = {self.aptlistfile: {'filename': self.aptlistfile, - 'source': 'deb $MIRROR $RELEASE ' - 'multiverse'}, - self.aptlistfile2: {'filename': self.aptlistfile2, - 'source': 'deb $MIRROR $RELEASE main'}, - self.aptlistfile3: {'filename': self.aptlistfile3, - 'source': 'deb $MIRROR $RELEASE ' - 'universe'}} - cfg_3_only = {'apt': {'sources': fullv3}} - cfg_1_and_3 = {'apt_sources': [cfg1, cfg2, cfg3]} - cfg_1_and_3.update(cfg_3_only) - - # collision (equal, so ok to remove) - newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3) - self.assertEqual(newcfg, cfg_3_only) - # no old spec (same result) - newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_3_only) - self.assertEqual(newcfg, cfg_3_only) - - diff = {self.aptlistfile: {'filename': self.aptlistfile, - 'source': 'deb $MIRROR $RELEASE ' - 'DIFFERENTVERSE'}, - self.aptlistfile2: {'filename': self.aptlistfile2, - 'source': 'deb $MIRROR $RELEASE main'}, - self.aptlistfile3: {'filename': self.aptlistfile3, - 'source': 'deb $MIRROR $RELEASE ' - 'universe'}} - cfg_3_only = {'apt': {'sources': diff}} - cfg_1_and_3_different = {'apt_sources': [cfg1, cfg2, cfg3]} - cfg_1_and_3_different.update(cfg_3_only) - - # collision (unequal by dict having a different entry) - with self.assertRaises(ValueError): - cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3_different) - - missing = {self.aptlistfile: {'filename': self.aptlistfile, - 'source': 'deb $MIRROR $RELEASE ' - 'multiverse'}} - cfg_3_only = {'apt': {'sources': missing}} - cfg_1_and_3_missing = {'apt_sources': [cfg1, cfg2, cfg3]} - cfg_1_and_3_missing.update(cfg_3_only) - # collision (unequal by dict missing an entry) - with self.assertRaises(ValueError): - cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3_missing) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py deleted file mode 100644 index 20289121..00000000 --- a/tests/unittests/test_handler/test_handler_apt_source_v3.py +++ /dev/null @@ -1,1170 +0,0 @@ -# 
This file is part of cloud-init. See LICENSE file for license information. - -"""test_handler_apt_source_v3 -Testing various config variations of the apt_source custom config -This tries to call all of them in the new v3 format and cares about new features -""" -import glob -import os -import re -import shutil -import socket -import tempfile -import pathlib - -from unittest import TestCase, mock -from unittest.mock import call - -from cloudinit import gpg -from cloudinit import subp -from cloudinit import util -from cloudinit.config import cc_apt_configure -from cloudinit.tests import helpers as t_help - -from tests.unittests.util import get_cloud - -EXPECTEDKEY = """-----BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1 - -mI0ESuZLUgEEAKkqq3idtFP7g9hzOu1a8+v8ImawQN4TrvlygfScMU1TIS1eC7UQ -NUA8Qqgr9iUaGnejb0VciqftLrU9D6WYHSKz+EITefgdyJ6SoQxjoJdsCpJ7o9Jy -8PQnpRttiFm4qHu6BVnKnBNxw/z3ST9YMqW5kbMQpfxbGe+obRox59NpABEBAAG0 -HUxhdW5jaHBhZCBQUEEgZm9yIFNjb3R0IE1vc2VyiLYEEwECACAFAkrmS1ICGwMG -CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAGILvPA2g/d3aEA/9tVjc10HOZwV29 -OatVuTeERjjrIbxflO586GLA8cp0C9RQCwgod/R+cKYdQcHjbqVcP0HqxveLg0RZ -FJpWLmWKamwkABErwQLGlM/Hwhjfade8VvEQutH5/0JgKHmzRsoqfR+LMO6OS+Sm -S0ORP6HXET3+jC8BMG4tBWCTK/XEZw== -=ACB2 ------END PGP PUBLIC KEY BLOCK-----""" - -ADD_APT_REPO_MATCH = r"^[\w-]+:\w" - -TARGET = None - -MOCK_LSB_RELEASE_DATA = { - 'id': 'Ubuntu', 'description': 'Ubuntu 18.04.1 LTS', - 'release': '18.04', 'codename': 'bionic'} - - -class FakeDatasource: - """Fake Datasource helper object""" - def __init__(self): - self.region = 'region' - - -class FakeCloud: - """Fake Cloud helper object""" - def __init__(self): - self.datasource = FakeDatasource() - - -class TestAptSourceConfig(t_help.FilesystemMockingTestCase): - """TestAptSourceConfig - Main Class to test apt configs - """ - def setUp(self): - super(TestAptSourceConfig, self).setUp() - self.tmp = tempfile.mkdtemp() - self.new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - self.addCleanup(shutil.rmtree, self.new_root) - self.aptlistfile = os.path.join(self.tmp, "single-deb.list") - self.aptlistfile2 = os.path.join(self.tmp, "single-deb2.list") - self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list") - self.join = os.path.join - self.matcher = re.compile(ADD_APT_REPO_MATCH).search - self.add_patch( - 'cloudinit.config.cc_apt_configure.util.lsb_release', - 'm_lsb_release', return_value=MOCK_LSB_RELEASE_DATA.copy()) - - @staticmethod - def _add_apt_sources(*args, **kwargs): - with mock.patch.object(cc_apt_configure, 'update_packages'): - cc_apt_configure.add_apt_sources(*args, **kwargs) - - @staticmethod - def _get_default_params(): - """_get_default_params - Get the most basic default mirror and release info to be used in tests - """ - params = {} - params['RELEASE'] = MOCK_LSB_RELEASE_DATA['release'] - arch = 'amd64' - params['MIRROR'] = cc_apt_configure.\ - get_default_mirrors(arch)["PRIMARY"] - return params - - def _myjoin(self, *args, **kwargs): - """_myjoin - redirect into writable tmpdir""" - if (args[0] == "/etc/apt/sources.list.d/" and - args[1] == "cloud_config_sources.list" and - len(args) == 2): - return self.join(self.tmp, args[0].lstrip("/"), args[1]) - else: - return self.join(*args, **kwargs) - - def _apt_src_basic(self, filename, cfg): - """_apt_src_basic - Test fix deb source string; has to overwrite mirror conf in params - """ - params = self._get_default_params() - - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - - 
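# A note on the two keyword arguments just used (behaviour inferred from
# this module's own constants and tests): template_params supplies the
# $MIRROR/$RELEASE values substituted into each 'source' line, while
# aa_repo_match is the compiled ADD_APT_REPO_MATCH regex (r"^[\w-]+:\w")
# that recognises 'ppa:user/archive'-style specs, which are routed through
# add-apt-repository instead of being written to a .list file.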
self.assertTrue(os.path.isfile(filename)) - - contents = util.load_file(filename) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", "http://test.ubuntu.com/ubuntu", - "karmic-backports", - "main universe multiverse restricted"), - contents, flags=re.IGNORECASE)) - - def test_apt_v3_src_basic(self): - """test_apt_v3_src_basic - Test fix deb source string""" - cfg = {self.aptlistfile: {'source': - ('deb http://test.ubuntu.com/ubuntu' - ' karmic-backports' - ' main universe multiverse restricted')}} - self._apt_src_basic(self.aptlistfile, cfg) - - def test_apt_v3_src_basic_tri(self): - """test_apt_v3_src_basic_tri - Test multiple fix deb source strings""" - cfg = {self.aptlistfile: {'source': - ('deb http://test.ubuntu.com/ubuntu' - ' karmic-backports' - ' main universe multiverse restricted')}, - self.aptlistfile2: {'source': - ('deb http://test.ubuntu.com/ubuntu' - ' precise-backports' - ' main universe multiverse restricted')}, - self.aptlistfile3: {'source': - ('deb http://test.ubuntu.com/ubuntu' - ' lucid-backports' - ' main universe multiverse restricted')}} - self._apt_src_basic(self.aptlistfile, cfg) - - # extra verify on two extra files of this test - contents = util.load_file(self.aptlistfile2) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", "http://test.ubuntu.com/ubuntu", - "precise-backports", - "main universe multiverse restricted"), - contents, flags=re.IGNORECASE)) - contents = util.load_file(self.aptlistfile3) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", "http://test.ubuntu.com/ubuntu", - "lucid-backports", - "main universe multiverse restricted"), - contents, flags=re.IGNORECASE)) - - def _apt_src_replacement(self, filename, cfg): - """apt_src_replace - Test Autoreplacement of MIRROR and RELEASE in source specs - """ - params = self._get_default_params() - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - - self.assertTrue(os.path.isfile(filename)) - - contents = util.load_file(filename) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", params['MIRROR'], params['RELEASE'], - "multiverse"), - contents, flags=re.IGNORECASE)) - - def test_apt_v3_src_replace(self): - """test_apt_v3_src_replace - Test replacement of MIRROR & RELEASE""" - cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'}} - self._apt_src_replacement(self.aptlistfile, cfg) - - def test_apt_v3_src_replace_fn(self): - """test_apt_v3_src_replace_fn - Test filename overwritten in dict""" - cfg = {'ignored': {'source': 'deb $MIRROR $RELEASE multiverse', - 'filename': self.aptlistfile}} - # second file should overwrite the dict key - self._apt_src_replacement(self.aptlistfile, cfg) - - def _apt_src_replace_tri(self, cfg): - """_apt_src_replace_tri - Test three autoreplacements of MIRROR and RELEASE in source specs with - generic part - """ - self._apt_src_replacement(self.aptlistfile, cfg) - - # extra verify on two extra files of this test - params = self._get_default_params() - contents = util.load_file(self.aptlistfile2) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", params['MIRROR'], params['RELEASE'], - "main"), - contents, flags=re.IGNORECASE)) - contents = util.load_file(self.aptlistfile3) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", params['MIRROR'], params['RELEASE'], - "universe"), - contents, flags=re.IGNORECASE)) - - def test_apt_v3_src_replace_tri(self): - """test_apt_v3_src_replace_tri - Test multiple replace/overwrites""" - cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE 
multiverse'}, - 'notused': {'source': 'deb $MIRROR $RELEASE main', - 'filename': self.aptlistfile2}, - self.aptlistfile3: {'source': 'deb $MIRROR $RELEASE universe'}} - self._apt_src_replace_tri(cfg) - - def _apt_src_keyid(self, filename, cfg, keynum, is_hardened=None): - """_apt_src_keyid - Test specification of a source + keyid - """ - params = self._get_default_params() - - with mock.patch.object(cc_apt_configure, 'add_apt_key') as mockobj: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - - # check if it added the right number of keys - calls = [] - for key in cfg: - if is_hardened is not None: - calls.append(call(cfg[key], hardened=is_hardened)) - else: - calls.append(call(cfg[key], TARGET)) - - mockobj.assert_has_calls(calls, any_order=True) - - self.assertTrue(os.path.isfile(filename)) - - contents = util.load_file(filename) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "main"), - contents, flags=re.IGNORECASE)) - - def test_apt_v3_src_keyid(self): - """test_apt_v3_src_keyid - Test source + keyid with filename""" - cfg = {self.aptlistfile: {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'filename': self.aptlistfile, - 'keyid': "03683F77"}} - self._apt_src_keyid(self.aptlistfile, cfg, 1) - - def test_apt_v3_src_keyid_tri(self): - """test_apt_v3_src_keyid_tri - Test multiple src+key+filen writes""" - cfg = {self.aptlistfile: {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'keyid': "03683F77"}, - 'ignored': {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial universe'), - 'keyid': "03683F77", - 'filename': self.aptlistfile2}, - self.aptlistfile3: {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial multiverse'), - 'filename': self.aptlistfile3, - 'keyid': "03683F77"}} - - self._apt_src_keyid(self.aptlistfile, cfg, 3) - contents = util.load_file(self.aptlistfile2) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "universe"), - contents, flags=re.IGNORECASE)) - contents = util.load_file(self.aptlistfile3) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "multiverse"), - contents, flags=re.IGNORECASE)) - - def test_apt_v3_src_key(self): - """test_apt_v3_src_key - Test source + key""" - params = self._get_default_params() - cfg = {self.aptlistfile: {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'filename': self.aptlistfile, - 'key': "fakekey 4321"}} - - with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - - calls = (call( - 'add', - output_file=pathlib.Path(self.aptlistfile).stem, - data='fakekey 4321', - hardened=False),) - mockobj.assert_has_calls(calls, any_order=True) - self.assertTrue(os.path.isfile(self.aptlistfile)) - - contents = util.load_file(self.aptlistfile) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "main"), - contents, flags=re.IGNORECASE)) - - def test_apt_v3_src_keyonly(self): - """test_apt_v3_src_keyonly - Test key without 
source""" - params = self._get_default_params() - cfg = {self.aptlistfile: {'key': "fakekey 4242"}} - - with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - - calls = (call( - 'add', - output_file=pathlib.Path(self.aptlistfile).stem, - data='fakekey 4242', - hardened=False),) - mockobj.assert_has_calls(calls, any_order=True) - - # filename should be ignored on key only - self.assertFalse(os.path.isfile(self.aptlistfile)) - - def test_apt_v3_src_keyidonly(self): - """test_apt_v3_src_keyidonly - Test keyid without source""" - params = self._get_default_params() - cfg = {self.aptlistfile: {'keyid': "03683F77"}} - with mock.patch.object(subp, 'subp', - return_value=('fakekey 1212', '')): - with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - - calls = (call( - 'add', - output_file=pathlib.Path(self.aptlistfile).stem, - data='fakekey 1212', - hardened=False),) - mockobj.assert_has_calls(calls, any_order=True) - - # filename should be ignored on key only - self.assertFalse(os.path.isfile(self.aptlistfile)) - - def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None): - """apt_src_keyid_real - Test specification of a keyid without source including - up to addition of the key (add_apt_key_raw mocked to keep the - environment as is) - """ - params = self._get_default_params() - - with mock.patch.object(cc_apt_configure, 'add_apt_key_raw') as mockkey: - with mock.patch.object(gpg, 'getkeybyid', - return_value=expectedkey) as mockgetkey: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - - keycfg = cfg[self.aptlistfile] - mockgetkey.assert_called_with(keycfg['keyid'], - keycfg.get('keyserver', - 'keyserver.ubuntu.com')) - if is_hardened is not None: - mockkey.assert_called_with( - expectedkey, - keycfg['keyfile'], - hardened=is_hardened) - - # filename should be ignored on key only - self.assertFalse(os.path.isfile(self.aptlistfile)) - - def test_apt_v3_src_keyid_real(self): - """test_apt_v3_src_keyid_real - Test keyid including key add""" - keyid = "03683F77" - cfg = {self.aptlistfile: {'keyid': keyid, - 'keyfile': self.aptlistfile}} - - self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) - - def test_apt_v3_src_longkeyid_real(self): - """test_apt_v3_src_longkeyid_real Test long keyid including key add""" - keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" - cfg = {self.aptlistfile: {'keyid': keyid, - 'keyfile': self.aptlistfile}} - - self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) - - def test_apt_v3_src_longkeyid_ks_real(self): - """test_apt_v3_src_longkeyid_ks_real Test long keyid from other ks""" - keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" - cfg = {self.aptlistfile: {'keyid': keyid, - 'keyfile': self.aptlistfile, - 'keyserver': 'keys.gnupg.net'}} - - self.apt_src_keyid_real(cfg, EXPECTEDKEY) - - def test_apt_v3_src_keyid_keyserver(self): - """test_apt_v3_src_keyid_keyserver - Test custom keyserver""" - keyid = "03683F77" - params = self._get_default_params() - cfg = {self.aptlistfile: {'keyid': keyid, - 'keyfile': self.aptlistfile, - 'keyserver': 'test.random.com'}} - - # in some test environments only *.ubuntu.com is reachable - # so mock the call and check if the config got there - with mock.patch.object(gpg, 'getkeybyid', - return_value="fakekey") as mockgetkey: - with 
mock.patch.object(cc_apt_configure, - 'add_apt_key_raw') as mockadd: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - - mockgetkey.assert_called_with('03683F77', 'test.random.com') - mockadd.assert_called_with('fakekey', self.aptlistfile, hardened=False) - - # filename should be ignored on key only - self.assertFalse(os.path.isfile(self.aptlistfile)) - - def test_apt_v3_src_ppa(self): - """test_apt_v3_src_ppa - Test specification of a ppa""" - params = self._get_default_params() - cfg = {self.aptlistfile: {'source': 'ppa:smoser/cloud-init-test'}} - - with mock.patch("cloudinit.subp.subp") as mockobj: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - mockobj.assert_any_call(['add-apt-repository', - 'ppa:smoser/cloud-init-test'], target=TARGET) - - # adding ppa should ignore filename (uses add-apt-repository) - self.assertFalse(os.path.isfile(self.aptlistfile)) - - def test_apt_v3_src_ppa_tri(self): - """test_apt_v3_src_ppa_tri - Test specification of multiple ppa's""" - params = self._get_default_params() - cfg = {self.aptlistfile: {'source': 'ppa:smoser/cloud-init-test'}, - self.aptlistfile2: {'source': 'ppa:smoser/cloud-init-test2'}, - self.aptlistfile3: {'source': 'ppa:smoser/cloud-init-test3'}} - - with mock.patch("cloudinit.subp.subp") as mockobj: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test'], - target=TARGET), - call(['add-apt-repository', 'ppa:smoser/cloud-init-test2'], - target=TARGET), - call(['add-apt-repository', 'ppa:smoser/cloud-init-test3'], - target=TARGET)] - mockobj.assert_has_calls(calls, any_order=True) - - # adding ppa should ignore all filenames (uses add-apt-repository) - self.assertFalse(os.path.isfile(self.aptlistfile)) - self.assertFalse(os.path.isfile(self.aptlistfile2)) - self.assertFalse(os.path.isfile(self.aptlistfile3)) - - @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture") - def test_apt_v3_list_rename(self, m_get_dpkg_architecture): - """test_apt_v3_list_rename - Test find mirror and apt list renaming""" - pre = "/var/lib/apt/lists" - # filenames are archive dependent - - arch = 's390x' - m_get_dpkg_architecture.return_value = arch - component = "ubuntu-ports" - archive = "ports.ubuntu.com" - - cfg = {'primary': [{'arches': ["default"], - 'uri': - 'http://test.ubuntu.com/%s/' % component}], - 'security': [{'arches': ["default"], - 'uri': - 'http://testsec.ubuntu.com/%s/' % component}]} - post = ("%s_dists_%s-updates_InRelease" % - (component, MOCK_LSB_RELEASE_DATA['codename'])) - fromfn = ("%s/%s_%s" % (pre, archive, post)) - tofn = ("%s/test.ubuntu.com_%s" % (pre, post)) - - mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch) - - self.assertEqual(mirrors['MIRROR'], - "http://test.ubuntu.com/%s/" % component) - self.assertEqual(mirrors['PRIMARY'], - "http://test.ubuntu.com/%s/" % component) - self.assertEqual(mirrors['SECURITY'], - "http://testsec.ubuntu.com/%s/" % component) - - with mock.patch.object(os, 'rename') as mockren: - with mock.patch.object(glob, 'glob', - return_value=[fromfn]): - cc_apt_configure.rename_apt_lists(mirrors, TARGET, arch) - - mockren.assert_any_call(fromfn, tofn) - - @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture") - def test_apt_v3_list_rename_non_slash(self, m_get_dpkg_architecture): - target = os.path.join(self.tmp, "rename_non_slash") - apt_lists_d = 
os.path.join(target, "./" + cc_apt_configure.APT_LISTS) - - arch = 'amd64' - m_get_dpkg_architecture.return_value = arch - - mirror_path = "some/random/path/" - primary = "http://test.ubuntu.com/" + mirror_path - security = "http://test-security.ubuntu.com/" + mirror_path - mirrors = {'PRIMARY': primary, 'SECURITY': security} - - # these match default archive prefixes - opri_pre = "archive.ubuntu.com_ubuntu_dists_xenial" - osec_pre = "security.ubuntu.com_ubuntu_dists_xenial" - # this one won't match and should not be renamed defaults. - other_pre = "dl.google.com_linux_chrome_deb_dists_stable" - # these are our new expected prefixes - npri_pre = "test.ubuntu.com_some_random_path_dists_xenial" - nsec_pre = "test-security.ubuntu.com_some_random_path_dists_xenial" - - files = [ - # orig prefix, new prefix, suffix - (opri_pre, npri_pre, "_main_binary-amd64_Packages"), - (opri_pre, npri_pre, "_main_binary-amd64_InRelease"), - (opri_pre, npri_pre, "-updates_main_binary-amd64_Packages"), - (opri_pre, npri_pre, "-updates_main_binary-amd64_InRelease"), - (other_pre, other_pre, "_main_binary-amd64_Packages"), - (other_pre, other_pre, "_Release"), - (other_pre, other_pre, "_Release.gpg"), - (osec_pre, nsec_pre, "_InRelease"), - (osec_pre, nsec_pre, "_main_binary-amd64_Packages"), - (osec_pre, nsec_pre, "_universe_binary-amd64_Packages"), - ] - - expected = sorted([npre + suff for opre, npre, suff in files]) - # create files - for (opre, _npre, suff) in files: - fpath = os.path.join(apt_lists_d, opre + suff) - util.write_file(fpath, content=fpath) - - cc_apt_configure.rename_apt_lists(mirrors, target, arch) - found = sorted(os.listdir(apt_lists_d)) - self.assertEqual(expected, found) - - @staticmethod - def test_apt_v3_proxy(): - """test_apt_v3_proxy - Test apt_*proxy configuration""" - cfg = {"proxy": "foobar1", - "http_proxy": "foobar2", - "ftp_proxy": "foobar3", - "https_proxy": "foobar4"} - - with mock.patch.object(util, 'write_file') as mockobj: - cc_apt_configure.apply_apt_config(cfg, "proxyfn", "notused") - - mockobj.assert_called_with('proxyfn', - ('Acquire::http::Proxy "foobar1";\n' - 'Acquire::http::Proxy "foobar2";\n' - 'Acquire::ftp::Proxy "foobar3";\n' - 'Acquire::https::Proxy "foobar4";\n')) - - def test_apt_v3_mirror(self): - """test_apt_v3_mirror - Test defining a mirror""" - pmir = "http://us.archive.ubuntu.com/ubuntu/" - smir = "http://security.ubuntu.com/ubuntu/" - cfg = {"primary": [{'arches': ["default"], - "uri": pmir}], - "security": [{'arches': ["default"], - "uri": smir}]} - - mirrors = cc_apt_configure.find_apt_mirror_info( - cfg, FakeCloud(), 'amd64') - - self.assertEqual(mirrors['MIRROR'], - pmir) - self.assertEqual(mirrors['PRIMARY'], - pmir) - self.assertEqual(mirrors['SECURITY'], - smir) - - def test_apt_v3_mirror_default(self): - """test_apt_v3_mirror_default - Test without defining a mirror""" - arch = 'amd64' - default_mirrors = cc_apt_configure.get_default_mirrors(arch) - pmir = default_mirrors["PRIMARY"] - smir = default_mirrors["SECURITY"] - mycloud = get_cloud() - mirrors = cc_apt_configure.find_apt_mirror_info({}, mycloud, arch) - - self.assertEqual(mirrors['MIRROR'], - pmir) - self.assertEqual(mirrors['PRIMARY'], - pmir) - self.assertEqual(mirrors['SECURITY'], - smir) - - def test_apt_v3_mirror_arches(self): - """test_apt_v3_mirror_arches - Test arches selection of mirror""" - pmir = "http://my-primary.ubuntu.com/ubuntu/" - smir = "http://my-security.ubuntu.com/ubuntu/" - arch = 'ppc64el' - cfg = {"primary": [{'arches': ["default"], "uri": "notthis-primary"}, - 
{'arches': [arch], "uri": pmir}], - "security": [{'arches': ["default"], "uri": "notthis-security"}, - {'arches': [arch], "uri": smir}]} - - mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch) - - self.assertEqual(mirrors['PRIMARY'], pmir) - self.assertEqual(mirrors['MIRROR'], pmir) - self.assertEqual(mirrors['SECURITY'], smir) - - def test_apt_v3_mirror_arches_default(self): - """test_apt_v3_mirror_arches_default - Test falling back to default arch""" - pmir = "http://us.archive.ubuntu.com/ubuntu/" - smir = "http://security.ubuntu.com/ubuntu/" - cfg = {"primary": [{'arches': ["default"], - "uri": pmir}, - {'arches': ["thisarchdoesntexist"], - "uri": "notthis"}], - "security": [{'arches': ["thisarchdoesntexist"], - "uri": "nothat"}, - {'arches': ["default"], - "uri": smir}]} - - mirrors = cc_apt_configure.find_apt_mirror_info( - cfg, FakeCloud(), 'amd64') - - self.assertEqual(mirrors['MIRROR'], - pmir) - self.assertEqual(mirrors['PRIMARY'], - pmir) - self.assertEqual(mirrors['SECURITY'], - smir) - - @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture") - def test_apt_v3_get_def_mir_non_intel_no_arch( - self, m_get_dpkg_architecture - ): - arch = 'ppc64el' - m_get_dpkg_architecture.return_value = arch - expected = {'PRIMARY': 'http://ports.ubuntu.com/ubuntu-ports', - 'SECURITY': 'http://ports.ubuntu.com/ubuntu-ports'} - self.assertEqual(expected, cc_apt_configure.get_default_mirrors()) - - def test_apt_v3_get_default_mirrors_non_intel_with_arch(self): - found = cc_apt_configure.get_default_mirrors('ppc64el') - - expected = {'PRIMARY': 'http://ports.ubuntu.com/ubuntu-ports', - 'SECURITY': 'http://ports.ubuntu.com/ubuntu-ports'} - self.assertEqual(expected, found) - - def test_apt_v3_mirror_arches_sysdefault(self): - """test_apt_v3_mirror_arches_sysdefault - Test arches fallback to sys default""" - arch = 'amd64' - default_mirrors = cc_apt_configure.get_default_mirrors(arch) - pmir = default_mirrors["PRIMARY"] - smir = default_mirrors["SECURITY"] - mycloud = get_cloud() - cfg = {"primary": [{'arches': ["thisarchdoesntexist_64"], - "uri": "notthis"}, - {'arches': ["thisarchdoesntexist"], - "uri": "notthiseither"}], - "security": [{'arches': ["thisarchdoesntexist"], - "uri": "nothat"}, - {'arches': ["thisarchdoesntexist_64"], - "uri": "nothateither"}]} - - mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch) - - self.assertEqual(mirrors['MIRROR'], pmir) - self.assertEqual(mirrors['PRIMARY'], pmir) - self.assertEqual(mirrors['SECURITY'], smir) - - def test_apt_v3_mirror_search(self): - """test_apt_v3_mirror_search - Test searching mirrors in a list - mock checks to avoid relying on network connectivity""" - pmir = "http://us.archive.ubuntu.com/ubuntu/" - smir = "http://security.ubuntu.com/ubuntu/" - cfg = {"primary": [{'arches': ["default"], - "search": ["pfailme", pmir]}], - "security": [{'arches': ["default"], - "search": ["sfailme", smir]}]} - - with mock.patch.object(cc_apt_configure.util, 'search_for_mirror', - side_effect=[pmir, smir]) as mocksearch: - mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), - 'amd64') - - calls = [call(["pfailme", pmir]), - call(["sfailme", smir])] - mocksearch.assert_has_calls(calls) - - self.assertEqual(mirrors['MIRROR'], - pmir) - self.assertEqual(mirrors['PRIMARY'], - pmir) - self.assertEqual(mirrors['SECURITY'], - smir) - - def test_apt_v3_mirror_search_many2(self): - """test_apt_v3_mirror_search_many2 - Test both mirror specs at once""" - pmir = "http://us.archive.ubuntu.com/ubuntu/" - smir = 
"http://security.ubuntu.com/ubuntu/" - cfg = {"primary": [{'arches': ["default"], - "uri": pmir, - "search": ["pfailme", "foo"]}], - "security": [{'arches': ["default"], - "uri": smir, - "search": ["sfailme", "bar"]}]} - - arch = 'amd64' - - # should be called only once per type, despite two mirror configs - mycloud = None - with mock.patch.object(cc_apt_configure, 'get_mirror', - return_value="http://mocked/foo") as mockgm: - mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch) - calls = [call(cfg, 'primary', arch, mycloud), - call(cfg, 'security', arch, mycloud)] - mockgm.assert_has_calls(calls) - - # should not be called, since primary is specified - with mock.patch.object(cc_apt_configure.util, - 'search_for_mirror') as mockse: - mirrors = cc_apt_configure.find_apt_mirror_info( - cfg, FakeCloud(), arch) - mockse.assert_not_called() - - self.assertEqual(mirrors['MIRROR'], - pmir) - self.assertEqual(mirrors['PRIMARY'], - pmir) - self.assertEqual(mirrors['SECURITY'], - smir) - - def test_apt_v3_url_resolvable(self): - """test_apt_v3_url_resolvable - Test resolving urls""" - - with mock.patch.object(util, 'is_resolvable') as mockresolve: - util.is_resolvable_url("http://1.2.3.4/ubuntu") - mockresolve.assert_called_with("1.2.3.4") - - with mock.patch.object(util, 'is_resolvable') as mockresolve: - util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu") - mockresolve.assert_called_with("us.archive.ubuntu.com") - - # former tests can leave this set (or not if the test is ran directly) - # do a hard reset to ensure a stable result - util._DNS_REDIRECT_IP = None - bad = [(None, None, None, "badname", ["10.3.2.1"])] - good = [(None, None, None, "goodname", ["10.2.3.4"])] - with mock.patch.object(socket, 'getaddrinfo', - side_effect=[bad, bad, bad, good, - good]) as mocksock: - ret = util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu") - ret2 = util.is_resolvable_url("http://1.2.3.4/ubuntu") - mocksock.assert_any_call('does-not-exist.example.com.', None, - 0, 0, 1, 2) - mocksock.assert_any_call('example.invalid.', None, 0, 0, 1, 2) - mocksock.assert_any_call('us.archive.ubuntu.com', None) - mocksock.assert_any_call('1.2.3.4', None) - - self.assertTrue(ret) - self.assertTrue(ret2) - - # side effect need only bad ret after initial call - with mock.patch.object(socket, 'getaddrinfo', - side_effect=[bad]) as mocksock: - ret3 = util.is_resolvable_url("http://failme.com/ubuntu") - calls = [call('failme.com', None)] - mocksock.assert_has_calls(calls) - self.assertFalse(ret3) - - def test_apt_v3_disable_suites(self): - """test_disable_suites - disable_suites with many configurations""" - release = "xenial" - orig = """deb http://ubuntu.com//ubuntu xenial main -deb http://ubuntu.com//ubuntu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - - # disable nothing - disabled = [] - expect = """deb http://ubuntu.com//ubuntu xenial main -deb http://ubuntu.com//ubuntu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - # single disable release suite - disabled = ["$RELEASE"] - expect = """\ -# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu xenial main -deb http://ubuntu.com//ubuntu xenial-updates main -deb 
http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - # single disable other suite - disabled = ["$RELEASE-updates"] - expect = ("""deb http://ubuntu.com//ubuntu xenial main -# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu""" - """ xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""") - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - # multi disable - disabled = ["$RELEASE-updates", "$RELEASE-security"] - expect = ("""deb http://ubuntu.com//ubuntu xenial main -# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """ - """xenial-updates main -# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """ - """xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""") - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - # multi line disable (same suite multiple times in input) - disabled = ["$RELEASE-updates", "$RELEASE-security"] - orig = """deb http://ubuntu.com//ubuntu xenial main -deb http://ubuntu.com//ubuntu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://UBUNTU.com//ubuntu xenial-updates main -deb http://UBUNTU.COM//ubuntu xenial-updates main -deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - expect = ("""deb http://ubuntu.com//ubuntu xenial main -# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """ - """xenial-updates main -# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """ - """xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -# suite disabled by cloud-init: deb http://UBUNTU.com//ubuntu """ - """xenial-updates main -# suite disabled by cloud-init: deb http://UBUNTU.COM//ubuntu """ - """xenial-updates main -deb http://ubuntu.com/ubuntu/ xenial-proposed main""") - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - # comment in input - disabled = ["$RELEASE-updates", "$RELEASE-security"] - orig = """deb http://ubuntu.com//ubuntu xenial main -deb http://ubuntu.com//ubuntu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -#foo -#deb http://UBUNTU.com//ubuntu xenial-updates main -deb http://UBUNTU.COM//ubuntu xenial-updates main -deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - expect = ("""deb http://ubuntu.com//ubuntu xenial main -# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """ - """xenial-updates main -# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """ - """xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -#foo -#deb http://UBUNTU.com//ubuntu xenial-updates main -# suite disabled by cloud-init: deb http://UBUNTU.COM//ubuntu """ - """xenial-updates main -deb http://ubuntu.com/ubuntu/ xenial-proposed main""") - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - # single disable custom suite - disabled = ["foobar"] - orig = """deb 
http://ubuntu.com//ubuntu xenial main -deb http://ubuntu.com//ubuntu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb http://ubuntu.com/ubuntu/ foobar main""" - expect = """deb http://ubuntu.com//ubuntu xenial main -deb http://ubuntu.com//ubuntu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -# suite disabled by cloud-init: deb http://ubuntu.com/ubuntu/ foobar main""" - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - # single disable non existing suite - disabled = ["foobar"] - orig = """deb http://ubuntu.com//ubuntu xenial main -deb http://ubuntu.com//ubuntu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb http://ubuntu.com/ubuntu/ notfoobar main""" - expect = """deb http://ubuntu.com//ubuntu xenial main -deb http://ubuntu.com//ubuntu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb http://ubuntu.com/ubuntu/ notfoobar main""" - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - # single disable suite with option - disabled = ["$RELEASE-updates"] - orig = """deb http://ubuntu.com//ubuntu xenial main -deb [a=b] http://ubu.com//ubu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - expect = ("""deb http://ubuntu.com//ubuntu xenial main -# suite disabled by cloud-init: deb [a=b] http://ubu.com//ubu """ - """xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""") - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - # single disable suite with more options and auto $RELEASE expansion - disabled = ["updates"] - orig = """deb http://ubuntu.com//ubuntu xenial main -deb [a=b c=d] http://ubu.com//ubu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - expect = """deb http://ubuntu.com//ubuntu xenial main -# suite disabled by cloud-init: deb [a=b c=d] \ -http://ubu.com//ubu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - # single disable suite while options at others - disabled = ["$RELEASE-security"] - orig = """deb http://ubuntu.com//ubuntu xenial main -deb [arch=foo] http://ubuntu.com//ubuntu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - expect = ("""deb http://ubuntu.com//ubuntu xenial main -deb [arch=foo] http://ubuntu.com//ubuntu xenial-updates main -# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """ - """xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""") - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - def test_disable_suites_blank_lines(self): - """test_disable_suites_blank_lines - ensure blank lines allowed""" - 
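# The disable_suites() cases above reduce to a small, self-contained
# sketch (same three-argument signature used throughout this class):
#
#     src = ("deb http://archive.ubuntu.com/ubuntu xenial main\n"
#            "deb http://archive.ubuntu.com/ubuntu xenial-updates main")
#     out = cc_apt_configure.disable_suites(["updates"], src, "xenial")
#
# The bare token 'updates' expands to 'xenial-updates' for the given
# release, and the matching line is kept but prefixed with
# '# suite disabled by cloud-init: ' rather than removed.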
lines = ["deb %(repo)s %(rel)s main universe", - "", - "deb %(repo)s %(rel)s-updates main universe", - " # random comment", - "#comment here", - ""] - rel = "trusty" - repo = 'http://example.com/mirrors/ubuntu' - orig = "\n".join(lines) % {'repo': repo, 'rel': rel} - self.assertEqual( - orig, cc_apt_configure.disable_suites(["proposed"], orig, rel)) - - @mock.patch("cloudinit.util.get_hostname", return_value='abc.localdomain') - def test_apt_v3_mirror_search_dns(self, m_get_hostname): - """test_apt_v3_mirror_search_dns - Test searching dns patterns""" - pmir = "phit" - smir = "shit" - arch = 'amd64' - mycloud = get_cloud('ubuntu') - cfg = {"primary": [{'arches': ["default"], - "search_dns": True}], - "security": [{'arches': ["default"], - "search_dns": True}]} - - with mock.patch.object(cc_apt_configure, 'get_mirror', - return_value="http://mocked/foo") as mockgm: - mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch) - calls = [call(cfg, 'primary', arch, mycloud), - call(cfg, 'security', arch, mycloud)] - mockgm.assert_has_calls(calls) - - with mock.patch.object(cc_apt_configure, 'search_for_mirror_dns', - return_value="http://mocked/foo") as mocksdns: - mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch) - calls = [call(True, 'primary', cfg, mycloud), - call(True, 'security', cfg, mycloud)] - mocksdns.assert_has_calls(calls) - - # first return is for the non-dns call before - with mock.patch.object(cc_apt_configure.util, 'search_for_mirror', - side_effect=[None, pmir, None, smir]) as mockse: - mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch) - - calls = [call(None), - call(['http://ubuntu-mirror.localdomain/ubuntu', - 'http://ubuntu-mirror/ubuntu']), - call(None), - call(['http://ubuntu-security-mirror.localdomain/ubuntu', - 'http://ubuntu-security-mirror/ubuntu'])] - mockse.assert_has_calls(calls) - - self.assertEqual(mirrors['MIRROR'], - pmir) - self.assertEqual(mirrors['PRIMARY'], - pmir) - self.assertEqual(mirrors['SECURITY'], - smir) - - def test_apt_v3_add_mirror_keys(self): - """test_apt_v3_add_mirror_keys - Test adding key for mirrors""" - arch = 'amd64' - cfg = { - 'primary': [ - {'arches': [arch], - 'uri': 'http://test.ubuntu.com/', - 'filename': 'primary', - 'key': 'fakekey_primary'}], - 'security': [ - {'arches': [arch], - 'uri': 'http://testsec.ubuntu.com/', - 'filename': 'security', - 'key': 'fakekey_security'}] - } - - with mock.patch.object(cc_apt_configure, - 'add_apt_key_raw') as mockadd: - cc_apt_configure.add_mirror_keys(cfg, TARGET) - calls = [ - mock.call('fakekey_primary', 'primary', hardened=False), - mock.call('fakekey_security', 'security', hardened=False), - ] - mockadd.assert_has_calls(calls, any_order=True) - - -class TestDebconfSelections(TestCase): - - @mock.patch("cloudinit.config.cc_apt_configure.subp.subp") - def test_set_sel_appends_newline_if_absent(self, m_subp): - """Automatically append a newline to debconf-set-selections config.""" - selections = b'some/setting boolean true' - cc_apt_configure.debconf_set_selections(selections=selections) - cc_apt_configure.debconf_set_selections(selections=selections + b'\n') - m_call = mock.call( - ['debconf-set-selections'], data=selections + b'\n', capture=True, - target=None) - self.assertEqual([m_call, m_call], m_subp.call_args_list) - - @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections") - def test_no_set_sel_if_none_to_set(self, m_set_sel): - cc_apt_configure.apply_debconf_selections({'foo': 'bar'}) - m_set_sel.assert_not_called() - - 
@mock.patch("cloudinit.config.cc_apt_configure." - "debconf_set_selections") - @mock.patch("cloudinit.config.cc_apt_configure." - "util.get_installed_packages") - def test_set_sel_call_has_expected_input(self, m_get_inst, m_set_sel): - data = { - 'set1': 'pkga pkga/q1 mybool false', - 'set2': ('pkgb\tpkgb/b1\tstr\tthis is a string\n' - 'pkgc\tpkgc/ip\tstring\t10.0.0.1')} - lines = '\n'.join(data.values()).split('\n') - - m_get_inst.return_value = ["adduser", "apparmor"] - m_set_sel.return_value = None - - cc_apt_configure.apply_debconf_selections({'debconf_selections': data}) - self.assertTrue(m_get_inst.called) - self.assertEqual(m_set_sel.call_count, 1) - - # assumes called with *args value. - selections = m_set_sel.call_args_list[0][0][0].decode() - - missing = [ - line for line in lines if line not in selections.splitlines() - ] - self.assertEqual([], missing) - - @mock.patch("cloudinit.config.cc_apt_configure.dpkg_reconfigure") - @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections") - @mock.patch("cloudinit.config.cc_apt_configure." - "util.get_installed_packages") - def test_reconfigure_if_intersection(self, m_get_inst, m_set_sel, - m_dpkg_r): - data = { - 'set1': 'pkga pkga/q1 mybool false', - 'set2': ('pkgb\tpkgb/b1\tstr\tthis is a string\n' - 'pkgc\tpkgc/ip\tstring\t10.0.0.1'), - 'cloud-init': ('cloud-init cloud-init/datasources' - 'multiselect MAAS')} - - m_set_sel.return_value = None - m_get_inst.return_value = ["adduser", "apparmor", "pkgb", - "cloud-init", 'zdog'] - - cc_apt_configure.apply_debconf_selections({'debconf_selections': data}) - - # reconfigure should be called with the intersection - # of (packages in config, packages installed) - self.assertEqual(m_dpkg_r.call_count, 1) - # assumes called with *args (dpkg_reconfigure([a,b,c], target=)) - packages = m_dpkg_r.call_args_list[0][0][0] - self.assertEqual(set(['cloud-init', 'pkgb']), set(packages)) - - @mock.patch("cloudinit.config.cc_apt_configure.dpkg_reconfigure") - @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections") - @mock.patch("cloudinit.config.cc_apt_configure." - "util.get_installed_packages") - def test_reconfigure_if_no_intersection(self, m_get_inst, m_set_sel, - m_dpkg_r): - data = {'set1': 'pkga pkga/q1 mybool false'} - - m_get_inst.return_value = ["adduser", "apparmor", "pkgb", - "cloud-init", 'zdog'] - m_set_sel.return_value = None - - cc_apt_configure.apply_debconf_selections({'debconf_selections': data}) - - self.assertTrue(m_get_inst.called) - self.assertEqual(m_dpkg_r.call_count, 0) - - @mock.patch("cloudinit.config.cc_apt_configure.subp.subp") - def test_dpkg_reconfigure_does_reconfigure(self, m_subp): - target = "/foo-target" - - # due to the way the cleaners are called (via dictionary reference) - # mocking clean_cloud_init directly does not work. So we mock - # the CONFIG_CLEANERS dictionary and assert our cleaner is called. - ci_cleaner = mock.MagicMock() - with mock.patch.dict(("cloudinit.config.cc_apt_configure." 
- "CONFIG_CLEANERS"), - values={'cloud-init': ci_cleaner}, clear=True): - cc_apt_configure.dpkg_reconfigure(['pkga', 'cloud-init'], - target=target) - # cloud-init is actually the only package we have a cleaner for - # so for now, its the only one that should reconfigured - self.assertTrue(m_subp.called) - ci_cleaner.assert_called_with(target) - self.assertEqual(m_subp.call_count, 1) - found = m_subp.call_args_list[0][0][0] - expected = ['dpkg-reconfigure', '--frontend=noninteractive', - 'cloud-init'] - self.assertEqual(expected, found) - - @mock.patch("cloudinit.config.cc_apt_configure.subp.subp") - def test_dpkg_reconfigure_not_done_on_no_data(self, m_subp): - cc_apt_configure.dpkg_reconfigure([]) - m_subp.assert_not_called() - - @mock.patch("cloudinit.config.cc_apt_configure.subp.subp") - def test_dpkg_reconfigure_not_done_if_no_cleaners(self, m_subp): - cc_apt_configure.dpkg_reconfigure(['pkgfoo', 'pkgbar']) - m_subp.assert_not_called() - -# -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_bootcmd.py b/tests/unittests/test_handler/test_handler_bootcmd.py deleted file mode 100644 index 8cd3a5e1..00000000 --- a/tests/unittests/test_handler/test_handler_bootcmd.py +++ /dev/null @@ -1,152 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. -import logging -import tempfile - -from cloudinit.config.cc_bootcmd import handle, schema -from cloudinit import (subp, util) -from cloudinit.tests.helpers import ( - CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema) - -from tests.unittests.util import get_cloud - -LOG = logging.getLogger(__name__) - - -class FakeExtendedTempFile(object): - def __init__(self, suffix): - self.suffix = suffix - self.handle = tempfile.NamedTemporaryFile( - prefix="ci-%s." % self.__class__.__name__, delete=False) - - def __enter__(self): - return self.handle - - def __exit__(self, exc_type, exc_value, traceback): - self.handle.close() - util.del_file(self.handle.name) - - -class TestBootcmd(CiTestCase): - - with_logs = True - - _etmpfile_path = ('cloudinit.config.cc_bootcmd.temp_utils.' - 'ExtendedTemporaryFile') - - def setUp(self): - super(TestBootcmd, self).setUp() - self.subp = subp.subp - self.new_root = self.tmp_dir() - - def test_handler_skip_if_no_bootcmd(self): - """When the provided config doesn't contain bootcmd, skip it.""" - cfg = {} - mycloud = get_cloud() - handle('notimportant', cfg, mycloud, LOG, None) - self.assertIn( - "Skipping module named notimportant, no 'bootcmd' key", - self.logs.getvalue()) - - def test_handler_invalid_command_set(self): - """Commands which can't be converted to shell will raise errors.""" - invalid_config = {'bootcmd': 1} - cc = get_cloud() - with self.assertRaises(TypeError) as context_manager: - handle('cc_bootcmd', invalid_config, cc, LOG, []) - self.assertIn('Failed to shellify bootcmd', self.logs.getvalue()) - self.assertEqual( - "Input to shellify was type 'int'. Expected list or tuple.", - str(context_manager.exception)) - - @skipUnlessJsonSchema() - def test_handler_schema_validation_warns_non_array_type(self): - """Schema validation warns of non-array type for bootcmd key. - - Schema validation is not strict, so bootcmd attempts to shellify the - invalid content. 
- """ - invalid_config = {'bootcmd': 1} - cc = get_cloud() - with self.assertRaises(TypeError): - handle('cc_bootcmd', invalid_config, cc, LOG, []) - self.assertIn( - 'Invalid config:\nbootcmd: 1 is not of type \'array\'', - self.logs.getvalue()) - self.assertIn('Failed to shellify', self.logs.getvalue()) - - @skipUnlessJsonSchema() - def test_handler_schema_validation_warns_non_array_item_type(self): - """Schema validation warns of non-array or string bootcmd items. - - Schema validation is not strict, so bootcmd attempts to shellify the - invalid content. - """ - invalid_config = { - 'bootcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]} - cc = get_cloud() - with self.assertRaises(TypeError) as context_manager: - handle('cc_bootcmd', invalid_config, cc, LOG, []) - expected_warnings = [ - 'bootcmd.1: 20 is not valid under any of the given schemas', - 'bootcmd.3: {\'a\': \'n\'} is not valid under any of the given' - ' schema' - ] - logs = self.logs.getvalue() - for warning in expected_warnings: - self.assertIn(warning, logs) - self.assertIn('Failed to shellify', logs) - self.assertEqual( - ("Unable to shellify type 'int'. Expected list, string, tuple. " - "Got: 20"), - str(context_manager.exception)) - - def test_handler_creates_and_runs_bootcmd_script_with_instance_id(self): - """Valid schema runs a bootcmd script with INSTANCE_ID in the env.""" - cc = get_cloud() - out_file = self.tmp_path('bootcmd.out', self.new_root) - my_id = "b6ea0f59-e27d-49c6-9f87-79f19765a425" - valid_config = {'bootcmd': [ - 'echo {0} $INSTANCE_ID > {1}'.format(my_id, out_file)]} - - with mock.patch(self._etmpfile_path, FakeExtendedTempFile): - with self.allow_subp(['/bin/sh']): - handle('cc_bootcmd', valid_config, cc, LOG, []) - self.assertEqual(my_id + ' iid-datasource-none\n', - util.load_file(out_file)) - - def test_handler_runs_bootcmd_script_with_error(self): - """When a valid script generates an error, that error is raised.""" - cc = get_cloud() - valid_config = {'bootcmd': ['exit 1']} # Script with error - - with mock.patch(self._etmpfile_path, FakeExtendedTempFile): - with self.allow_subp(['/bin/sh']): - with self.assertRaises(subp.ProcessExecutionError) as ctxt: - handle('does-not-matter', valid_config, cc, LOG, []) - self.assertIn( - 'Unexpected error while running command.\n' - "Command: ['/bin/sh',", - str(ctxt.exception)) - self.assertIn( - 'Failed to run bootcmd module does-not-matter', - self.logs.getvalue()) - - -@skipUnlessJsonSchema() -class TestSchema(CiTestCase, SchemaTestCaseMixin): - """Directly test schema rather than through handle.""" - - schema = schema - - def test_duplicates_are_fine_array_array(self): - """Duplicated commands array/array entries are allowed.""" - self.assertSchemaValid( - ["byebye", "byebye"], 'command entries can be duplicate') - - def test_duplicates_are_fine_array_string(self): - """Duplicated commands array/string entries are allowed.""" - self.assertSchemaValid( - ["echo bye", "echo bye"], "command entries can be duplicate.") - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py deleted file mode 100644 index 2a4ab49e..00000000 --- a/tests/unittests/test_handler/test_handler_ca_certs.py +++ /dev/null @@ -1,361 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
-import logging -import shutil -import tempfile -import unittest -from contextlib import ExitStack -from unittest import mock - -from cloudinit import distros -from cloudinit.config import cc_ca_certs -from cloudinit import helpers -from cloudinit import subp -from cloudinit import util -from cloudinit.tests.helpers import TestCase - -from tests.unittests.util import get_cloud - - -class TestNoConfig(unittest.TestCase): - def setUp(self): - super(TestNoConfig, self).setUp() - self.name = "ca-certs" - self.cloud_init = None - self.log = logging.getLogger("TestNoConfig") - self.args = [] - - def test_no_config(self): - """ - Test that nothing is done if no ca-certs configuration is provided. - """ - config = util.get_builtin_cfg() - with ExitStack() as mocks: - util_mock = mocks.enter_context( - mock.patch.object(util, 'write_file')) - certs_mock = mocks.enter_context( - mock.patch.object(cc_ca_certs, 'update_ca_certs')) - - cc_ca_certs.handle(self.name, config, self.cloud_init, self.log, - self.args) - - self.assertEqual(util_mock.call_count, 0) - self.assertEqual(certs_mock.call_count, 0) - - -class TestConfig(TestCase): - def setUp(self): - super(TestConfig, self).setUp() - self.name = "ca-certs" - self.paths = None - self.log = logging.getLogger("TestNoConfig") - self.args = [] - - def _fetch_distro(self, kind): - cls = distros.fetch(kind) - paths = helpers.Paths({}) - return cls(kind, {}, paths) - - def _mock_init(self): - self.mocks = ExitStack() - self.addCleanup(self.mocks.close) - - # Mock out the functions that actually modify the system - self.mock_add = self.mocks.enter_context( - mock.patch.object(cc_ca_certs, 'add_ca_certs')) - self.mock_update = self.mocks.enter_context( - mock.patch.object(cc_ca_certs, 'update_ca_certs')) - self.mock_remove = self.mocks.enter_context( - mock.patch.object(cc_ca_certs, 'remove_default_ca_certs')) - - def test_no_trusted_list(self): - """ - Test that no certificates are written if the 'trusted' key is not - present. 
- """ - config = {"ca-certs": {}} - - for distro_name in cc_ca_certs.distros: - self._mock_init() - cloud = get_cloud(distro_name) - cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - - self.assertEqual(self.mock_add.call_count, 0) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) - - def test_empty_trusted_list(self): - """Test that no certificate are written if 'trusted' list is empty.""" - config = {"ca-certs": {"trusted": []}} - - for distro_name in cc_ca_certs.distros: - self._mock_init() - cloud = get_cloud(distro_name) - cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - - self.assertEqual(self.mock_add.call_count, 0) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) - - def test_single_trusted(self): - """Test that a single cert gets passed to add_ca_certs.""" - config = {"ca-certs": {"trusted": ["CERT1"]}} - - for distro_name in cc_ca_certs.distros: - self._mock_init() - cloud = get_cloud(distro_name) - conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - - self.mock_add.assert_called_once_with(conf, ['CERT1']) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) - - def test_multiple_trusted(self): - """Test that multiple certs get passed to add_ca_certs.""" - config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}} - - for distro_name in cc_ca_certs.distros: - self._mock_init() - cloud = get_cloud(distro_name) - conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - - self.mock_add.assert_called_once_with(conf, ['CERT1', 'CERT2']) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) - - def test_remove_default_ca_certs(self): - """Test remove_defaults works as expected.""" - config = {"ca-certs": {"remove-defaults": True}} - - for distro_name in cc_ca_certs.distros: - self._mock_init() - cloud = get_cloud(distro_name) - cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - - self.assertEqual(self.mock_add.call_count, 0) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 1) - - def test_no_remove_defaults_if_false(self): - """Test remove_defaults is not called when config value is False.""" - config = {"ca-certs": {"remove-defaults": False}} - - for distro_name in cc_ca_certs.distros: - self._mock_init() - cloud = get_cloud(distro_name) - cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - - self.assertEqual(self.mock_add.call_count, 0) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) - - def test_correct_order_for_remove_then_add(self): - """Test remove_defaults is not called when config value is False.""" - config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}} - - for distro_name in cc_ca_certs.distros: - self._mock_init() - cloud = get_cloud(distro_name) - conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - - self.mock_add.assert_called_once_with(conf, ['CERT1']) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 1) - - -class TestAddCaCerts(TestCase): - - def setUp(self): - super(TestAddCaCerts, self).setUp() - tmpdir = 
tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, tmpdir) - self.paths = helpers.Paths({ - 'cloud_dir': tmpdir, - }) - self.add_patch("cloudinit.config.cc_ca_certs.os.stat", "m_stat") - - def _fetch_distro(self, kind): - cls = distros.fetch(kind) - paths = helpers.Paths({}) - return cls(kind, {}, paths) - - def test_no_certs_in_list(self): - """Test that no certificate are written if not provided.""" - for distro_name in cc_ca_certs.distros: - conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - with mock.patch.object(util, 'write_file') as mockobj: - cc_ca_certs.add_ca_certs(conf, []) - self.assertEqual(mockobj.call_count, 0) - - def test_single_cert_trailing_cr(self): - """Test adding a single certificate to the trusted CAs - when existing ca-certificates has trailing newline""" - cert = "CERT1\nLINE2\nLINE3" - - ca_certs_content = "line1\nline2\ncloud-init-ca-certs.crt\nline3\n" - expected = "line1\nline2\nline3\ncloud-init-ca-certs.crt\n" - - self.m_stat.return_value.st_size = 1 - - for distro_name in cc_ca_certs.distros: - conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - - with ExitStack() as mocks: - mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) - mock_load = mocks.enter_context( - mock.patch.object(util, 'load_file', - return_value=ca_certs_content)) - - cc_ca_certs.add_ca_certs(conf, [cert]) - - mock_write.assert_has_calls([ - mock.call(conf['ca_cert_full_path'], - cert, mode=0o644)]) - if conf['ca_cert_config'] is not None: - mock_write.assert_has_calls([ - mock.call(conf['ca_cert_config'], - expected, omode="wb")]) - mock_load.assert_called_once_with(conf['ca_cert_config']) - - def test_single_cert_no_trailing_cr(self): - """Test adding a single certificate to the trusted CAs - when existing ca-certificates has no trailing newline""" - cert = "CERT1\nLINE2\nLINE3" - - ca_certs_content = "line1\nline2\nline3" - - self.m_stat.return_value.st_size = 1 - - for distro_name in cc_ca_certs.distros: - conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - - with ExitStack() as mocks: - mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) - mock_load = mocks.enter_context( - mock.patch.object(util, 'load_file', - return_value=ca_certs_content)) - - cc_ca_certs.add_ca_certs(conf, [cert]) - - mock_write.assert_has_calls([ - mock.call(conf['ca_cert_full_path'], - cert, mode=0o644)]) - if conf['ca_cert_config'] is not None: - mock_write.assert_has_calls([ - mock.call(conf['ca_cert_config'], - "%s\n%s\n" % (ca_certs_content, - conf['ca_cert_filename']), - omode="wb")]) - - mock_load.assert_called_once_with(conf['ca_cert_config']) - - def test_single_cert_to_empty_existing_ca_file(self): - """Test adding a single certificate to the trusted CAs - when existing ca-certificates.conf is empty""" - cert = "CERT1\nLINE2\nLINE3" - - expected = "cloud-init-ca-certs.crt\n" - - self.m_stat.return_value.st_size = 0 - - for distro_name in cc_ca_certs.distros: - conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - with mock.patch.object(util, 'write_file', - autospec=True) as m_write: - - cc_ca_certs.add_ca_certs(conf, [cert]) - - m_write.assert_has_calls([ - mock.call(conf['ca_cert_full_path'], - cert, mode=0o644)]) - if conf['ca_cert_config'] is not None: - m_write.assert_has_calls([ - mock.call(conf['ca_cert_config'], - expected, omode="wb")]) - - def test_multiple_certs(self): - """Test adding multiple certificates to the trusted CAs.""" - certs = ["CERT1\nLINE2\nLINE3", "CERT2\nLINE2\nLINE3"] - expected_cert_file = 
"\n".join(certs) - ca_certs_content = "line1\nline2\nline3" - - self.m_stat.return_value.st_size = 1 - - for distro_name in cc_ca_certs.distros: - conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - - with ExitStack() as mocks: - mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) - mock_load = mocks.enter_context( - mock.patch.object(util, 'load_file', - return_value=ca_certs_content)) - - cc_ca_certs.add_ca_certs(conf, certs) - - mock_write.assert_has_calls([ - mock.call(conf['ca_cert_full_path'], - expected_cert_file, mode=0o644)]) - if conf['ca_cert_config'] is not None: - mock_write.assert_has_calls([ - mock.call(conf['ca_cert_config'], - "%s\n%s\n" % (ca_certs_content, - conf['ca_cert_filename']), - omode='wb')]) - - mock_load.assert_called_once_with(conf['ca_cert_config']) - - -class TestUpdateCaCerts(unittest.TestCase): - def test_commands(self): - for distro_name in cc_ca_certs.distros: - conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - with mock.patch.object(subp, 'subp') as mockobj: - cc_ca_certs.update_ca_certs(conf) - mockobj.assert_called_once_with( - conf['ca_cert_update_cmd'], capture=False) - - -class TestRemoveDefaultCaCerts(TestCase): - - def setUp(self): - super(TestRemoveDefaultCaCerts, self).setUp() - tmpdir = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, tmpdir) - self.paths = helpers.Paths({ - 'cloud_dir': tmpdir, - }) - - def test_commands(self): - for distro_name in cc_ca_certs.distros: - conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - - with ExitStack() as mocks: - mock_delete = mocks.enter_context( - mock.patch.object(util, 'delete_dir_contents')) - mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) - mock_subp = mocks.enter_context( - mock.patch.object(subp, 'subp')) - - cc_ca_certs.remove_default_ca_certs(distro_name, conf) - - mock_delete.assert_has_calls([ - mock.call(conf['ca_cert_path']), - mock.call(conf['ca_cert_system_path'])]) - - if conf['ca_cert_config'] is not None: - mock_write.assert_called_once_with( - conf['ca_cert_config'], "", mode=0o644) - - if distro_name in ['debian', 'ubuntu']: - mock_subp.assert_called_once_with( - ('debconf-set-selections', '-'), - "ca-certificates \ -ca-certificates/trust_new_crts select no") - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py deleted file mode 100644 index 0672cebc..00000000 --- a/tests/unittests/test_handler/test_handler_chef.py +++ /dev/null @@ -1,271 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import httpretty -import json -import logging -import os - -from cloudinit.config import cc_chef -from cloudinit import util - -from cloudinit.tests.helpers import ( - HttprettyTestCase, FilesystemMockingTestCase, mock, skipIf) - -from tests.unittests.util import get_cloud - -LOG = logging.getLogger(__name__) - -CLIENT_TEMPL = os.path.sep.join(["templates", "chef_client.rb.tmpl"]) - -# This is adjusted to use http because using with https causes issue -# in some openssl/httpretty combinations. 
-# https://github.com/gabrielfalcao/HTTPretty/issues/242 -# We saw issue in opensuse 42.3 with -# httpretty=0.8.8-7.1 ndg-httpsclient=0.4.0-3.2 pyOpenSSL=16.0.0-4.1 -OMNIBUS_URL_HTTP = cc_chef.OMNIBUS_URL.replace("https:", "http:") - - -class TestInstallChefOmnibus(HttprettyTestCase): - - def setUp(self): - super(TestInstallChefOmnibus, self).setUp() - self.new_root = self.tmp_dir() - - @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP) - def test_install_chef_from_omnibus_runs_chef_url_content(self): - """install_chef_from_omnibus calls subp_blob_in_tempfile.""" - response = b'#!/bin/bash\necho "Hi Mom"' - httpretty.register_uri( - httpretty.GET, cc_chef.OMNIBUS_URL, body=response, status=200) - ret = (None, None) # stdout, stderr but capture=False - - with mock.patch("cloudinit.config.cc_chef.subp_blob_in_tempfile", - return_value=ret) as m_subp_blob: - cc_chef.install_chef_from_omnibus() - # admittedly whitebox, but assuming subp_blob_in_tempfile works - # this should be fine. - self.assertEqual( - [mock.call(blob=response, args=[], basename='chef-omnibus-install', - capture=False)], - m_subp_blob.call_args_list) - - @mock.patch('cloudinit.config.cc_chef.url_helper.readurl') - @mock.patch('cloudinit.config.cc_chef.subp_blob_in_tempfile') - def test_install_chef_from_omnibus_retries_url(self, m_subp_blob, m_rdurl): - """install_chef_from_omnibus retries OMNIBUS_URL upon failure.""" - - class FakeURLResponse(object): - contents = '#!/bin/bash\necho "Hi Mom" > {0}/chef.out'.format( - self.new_root) - - m_rdurl.return_value = FakeURLResponse() - - cc_chef.install_chef_from_omnibus() - expected_kwargs = {'retries': cc_chef.OMNIBUS_URL_RETRIES, - 'url': cc_chef.OMNIBUS_URL} - self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[0][1]) - cc_chef.install_chef_from_omnibus(retries=10) - expected_kwargs = {'retries': 10, - 'url': cc_chef.OMNIBUS_URL} - self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[1][1]) - expected_subp_kwargs = { - 'args': ['-v', '2.0'], - 'basename': 'chef-omnibus-install', - 'blob': m_rdurl.return_value.contents, - 'capture': False - } - self.assertCountEqual( - expected_subp_kwargs, - m_subp_blob.call_args_list[0][1]) - - @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP) - @mock.patch('cloudinit.config.cc_chef.subp_blob_in_tempfile') - def test_install_chef_from_omnibus_has_omnibus_version(self, m_subp_blob): - """install_chef_from_omnibus provides version arg to OMNIBUS_URL.""" - chef_outfile = self.tmp_path('chef.out', self.new_root) - response = '#!/bin/bash\necho "Hi Mom" > {0}'.format(chef_outfile) - httpretty.register_uri( - httpretty.GET, cc_chef.OMNIBUS_URL, body=response) - cc_chef.install_chef_from_omnibus(omnibus_version='2.0') - - called_kwargs = m_subp_blob.call_args_list[0][1] - expected_kwargs = { - 'args': ['-v', '2.0'], - 'basename': 'chef-omnibus-install', - 'blob': response, - 'capture': False - } - self.assertCountEqual(expected_kwargs, called_kwargs) - - -class TestChef(FilesystemMockingTestCase): - - def setUp(self): - super(TestChef, self).setUp() - self.tmp = self.tmp_dir() - - def test_no_config(self): - self.patchUtils(self.tmp) - self.patchOS(self.tmp) - - cfg = {} - cc_chef.handle('chef', cfg, get_cloud(), LOG, []) - for d in cc_chef.CHEF_DIRS: - self.assertFalse(os.path.isdir(d)) - - @skipIf(not os.path.isfile(CLIENT_TEMPL), - CLIENT_TEMPL + " is not available") - def test_basic_config(self): - """ - test basic config looks sane - - # This should create a file of the format... 
- # Created by cloud-init v. 0.7.6 on Sat, 11 Oct 2014 23:57:21 +0000 - chef_license "accept" - log_level :info - ssl_verify_mode :verify_none - log_location "/var/log/chef/client.log" - validation_client_name "bob" - validation_key "/etc/chef/validation.pem" - client_key "/etc/chef/client.pem" - chef_server_url "localhost" - environment "_default" - node_name "iid-datasource-none" - json_attribs "/etc/chef/firstboot.json" - file_cache_path "/var/cache/chef" - file_backup_path "/var/backups/chef" - pid_file "/var/run/chef/client.pid" - Chef::Log::Formatter.show_time = true - encrypted_data_bag_secret "/etc/chef/encrypted_data_bag_secret" - """ - tpl_file = util.load_file('templates/chef_client.rb.tmpl') - self.patchUtils(self.tmp) - self.patchOS(self.tmp) - - util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file) - cfg = { - 'chef': { - 'chef_license': "accept", - 'server_url': 'localhost', - 'validation_name': 'bob', - 'validation_key': "/etc/chef/vkey.pem", - 'validation_cert': "this is my cert", - 'encrypted_data_bag_secret': - '/etc/chef/encrypted_data_bag_secret' - }, - } - cc_chef.handle('chef', cfg, get_cloud(), LOG, []) - for d in cc_chef.CHEF_DIRS: - self.assertTrue(os.path.isdir(d)) - c = util.load_file(cc_chef.CHEF_RB_PATH) - - # the content of these keys is not expected to be rendered to tmpl - unrendered_keys = ('validation_cert',) - for k, v in cfg['chef'].items(): - if k in unrendered_keys: - continue - self.assertIn(v, c) - for k, v in cc_chef.CHEF_RB_TPL_DEFAULTS.items(): - if k in unrendered_keys: - continue - # the value from the cfg overrides that in the default - val = cfg['chef'].get(k, v) - if isinstance(val, str): - self.assertIn(val, c) - c = util.load_file(cc_chef.CHEF_FB_PATH) - self.assertEqual({}, json.loads(c)) - - def test_firstboot_json(self): - self.patchUtils(self.tmp) - self.patchOS(self.tmp) - - cfg = { - 'chef': { - 'server_url': 'localhost', - 'validation_name': 'bob', - 'run_list': ['a', 'b', 'c'], - 'initial_attributes': { - 'c': 'd', - } - }, - } - cc_chef.handle('chef', cfg, get_cloud(), LOG, []) - c = util.load_file(cc_chef.CHEF_FB_PATH) - self.assertEqual( - { - 'run_list': ['a', 'b', 'c'], - 'c': 'd', - }, json.loads(c)) - - @skipIf(not os.path.isfile(CLIENT_TEMPL), - CLIENT_TEMPL + " is not available") - def test_template_deletes(self): - tpl_file = util.load_file('templates/chef_client.rb.tmpl') - self.patchUtils(self.tmp) - self.patchOS(self.tmp) - - util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file) - cfg = { - 'chef': { - 'server_url': 'localhost', - 'validation_name': 'bob', - 'json_attribs': None, - 'show_time': None, - }, - } - cc_chef.handle('chef', cfg, get_cloud(), LOG, []) - c = util.load_file(cc_chef.CHEF_RB_PATH) - self.assertNotIn('json_attribs', c) - self.assertNotIn('Formatter.show_time', c) - - @skipIf(not os.path.isfile(CLIENT_TEMPL), - CLIENT_TEMPL + " is not available") - def test_validation_cert_and_validation_key(self): - # test validation_cert content is written to validation_key path - tpl_file = util.load_file('templates/chef_client.rb.tmpl') - self.patchUtils(self.tmp) - self.patchOS(self.tmp) - - util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file) - v_path = '/etc/chef/vkey.pem' - v_cert = 'this is my cert' - cfg = { - 'chef': { - 'server_url': 'localhost', - 'validation_name': 'bob', - 'validation_key': v_path, - 'validation_cert': v_cert - }, - } - cc_chef.handle('chef', cfg, get_cloud(), LOG, []) - content = util.load_file(cc_chef.CHEF_RB_PATH) - 
self.assertIn(v_path, content) - util.load_file(v_path) - self.assertEqual(v_cert, util.load_file(v_path)) - - def test_validation_cert_with_system(self): - # test validation_cert content is not written over system file - tpl_file = util.load_file('templates/chef_client.rb.tmpl') - self.patchUtils(self.tmp) - self.patchOS(self.tmp) - - v_path = '/etc/chef/vkey.pem' - v_cert = "system" - expected_cert = "this is the system file certificate" - cfg = { - 'chef': { - 'server_url': 'localhost', - 'validation_name': 'bob', - 'validation_key': v_path, - 'validation_cert': v_cert - }, - } - util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file) - util.write_file(v_path, expected_cert) - cc_chef.handle('chef', cfg, get_cloud(), LOG, []) - content = util.load_file(cc_chef.CHEF_RB_PATH) - self.assertIn(v_path, content) - util.load_file(v_path) - self.assertEqual(expected_cert, util.load_file(v_path)) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_debug.py b/tests/unittests/test_handler/test_handler_debug.py deleted file mode 100644 index 41e9d9bd..00000000 --- a/tests/unittests/test_handler/test_handler_debug.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (C) 2014 Yahoo! Inc. -# -# This file is part of cloud-init. See LICENSE file for license information. -import logging -import shutil -import tempfile - -from cloudinit import util -from cloudinit.config import cc_debug -from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock) - -from tests.unittests.util import get_cloud - -LOG = logging.getLogger(__name__) - - -@mock.patch('cloudinit.distros.debian.read_system_locale') -class TestDebug(FilesystemMockingTestCase): - def setUp(self): - super(TestDebug, self).setUp() - self.new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.new_root) - self.patchUtils(self.new_root) - - def test_debug_write(self, m_locale): - m_locale.return_value = 'en_US.UTF-8' - cfg = { - 'abc': '123', - 'c': '\u20a0', - 'debug': { - 'verbose': True, - # Does not actually write here due to mocking... - 'output': '/var/log/cloud-init-debug.log', - }, - } - cc = get_cloud() - cc_debug.handle('cc_debug', cfg, cc, LOG, []) - contents = util.load_file('/var/log/cloud-init-debug.log') - # Some basic sanity tests... - self.assertNotEqual(0, len(contents)) - for k in cfg.keys(): - self.assertIn(k, contents) - - def test_debug_no_write(self, m_locale): - m_locale.return_value = 'en_US.UTF-8' - cfg = { - 'abc': '123', - 'debug': { - 'verbose': False, - # Does not actually write here due to mocking... - 'output': '/var/log/cloud-init-debug.log', - }, - } - cc = get_cloud() - cc_debug.handle('cc_debug', cfg, cc, LOG, []) - self.assertRaises(IOError, - util.load_file, '/var/log/cloud-init-debug.log') - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py deleted file mode 100644 index 4f4a57fa..00000000 --- a/tests/unittests/test_handler/test_handler_disk_setup.py +++ /dev/null @@ -1,243 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
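The TestGetPartitionMbrLayout cases further down assert layout strings such as ',500,83\n,,83' for a 50/50 split. A rough standalone sketch of the percentage arithmetic those assertions encode (mbr_layout is a hypothetical stand-in, not the cc_disk_setup implementation):

    def mbr_layout(disk_size, parts):
        # Each entry is a percentage or [percentage, mbr_type]; the type
        # defaults to 83 (Linux). The last partition's size is left blank
        # so the partitioner grows it to fill the disk.
        lines = []
        for i, part in enumerate(parts):
            pct, ptype = (part, 83) if isinstance(part, int) else part
            last = (i == len(parts) - 1)
            size = '' if last else int(float(disk_size) * pct / 100)
            lines.append(',{0},{1}'.format(size, ptype))
        return '\n'.join(lines)

    assert mbr_layout(1000, [50, 50]) == ',500,83\n,,83'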
- -import random - -from cloudinit.config import cc_disk_setup -from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, TestCase - - -class TestIsDiskUsed(TestCase): - - def setUp(self): - super(TestIsDiskUsed, self).setUp() - self.patches = ExitStack() - mod_name = 'cloudinit.config.cc_disk_setup' - self.enumerate_disk = self.patches.enter_context( - mock.patch('{0}.enumerate_disk'.format(mod_name))) - self.check_fs = self.patches.enter_context( - mock.patch('{0}.check_fs'.format(mod_name))) - - def tearDown(self): - super(TestIsDiskUsed, self).tearDown() - self.patches.close() - - def test_multiple_child_nodes_returns_true(self): - self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(2)) - self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock()) - self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock())) - - def test_valid_filesystem_returns_true(self): - self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1)) - self.check_fs.return_value = ( - mock.MagicMock(), 'ext4', mock.MagicMock()) - self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock())) - - def test_one_child_nodes_and_no_fs_returns_false(self): - self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1)) - self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock()) - self.assertFalse(cc_disk_setup.is_disk_used(mock.MagicMock())) - - -class TestGetMbrHddSize(TestCase): - - def setUp(self): - super(TestGetMbrHddSize, self).setUp() - self.patches = ExitStack() - self.subp = self.patches.enter_context( - mock.patch.object(cc_disk_setup.subp, 'subp')) - - def tearDown(self): - super(TestGetMbrHddSize, self).tearDown() - self.patches.close() - - def _configure_subp_mock(self, hdd_size_in_bytes, sector_size_in_bytes): - def _subp(cmd, *args, **kwargs): - self.assertEqual(3, len(cmd)) - if '--getsize64' in cmd: - return hdd_size_in_bytes, None - elif '--getss' in cmd: - return sector_size_in_bytes, None - raise Exception('Unexpected blockdev command called') - - self.subp.side_effect = _subp - - def _test_for_sector_size(self, sector_size): - size_in_bytes = random.randint(10000, 10000000) * 512 - size_in_sectors = size_in_bytes / sector_size - self._configure_subp_mock(size_in_bytes, sector_size) - self.assertEqual(size_in_sectors, - cc_disk_setup.get_hdd_size('/dev/sda1')) - - def test_size_for_512_byte_sectors(self): - self._test_for_sector_size(512) - - def test_size_for_1024_byte_sectors(self): - self._test_for_sector_size(1024) - - def test_size_for_2048_byte_sectors(self): - self._test_for_sector_size(2048) - - def test_size_for_4096_byte_sectors(self): - self._test_for_sector_size(4096) - - -class TestGetPartitionMbrLayout(TestCase): - - def test_single_partition_using_boolean(self): - self.assertEqual('0,', - cc_disk_setup.get_partition_mbr_layout(1000, True)) - - def test_single_partition_using_list(self): - disk_size = random.randint(1000000, 1000000000000) - self.assertEqual( - ',,83', - cc_disk_setup.get_partition_mbr_layout(disk_size, [100])) - - def test_half_and_half(self): - disk_size = random.randint(1000000, 1000000000000) - expected_partition_size = int(float(disk_size) / 2) - self.assertEqual( - ',{0},83\n,,83'.format(expected_partition_size), - cc_disk_setup.get_partition_mbr_layout(disk_size, [50, 50])) - - def test_thirds_with_different_partition_type(self): - disk_size = random.randint(1000000, 1000000000000) - expected_partition_size = int(float(disk_size) * 0.33) - self.assertEqual( - 
',{0},83\n,,82'.format(expected_partition_size), - cc_disk_setup.get_partition_mbr_layout(disk_size, [33, [66, 82]])) - - -class TestUpdateFsSetupDevices(TestCase): - def test_regression_1634678(self): - # Cf. https://bugs.launchpad.net/cloud-init/+bug/1634678 - fs_setup = { - 'partition': 'auto', - 'device': '/dev/xvdb1', - 'overwrite': False, - 'label': 'test', - 'filesystem': 'ext4' - } - - cc_disk_setup.update_fs_setup_devices([fs_setup], - lambda device: device) - - self.assertEqual({ - '_origname': '/dev/xvdb1', - 'partition': 'auto', - 'device': '/dev/xvdb1', - 'overwrite': False, - 'label': 'test', - 'filesystem': 'ext4' - }, fs_setup) - - def test_dotted_devname(self): - fs_setup = { - 'partition': 'auto', - 'device': 'ephemeral0.0', - 'label': 'test2', - 'filesystem': 'xfs' - } - - cc_disk_setup.update_fs_setup_devices([fs_setup], - lambda device: device) - - self.assertEqual({ - '_origname': 'ephemeral0.0', - '_partition': 'auto', - 'partition': '0', - 'device': 'ephemeral0', - 'label': 'test2', - 'filesystem': 'xfs' - }, fs_setup) - - def test_dotted_devname_populates_partition(self): - fs_setup = { - 'device': 'ephemeral0.1', - 'label': 'test2', - 'filesystem': 'xfs' - } - cc_disk_setup.update_fs_setup_devices([fs_setup], - lambda device: device) - self.assertEqual({ - '_origname': 'ephemeral0.1', - 'device': 'ephemeral0', - 'partition': '1', - 'label': 'test2', - 'filesystem': 'xfs' - }, fs_setup) - - -@mock.patch('cloudinit.config.cc_disk_setup.assert_and_settle_device', - return_value=None) -@mock.patch('cloudinit.config.cc_disk_setup.find_device_node', - return_value=('/dev/xdb1', False)) -@mock.patch('cloudinit.config.cc_disk_setup.device_type', return_value=None) -@mock.patch('cloudinit.config.cc_disk_setup.subp.subp', return_value=('', '')) -class TestMkfsCommandHandling(CiTestCase): - - with_logs = True - - def test_with_cmd(self, subp, *args): - """mkfs honors cmd and logs warnings when extra_opts or overwrite are - provided.""" - cc_disk_setup.mkfs({ - 'cmd': 'mkfs -t %(filesystem)s -L %(label)s %(device)s', - 'filesystem': 'ext4', - 'device': '/dev/xdb1', - 'label': 'with_cmd', - 'extra_opts': ['should', 'generate', 'warning'], - 'overwrite': 'should generate warning too' - }) - - self.assertIn( - 'extra_opts ' + - 'ignored because cmd was specified: mkfs -t ext4 -L with_cmd ' + - '/dev/xdb1', - self.logs.getvalue()) - self.assertIn( - 'overwrite ' + - 'ignored because cmd was specified: mkfs -t ext4 -L with_cmd ' + - '/dev/xdb1', - self.logs.getvalue()) - - subp.assert_called_once_with( - 'mkfs -t ext4 -L with_cmd /dev/xdb1', shell=True) - - @mock.patch('cloudinit.config.cc_disk_setup.subp.which') - def test_overwrite_and_extra_opts_without_cmd(self, m_which, subp, *args): - """mkfs observes extra_opts and overwrite settings when cmd is not - present.""" - m_which.side_effect = lambda p: {'mkfs.ext4': '/sbin/mkfs.ext4'}[p] - cc_disk_setup.mkfs({ - 'filesystem': 'ext4', - 'device': '/dev/xdb1', - 'label': 'without_cmd', - 'extra_opts': ['are', 'added'], - 'overwrite': True - }) - - subp.assert_called_once_with( - ['/sbin/mkfs.ext4', '/dev/xdb1', - '-L', 'without_cmd', '-F', 'are', 'added'], - shell=False) - - @mock.patch('cloudinit.config.cc_disk_setup.subp.which') - def test_mkswap(self, m_which, subp, *args): - """mkfs observes extra_opts and overwrite settings when cmd is not - present.""" - m_which.side_effect = iter([None, '/sbin/mkswap']) - cc_disk_setup.mkfs({ - 'filesystem': 'swap', - 'device': '/dev/xdb1', - 'label': 'swap', - 'overwrite': True, - }) - - 
self.assertEqual([mock.call('mkfs.swap'), mock.call('mkswap')],
-                         m_which.call_args_list)
-        subp.assert_called_once_with(
-            ['/sbin/mkswap', '/dev/xdb1', '-L', 'swap', '-f'], shell=False)
-
-#
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_etc_hosts.py b/tests/unittests/test_handler/test_handler_etc_hosts.py
deleted file mode 100644
index e3778b11..00000000
--- a/tests/unittests/test_handler/test_handler_etc_hosts.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.config import cc_update_etc_hosts
-
-from cloudinit import cloud
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import util
-
-from cloudinit.tests import helpers as t_help
-
-import logging
-import os
-import shutil
-
-LOG = logging.getLogger(__name__)
-
-
-class TestHostsFile(t_help.FilesystemMockingTestCase):
-    def setUp(self):
-        super(TestHostsFile, self).setUp()
-        self.tmp = self.tmp_dir()
-
-    def _fetch_distro(self, kind):
-        cls = distros.fetch(kind)
-        paths = helpers.Paths({})
-        return cls(kind, {}, paths)
-
-    def test_write_etc_hosts_suse_localhost(self):
-        cfg = {
-            'manage_etc_hosts': 'localhost',
-            'hostname': 'cloud-init.test.us'
-        }
-        os.makedirs('%s/etc/' % self.tmp)
-        hosts_content = '192.168.1.1 blah.blah.us blah\n'
-        fout = open('%s/etc/hosts' % self.tmp, 'w')
-        fout.write(hosts_content)
-        fout.close()
-        distro = self._fetch_distro('sles')
-        distro.hosts_fn = '%s/etc/hosts' % self.tmp
-        paths = helpers.Paths({})
-        ds = None
-        cc = cloud.Cloud(ds, paths, {}, distro, None)
-        self.patchUtils(self.tmp)
-        cc_update_etc_hosts.handle('test', cfg, cc, LOG, [])
-        contents = util.load_file('%s/etc/hosts' % self.tmp)
-        if '127.0.1.1\tcloud-init.test.us\tcloud-init' not in contents:
-            self.assertIsNone('No entry for 127.0.1.1 in etc/hosts')
-        if '192.168.1.1\tblah.blah.us\tblah' not in contents:
-            self.assertIsNone('Default etc/hosts content modified')
-
-    @t_help.skipUnlessJinja()
-    def test_write_etc_hosts_suse_template(self):
-        cfg = {
-            'manage_etc_hosts': 'template',
-            'hostname': 'cloud-init.test.us'
-        }
-        shutil.copytree('templates', '%s/etc/cloud/templates' % self.tmp)
-        distro = self._fetch_distro('sles')
-        paths = helpers.Paths({})
-        paths.template_tpl = '%s' % self.tmp + '/etc/cloud/templates/%s.tmpl'
-        ds = None
-        cc = cloud.Cloud(ds, paths, {}, distro, None)
-        self.patchUtils(self.tmp)
-        cc_update_etc_hosts.handle('test', cfg, cc, LOG, [])
-        contents = util.load_file('%s/etc/hosts' % self.tmp)
-        if '127.0.1.1 cloud-init.test.us cloud-init' not in contents:
-            self.assertIsNone('No entry for 127.0.1.1 in etc/hosts')
-        if '::1 cloud-init.test.us cloud-init' not in contents:
-            self.assertIsNone('No entry for ::1 in etc/hosts')
diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py
deleted file mode 100644
index b7d5d7ba..00000000
--- a/tests/unittests/test_handler/test_handler_growpart.py
+++ /dev/null
@@ -1,309 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
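The growpart tests that follow choose a resizer by running each helper tool with '--help' under a C locale and parsing the output; they never execute the tools, they mock cloudinit.subp. A condensed, self-contained sketch of that probing pattern (probe_resizer is a hypothetical stand-in, not cloud-init code):

    from unittest import mock

    from cloudinit import subp


    def probe_resizer(tool):
        # Force LANG=C so help-output parsing is stable across locales.
        out, _err = subp.subp([tool, '--help'], env={'LANG': 'C'})
        return 'resize' in out


    with mock.patch.object(subp, 'subp',
                           return_value=('... resize ...', '')) as m_subp:
        assert probe_resizer('growpart')
    m_subp.assert_called_once_with(['growpart', '--help'], env={'LANG': 'C'})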
- -from cloudinit import cloud -from cloudinit.config import cc_growpart -from cloudinit import subp -from cloudinit import temp_utils - -from cloudinit.tests.helpers import TestCase - -import errno -import logging -import os -import shutil -import re -import unittest -from contextlib import ExitStack -from unittest import mock -import stat - -# growpart: -# mode: auto # off, on, auto, 'growpart' -# devices: ['root'] - -HELP_GROWPART_RESIZE = """ -growpart disk partition - rewrite partition table so that partition takes up all the space it can - options: - -h | --help print Usage and exit - - -u | --update R update the the kernel partition table info after growing - this requires kernel support and 'partx --update' - R is one of: - - 'auto' : [default] update partition if possible - - Example: - - growpart /dev/sda 1 - Resize partition 1 on /dev/sda -""" - -HELP_GROWPART_NO_RESIZE = """ -growpart disk partition - rewrite partition table so that partition takes up all the space it can - options: - -h | --help print Usage and exit - - Example: - - growpart /dev/sda 1 - Resize partition 1 on /dev/sda -""" - -HELP_GPART = """ -usage: gpart add -t type [-a alignment] [-b start] geom - gpart backup geom - gpart bootcode [-b bootcode] [-p partcode -i index] [-f flags] geom - - gpart resize -i index [-a alignment] [-s size] [-f flags] geom - gpart restore [-lF] [-f flags] provider [...] - gpart recover [-f flags] geom - gpart help - -""" - - -class Dir: - '''Stub object''' - def __init__(self, name): - self.name = name - self.st_mode = name - - def is_dir(self, *args, **kwargs): - return True - - def stat(self, *args, **kwargs): - return self - - -class Scanner: - '''Stub object''' - def __enter__(self): - return (Dir(''), Dir(''),) - - def __exit__(self, *args): - pass - - -class TestDisabled(unittest.TestCase): - def setUp(self): - super(TestDisabled, self).setUp() - self.name = "growpart" - self.cloud_init = None - self.log = logging.getLogger("TestDisabled") - self.args = [] - - self.handle = cc_growpart.handle - - def test_mode_off(self): - # Test that nothing is done if mode is off. 
- - # this really only verifies that resizer_factory isn't called - config = {'growpart': {'mode': 'off'}} - - with mock.patch.object(cc_growpart, 'resizer_factory') as mockobj: - self.handle(self.name, config, self.cloud_init, self.log, - self.args) - self.assertEqual(mockobj.call_count, 0) - - -class TestConfig(TestCase): - def setUp(self): - super(TestConfig, self).setUp() - self.name = "growpart" - self.paths = None - self.cloud = cloud.Cloud(None, self.paths, None, None, None) - self.log = logging.getLogger("TestConfig") - self.args = [] - - self.cloud_init = None - self.handle = cc_growpart.handle - self.tmppath = '/tmp/cloudinit-test-file' - self.tmpdir = os.scandir('/tmp') - self.tmpfile = open(self.tmppath, 'w') - - def tearDown(self): - self.tmpfile.close() - os.remove(self.tmppath) - - @mock.patch.dict("os.environ", clear=True) - def test_no_resizers_auto_is_fine(self): - with mock.patch.object( - subp, 'subp', - return_value=(HELP_GROWPART_NO_RESIZE, "")) as mockobj: - - config = {'growpart': {'mode': 'auto'}} - self.handle(self.name, config, self.cloud_init, self.log, - self.args) - - mockobj.assert_has_calls([ - mock.call(['growpart', '--help'], env={'LANG': 'C'}), - mock.call(['gpart', 'help'], env={'LANG': 'C'}, rcs=[0, 1])]) - - @mock.patch.dict("os.environ", clear=True) - def test_no_resizers_mode_growpart_is_exception(self): - with mock.patch.object( - subp, 'subp', - return_value=(HELP_GROWPART_NO_RESIZE, "")) as mockobj: - config = {'growpart': {'mode': "growpart"}} - self.assertRaises( - ValueError, self.handle, self.name, config, - self.cloud_init, self.log, self.args) - - mockobj.assert_called_once_with( - ['growpart', '--help'], env={'LANG': 'C'}) - - @mock.patch.dict("os.environ", clear=True) - def test_mode_auto_prefers_growpart(self): - with mock.patch.object( - subp, 'subp', - return_value=(HELP_GROWPART_RESIZE, "")) as mockobj: - ret = cc_growpart.resizer_factory(mode="auto") - self.assertIsInstance(ret, cc_growpart.ResizeGrowPart) - - mockobj.assert_called_once_with( - ['growpart', '--help'], env={'LANG': 'C'}) - - @mock.patch.dict("os.environ", {'LANG': 'cs_CZ.UTF-8'}, clear=True) - @mock.patch.object(temp_utils, 'mkdtemp', return_value='/tmp/much-random') - @mock.patch.object(stat, 'S_ISDIR', return_value=False) - @mock.patch.object(os.path, 'samestat', return_value=True) - @mock.patch.object(os.path, "join", return_value='/tmp') - @mock.patch.object(os, 'scandir', return_value=Scanner()) - @mock.patch.object(os, 'mkdir') - @mock.patch.object(os, 'unlink') - @mock.patch.object(os, 'rmdir') - @mock.patch.object(os, 'open', return_value=1) - @mock.patch.object(os, 'close') - @mock.patch.object(shutil, 'rmtree') - @mock.patch.object(os, 'lseek', return_value=1024) - @mock.patch.object(os, 'lstat', return_value='interesting metadata') - def test_force_lang_check_tempfile(self, *args, **kwargs): - with mock.patch.object( - subp, - 'subp', - return_value=(HELP_GROWPART_RESIZE, "")) as mockobj: - - ret = cc_growpart.resizer_factory(mode="auto") - self.assertIsInstance(ret, cc_growpart.ResizeGrowPart) - diskdev = '/dev/sdb' - partnum = 1 - partdev = '/dev/sdb' - ret.resize(diskdev, partnum, partdev) - mockobj.assert_has_calls([ - mock.call( - ["growpart", '--dry-run', diskdev, partnum], - env={'LANG': 'C', 'TMPDIR': '/tmp'}), - mock.call( - ["growpart", diskdev, partnum], - env={'LANG': 'C', 'TMPDIR': '/tmp'}), - ]) - - @mock.patch.dict("os.environ", {'LANG': 'cs_CZ.UTF-8'}, clear=True) - def test_mode_auto_falls_back_to_gpart(self): - with mock.patch.object( - 
subp, 'subp', - return_value=("", HELP_GPART)) as mockobj: - ret = cc_growpart.resizer_factory(mode="auto") - self.assertIsInstance(ret, cc_growpart.ResizeGpart) - - mockobj.assert_has_calls([ - mock.call(['growpart', '--help'], env={'LANG': 'C'}), - mock.call(['gpart', 'help'], env={'LANG': 'C'}, rcs=[0, 1])]) - - def test_handle_with_no_growpart_entry(self): - # if no 'growpart' entry in config, then mode=auto should be used - - myresizer = object() - retval = (("/", cc_growpart.RESIZE.CHANGED, "my-message",),) - - with ExitStack() as mocks: - factory = mocks.enter_context( - mock.patch.object(cc_growpart, 'resizer_factory', - return_value=myresizer)) - rsdevs = mocks.enter_context( - mock.patch.object(cc_growpart, 'resize_devices', - return_value=retval)) - mocks.enter_context( - mock.patch.object(cc_growpart, 'RESIZERS', - (('mysizer', object),) - )) - - self.handle(self.name, {}, self.cloud_init, self.log, self.args) - - factory.assert_called_once_with('auto') - rsdevs.assert_called_once_with(myresizer, ['/']) - - -class TestResize(unittest.TestCase): - def setUp(self): - super(TestResize, self).setUp() - self.name = "growpart" - self.log = logging.getLogger("TestResize") - - def test_simple_devices(self): - # test simple device list - # this patches out devent2dev, os.stat, and device_part_info - # so in the end, doesn't test a lot - devs = ["/dev/XXda1", "/dev/YYda2"] - devstat_ret = Bunch(st_mode=25008, st_ino=6078, st_dev=5, - st_nlink=1, st_uid=0, st_gid=6, st_size=0, - st_atime=0, st_mtime=0, st_ctime=0) - enoent = ["/dev/NOENT"] - real_stat = os.stat - resize_calls = [] - - class myresizer(object): - def resize(self, diskdev, partnum, partdev): - resize_calls.append((diskdev, partnum, partdev)) - if partdev == "/dev/YYda2": - return (1024, 2048) - return (1024, 1024) # old size, new size - - def mystat(path): - if path in devs: - return devstat_ret - if path in enoent: - e = OSError("%s: does not exist" % path) - e.errno = errno.ENOENT - raise e - return real_stat(path) - - try: - opinfo = cc_growpart.device_part_info - cc_growpart.device_part_info = simple_device_part_info - os.stat = mystat - - resized = cc_growpart.resize_devices(myresizer(), devs + enoent) - - def find(name, res): - for f in res: - if f[0] == name: - return f - return None - - self.assertEqual(cc_growpart.RESIZE.NOCHANGE, - find("/dev/XXda1", resized)[1]) - self.assertEqual(cc_growpart.RESIZE.CHANGED, - find("/dev/YYda2", resized)[1]) - self.assertEqual(cc_growpart.RESIZE.SKIPPED, - find(enoent[0], resized)[1]) - # self.assertEqual(resize_calls, - # [("/dev/XXda", "1", "/dev/XXda1"), - # ("/dev/YYda", "2", "/dev/YYda2")]) - finally: - cc_growpart.device_part_info = opinfo - os.stat = real_stat - - -def simple_device_part_info(devpath): - # simple stupid return (/dev/vda, 1) for /dev/vda - ret = re.search("([^0-9]*)([0-9]*)$", devpath) - x = (ret.group(1), ret.group(2)) - return x - - -class Bunch(object): - def __init__(self, **kwds): - self.__dict__.update(kwds) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_install_hotplug.py b/tests/unittests/test_handler/test_handler_install_hotplug.py deleted file mode 100644 index 5d6b1e77..00000000 --- a/tests/unittests/test_handler/test_handler_install_hotplug.py +++ /dev/null @@ -1,113 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
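The mocks fixture below leans on pytest.yield_fixture, a long-deprecated alias, and pairs every patcher's start() with a manual stop(). The same setup can be written with a plain pytest fixture and contextlib.ExitStack, which unwinds every patch automatically even when a test fails; a sketch under the same patch targets:

    from collections import namedtuple
    from contextlib import ExitStack
    from unittest import mock

    import pytest

    Mocks = namedtuple('Mocks', 'm_write m_subp m_which')


    @pytest.fixture
    def mocks():
        with ExitStack() as stack:
            # enter_context() registers each patcher for automatic cleanup.
            yield Mocks(
                stack.enter_context(
                    mock.patch('cloudinit.util.write_file', autospec=True)),
                stack.enter_context(mock.patch('cloudinit.subp.subp')),
                stack.enter_context(
                    mock.patch('cloudinit.subp.which', return_value=None)),
            )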
-from collections import namedtuple -from unittest import mock - -import pytest - -from cloudinit.config.cc_install_hotplug import ( - handle, - HOTPLUG_UDEV_PATH, - HOTPLUG_UDEV_RULES_TEMPLATE, -) -from cloudinit.event import EventScope, EventType - - -@pytest.yield_fixture() -def mocks(): - m_update_enabled = mock.patch('cloudinit.stages.update_event_enabled') - m_write = mock.patch('cloudinit.util.write_file', autospec=True) - m_del = mock.patch('cloudinit.util.del_file', autospec=True) - m_subp = mock.patch('cloudinit.subp.subp') - m_which = mock.patch('cloudinit.subp.which', return_value=None) - m_path_exists = mock.patch('os.path.exists', return_value=False) - - yield namedtuple( - 'Mocks', - 'm_update_enabled m_write m_del m_subp m_which m_path_exists' - )( - m_update_enabled.start(), m_write.start(), m_del.start(), - m_subp.start(), m_which.start(), m_path_exists.start() - ) - - m_update_enabled.stop() - m_write.stop() - m_del.stop() - m_subp.stop() - m_which.stop() - m_path_exists.stop() - - -class TestInstallHotplug: - @pytest.mark.parametrize('libexec_exists', [True, False]) - def test_rules_installed_when_supported_and_enabled( - self, mocks, libexec_exists - ): - mocks.m_which.return_value = 'udevadm' - mocks.m_update_enabled.return_value = True - m_cloud = mock.MagicMock() - m_cloud.datasource.get_supported_events.return_value = { - EventScope.NETWORK: {EventType.HOTPLUG} - } - - if libexec_exists: - libexecdir = "/usr/libexec/cloud-init" - else: - libexecdir = "/usr/lib/cloud-init" - with mock.patch('os.path.exists', return_value=libexec_exists): - handle(None, {}, m_cloud, mock.Mock(), None) - mocks.m_write.assert_called_once_with( - filename=HOTPLUG_UDEV_PATH, - content=HOTPLUG_UDEV_RULES_TEMPLATE.format( - libexecdir=libexecdir), - ) - assert mocks.m_subp.call_args_list == [mock.call([ - 'udevadm', 'control', '--reload-rules', - ])] - assert mocks.m_del.call_args_list == [] - - def test_rules_not_installed_when_unsupported(self, mocks): - mocks.m_update_enabled.return_value = True - m_cloud = mock.MagicMock() - m_cloud.datasource.get_supported_events.return_value = {} - - handle(None, {}, m_cloud, mock.Mock(), None) - assert mocks.m_write.call_args_list == [] - assert mocks.m_del.call_args_list == [] - assert mocks.m_subp.call_args_list == [] - - def test_rules_not_installed_when_disabled(self, mocks): - mocks.m_update_enabled.return_value = False - m_cloud = mock.MagicMock() - m_cloud.datasource.get_supported_events.return_value = { - EventScope.NETWORK: {EventType.HOTPLUG} - } - - handle(None, {}, m_cloud, mock.Mock(), None) - assert mocks.m_write.call_args_list == [] - assert mocks.m_del.call_args_list == [] - assert mocks.m_subp.call_args_list == [] - - def test_rules_uninstalled_when_disabled(self, mocks): - mocks.m_path_exists.return_value = True - mocks.m_update_enabled.return_value = False - m_cloud = mock.MagicMock() - m_cloud.datasource.get_supported_events.return_value = {} - - handle(None, {}, m_cloud, mock.Mock(), None) - mocks.m_del.assert_called_with(HOTPLUG_UDEV_PATH) - assert mocks.m_subp.call_args_list == [mock.call([ - 'udevadm', 'control', '--reload-rules', - ])] - assert mocks.m_write.call_args_list == [] - - def test_rules_not_installed_when_no_udevadm(self, mocks): - mocks.m_update_enabled.return_value = True - m_cloud = mock.MagicMock() - m_cloud.datasource.get_supported_events.return_value = { - EventScope.NETWORK: {EventType.HOTPLUG} - } - - handle(None, {}, m_cloud, mock.Mock(), None) - assert mocks.m_del.call_args_list == [] - assert 
mocks.m_write.call_args_list == []
-        assert mocks.m_subp.call_args_list == []
diff --git a/tests/unittests/test_handler/test_handler_landscape.py b/tests/unittests/test_handler/test_handler_landscape.py
deleted file mode 100644
index 1cc73ea2..00000000
--- a/tests/unittests/test_handler/test_handler_landscape.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-import logging
-from configobj import ConfigObj
-
-from cloudinit.config import cc_landscape
-from cloudinit import util
-from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock,
-                                     wrap_and_call)
-
-from tests.unittests.util import get_cloud
-
-LOG = logging.getLogger(__name__)
-
-
-class TestLandscape(FilesystemMockingTestCase):
-
-    with_logs = True
-
-    def setUp(self):
-        super(TestLandscape, self).setUp()
-        self.new_root = self.tmp_dir()
-        self.conf = self.tmp_path('client.conf', self.new_root)
-        self.default_file = self.tmp_path('default_landscape', self.new_root)
-        self.patchUtils(self.new_root)
-        self.add_patch(
-            'cloudinit.distros.ubuntu.Distro.install_packages',
-            'm_install_packages'
-        )
-
-    def test_handler_skips_empty_landscape_cloudconfig(self):
-        """Empty landscape cloud-config section does no work."""
-        mycloud = get_cloud('ubuntu')
-        mycloud.distro = mock.MagicMock()
-        cfg = {'landscape': {}}
-        cc_landscape.handle('notimportant', cfg, mycloud, LOG, None)
-        self.assertFalse(mycloud.distro.install_packages.called)
-
-    def test_handler_error_on_invalid_landscape_type(self):
-        """Raise an error when landscape configuration option is invalid."""
-        mycloud = get_cloud('ubuntu')
-        cfg = {'landscape': 'wrongtype'}
-        with self.assertRaises(RuntimeError) as context_manager:
-            cc_landscape.handle('notimportant', cfg, mycloud, LOG, None)
-        self.assertIn(
-            "'landscape' key existed in config, but not a dict",
-            str(context_manager.exception))
-
-    @mock.patch('cloudinit.config.cc_landscape.subp')
-    def test_handler_restarts_landscape_client(self, m_subp):
-        """handler restarts landscape-client after install."""
-        mycloud = get_cloud('ubuntu')
-        cfg = {'landscape': {'client': {}}}
-        wrap_and_call(
-            'cloudinit.config.cc_landscape',
-            {'LSC_CLIENT_CFG_FILE': {'new': self.conf}},
-            cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None)
-        self.assertEqual(
-            [mock.call(['service', 'landscape-client', 'restart'])],
-            m_subp.subp.call_args_list)
-
-    def test_handler_installs_client_and_creates_config_file(self):
-        """Write landscape client.conf and install landscape-client."""
-        mycloud = get_cloud('ubuntu')
-        cfg = {'landscape': {'client': {}}}
-        expected = {'client': {
-            'log_level': 'info',
-            'url': 'https://landscape.canonical.com/message-system',
-            'ping_url': 'http://landscape.canonical.com/ping',
-            'data_path': '/var/lib/landscape/client'}}
-        mycloud.distro = mock.MagicMock()
-        wrap_and_call(
-            'cloudinit.config.cc_landscape',
-            {'LSC_CLIENT_CFG_FILE': {'new': self.conf},
-             'LS_DEFAULT_FILE': {'new': self.default_file}},
-            cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None)
-        self.assertEqual(
-            [mock.call('landscape-client')],
-            mycloud.distro.install_packages.call_args)
-        self.assertEqual(expected, dict(ConfigObj(self.conf)))
-        self.assertIn(
-            'Wrote landscape config file to {0}'.format(self.conf),
-            self.logs.getvalue())
-        default_content = util.load_file(self.default_file)
-        self.assertEqual('RUN=1\n', default_content)
-
-    def test_handler_writes_merged_client_config_file_with_defaults(self):
-        """Merge and write options from 
LSC_CLIENT_CFG_FILE with defaults.""" - # Write existing sparse client.conf file - util.write_file(self.conf, '[client]\ncomputer_title = My PC\n') - mycloud = get_cloud('ubuntu') - cfg = {'landscape': {'client': {}}} - expected = {'client': { - 'log_level': 'info', - 'url': 'https://landscape.canonical.com/message-system', - 'ping_url': 'http://landscape.canonical.com/ping', - 'data_path': '/var/lib/landscape/client', - 'computer_title': 'My PC'}} - wrap_and_call( - 'cloudinit.config.cc_landscape', - {'LSC_CLIENT_CFG_FILE': {'new': self.conf}}, - cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None) - self.assertEqual(expected, dict(ConfigObj(self.conf))) - self.assertIn( - 'Wrote landscape config file to {0}'.format(self.conf), - self.logs.getvalue()) - - def test_handler_writes_merged_provided_cloudconfig_with_defaults(self): - """Merge and write options from cloud-config options with defaults.""" - # Write empty sparse client.conf file - util.write_file(self.conf, '') - mycloud = get_cloud('ubuntu') - cfg = {'landscape': {'client': {'computer_title': 'My PC'}}} - expected = {'client': { - 'log_level': 'info', - 'url': 'https://landscape.canonical.com/message-system', - 'ping_url': 'http://landscape.canonical.com/ping', - 'data_path': '/var/lib/landscape/client', - 'computer_title': 'My PC'}} - wrap_and_call( - 'cloudinit.config.cc_landscape', - {'LSC_CLIENT_CFG_FILE': {'new': self.conf}}, - cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None) - self.assertEqual(expected, dict(ConfigObj(self.conf))) - self.assertIn( - 'Wrote landscape config file to {0}'.format(self.conf), - self.logs.getvalue()) diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/test_handler/test_handler_locale.py deleted file mode 100644 index 3c17927e..00000000 --- a/tests/unittests/test_handler/test_handler_locale.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. -# -# Author: Juerg Haefliger -# -# This file is part of cloud-init. See LICENSE file for license information. 
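Several locale tests below check what was written to disk by parsing the file back with configobj rather than comparing raw bytes. A minimal round-trip showing that assertion pattern (standalone, no cloud-init imports required):

    from io import BytesIO

    from configobj import ConfigObj

    contents = b'LANG="en_US.UTF-8"\n'
    parsed = ConfigObj(BytesIO(contents))
    # ConfigObj strips the shell-style quoting, so values compare as
    # plain strings.
    assert dict(parsed) == {'LANG': 'en_US.UTF-8'}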
-import logging -import os -import shutil -import tempfile -from io import BytesIO -from configobj import ConfigObj -from unittest import mock - -from cloudinit import util -from cloudinit.config import cc_locale -from cloudinit.tests import helpers as t_help - -from tests.unittests.util import get_cloud - - -LOG = logging.getLogger(__name__) - - -class TestLocale(t_help.FilesystemMockingTestCase): - - def setUp(self): - super(TestLocale, self).setUp() - self.new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.new_root) - self.patchUtils(self.new_root) - - def test_set_locale_arch(self): - locale = 'en_GB.UTF-8' - locale_configfile = '/etc/invalid-locale-path' - cfg = { - 'locale': locale, - 'locale_configfile': locale_configfile, - } - cc = get_cloud('arch') - - with mock.patch('cloudinit.distros.arch.subp.subp') as m_subp: - with mock.patch('cloudinit.distros.arch.LOG.warning') as m_LOG: - cc_locale.handle('cc_locale', cfg, cc, LOG, []) - m_LOG.assert_called_with('Invalid locale_configfile %s, ' - 'only supported value is ' - '/etc/locale.conf', - locale_configfile) - - contents = util.load_file(cc.distro.locale_gen_fn) - self.assertIn('%s UTF-8' % locale, contents) - m_subp.assert_called_with(['localectl', - 'set-locale', - locale], capture=False) - - def test_set_locale_sles(self): - - cfg = { - 'locale': 'My.Locale', - } - cc = get_cloud('sles') - cc_locale.handle('cc_locale', cfg, cc, LOG, []) - if cc.distro.uses_systemd(): - locale_conf = cc.distro.systemd_locale_conf_fn - else: - locale_conf = cc.distro.locale_conf_fn - contents = util.load_file(locale_conf, decode=False) - n_cfg = ConfigObj(BytesIO(contents)) - if cc.distro.uses_systemd(): - self.assertEqual({'LANG': cfg['locale']}, dict(n_cfg)) - else: - self.assertEqual({'RC_LANG': cfg['locale']}, dict(n_cfg)) - - def test_set_locale_sles_default(self): - cfg = {} - cc = get_cloud('sles') - cc_locale.handle('cc_locale', cfg, cc, LOG, []) - - if cc.distro.uses_systemd(): - locale_conf = cc.distro.systemd_locale_conf_fn - keyname = 'LANG' - else: - locale_conf = cc.distro.locale_conf_fn - keyname = 'RC_LANG' - - contents = util.load_file(locale_conf, decode=False) - n_cfg = ConfigObj(BytesIO(contents)) - self.assertEqual({keyname: 'en_US.UTF-8'}, dict(n_cfg)) - - def test_locale_update_config_if_different_than_default(self): - """Test cc_locale writes updates conf if different than default""" - locale_conf = os.path.join(self.new_root, "etc/default/locale") - util.write_file(locale_conf, 'LANG="en_US.UTF-8"\n') - cfg = {'locale': 'C.UTF-8'} - cc = get_cloud('ubuntu') - with mock.patch('cloudinit.distros.debian.subp.subp') as m_subp: - with mock.patch('cloudinit.distros.debian.LOCALE_CONF_FN', - locale_conf): - cc_locale.handle('cc_locale', cfg, cc, LOG, []) - m_subp.assert_called_with(['update-locale', - '--locale-file=%s' % locale_conf, - 'LANG=C.UTF-8'], capture=False) - - def test_locale_rhel_defaults_en_us_utf8(self): - """Test cc_locale gets en_US.UTF-8 from distro get_locale fallback""" - cfg = {} - cc = get_cloud('rhel') - update_sysconfig = 'cloudinit.distros.rhel_util.update_sysconfig_file' - with mock.patch.object(cc.distro, 'uses_systemd') as m_use_sd: - m_use_sd.return_value = True - with mock.patch(update_sysconfig) as m_update_syscfg: - cc_locale.handle('cc_locale', cfg, cc, LOG, []) - m_update_syscfg.assert_called_with('/etc/locale.conf', - {'LANG': 'en_US.UTF-8'}) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_lxd.py 
diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py
deleted file mode 100644
index ea8b6e90..00000000
--- a/tests/unittests/test_handler/test_handler_lxd.py
+++ /dev/null
@@ -1,222 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-from unittest import mock
-
-from cloudinit.config import cc_lxd
-from cloudinit.tests import helpers as t_help
-
-from tests.unittests.util import get_cloud
-
-
-class TestLxd(t_help.CiTestCase):
-
-    with_logs = True
-
-    lxd_cfg = {
-        'lxd': {
-            'init': {
-                'network_address': '0.0.0.0',
-                'storage_backend': 'zfs',
-                'storage_pool': 'poolname',
-            }
-        }
-    }
-
-    @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
-    @mock.patch("cloudinit.config.cc_lxd.subp")
-    def test_lxd_init(self, mock_subp, m_maybe_clean):
-        cc = get_cloud()
-        mock_subp.which.return_value = True
-        m_maybe_clean.return_value = None
-        cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
-        self.assertTrue(mock_subp.which.called)
-        # no bridge config, so maybe_cleanup should not be called.
-        self.assertFalse(m_maybe_clean.called)
-        self.assertEqual(
-            [mock.call(['lxd', 'waitready', '--timeout=300']),
-             mock.call(
-                 ['lxd', 'init', '--auto', '--network-address=0.0.0.0',
-                  '--storage-backend=zfs', '--storage-pool=poolname'])],
-            mock_subp.subp.call_args_list)
-
-    @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
-    @mock.patch("cloudinit.config.cc_lxd.subp")
-    def test_lxd_install(self, mock_subp, m_maybe_clean):
-        cc = get_cloud()
-        cc.distro = mock.MagicMock()
-        mock_subp.which.return_value = None
-        cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
-        self.assertNotIn('WARN', self.logs.getvalue())
-        self.assertTrue(cc.distro.install_packages.called)
-        cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, [])
-        self.assertFalse(m_maybe_clean.called)
-        install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
-        self.assertEqual(sorted(install_pkg), ['lxd', 'zfsutils-linux'])
-
-    @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
-    @mock.patch("cloudinit.config.cc_lxd.subp")
-    def test_no_init_does_nothing(self, mock_subp, m_maybe_clean):
-        cc = get_cloud()
-        cc.distro = mock.MagicMock()
-        cc_lxd.handle('cc_lxd', {'lxd': {}}, cc, self.logger, [])
-        self.assertFalse(cc.distro.install_packages.called)
-        self.assertFalse(mock_subp.subp.called)
-        self.assertFalse(m_maybe_clean.called)
-
-    @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
-    @mock.patch("cloudinit.config.cc_lxd.subp")
-    def test_no_lxd_does_nothing(self, mock_subp, m_maybe_clean):
-        cc = get_cloud()
-        cc.distro = mock.MagicMock()
-        cc_lxd.handle('cc_lxd', {'package_update': True}, cc, self.logger, [])
-        self.assertFalse(cc.distro.install_packages.called)
-        self.assertFalse(mock_subp.subp.called)
-        self.assertFalse(m_maybe_clean.called)
-
-    def test_lxd_debconf_new_full(self):
-        data = {"mode": "new",
-                "name": "testbr0",
-                "ipv4_address": "10.0.8.1",
-                "ipv4_netmask": "24",
-                "ipv4_dhcp_first": "10.0.8.2",
-                "ipv4_dhcp_last": "10.0.8.254",
-                "ipv4_dhcp_leases": "250",
-                "ipv4_nat": "true",
-                "ipv6_address": "fd98:9e0:3744::1",
-                "ipv6_netmask": "64",
-                "ipv6_nat": "true",
-                "domain": "lxd"}
-        self.assertEqual(
-            cc_lxd.bridge_to_debconf(data),
-            {"lxd/setup-bridge": "true",
-             "lxd/bridge-name": "testbr0",
-             "lxd/bridge-ipv4": "true",
-             "lxd/bridge-ipv4-address": "10.0.8.1",
-             "lxd/bridge-ipv4-netmask": "24",
-             "lxd/bridge-ipv4-dhcp-first": "10.0.8.2",
-             "lxd/bridge-ipv4-dhcp-last": "10.0.8.254",
-             "lxd/bridge-ipv4-dhcp-leases": "250",
-             "lxd/bridge-ipv4-nat": "true",
-             "lxd/bridge-ipv6": "true",
-             "lxd/bridge-ipv6-address": "fd98:9e0:3744::1",
-             "lxd/bridge-ipv6-netmask": "64",
-             "lxd/bridge-ipv6-nat": "true",
-             "lxd/bridge-domain": "lxd"})
-
-    def test_lxd_debconf_new_partial(self):
-        data = {"mode": "new",
-                "ipv6_address": "fd98:9e0:3744::1",
-                "ipv6_netmask": "64",
-                "ipv6_nat": "true"}
-        self.assertEqual(
-            cc_lxd.bridge_to_debconf(data),
-            {"lxd/setup-bridge": "true",
-             "lxd/bridge-ipv6": "true",
-             "lxd/bridge-ipv6-address": "fd98:9e0:3744::1",
-             "lxd/bridge-ipv6-netmask": "64",
-             "lxd/bridge-ipv6-nat": "true"})
-
-    def test_lxd_debconf_existing(self):
-        data = {"mode": "existing",
-                "name": "testbr0"}
-        self.assertEqual(
-            cc_lxd.bridge_to_debconf(data),
-            {"lxd/setup-bridge": "false",
-             "lxd/use-existing-bridge": "true",
-             "lxd/bridge-name": "testbr0"})
-
-    def test_lxd_debconf_none(self):
-        data = {"mode": "none"}
-        self.assertEqual(
-            cc_lxd.bridge_to_debconf(data),
-            {"lxd/setup-bridge": "false",
-             "lxd/bridge-name": ""})
-
-    def test_lxd_cmd_new_full(self):
-        data = {"mode": "new",
-                "name": "testbr0",
-                "ipv4_address": "10.0.8.1",
-                "ipv4_netmask": "24",
-                "ipv4_dhcp_first": "10.0.8.2",
-                "ipv4_dhcp_last": "10.0.8.254",
-                "ipv4_dhcp_leases": "250",
-                "ipv4_nat": "true",
-                "ipv6_address": "fd98:9e0:3744::1",
-                "ipv6_netmask": "64",
-                "ipv6_nat": "true",
-                "domain": "lxd"}
-        self.assertEqual(
-            cc_lxd.bridge_to_cmd(data),
-            (["network", "create", "testbr0",
-              "ipv4.address=10.0.8.1/24", "ipv4.nat=true",
-              "ipv4.dhcp.ranges=10.0.8.2-10.0.8.254",
-              "ipv6.address=fd98:9e0:3744::1/64",
-              "ipv6.nat=true", "dns.domain=lxd"],
-             ["network", "attach-profile",
-              "testbr0", "default", "eth0"]))
-
-    def test_lxd_cmd_new_partial(self):
-        data = {"mode": "new",
-                "ipv6_address": "fd98:9e0:3744::1",
-                "ipv6_netmask": "64",
-                "ipv6_nat": "true"}
-        self.assertEqual(
-            cc_lxd.bridge_to_cmd(data),
-            (["network", "create", "lxdbr0", "ipv4.address=none",
-              "ipv6.address=fd98:9e0:3744::1/64", "ipv6.nat=true"],
-             ["network", "attach-profile",
-              "lxdbr0", "default", "eth0"]))
-
-    def test_lxd_cmd_existing(self):
-        data = {"mode": "existing",
-                "name": "testbr0"}
-        self.assertEqual(
-            cc_lxd.bridge_to_cmd(data),
-            (None, ["network", "attach-profile",
-                    "testbr0", "default", "eth0"]))
-
-    def test_lxd_cmd_none(self):
-        data = {"mode": "none"}
-        self.assertEqual(
-            cc_lxd.bridge_to_cmd(data),
-            (None, None))
-
-
-class TestLxdMaybeCleanupDefault(t_help.CiTestCase):
-    """Test the implementation of maybe_cleanup_default."""
-
-    defnet = cc_lxd._DEFAULT_NETWORK_NAME
-
-    @mock.patch("cloudinit.config.cc_lxd._lxc")
-    def test_network_other_than_default_not_deleted(self, m_lxc):
-        """deletion or removal should only occur if bridge is default."""
-        cc_lxd.maybe_cleanup_default(
-            net_name="lxdbr1", did_init=True, create=True, attach=True)
-        m_lxc.assert_not_called()
-
-    @mock.patch("cloudinit.config.cc_lxd._lxc")
-    def test_did_init_false_does_not_delete(self, m_lxc):
-        """deletion or removal should only occur if did_init is True."""
-        cc_lxd.maybe_cleanup_default(
-            net_name=self.defnet, did_init=False, create=True, attach=True)
-        m_lxc.assert_not_called()
-
-    @mock.patch("cloudinit.config.cc_lxd._lxc")
-    def test_network_deleted_if_create_true(self, m_lxc):
-        """deletion of network should occur if create is True."""
-        cc_lxd.maybe_cleanup_default(
-            net_name=self.defnet, did_init=True, create=True, attach=False)
-        m_lxc.assert_called_with(["network", "delete", self.defnet])
-
-    @mock.patch("cloudinit.config.cc_lxd._lxc")
-    def test_device_removed_if_attach_true(self, m_lxc):
-        """removal of the profile device should occur if attach is True."""
-        nic_name = "my_nic"
-        profile = "my_profile"
-        cc_lxd.maybe_cleanup_default(
-            net_name=self.defnet, did_init=True, create=False, attach=True,
-            profile=profile, nic_name=nic_name)
-        m_lxc.assert_called_once_with(
-            ["profile", "device", "remove", profile, nic_name])
-
-
-# vi: ts=4 expandtab
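The bridge_to_cmd expectations above encode a config-to-LXC-command mapping. A minimal
sketch of that mapping, covering only the 'none', 'existing' and partial-IPv6 'new'
cases shown (bridge_to_cmd_sketch is hypothetical; cc_lxd.bridge_to_cmd is the real
implementation and also handles DHCP ranges, NAT for IPv4, and dns.domain):

    def bridge_to_cmd_sketch(data):
        # mode 'none': no network is created or attached.
        if data["mode"] == "none":
            return (None, None)
        name = data.get("name", "lxdbr0")
        attach = ["network", "attach-profile", name, "default", "eth0"]
        # mode 'existing': only attach the pre-existing bridge.
        if data["mode"] == "existing":
            return (None, attach)
        # mode 'new': build a 'network create' command from the config.
        create = ["network", "create", name]
        if data.get("ipv4_address"):
            create.append("ipv4.address=%s/%s" % (data["ipv4_address"],
                                                  data["ipv4_netmask"]))
        else:
            create.append("ipv4.address=none")
        if data.get("ipv6_address"):
            create.append("ipv6.address=%s/%s" % (data["ipv6_address"],
                                                  data["ipv6_netmask"]))
        if data.get("ipv6_nat"):
            create.append("ipv6.nat=true")
        return (create, attach)

    assert bridge_to_cmd_sketch(
        {"mode": "new", "ipv6_address": "fd98:9e0:3744::1",
         "ipv6_netmask": "64", "ipv6_nat": "true"}) == (
        ["network", "create", "lxdbr0", "ipv4.address=none",
         "ipv6.address=fd98:9e0:3744::1/64", "ipv6.nat=true"],
        ["network", "attach-profile", "lxdbr0", "default", "eth0"])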
diff --git a/tests/unittests/test_handler/test_handler_mcollective.py b/tests/unittests/test_handler/test_handler_mcollective.py
deleted file mode 100644
index 9cda6fbe..00000000
--- a/tests/unittests/test_handler/test_handler_mcollective.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-import configobj
-import logging
-import os
-import shutil
-import tempfile
-from io import BytesIO
-
-from cloudinit import (util)
-from cloudinit.config import cc_mcollective
-from cloudinit.tests import helpers as t_help
-
-from tests.unittests.util import get_cloud
-
-LOG = logging.getLogger(__name__)
-
-
-STOCK_CONFIG = """\
-main_collective = mcollective
-collectives = mcollective
-libdir = /usr/share/mcollective/plugins
-logfile = /var/log/mcollective.log
-loglevel = info
-daemonize = 1
-
-# Plugins
-securityprovider = psk
-plugin.psk = unset
-
-connector = activemq
-plugin.activemq.pool.size = 1
-plugin.activemq.pool.1.host = stomp1
-plugin.activemq.pool.1.port = 61613
-plugin.activemq.pool.1.user = mcollective
-plugin.activemq.pool.1.password = marionette
-
-# Facts
-factsource = yaml
-plugin.yaml = /etc/mcollective/facts.yaml
-"""
-
-
-class TestConfig(t_help.FilesystemMockingTestCase):
-    def setUp(self):
-        super(TestConfig, self).setUp()
-        self.tmp = tempfile.mkdtemp()
-        self.addCleanup(shutil.rmtree, self.tmp)
-        # "./": make os.path.join behave correctly with abs path as second arg
-        self.server_cfg = os.path.join(
-            self.tmp, "./" + cc_mcollective.SERVER_CFG)
-        self.pubcert_file = os.path.join(
-            self.tmp, "./" + cc_mcollective.PUBCERT_FILE)
-        self.pricert_file = os.path.join(
-            self.tmp, self.tmp, "./" + cc_mcollective.PRICERT_FILE)
-
-    def test_basic_config(self):
-        cfg = {
-            'mcollective': {
-                'conf': {
-                    'loglevel': 'debug',
-                    'connector': 'rabbitmq',
-                    'logfile': '/var/log/mcollective.log',
-                    'ttl': '4294957',
-                    'collectives': 'mcollective',
-                    'main_collective': 'mcollective',
-                    'securityprovider': 'psk',
-                    'daemonize': '1',
-                    'factsource': 'yaml',
-                    'direct_addressing': '1',
-                    'plugin.psk': 'unset',
-                    'libdir': '/usr/share/mcollective/plugins',
-                    'identity': '1',
-                },
-            },
-        }
-        expected = cfg['mcollective']['conf']
-
-        self.patchUtils(self.tmp)
-        cc_mcollective.configure(cfg['mcollective']['conf'])
-        contents = util.load_file(cc_mcollective.SERVER_CFG, decode=False)
-        contents = configobj.ConfigObj(BytesIO(contents))
-        self.assertEqual(expected, dict(contents))
-
-    def test_existing_config_is_saved(self):
-        cfg = {'loglevel': 'warn'}
-        util.write_file(self.server_cfg, STOCK_CONFIG)
-        cc_mcollective.configure(config=cfg, server_cfg=self.server_cfg)
-        self.assertTrue(os.path.exists(self.server_cfg))
-        self.assertTrue(os.path.exists(self.server_cfg + ".old"))
-        self.assertEqual(util.load_file(self.server_cfg + ".old"),
-                         STOCK_CONFIG)
-
-    def test_existing_updated(self):
-        cfg = {'loglevel': 'warn'}
-        util.write_file(self.server_cfg, STOCK_CONFIG)
-        cc_mcollective.configure(config=cfg, server_cfg=self.server_cfg)
-        cfgobj = configobj.ConfigObj(self.server_cfg)
-        self.assertEqual(cfg['loglevel'], cfgobj['loglevel'])
-
-    def test_certificates_written(self):
-        # check public-cert and private-cert keys in config get written
-        cfg = {'loglevel': 'debug',
-               'public-cert': "this is my public-certificate",
-               'private-cert': "secret private certificate"}
-
-        cc_mcollective.configure(
-            config=cfg, server_cfg=self.server_cfg,
-            pricert_file=self.pricert_file, pubcert_file=self.pubcert_file)
-
-        found = configobj.ConfigObj(self.server_cfg)
-
-        # make sure these didn't get written in
-        self.assertFalse('public-cert' in found)
-        self.assertFalse('private-cert' in found)
-
-        # these need updating to the specified paths
-        self.assertEqual(found['plugin.ssl_server_public'], self.pubcert_file)
-        self.assertEqual(found['plugin.ssl_server_private'],
-                         self.pricert_file)
-
-        # and the security provider should be ssl
-        self.assertEqual(found['securityprovider'], 'ssl')
-
-        self.assertEqual(
-            util.load_file(self.pricert_file), cfg['private-cert'])
-        self.assertEqual(
-            util.load_file(self.pubcert_file), cfg['public-cert'])
-
-
-class TestHandler(t_help.TestCase):
-    @t_help.mock.patch("cloudinit.config.cc_mcollective.subp")
-    @t_help.mock.patch("cloudinit.config.cc_mcollective.util")
-    def test_mcollective_install(self, mock_util, mock_subp):
-        cc = get_cloud()
-        cc.distro = t_help.mock.MagicMock()
-        mock_util.load_file.return_value = b""
-        mycfg = {'mcollective': {'conf': {'loglevel': 'debug'}}}
-        cc_mcollective.handle('cc_mcollective', mycfg, cc, LOG, [])
-        self.assertTrue(cc.distro.install_packages.called)
-        install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
-        self.assertEqual(install_pkg, ('mcollective',))
-
-        self.assertTrue(mock_subp.subp.called)
-        self.assertEqual(mock_subp.subp.call_args_list[0][0][0],
-                         ['service', 'mcollective', 'restart'])
-
-# vi: ts=4 expandtab
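These tests lean on configobj's round-tripping to merge user-provided keys into the
stock server.cfg. A minimal illustration of that merge, assuming only the configobj
package (key names are borrowed from STOCK_CONFIG above):

    from io import BytesIO

    import configobj

    # Load a stock config from bytes, overlay a user key, serialize back out.
    stock = BytesIO(b"loglevel = info\nsecurityprovider = psk\n")
    conf = configobj.ConfigObj(stock)
    conf['loglevel'] = 'debug'  # the user-supplied override wins
    # With no target file, write() returns the rendered lines.
    assert conf.write() == ['loglevel = debug', 'securityprovider = psk']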
diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py
deleted file mode 100644
index 69e8b30d..00000000
--- a/tests/unittests/test_handler/test_handler_mounts.py
+++ /dev/null
@@ -1,406 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import os.path
-from unittest import mock
-
-from cloudinit.config import cc_mounts
-
-from cloudinit.tests import helpers as test_helpers
-
-
-class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
-
-    def setUp(self):
-        super(TestSanitizeDevname, self).setUp()
-        self.new_root = self.tmp_dir()
-        self.patchOS(self.new_root)
-
-    def _touch(self, path):
-        path = os.path.join(self.new_root, path.lstrip('/'))
-        basedir = os.path.dirname(path)
-        if not os.path.exists(basedir):
-            os.makedirs(basedir)
-        open(path, 'a').close()
-
-    def _makedirs(self, directory):
-        directory = os.path.join(self.new_root, directory.lstrip('/'))
-        if not os.path.exists(directory):
-            os.makedirs(directory)
-
-    def mock_existence_of_disk(self, disk_path):
-        self._touch(disk_path)
-        self._makedirs(os.path.join('/sys/block', disk_path.split('/')[-1]))
-
-    def mock_existence_of_partition(self, disk_path, partition_number):
-        self.mock_existence_of_disk(disk_path)
-        self._touch(disk_path + str(partition_number))
-        disk_name = disk_path.split('/')[-1]
-        self._makedirs(os.path.join('/sys/block',
-                                    disk_name,
-                                    disk_name + str(partition_number)))
-
-    def test_existent_full_disk_path_is_returned(self):
-        disk_path = '/dev/sda'
-        self.mock_existence_of_disk(disk_path)
-        self.assertEqual(disk_path,
-                         cc_mounts.sanitize_devname(disk_path,
-                                                    lambda x: None,
-                                                    mock.Mock()))
-
-    def test_existent_disk_name_returns_full_path(self):
-        disk_name = 'sda'
-        disk_path = '/dev/' + disk_name
-        self.mock_existence_of_disk(disk_path)
-        self.assertEqual(disk_path,
-                         cc_mounts.sanitize_devname(disk_name,
-                                                    lambda x: None,
-                                                    mock.Mock()))
-
-    def test_existent_meta_disk_is_returned(self):
-        actual_disk_path = '/dev/sda'
-        self.mock_existence_of_disk(actual_disk_path)
-        self.assertEqual(
-            actual_disk_path,
-            cc_mounts.sanitize_devname('ephemeral0',
-                                       lambda x: actual_disk_path,
-                                       mock.Mock()))
-
-    def test_existent_meta_partition_is_returned(self):
-        disk_name, partition_part = '/dev/sda', '1'
-        actual_partition_path = disk_name + partition_part
-        self.mock_existence_of_partition(disk_name, partition_part)
-        self.assertEqual(
-            actual_partition_path,
-            cc_mounts.sanitize_devname('ephemeral0.1',
-                                       lambda x: disk_name,
-                                       mock.Mock()))
-
-    def test_existent_meta_partition_with_p_is_returned(self):
-        disk_name, partition_part = '/dev/sda', 'p1'
-        actual_partition_path = disk_name + partition_part
-        self.mock_existence_of_partition(disk_name, partition_part)
-        self.assertEqual(
-            actual_partition_path,
-            cc_mounts.sanitize_devname('ephemeral0.1',
-                                       lambda x: disk_name,
-                                       mock.Mock()))
-
-    def test_first_partition_returned_if_existent_disk_is_partitioned(self):
-        disk_name, partition_part = '/dev/sda', '1'
-        actual_partition_path = disk_name + partition_part
-        self.mock_existence_of_partition(disk_name, partition_part)
-        self.assertEqual(
-            actual_partition_path,
-            cc_mounts.sanitize_devname('ephemeral0',
-                                       lambda x: disk_name,
-                                       mock.Mock()))
-
-    def test_nth_partition_returned_if_requested(self):
-        disk_name, partition_part = '/dev/sda', '3'
-        actual_partition_path = disk_name + partition_part
-        self.mock_existence_of_partition(disk_name, partition_part)
-        self.assertEqual(
-            actual_partition_path,
-            cc_mounts.sanitize_devname('ephemeral0.3',
-                                       lambda x: disk_name,
-                                       mock.Mock()))
-
-    def test_transformer_returning_none_returns_none(self):
-        self.assertIsNone(
-            cc_mounts.sanitize_devname(
-                'ephemeral0', lambda x: None, mock.Mock()))
-
-    def test_missing_device_returns_none(self):
-        self.assertIsNone(
-            cc_mounts.sanitize_devname('/dev/sda', None, mock.Mock()))
-
-    def test_missing_sys_returns_none(self):
-        disk_path = '/dev/sda'
-        self._makedirs(disk_path)
-        self.assertIsNone(
-            cc_mounts.sanitize_devname(disk_path, None, mock.Mock()))
-
-    def test_existent_disk_but_missing_partition_returns_none(self):
-        disk_path = '/dev/sda'
-        self.mock_existence_of_disk(disk_path)
-        self.assertIsNone(
-            cc_mounts.sanitize_devname(
-                'ephemeral0.1', lambda x: disk_path, mock.Mock()))
-
-    def test_network_device_returns_network_device(self):
-        disk_path = 'netdevice:/path'
-        self.assertEqual(
-            disk_path,
-            cc_mounts.sanitize_devname(disk_path, None, mock.Mock()))
-
-    def test_device_aliases_remapping(self):
-        disk_path = '/dev/sda'
-        self.mock_existence_of_disk(disk_path)
-        self.assertEqual(disk_path,
-                         cc_mounts.sanitize_devname('mydata',
-                                                    lambda x: None,
-                                                    mock.Mock(),
-                                                    {'mydata': disk_path}))
-
-
-class TestSwapFileCreation(test_helpers.FilesystemMockingTestCase):
-
-    def setUp(self):
-        super(TestSwapFileCreation, self).setUp()
-        self.new_root = self.tmp_dir()
-        self.patchOS(self.new_root)
-
-        self.fstab_path = os.path.join(self.new_root, 'etc/fstab')
-        self.swap_path = os.path.join(self.new_root, 'swap.img')
-        self._makedirs('/etc')
-
-        self.add_patch('cloudinit.config.cc_mounts.FSTAB_PATH',
-                       'mock_fstab_path',
-                       self.fstab_path,
-                       autospec=False)
-
-        self.add_patch('cloudinit.config.cc_mounts.subp.subp',
-                       'm_subp_subp')
-
-        self.add_patch('cloudinit.config.cc_mounts.util.mounts',
-                       'mock_util_mounts',
-                       return_value={
-                           '/dev/sda1': {'fstype': 'ext4',
-                                         'mountpoint': '/',
-                                         'opts': 'rw,relatime,discard'
-                                         }})
-
-        self.mock_cloud = mock.Mock()
-        self.mock_log = mock.Mock()
-        self.mock_cloud.device_name_to_device = self.device_name_to_device
-
-        self.cc = {
-            'swap': {
-                'filename': self.swap_path,
-                'size': '512',
-                'maxsize': '512'}}
-
-    def _makedirs(self, directory):
-        directory = os.path.join(self.new_root, directory.lstrip('/'))
-        if not os.path.exists(directory):
-            os.makedirs(directory)
-
-    def device_name_to_device(self, path):
-        if path == 'swap':
-            return self.swap_path
-        else:
-            dev = None
-
-        return dev
-
-    @mock.patch('cloudinit.util.get_mount_info')
-    @mock.patch('cloudinit.util.kernel_version')
-    def test_swap_creation_method_fallocate_on_xfs(self, m_kernel_version,
-                                                   m_get_mount_info):
-        m_kernel_version.return_value = (4, 20)
-        m_get_mount_info.return_value = ["", "xfs"]
-
-        cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
-        self.m_subp_subp.assert_has_calls([
-            mock.call(['fallocate', '-l', '0M', self.swap_path], capture=True),
-            mock.call(['mkswap', self.swap_path]),
-            mock.call(['swapon', '-a'])])
-
-    @mock.patch('cloudinit.util.get_mount_info')
-    @mock.patch('cloudinit.util.kernel_version')
-    def test_swap_creation_method_xfs(self, m_kernel_version,
-                                      m_get_mount_info):
-        m_kernel_version.return_value = (3, 18)
-        m_get_mount_info.return_value = ["", "xfs"]
-
-        cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
-        self.m_subp_subp.assert_has_calls([
-            mock.call(['dd', 'if=/dev/zero',
-                       'of=' + self.swap_path,
-                       'bs=1M', 'count=0'], capture=True),
-            mock.call(['mkswap', self.swap_path]),
-            mock.call(['swapon', '-a'])])
-
-    @mock.patch('cloudinit.util.get_mount_info')
-    @mock.patch('cloudinit.util.kernel_version')
-    def test_swap_creation_method_btrfs(self, m_kernel_version,
-                                        m_get_mount_info):
-        m_kernel_version.return_value = (4, 20)
-        m_get_mount_info.return_value = ["", "btrfs"]
-
-        cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
-        self.m_subp_subp.assert_has_calls([
-            mock.call(['dd', 'if=/dev/zero',
-                       'of=' + self.swap_path,
-                       'bs=1M', 'count=0'], capture=True),
-            mock.call(['mkswap', self.swap_path]),
-            mock.call(['swapon', '-a'])])
-
-    @mock.patch('cloudinit.util.get_mount_info')
-    @mock.patch('cloudinit.util.kernel_version')
-    def test_swap_creation_method_ext4(self, m_kernel_version,
-                                       m_get_mount_info):
-        m_kernel_version.return_value = (5, 14)
-        m_get_mount_info.return_value = ["", "ext4"]
-
-        cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, [])
-        self.m_subp_subp.assert_has_calls([
-            mock.call(['fallocate', '-l', '0M', self.swap_path], capture=True),
-            mock.call(['mkswap', self.swap_path]),
-            mock.call(['swapon', '-a'])])
-
-
-class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
-
-    swap_path = '/dev/sdb1'
-
-    def setUp(self):
-        super(TestFstabHandling, self).setUp()
-        self.new_root = self.tmp_dir()
-        self.patchOS(self.new_root)
-
-        self.fstab_path = os.path.join(self.new_root, 'etc/fstab')
-        self._makedirs('/etc')
-
-        self.add_patch('cloudinit.config.cc_mounts.FSTAB_PATH',
-                       'mock_fstab_path',
-                       self.fstab_path,
-                       autospec=False)
-
-        self.add_patch('cloudinit.config.cc_mounts._is_block_device',
-                       'mock_is_block_device',
-                       return_value=True)
-
-        self.add_patch('cloudinit.config.cc_mounts.subp.subp',
-                       'm_subp_subp')
-
-        self.add_patch('cloudinit.config.cc_mounts.util.mounts',
-                       'mock_util_mounts',
-                       return_value={
-                           '/dev/sda1': {'fstype': 'ext4',
-                                         'mountpoint': '/',
-                                         'opts': 'rw,relatime,discard'
-                                         }})
-
-        self.mock_cloud = mock.Mock()
-        self.mock_log = mock.Mock()
-        self.mock_cloud.device_name_to_device = self.device_name_to_device
-
-    def _makedirs(self, directory):
-        directory = os.path.join(self.new_root, directory.lstrip('/'))
-        if not os.path.exists(directory):
-            os.makedirs(directory)
-
-    def device_name_to_device(self, path):
-        if path == 'swap':
-            return self.swap_path
-        else:
-            dev = None
-
-        return dev
-
-    def test_no_fstab(self):
-        """Handle images which do not include an fstab."""
-        self.assertFalse(os.path.exists(cc_mounts.FSTAB_PATH))
-        fstab_expected_content = (
-            '%s\tnone\tswap\tsw,comment=cloudconfig\t'
-            '0\t0\n' % (self.swap_path,)
-        )
-        cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
-        with open(cc_mounts.FSTAB_PATH, 'r') as fd:
-            fstab_new_content = fd.read()
-            self.assertEqual(fstab_expected_content, fstab_new_content)
-
-    def test_swap_integrity(self):
-        '''Ensure that the swap file is correctly created and can
-        swapon successfully. Fixing the corner case of:
-        kernel: swapon: swapfile has holes'''
-
-        fstab = '/swap.img swap swap defaults 0 0\n'
-
-        with open(cc_mounts.FSTAB_PATH, 'w') as fd:
-            fd.write(fstab)
-        cc = {'swap': ['filename: /swap.img', 'size: 512', 'maxsize: 512']}
-        cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, [])
-
-    def test_fstab_no_swap_device(self):
-        '''Ensure that cloud-init adds a discovered swap partition
-        to /etc/fstab.'''
-
-        fstab_original_content = ''
-        fstab_expected_content = (
-            '%s\tnone\tswap\tsw,comment=cloudconfig\t'
-            '0\t0\n' % (self.swap_path,)
-        )
-
-        with open(cc_mounts.FSTAB_PATH, 'w') as fd:
-            fd.write(fstab_original_content)
-
-        cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
-
-        with open(cc_mounts.FSTAB_PATH, 'r') as fd:
-            fstab_new_content = fd.read()
-            self.assertEqual(fstab_expected_content, fstab_new_content)
-
-    def test_fstab_same_swap_device_already_configured(self):
-        '''Ensure that cloud-init will not add a swap device if the same
-        device already exists in /etc/fstab.'''
-
-        fstab_original_content = '%s swap swap defaults 0 0\n' % (
-            self.swap_path,)
-        fstab_expected_content = fstab_original_content
-
-        with open(cc_mounts.FSTAB_PATH, 'w') as fd:
-            fd.write(fstab_original_content)
-
-        cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
-
-        with open(cc_mounts.FSTAB_PATH, 'r') as fd:
-            fstab_new_content = fd.read()
-            self.assertEqual(fstab_expected_content, fstab_new_content)
-
-    def test_fstab_alternate_swap_device_already_configured(self):
-        '''Ensure that cloud-init will add a discovered swap device to
-        /etc/fstab even when there exists a swap definition on another
-        device.'''
-
-        fstab_original_content = '/dev/sdc1 swap swap defaults 0 0\n'
-        fstab_expected_content = (
-            fstab_original_content +
-            '%s\tnone\tswap\tsw,comment=cloudconfig\t'
-            '0\t0\n' % (self.swap_path,)
-        )
-
-        with open(cc_mounts.FSTAB_PATH, 'w') as fd:
-            fd.write(fstab_original_content)
-
-        cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, [])
-
-        with open(cc_mounts.FSTAB_PATH, 'r') as fd:
-            fstab_new_content = fd.read()
-            self.assertEqual(fstab_expected_content, fstab_new_content)
-
-    def test_no_change_fstab_sets_needs_mount_all(self):
-        '''verify unchanged fstab entries are still mounted via mount -a'''
-        fstab_original_content = (
-            'LABEL=cloudimg-rootfs / ext4 defaults 0 0\n'
-            'LABEL=UEFI /boot/efi vfat defaults 0 0\n'
-            '/dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2\n'
-        )
-        fstab_expected_content = fstab_original_content
-        cc = {
-            'mounts': [
-                ['/dev/vdb', '/mnt', 'auto', 'defaults,noexec']
-            ]
-        }
-        with open(cc_mounts.FSTAB_PATH, 'w') as fd:
-            fd.write(fstab_original_content)
-        with open(cc_mounts.FSTAB_PATH, 'r') as fd:
-            fstab_new_content = fd.read()
-            self.assertEqual(fstab_expected_content, fstab_new_content)
-        cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, [])
-        self.m_subp_subp.assert_has_calls([
-            mock.call(['mount', '-a']),
-            mock.call(['systemctl', 'daemon-reload'])])
-
-# vi: ts=4 expandtab
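The fstab tests above all compare against one rendered swap entry. A standalone
sketch of that rendering (render_swap_fstab_line is a hypothetical helper that
simply mirrors the format string the assertions use):

    def render_swap_fstab_line(device):
        # Tab-separated fstab entry with the cloud-init marker comment,
        # exactly as the expected-content strings above are built.
        return '%s\tnone\tswap\tsw,comment=cloudconfig\t0\t0\n' % device

    assert render_swap_fstab_line('/dev/sdb1') == (
        '/dev/sdb1\tnone\tswap\tsw,comment=cloudconfig\t0\t0\n')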
diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py
deleted file mode 100644
index b34a18cb..00000000
--- a/tests/unittests/test_handler/test_handler_ntp.py
+++ /dev/null
@@ -1,765 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-import copy
-import os
-import shutil
-from functools import partial
-from os.path import dirname
-
-from cloudinit import (helpers, util)
-from cloudinit.config import cc_ntp
-from cloudinit.tests.helpers import (
-    CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema)
-
-from tests.unittests.util import get_cloud
-
-
-NTP_TEMPLATE = """\
-## template: jinja
-servers {{servers}}
-pools {{pools}}
-"""
-
-TIMESYNCD_TEMPLATE = """\
-## template:jinja
-[Time]
-{% if servers or pools -%}
-NTP={% for host in servers|list + pools|list %}{{ host }} {% endfor -%}
-{% endif -%}
-"""
-
-
-class TestNtp(FilesystemMockingTestCase):
-
-    with_logs = True
-
-    def setUp(self):
-        super(TestNtp, self).setUp()
-        self.new_root = self.tmp_dir()
-        self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy')
-        self.m_snappy.return_value = False
-        self.new_root = self.reRoot()
-        self._get_cloud = partial(
-            get_cloud,
-            paths=helpers.Paths({'templates_dir': self.new_root})
-        )
-
-    def _get_template_path(self, template_name, distro, basepath=None):
-        # ntp.conf.{distro} -> ntp.conf.debian.tmpl
-        template_fn = '{0}.tmpl'.format(
-            template_name.replace('{distro}', distro))
-        if not basepath:
-            basepath = self.new_root
-        path = os.path.join(basepath, template_fn)
-        return path
-
-    def _generate_template(self, template=None):
-        if not template:
-            template = NTP_TEMPLATE
-        confpath = os.path.join(self.new_root, 'client.conf')
-        template_fn = os.path.join(self.new_root, 'client.conf.tmpl')
-        util.write_file(template_fn, content=template)
-        return (confpath, template_fn)
-
-    def _mock_ntp_client_config(self, client=None, distro=None):
-        if not client:
-            client = 'ntp'
-        if not distro:
-            distro = 'ubuntu'
-        dcfg = cc_ntp.distro_ntp_client_configs(distro)
-        if client == 'systemd-timesyncd':
-            template = TIMESYNCD_TEMPLATE
-        else:
-            template = NTP_TEMPLATE
-        (confpath, _template_fn) = self._generate_template(template=template)
-        ntpconfig = copy.deepcopy(dcfg[client])
-        ntpconfig['confpath'] = confpath
-        ntpconfig['template_name'] = os.path.basename(confpath)
-        return ntpconfig
-
-    @mock.patch("cloudinit.config.cc_ntp.subp")
-    def test_ntp_install(self, mock_subp):
-        """ntp_install_client runs install_func when check_exe is absent."""
-        mock_subp.which.return_value = None  # check_exe not found.
-        install_func = mock.MagicMock()
-        cc_ntp.install_ntp_client(install_func,
-                                  packages=['ntpx'], check_exe='ntpdx')
-        mock_subp.which.assert_called_with('ntpdx')
-        install_func.assert_called_once_with(['ntpx'])
-
-    @mock.patch("cloudinit.config.cc_ntp.subp")
-    def test_ntp_install_not_needed(self, mock_subp):
-        """ntp_install_client doesn't install when check_exe is found."""
-        client = 'chrony'
-        mock_subp.which.return_value = [client]  # check_exe found.
-        install_func = mock.MagicMock()
-        cc_ntp.install_ntp_client(install_func, packages=[client],
-                                  check_exe=client)
-        install_func.assert_not_called()
-
-    @mock.patch("cloudinit.config.cc_ntp.subp")
-    def test_ntp_install_no_op_with_empty_pkg_list(self, mock_subp):
-        """ntp_install_client runs install_func with empty list"""
-        mock_subp.which.return_value = None  # check_exe not found
-        install_func = mock.MagicMock()
-        cc_ntp.install_ntp_client(install_func, packages=[],
-                                  check_exe='timesyncd')
-        install_func.assert_called_once_with([])
-
-    def test_ntp_rename_ntp_conf(self):
-        """When NTP_CONF exists, rename_ntp moves it."""
-        ntpconf = self.tmp_path("ntp.conf", self.new_root)
-        util.write_file(ntpconf, "")
-        cc_ntp.rename_ntp_conf(confpath=ntpconf)
-        self.assertFalse(os.path.exists(ntpconf))
-        self.assertTrue(os.path.exists("{0}.dist".format(ntpconf)))
-
-    def test_ntp_rename_ntp_conf_skip_missing(self):
-        """When NTP_CONF doesn't exist rename_ntp doesn't create a file."""
-        ntpconf = self.tmp_path("ntp.conf", self.new_root)
-        self.assertFalse(os.path.exists(ntpconf))
-        cc_ntp.rename_ntp_conf(confpath=ntpconf)
-        self.assertFalse(os.path.exists("{0}.dist".format(ntpconf)))
-        self.assertFalse(os.path.exists(ntpconf))
-
-    def test_write_ntp_config_template_uses_ntp_conf_distro_no_servers(self):
-        """write_ntp_config_template reads from $client.conf.distro.tmpl"""
-        servers = []
-        pools = ['10.0.0.1', '10.0.0.2']
-        (confpath, template_fn) = self._generate_template()
-        mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
-        with mock.patch(mock_path, self.new_root):
-            cc_ntp.write_ntp_config_template('ubuntu',
-                                             servers=servers, pools=pools,
-                                             path=confpath,
-                                             template_fn=template_fn,
-                                             template=None)
-        self.assertEqual(
-            "servers []\npools ['10.0.0.1', '10.0.0.2']\n",
-            util.load_file(confpath))
-
-    def test_write_ntp_config_template_defaults_pools_w_empty_lists(self):
-        """write_ntp_config_template defaults pools servers upon empty config.
-
-        When both pools and servers are empty, default NR_POOL_SERVERS get
-        configured.
-        """
-        distro = 'ubuntu'
-        pools = cc_ntp.generate_server_names(distro)
-        servers = []
-        (confpath, template_fn) = self._generate_template()
-        mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
-        with mock.patch(mock_path, self.new_root):
-            cc_ntp.write_ntp_config_template(distro,
-                                             servers=servers, pools=pools,
-                                             path=confpath,
-                                             template_fn=template_fn,
-                                             template=None)
-        self.assertEqual(
-            "servers []\npools {0}\n".format(pools),
-            util.load_file(confpath))
-
-    def test_defaults_pools_empty_lists_sles(self):
-        """write_ntp_config_template defaults opensuse pools upon empty config.
-
-        When both pools and servers are empty, default NR_POOL_SERVERS get
-        configured.
-        """
-        distro = 'sles'
-        default_pools = cc_ntp.generate_server_names(distro)
-        (confpath, template_fn) = self._generate_template()
-
-        cc_ntp.write_ntp_config_template(distro,
-                                         servers=[], pools=[],
-                                         path=confpath,
-                                         template_fn=template_fn,
-                                         template=None)
-        for pool in default_pools:
-            self.assertIn('opensuse', pool)
-        self.assertEqual(
-            "servers []\npools {0}\n".format(default_pools),
-            util.load_file(confpath))
-        self.assertIn(
-            "Adding distro default ntp pool servers: {0}".format(
-                ",".join(default_pools)),
-            self.logs.getvalue())
-
-    def test_timesyncd_template(self):
-        """Test timesyncd template is correct"""
-        pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org']
-        servers = ['192.168.23.3', '192.168.23.4']
-        (confpath, template_fn) = self._generate_template(
-            template=TIMESYNCD_TEMPLATE)
-        cc_ntp.write_ntp_config_template('ubuntu',
-                                         servers=servers, pools=pools,
-                                         path=confpath,
-                                         template_fn=template_fn,
-                                         template=None)
-        self.assertEqual(
-            "[Time]\nNTP=%s %s \n" % (" ".join(servers), " ".join(pools)),
-            util.load_file(confpath))
-
-    def test_distro_ntp_client_configs(self):
-        """Test we have updated ntp client configs on different distros"""
-        delta = copy.deepcopy(cc_ntp.DISTRO_CLIENT_CONFIG)
-        base = copy.deepcopy(cc_ntp.NTP_CLIENT_CONFIG)
-        # confirm no-delta distros match the base config
-        for distro in cc_ntp.distros:
-            if distro not in delta:
-                result = cc_ntp.distro_ntp_client_configs(distro)
-                self.assertEqual(base, result)
-        # for distros with delta, ensure the merged config values match
-        # what is set in the delta
-        for distro in delta.keys():
-            result = cc_ntp.distro_ntp_client_configs(distro)
-            for client in delta[distro].keys():
-                for key in delta[distro][client].keys():
-                    self.assertEqual(delta[distro][client][key],
-                                     result[client][key])
-
-    def _get_expected_pools(self, pools, distro, client):
-        if client in ['ntp', 'chrony']:
-            if client == 'ntp' and distro == 'alpine':
-                # NTP for Alpine Linux is Busybox's ntp which does not
-                # support 'pool' lines in its configuration file.
-                expected_pools = []
-            else:
-                expected_pools = [
-                    'pool {0} iburst'.format(pool) for pool in pools]
-        elif client == 'systemd-timesyncd':
-            expected_pools = " ".join(pools)
-
-        return expected_pools
-
-    def _get_expected_servers(self, servers, distro, client):
-        if client in ['ntp', 'chrony']:
-            if client == 'ntp' and distro == 'alpine':
-                # NTP for Alpine Linux is Busybox's ntp which only supports
-                # 'server' lines without iburst option.
-                expected_servers = [
-                    'server {0}'.format(srv) for srv in servers]
-            else:
-                expected_servers = [
-                    'server {0} iburst'.format(srv) for srv in servers]
-        elif client == 'systemd-timesyncd':
-            expected_servers = " ".join(servers)
-
-        return expected_servers
-
-    def test_ntp_handler_real_distro_ntp_templates(self):
-        """Test ntp handler renders the shipped distro ntp client templates."""
-        pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org']
-        servers = ['192.168.23.3', '192.168.23.4']
-        for client in ['ntp', 'systemd-timesyncd', 'chrony']:
-            for distro in cc_ntp.distros:
-                distro_cfg = cc_ntp.distro_ntp_client_configs(distro)
-                ntpclient = distro_cfg[client]
-                confpath = (
-                    os.path.join(self.new_root, ntpclient.get('confpath')[1:]))
-                template = ntpclient.get('template_name')
-                # find sourcetree template file
-                root_dir = (
-                    dirname(dirname(os.path.realpath(util.__file__))) +
-                    '/templates')
-                source_fn = self._get_template_path(template, distro,
-                                                    basepath=root_dir)
-                template_fn = self._get_template_path(template, distro)
-                # don't fail if cloud-init doesn't have a template for
-                # a distro,client pair
-                if not os.path.exists(source_fn):
-                    continue
-                # Create a copy in our tmp_dir
-                shutil.copy(source_fn, template_fn)
-                cc_ntp.write_ntp_config_template(distro, servers=servers,
-                                                 pools=pools, path=confpath,
-                                                 template_fn=template_fn)
-                content = util.load_file(confpath)
-                if client in ['ntp', 'chrony']:
-                    content_lines = content.splitlines()
-                    expected_servers = self._get_expected_servers(servers,
-                                                                  distro,
-                                                                  client)
-                    print('distro=%s client=%s' % (distro, client))
-                    for sline in expected_servers:
-                        self.assertIn(sline, content_lines,
-                                      ('failed to render {0} conf'
-                                       ' for distro:{1}'.format(client,
-                                                                distro)))
-                    expected_pools = self._get_expected_pools(pools, distro,
-                                                              client)
-                    if expected_pools != []:
-                        for pline in expected_pools:
-                            self.assertIn(pline, content_lines,
-                                          ('failed to render {0} conf'
-                                           ' for distro:{1}'.format(client,
-                                                                    distro)))
-                elif client == 'systemd-timesyncd':
-                    expected_servers = self._get_expected_servers(servers,
-                                                                  distro,
-                                                                  client)
-                    expected_pools = self._get_expected_pools(pools,
-                                                              distro,
-                                                              client)
-                    expected_content = (
-                        "# cloud-init generated file\n" +
-                        "# See timesyncd.conf(5) for details.\n\n" +
-                        "[Time]\nNTP=%s %s \n" % (expected_servers,
-                                                  expected_pools))
-                    self.assertEqual(expected_content, content)
-
-    def test_no_ntpcfg_does_nothing(self):
-        """When no ntp section is defined handler logs a warning and noops."""
-        cc_ntp.handle('cc_ntp', {}, None, None, [])
-        self.assertEqual(
-            'DEBUG: Skipping module named cc_ntp, '
-            'not present or disabled by cfg\n',
-            self.logs.getvalue())
-
-    @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
-    def test_ntp_handler_schema_validation_allows_empty_ntp_config(self,
-                                                                   m_select):
-        """Ntp schema validation allows for an empty ntp: configuration."""
-        valid_empty_configs = [{'ntp': {}}, {'ntp': None}]
-        for valid_empty_config in valid_empty_configs:
-            for distro in cc_ntp.distros:
-                mycloud = self._get_cloud(distro)
-                ntpconfig = self._mock_ntp_client_config(distro=distro)
-                confpath = ntpconfig['confpath']
-                m_select.return_value = ntpconfig
-                cc_ntp.handle('cc_ntp', valid_empty_config, mycloud, None, [])
-                if distro == 'alpine':
-                    # _mock_ntp_client_config call above did not specify a
-                    # client value and so it defaults to "ntp" which on
-                    # Alpine Linux only supports servers and not pools.
-
-                    servers = cc_ntp.generate_server_names(mycloud.distro.name)
-                    self.assertEqual(
-                        "servers {0}\npools []\n".format(servers),
-                        util.load_file(confpath))
-                else:
-                    pools = cc_ntp.generate_server_names(mycloud.distro.name)
-                    self.assertEqual(
-                        "servers []\npools {0}\n".format(pools),
-                        util.load_file(confpath))
-                self.assertNotIn('Invalid config:', self.logs.getvalue())
-
-    @skipUnlessJsonSchema()
-    @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
-    def test_ntp_handler_schema_validation_warns_non_string_item_type(self,
-                                                                      m_sel):
-        """Ntp schema validation warns of non-strings in pools or servers.
-
-        Schema validation is not strict, so ntp config is still rendered.
-        """
-        invalid_config = {'ntp': {'pools': [123], 'servers': ['valid', None]}}
-        for distro in cc_ntp.distros:
-            mycloud = self._get_cloud(distro)
-            ntpconfig = self._mock_ntp_client_config(distro=distro)
-            confpath = ntpconfig['confpath']
-            m_sel.return_value = ntpconfig
-            cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, [])
-            self.assertIn(
-                "Invalid config:\nntp.pools.0: 123 is not of type 'string'\n"
-                "ntp.servers.1: None is not of type 'string'",
-                self.logs.getvalue())
-            self.assertEqual("servers ['valid', None]\npools [123]\n",
-                             util.load_file(confpath))
-
-    @skipUnlessJsonSchema()
-    @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
-    def test_ntp_handler_schema_validation_warns_of_non_array_type(self,
-                                                                   m_select):
-        """Ntp schema validation warns of non-array pools or servers types.
-
-        Schema validation is not strict, so ntp config is still rendered.
-        """
-        invalid_config = {'ntp': {'pools': 123, 'servers': 'non-array'}}
-
-        for distro in cc_ntp.distros:
-            mycloud = self._get_cloud(distro)
-            ntpconfig = self._mock_ntp_client_config(distro=distro)
-            confpath = ntpconfig['confpath']
-            m_select.return_value = ntpconfig
-            cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, [])
-            self.assertIn(
-                "Invalid config:\nntp.pools: 123 is not of type 'array'\n"
-                "ntp.servers: 'non-array' is not of type 'array'",
-                self.logs.getvalue())
-            self.assertEqual("servers non-array\npools 123\n",
-                             util.load_file(confpath))
-
-    @skipUnlessJsonSchema()
-    @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
-    def test_ntp_handler_schema_validation_warns_invalid_key_present(self,
-                                                                     m_select):
-        """Ntp schema validation warns of invalid keys present in ntp config.
-
-        Schema validation is not strict, so ntp config is still rendered.
-        """
-        invalid_config = {
-            'ntp': {'invalidkey': 1, 'pools': ['0.mycompany.pool.ntp.org']}}
-        for distro in cc_ntp.distros:
-            if distro != 'alpine':
-                mycloud = self._get_cloud(distro)
-                ntpconfig = self._mock_ntp_client_config(distro=distro)
-                confpath = ntpconfig['confpath']
-                m_select.return_value = ntpconfig
-                cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, [])
-                self.assertIn(
-                    "Invalid config:\nntp: Additional properties are not "
-                    "allowed ('invalidkey' was unexpected)",
-                    self.logs.getvalue())
-                self.assertEqual(
-                    "servers []\npools ['0.mycompany.pool.ntp.org']\n",
-                    util.load_file(confpath))
-
-    @skipUnlessJsonSchema()
-    @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
-    def test_ntp_handler_schema_validation_warns_of_duplicates(self, m_select):
-        """Ntp schema validation warns of duplicates in servers or pools.
-
-        Schema validation is not strict, so ntp config is still rendered.
-        """
-        invalid_config = {
-            'ntp': {'pools': ['0.mypool.org', '0.mypool.org'],
-                    'servers': ['10.0.0.1', '10.0.0.1']}}
-        for distro in cc_ntp.distros:
-            mycloud = self._get_cloud(distro)
-            ntpconfig = self._mock_ntp_client_config(distro=distro)
-            confpath = ntpconfig['confpath']
-            m_select.return_value = ntpconfig
-            cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, [])
-            self.assertIn(
-                "Invalid config:\nntp.pools: ['0.mypool.org', '0.mypool.org']"
-                " has non-unique elements\nntp.servers: "
-                "['10.0.0.1', '10.0.0.1'] has non-unique elements",
-                self.logs.getvalue())
-            self.assertEqual(
-                "servers ['10.0.0.1', '10.0.0.1']\n"
-                "pools ['0.mypool.org', '0.mypool.org']\n",
-                util.load_file(confpath))
-
-    @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
-    def test_ntp_handler_timesyncd(self, m_select):
-        """Test ntp handler configures timesyncd"""
-        servers = ['192.168.2.1', '192.168.2.2']
-        pools = ['0.mypool.org']
-        cfg = {'ntp': {'servers': servers, 'pools': pools}}
-        client = 'systemd-timesyncd'
-        for distro in cc_ntp.distros:
-            mycloud = self._get_cloud(distro)
-            ntpconfig = self._mock_ntp_client_config(distro=distro,
-                                                     client=client)
-            confpath = ntpconfig['confpath']
-            m_select.return_value = ntpconfig
-            cc_ntp.handle('cc_ntp', cfg, mycloud, None, [])
-            self.assertEqual(
-                "[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n",
-                util.load_file(confpath))
-
-    @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
-    def test_ntp_handler_enabled_false(self, m_select):
-        """Test ntp handler does not run if enabled: false"""
-        cfg = {'ntp': {'enabled': False}}
-        for distro in cc_ntp.distros:
-            mycloud = self._get_cloud(distro)
-            cc_ntp.handle('notimportant', cfg, mycloud, None, None)
-            self.assertEqual(0, m_select.call_count)
-
-    @mock.patch("cloudinit.distros.subp")
-    @mock.patch("cloudinit.config.cc_ntp.subp")
-    @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
-    @mock.patch("cloudinit.distros.Distro.uses_systemd")
-    def test_ntp_the_whole_package(self, m_sysd, m_select, m_subp, m_dsubp):
-        """Test enabled config renders template, and restarts service"""
-        cfg = {'ntp': {'enabled': True}}
-        for distro in cc_ntp.distros:
-            mycloud = self._get_cloud(distro)
-            ntpconfig = self._mock_ntp_client_config(distro=distro)
-            confpath = ntpconfig['confpath']
-            service_name = ntpconfig['service_name']
-            m_select.return_value = ntpconfig
-
-            hosts = cc_ntp.generate_server_names(mycloud.distro.name)
-            uses_systemd = True
-            expected_service_call = ['systemctl', 'reload-or-restart',
-                                     service_name]
-            expected_content = "servers []\npools {0}\n".format(hosts)
-
-            if distro == 'alpine':
-                uses_systemd = False
-                expected_service_call = ['rc-service', service_name, 'restart']
-                # _mock_ntp_client_config call above did not specify a client
-                # value and so it defaults to "ntp" which on Alpine Linux only
-                # supports servers and not pools.
-                expected_content = "servers {0}\npools []\n".format(hosts)
-
-            m_sysd.return_value = uses_systemd
-            with mock.patch('cloudinit.config.cc_ntp.util') as m_util:
-                # allow use of util.mergemanydict
-                m_util.mergemanydict.side_effect = util.mergemanydict
-                # default client is present
-                m_subp.which.return_value = True
-                # use the config 'enabled' value
-                m_util.is_false.return_value = util.is_false(
-                    cfg['ntp']['enabled'])
-                cc_ntp.handle('notimportant', cfg, mycloud, None, None)
-                m_dsubp.subp.assert_called_with(
-                    expected_service_call, capture=True)
-
-            self.assertEqual(expected_content, util.load_file(confpath))
-
-    @mock.patch('cloudinit.util.system_info')
-    def test_opensuse_picks_chrony(self, m_sysinfo):
-        """Test opensuse picks chrony or ntp on certain distro versions"""
-        # < 15.0 => ntp
-        m_sysinfo.return_value = {
-            'dist': ('openSUSE', '13.2', 'Harlequin')
-        }
-        mycloud = self._get_cloud('opensuse')
-        expected_client = mycloud.distro.preferred_ntp_clients[0]
-        self.assertEqual('ntp', expected_client)
-
-        # >= 15.0 and not openSUSE => chrony
-        m_sysinfo.return_value = {
-            'dist': ('SLES', '15.0', 'SUSE Linux Enterprise Server 15')
-        }
-        mycloud = self._get_cloud('sles')
-        expected_client = mycloud.distro.preferred_ntp_clients[0]
-        self.assertEqual('chrony', expected_client)
-
-        # >= 15.0 and openSUSE and ver != 42 => chrony
-        m_sysinfo.return_value = {
-            'dist': ('openSUSE Tumbleweed', '20180326', 'timbleweed')
-        }
-        mycloud = self._get_cloud('opensuse')
-        expected_client = mycloud.distro.preferred_ntp_clients[0]
-        self.assertEqual('chrony', expected_client)
-
-    @mock.patch('cloudinit.util.system_info')
-    def test_ubuntu_xenial_picks_ntp(self, m_sysinfo):
-        """Test Ubuntu picks ntp on xenial release"""
-
-        m_sysinfo.return_value = {'dist': ('Ubuntu', '16.04', 'xenial')}
-        mycloud = self._get_cloud('ubuntu')
-        expected_client = mycloud.distro.preferred_ntp_clients[0]
-        self.assertEqual('ntp', expected_client)
-
-    @mock.patch('cloudinit.config.cc_ntp.subp.which')
-    def test_snappy_system_picks_timesyncd(self, m_which):
-        """Test snappy systems prefer installed clients"""
-
-        # we are on ubuntu-core here
-        self.m_snappy.return_value = True
-
-        # ubuntu core systems will have timesyncd installed
-        m_which.side_effect = iter([None, '/lib/systemd/systemd-timesyncd',
-                                    None, None, None])
-        distro = 'ubuntu'
-        mycloud = self._get_cloud(distro)
-        distro_configs = cc_ntp.distro_ntp_client_configs(distro)
-        expected_client = 'systemd-timesyncd'
-        expected_cfg = distro_configs[expected_client]
-        expected_calls = []
-        # we only get to timesyncd
-        for client in mycloud.distro.preferred_ntp_clients[0:2]:
-            cfg = distro_configs[client]
-            expected_calls.append(mock.call(cfg['check_exe']))
-        result = cc_ntp.select_ntp_client(None, mycloud.distro)
-        m_which.assert_has_calls(expected_calls)
-        self.assertEqual(sorted(expected_cfg), sorted(cfg))
-        self.assertEqual(sorted(expected_cfg), sorted(result))
-
-    @mock.patch('cloudinit.config.cc_ntp.subp.which')
-    def test_ntp_distro_searches_all_preferred_clients(self, m_which):
-        """Test select_ntp_client searches all distro preferred clients"""
-        # nothing is installed
-        m_which.return_value = None
-        for distro in cc_ntp.distros:
-            mycloud = self._get_cloud(distro)
-            distro_configs = cc_ntp.distro_ntp_client_configs(distro)
-            expected_client = mycloud.distro.preferred_ntp_clients[0]
-            expected_cfg = distro_configs[expected_client]
-            expected_calls = []
-            for client in mycloud.distro.preferred_ntp_clients:
-                cfg = distro_configs[client]
-                expected_calls.append(mock.call(cfg['check_exe']))
-            cc_ntp.select_ntp_client({}, mycloud.distro)
-            m_which.assert_has_calls(expected_calls)
-            self.assertEqual(sorted(expected_cfg), sorted(cfg))
-
-    @mock.patch('cloudinit.config.cc_ntp.subp.which')
-    def test_user_cfg_ntp_client_auto_uses_distro_clients(self, m_which):
-        """Test user_cfg.ntp_client='auto' defaults to distro search"""
-        # nothing is installed
-        m_which.return_value = None
-        for distro in cc_ntp.distros:
-            mycloud = self._get_cloud(distro)
-            distro_configs = cc_ntp.distro_ntp_client_configs(distro)
-            expected_client = mycloud.distro.preferred_ntp_clients[0]
-            expected_cfg = distro_configs[expected_client]
-            expected_calls = []
-            for client in mycloud.distro.preferred_ntp_clients:
-                cfg = distro_configs[client]
-                expected_calls.append(mock.call(cfg['check_exe']))
-            cc_ntp.select_ntp_client('auto', mycloud.distro)
-            m_which.assert_has_calls(expected_calls)
-            self.assertEqual(sorted(expected_cfg), sorted(cfg))
-
-    @mock.patch('cloudinit.config.cc_ntp.write_ntp_config_template')
-    @mock.patch('cloudinit.cloud.Cloud.get_template_filename')
-    @mock.patch('cloudinit.config.cc_ntp.subp.which')
-    def test_ntp_custom_client_overrides_installed_clients(self, m_which,
-                                                           m_tmpfn, m_write):
-        """Test user client is installed despite other clients present"""
-        client = 'ntpdate'
-        cfg = {'ntp': {'ntp_client': client}}
-        for distro in cc_ntp.distros:
-            # client is not installed
-            m_which.side_effect = iter([None])
-            mycloud = self._get_cloud(distro)
-            with mock.patch.object(mycloud.distro,
-                                   'install_packages') as m_install:
-                cc_ntp.handle('notimportant', cfg, mycloud, None, None)
-            m_install.assert_called_with([client])
-            m_which.assert_called_with(client)
-
-    @mock.patch('cloudinit.config.cc_ntp.subp.which')
-    def test_ntp_system_config_overrides_distro_builtin_clients(self, m_which):
-        """Test distro system_config overrides builtin preferred ntp clients"""
-        system_client = 'chrony'
-        sys_cfg = {'ntp_client': system_client}
-        # no clients installed
-        m_which.return_value = None
-        for distro in cc_ntp.distros:
-            mycloud = self._get_cloud(distro, sys_cfg=sys_cfg)
-            distro_configs = cc_ntp.distro_ntp_client_configs(distro)
-            expected_cfg = distro_configs[system_client]
-            result = cc_ntp.select_ntp_client(None, mycloud.distro)
-            self.assertEqual(sorted(expected_cfg), sorted(result))
-            m_which.assert_has_calls([])
-
-    @mock.patch('cloudinit.config.cc_ntp.subp.which')
-    def test_ntp_user_config_overrides_system_cfg(self, m_which):
-        """Test user-data overrides system_config ntp_client"""
-        system_client = 'chrony'
-        sys_cfg = {'ntp_client': system_client}
-        user_client = 'systemd-timesyncd'
-        # no clients installed
-        m_which.return_value = None
-        for distro in cc_ntp.distros:
-            mycloud = self._get_cloud(distro, sys_cfg=sys_cfg)
-            distro_configs = cc_ntp.distro_ntp_client_configs(distro)
-            expected_cfg = distro_configs[user_client]
-            result = cc_ntp.select_ntp_client(user_client, mycloud.distro)
-            self.assertEqual(sorted(expected_cfg), sorted(result))
-            m_which.assert_has_calls([])
-
-    @mock.patch('cloudinit.config.cc_ntp.install_ntp_client')
-    def test_ntp_user_provided_config_with_template(self, m_install):
-        custom = r'\n#MyCustomTemplate'
-        user_template = NTP_TEMPLATE + custom
-        confpath = os.path.join(self.new_root, 'etc/myntp/myntp.conf')
-        cfg = {
-            'ntp': {
-                'pools': ['mypool.org'],
-                'ntp_client': 'myntpd',
-                'config': {
-                    'check_exe': 'myntpd',
-                    'confpath': confpath,
-                    'packages': ['myntp'],
-                    'service_name': 'myntp',
-                    'template': user_template,
-                }
-            }
-        }
-        for distro in cc_ntp.distros:
-            mycloud = self._get_cloud(distro)
-            mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
-            with mock.patch(mock_path, self.new_root):
-                cc_ntp.handle('notimportant', cfg, mycloud, None, None)
-            self.assertEqual(
-                "servers []\npools ['mypool.org']\n%s" % custom,
-                util.load_file(confpath))
-
-    @mock.patch('cloudinit.config.cc_ntp.supplemental_schema_validation')
-    @mock.patch('cloudinit.config.cc_ntp.install_ntp_client')
-    @mock.patch('cloudinit.config.cc_ntp.select_ntp_client')
-    def test_ntp_user_provided_config_template_only(self, m_select, m_install,
-                                                    m_schema):
-        """Test custom template for default client"""
-        custom = r'\n#MyCustomTemplate'
-        user_template = NTP_TEMPLATE + custom
-        client = 'chrony'
-        cfg = {
-            'pools': ['mypool.org'],
-            'ntp_client': client,
-            'config': {
-                'template': user_template,
-            }
-        }
-        expected_merged_cfg = {
-            'check_exe': 'chronyd',
-            'confpath': '{tmpdir}/client.conf'.format(tmpdir=self.new_root),
-            'template_name': 'client.conf', 'template': user_template,
-            'service_name': 'chrony', 'packages': ['chrony']}
-        for distro in cc_ntp.distros:
-            mycloud = self._get_cloud(distro)
-            ntpconfig = self._mock_ntp_client_config(client=client,
-                                                     distro=distro)
-            confpath = ntpconfig['confpath']
-            m_select.return_value = ntpconfig
-            mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
-            with mock.patch(mock_path, self.new_root):
-                cc_ntp.handle('notimportant',
-                              {'ntp': cfg}, mycloud, None, None)
-            self.assertEqual(
-                "servers []\npools ['mypool.org']\n%s" % custom,
-                util.load_file(confpath))
-        m_schema.assert_called_with(expected_merged_cfg)
-
-
-class TestSupplementalSchemaValidation(CiTestCase):
-
-    def test_error_on_missing_keys(self):
-        """ValueError raised reporting any missing required ntp:config keys"""
-        cfg = {}
-        match = (r'Invalid ntp configuration:\\nMissing required ntp:config'
-                 ' keys: check_exe, confpath, packages, service_name')
-        with self.assertRaisesRegex(ValueError, match):
-            cc_ntp.supplemental_schema_validation(cfg)
-
-    def test_error_requiring_either_template_or_template_name(self):
-        """ValueError raised if both template and template_name are None."""
-        cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '',
-               'template': None, 'template_name': None, 'packages': []}
-        match = (r'Invalid ntp configuration:\\nEither ntp:config:template'
                 ' or ntp:config:template_name values are required')
-        with self.assertRaisesRegex(ValueError, match):
-            cc_ntp.supplemental_schema_validation(cfg)
-
-    def test_error_on_non_list_values(self):
-        """ValueError raised when packages is not of type list."""
-        cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '',
-               'template': 'asdf', 'template_name': None, 'packages': 'NOPE'}
-        match = (r'Invalid ntp configuration:\\nExpected a list of required'
-                 ' package names for ntp:config:packages. Found \\(NOPE\\)')
-        with self.assertRaisesRegex(ValueError, match):
-            cc_ntp.supplemental_schema_validation(cfg)
-
-    def test_error_on_non_string_values(self):
-        """ValueError raised for any values expected as string type."""
-        cfg = {'confpath': 1, 'check_exe': 2, 'service_name': 3,
-               'template': 4, 'template_name': 5, 'packages': []}
-        errors = [
-            'Expected a config file path ntp:config:confpath. Found (1)',
-            'Expected a string type for ntp:config:check_exe. Found (2)',
-            'Expected a string type for ntp:config:service_name. Found (3)',
-            'Expected a string type for ntp:config:template. Found (4)',
-            'Expected a string type for ntp:config:template_name. Found (5)']
-        with self.assertRaises(ValueError) as context_mgr:
-            cc_ntp.supplemental_schema_validation(cfg)
-        error_msg = str(context_mgr.exception)
-        for error in errors:
-            self.assertIn(error, error_msg)
-
-# vi: ts=4 expandtab
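The NTP_TEMPLATE assertions above rely on Python's %s-formatting of a list, which
yields the list's repr. A jinja-free sketch of the rendered output the tests compare
against (render_ntp_template is a hypothetical stand-in for the template engine):

    def render_ntp_template(servers, pools):
        # %s on a list produces its repr, e.g. "['10.0.0.1', '10.0.0.2']",
        # which is exactly the string the assertions expect.
        return 'servers %s\npools %s\n' % (servers, pools)

    assert render_ntp_template([], ['10.0.0.1', '10.0.0.2']) == (
        "servers []\npools ['10.0.0.1', '10.0.0.2']\n")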
diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py
deleted file mode 100644
index 4ac49424..00000000
--- a/tests/unittests/test_handler/test_handler_power_state.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import sys
-
-from cloudinit.config import cc_power_state_change as psc
-
-from cloudinit import distros
-from cloudinit import helpers
-
-from cloudinit.tests import helpers as t_help
-from cloudinit.tests.helpers import mock
-
-
-class TestLoadPowerState(t_help.TestCase):
-    def setUp(self):
-        super(TestLoadPowerState, self).setUp()
-        cls = distros.fetch('ubuntu')
-        paths = helpers.Paths({})
-        self.dist = cls('ubuntu', {}, paths)
-
-    def test_no_config(self):
-        # completely empty config should mean do nothing
-        (cmd, _timeout, _condition) = psc.load_power_state({}, self.dist)
-        self.assertIsNone(cmd)
-
-    def test_irrelevant_config(self):
-        # no power_state field in config should return None for cmd
-        (cmd, _timeout, _condition) = psc.load_power_state({'foo': 'bar'},
-                                                           self.dist)
-        self.assertIsNone(cmd)
-
-    def test_invalid_mode(self):
-
-        cfg = {'power_state': {'mode': 'gibberish'}}
-        self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
-
-        cfg = {'power_state': {'mode': ''}}
-        self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
-
-    def test_empty_mode(self):
-        cfg = {'power_state': {'message': 'goodbye'}}
-        self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
-
-    def test_valid_modes(self):
-        cfg = {'power_state': {}}
-        for mode in ('halt', 'poweroff', 'reboot'):
-            cfg['power_state']['mode'] = mode
-            check_lps_ret(psc.load_power_state(cfg, self.dist), mode=mode)
-
-    def test_invalid_delay(self):
-        cfg = {'power_state': {'mode': 'poweroff', 'delay': 'goodbye'}}
-        self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
-
-    def test_valid_delay(self):
-        cfg = {'power_state': {'mode': 'poweroff', 'delay': ''}}
-        for delay in ("now", "+1", "+30"):
-            cfg['power_state']['delay'] = delay
-            check_lps_ret(psc.load_power_state(cfg, self.dist))
-
-    def test_message_present(self):
-        cfg = {'power_state': {'mode': 'poweroff', 'message': 'GOODBYE'}}
-        ret = psc.load_power_state(cfg, self.dist)
-        check_lps_ret(psc.load_power_state(cfg, self.dist))
-        self.assertIn(cfg['power_state']['message'], ret[0])
-
-    def test_no_message(self):
-        # if message is not present, then no argument should be passed for it
-        cfg = {'power_state': {'mode': 'poweroff'}}
-        (cmd, _timeout, _condition) = psc.load_power_state(cfg, self.dist)
-        self.assertNotIn("", cmd)
-        check_lps_ret(psc.load_power_state(cfg, self.dist))
-        self.assertTrue(len(cmd) == 3)
-
-    def test_condition_null_raises(self):
-        cfg = {'power_state': {'mode': 'poweroff', 'condition': None}}
-        self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist)
-
-    def test_condition_default_is_true(self):
-        cfg = {'power_state': {'mode': 'poweroff'}}
-        _cmd, _timeout, cond = psc.load_power_state(cfg, self.dist)
-        self.assertEqual(cond, True)
-
-    def test_freebsd_poweroff_uses_lowercase_p(self):
-        cls = distros.fetch('freebsd')
-        paths = helpers.Paths({})
-        freebsd = cls('freebsd', {}, paths)
-        cfg = {'power_state': {'mode': 'poweroff'}}
-        ret = psc.load_power_state(cfg, freebsd)
-        self.assertIn('-p', ret[0])
-
-    def test_alpine_delay(self):
-        # alpine takes delay in seconds.
-        cls = distros.fetch('alpine')
-        paths = helpers.Paths({})
-        alpine = cls('alpine', {}, paths)
-        cfg = {'power_state': {'mode': 'poweroff', 'delay': ''}}
-        for delay, value in (('now', 0), ("+1", 60), ("+30", 1800)):
-            cfg['power_state']['delay'] = delay
-            ret = psc.load_power_state(cfg, alpine)
-            self.assertEqual('-d', ret[0][1])
-            self.assertEqual(str(value), ret[0][2])
-
-
-class TestCheckCondition(t_help.TestCase):
-    def cmd_with_exit(self, rc):
-        return([sys.executable, '-c', 'import sys; sys.exit(%s)' % rc])
-
-    def test_true_is_true(self):
-        self.assertEqual(psc.check_condition(True), True)
-
-    def test_false_is_false(self):
-        self.assertEqual(psc.check_condition(False), False)
-
-    def test_cmd_exit_zero_true(self):
-        self.assertEqual(psc.check_condition(self.cmd_with_exit(0)), True)
-
-    def test_cmd_exit_one_false(self):
-        self.assertEqual(psc.check_condition(self.cmd_with_exit(1)), False)
-
-    def test_cmd_exit_nonzero_warns(self):
-        mocklog = mock.Mock()
-        self.assertEqual(
-            psc.check_condition(self.cmd_with_exit(2), mocklog), False)
-        self.assertEqual(mocklog.warning.call_count, 1)
-
-
-def check_lps_ret(psc_return, mode=None):
-    if len(psc_return) != 3:
-        raise TypeError("length returned = %d" % len(psc_return))
-
-    errs = []
-    cmd = psc_return[0]
-    timeout = psc_return[1]
-    condition = psc_return[2]
-
-    if 'shutdown' not in psc_return[0][0]:
-        errs.append("string 'shutdown' not in cmd")
-
-    if condition is None:
-        errs.append("condition was not returned")
-
-    if mode is not None:
-        opt = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}[mode]
-        if opt not in psc_return[0]:
-            errs.append("opt '%s' not in cmd: %s" % (opt, cmd))
-
-    if len(cmd) != 3 and len(cmd) != 4:
-        errs.append("Invalid command length: %s" % len(cmd))
-
-    try:
-        float(timeout)
-    except Exception:
-        errs.append("timeout failed convert to float")
-
-    if len(errs):
-        lines = ["Errors in result: %s" % str(psc_return)] + errs
-        raise Exception('\n'.join(lines))
-
-# vi: ts=4 expandtab
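check_lps_ret() above validates the shape of the shutdown command that
load_power_state builds: the binary name, a mode-specific flag, a delay, and an
optional message. A hypothetical sketch consistent with those checks (shutdown_cmd
is not the real implementation, which also handles per-distro differences):

    def shutdown_cmd(mode, delay='now', message=None):
        # Three arguments without a message, four with one, plus the
        # mode-specific flag check_lps_ret() looks for.
        flag = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}[mode]
        cmd = ['shutdown', flag, delay]
        if message:
            cmd.append(message)
        return cmd

    assert shutdown_cmd('poweroff') == ['shutdown', '-P', 'now']
    assert len(shutdown_cmd('reboot', '+1', 'GOODBYE')) == 4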
diff --git a/tests/unittests/test_handler/test_handler_puppet.py b/tests/unittests/test_handler/test_handler_puppet.py
deleted file mode 100644
index 8d99f535..00000000
--- a/tests/unittests/test_handler/test_handler_puppet.py
+++ /dev/null
@@ -1,380 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-import logging
-import textwrap
-
-from cloudinit.config import cc_puppet
-from cloudinit import util
-from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase, mock
-
-from tests.unittests.util import get_cloud
-
-LOG = logging.getLogger(__name__)
-
-
-@mock.patch('cloudinit.config.cc_puppet.subp.subp')
-@mock.patch('cloudinit.config.cc_puppet.os')
-class TestAutostartPuppet(CiTestCase):
-
-    def test_wb_autostart_puppet_updates_puppet_default(self, m_os, m_subp):
-        """Update /etc/default/puppet to autostart if it exists."""
-
-        def _fake_exists(path):
-            return path == '/etc/default/puppet'
-
-        m_os.path.exists.side_effect = _fake_exists
-        cc_puppet._autostart_puppet(LOG)
-        self.assertEqual(
-            [mock.call(['sed', '-i', '-e', 's/^START=.*/START=yes/',
-                        '/etc/default/puppet'], capture=False)],
-            m_subp.call_args_list)
-
-    def test_wb_autostart_puppet_enables_puppet_systemctl(self, m_os, m_subp):
-        """If systemctl is present, enable puppet via systemctl."""
-
-        def _fake_exists(path):
-            return path == '/bin/systemctl'
-
-        m_os.path.exists.side_effect = _fake_exists
-        cc_puppet._autostart_puppet(LOG)
-        expected_calls = [mock.call(
-            ['/bin/systemctl', 'enable', 'puppet.service'], capture=False)]
-        self.assertEqual(expected_calls, m_subp.call_args_list)
-
-    def test_wb_autostart_puppet_enables_puppet_chkconfig(self, m_os, m_subp):
-        """If chkconfig is present, enable puppet via chkconfig."""
-
-        def _fake_exists(path):
-            return path == '/sbin/chkconfig'
-
-        m_os.path.exists.side_effect = _fake_exists
-        cc_puppet._autostart_puppet(LOG)
-        expected_calls = [mock.call(
-            ['/sbin/chkconfig', 'puppet', 'on'], capture=False)]
-        self.assertEqual(expected_calls, m_subp.call_args_list)
-
-
-@mock.patch('cloudinit.config.cc_puppet._autostart_puppet')
-class TestPuppetHandle(CiTestCase):
-
-    with_logs = True
-
-    def setUp(self):
-        super(TestPuppetHandle, self).setUp()
-        self.new_root = self.tmp_dir()
-        self.conf = self.tmp_path('puppet.conf')
-        self.csr_attributes_path = self.tmp_path(
-            'csr_attributes.yaml')
-        self.cloud = get_cloud()
-
-    def test_skips_missing_puppet_key_in_cloudconfig(self, m_auto):
-        """Cloud-config containing no 'puppet' key is skipped."""
-
-        cfg = {}
-        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
-        self.assertIn(
-            "no 'puppet' configuration found", self.logs.getvalue())
-        self.assertEqual(0, m_auto.call_count)
-
-    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
-    def test_puppet_config_starts_puppet_service(self, m_subp, m_auto):
-        """Cloud-config 'puppet' configuration starts puppet."""
-
-        cfg = {'puppet': {'install': False}}
-        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
-        self.assertEqual(1, m_auto.call_count)
-        self.assertIn(
-            [mock.call(['service', 'puppet', 'start'], capture=False)],
-            m_subp.call_args_list)
-
-    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
-    def test_empty_puppet_config_installs_puppet(self, m_subp, m_auto):
-        """Cloud-config empty 'puppet' configuration installs latest puppet."""
-
-        self.cloud.distro = mock.MagicMock()
-        cfg = {'puppet': {}}
-        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
-        self.assertEqual(
-            [mock.call(('puppet', None))],
-            self.cloud.distro.install_packages.call_args_list)
-
-    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
-    def test_puppet_config_installs_puppet_on_true(self, m_subp, _):
-        """Cloud-config with 'puppet' key installs when 'install' is True."""
-
-        self.cloud.distro = mock.MagicMock()
-        cfg = {'puppet': {'install': True}}
-        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
-        self.assertEqual(
-            [mock.call(('puppet', None))],
-            self.cloud.distro.install_packages.call_args_list)
-
-    @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True)
-    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
-    def test_puppet_config_installs_puppet_aio(self, m_subp, m_aio, _):
-        """Cloud-config with 'puppet' key installs
-        when 'install_type' is 'aio'."""
-
-        self.cloud.distro = mock.MagicMock()
-        cfg = {'puppet': {'install': True, 'install_type': 'aio'}}
-        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
-        m_aio.assert_called_with(
-            cc_puppet.AIO_INSTALL_URL,
-            None, None, True)
-
-    @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True)
-    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
-    def test_puppet_config_installs_puppet_aio_with_version(self,
-                                                            m_subp, m_aio, _):
-        """Cloud-config with 'puppet' key installs
-        when 'install_type' is 'aio' and 'version' is specified."""
-
-        self.cloud.distro = mock.MagicMock()
-        cfg = {'puppet': {'install': True,
-                          'version': '6.24.0', 'install_type': 'aio'}}
-        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
-        m_aio.assert_called_with(
-            cc_puppet.AIO_INSTALL_URL,
-            '6.24.0', None, True)
-
-    @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True)
-    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
-    def test_puppet_config_installs_puppet_aio_with_collection(self,
-                                                               m_subp,
-                                                               m_aio, _):
-        """Cloud-config with 'puppet' key installs
-        when 'install_type' is 'aio' and 'collection' is specified."""
-
-        self.cloud.distro = mock.MagicMock()
-        cfg = {'puppet': {'install': True,
-                          'collection': 'puppet6', 'install_type': 'aio'}}
-        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
-        m_aio.assert_called_with(
-            cc_puppet.AIO_INSTALL_URL,
-            None, 'puppet6', True)
-
-    @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True)
-    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
-    def test_puppet_config_installs_puppet_aio_with_custom_url(self,
-                                                               m_subp,
-                                                               m_aio, _):
-        """Cloud-config with 'puppet' key installs
-        when 'install_type' is 'aio' and 'aio_install_url' is specified."""
-
-        self.cloud.distro = mock.MagicMock()
-        cfg = {'puppet':
-               {'install': True,
-                'aio_install_url': 'http://test.url/path/to/script.sh',
-                'install_type': 'aio'}}
-        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
-        m_aio.assert_called_with(
-            'http://test.url/path/to/script.sh', None, None, True)
-
-    @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True)
-    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
-    def test_puppet_config_installs_puppet_aio_without_cleanup(self,
-                                                               m_subp,
-                                                               m_aio, _):
-        """Cloud-config with 'puppet' key installs
-        when 'install_type' is 'aio' and no cleanup."""
-
-        self.cloud.distro = mock.MagicMock()
-        cfg = {'puppet': {'install': True,
-                          'cleanup': False, 'install_type': 'aio'}}
-        cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None)
-        m_aio.assert_called_with(
-            cc_puppet.AIO_INSTALL_URL,
-            None, None, False)
-
-    @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", ""))
-    def test_puppet_config_installs_puppet_version(self, m_subp, _):
-        """Cloud-config 'puppet' configuration can specify a version."""
-
-        self.cloud.distro = mock.MagicMock()
- cfg = {'puppet': {'version': '3.8'}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - self.assertEqual( - [mock.call(('puppet', '3.8'))], - self.cloud.distro.install_packages.call_args_list) - - @mock.patch('cloudinit.config.cc_puppet.get_config_value') - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_config_updates_puppet_conf(self, - m_subp, m_default, m_auto): - """When 'conf' is provided update values in PUPPET_CONF_PATH.""" - - def _fake_get_config_value(puppet_bin, setting): - return self.conf - - m_default.side_effect = _fake_get_config_value - - cfg = { - 'puppet': { - 'conf': {'agent': {'server': 'puppetserver.example.org'}}}} - util.write_file( - self.conf, '[agent]\nserver = origpuppet\nother = 3') - self.cloud.distro = mock.MagicMock() - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - content = util.load_file(self.conf) - expected = '[agent]\nserver = puppetserver.example.org\nother = 3\n\n' - self.assertEqual(expected, content) - - @mock.patch('cloudinit.config.cc_puppet.get_config_value') - @mock.patch('cloudinit.config.cc_puppet.subp.subp') - def test_puppet_writes_csr_attributes_file(self, - m_subp, m_default, m_auto): - """When csr_attributes is provided - creates file in PUPPET_CSR_ATTRIBUTES_PATH.""" - - def _fake_get_config_value(puppet_bin, setting): - return self.csr_attributes_path - - m_default.side_effect = _fake_get_config_value - - self.cloud.distro = mock.MagicMock() - cfg = { - 'puppet': { - 'csr_attributes': { - 'custom_attributes': { - '1.2.840.113549.1.9.7': - '342thbjkt82094y0uthhor289jnqthpc2290' - }, - 'extension_requests': { - 'pp_uuid': 'ED803750-E3C7-44F5-BB08-41A04433FE2E', - 'pp_image_name': 'my_ami_image', - 'pp_preshared_key': - '342thbjkt82094y0uthhor289jnqthpc2290' - } - } - } - } - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - content = util.load_file(self.csr_attributes_path) - expected = textwrap.dedent("""\ - custom_attributes: - 1.2.840.113549.1.9.7: 342thbjkt82094y0uthhor289jnqthpc2290 - extension_requests: - pp_image_name: my_ami_image - pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290 - pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E - """) - self.assertEqual(expected, content) - - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_runs_puppet_if_requested(self, m_subp, m_auto): - """Run puppet with default args if 'exec' is set to True.""" - - cfg = {'puppet': {'exec': True}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - self.assertEqual(1, m_auto.call_count) - self.assertIn( - [mock.call(['puppet', 'agent', '--test'], capture=False)], - m_subp.call_args_list) - - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_starts_puppetd(self, m_subp, m_auto): - """Start the puppet service by default.""" - - cfg = {'puppet': {}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - self.assertEqual(1, m_auto.call_count) - self.assertIn( - [mock.call(['service', 'puppet', 'start'], capture=False)], - m_subp.call_args_list) - - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_skips_puppetd(self, m_subp, m_auto): - """Do not start the puppet service when 'start_service' is False.""" - - cfg = {'puppet': {'start_service': False}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - self.assertEqual(0, m_auto.call_count) - self.assertNotIn( - [mock.call(['service', 'puppet', 
'start'], capture=False)], - m_subp.call_args_list) - - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_runs_puppet_with_args_list_if_requested(self, - m_subp, m_auto): - """Run puppet with 'exec_args' list if 'exec' is set to True.""" - - cfg = {'puppet': {'exec': True, 'exec_args': [ - '--onetime', '--detailed-exitcodes']}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - self.assertEqual(1, m_auto.call_count) - self.assertIn( - [mock.call( - ['puppet', 'agent', '--onetime', '--detailed-exitcodes'], - capture=False)], - m_subp.call_args_list) - - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_runs_puppet_with_args_string_if_requested(self, - m_subp, m_auto): - """Run puppet with 'exec_args' string if 'exec' is set to True.""" - - cfg = {'puppet': {'exec': True, - 'exec_args': '--onetime --detailed-exitcodes'}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - self.assertEqual(1, m_auto.call_count) - self.assertIn( - [mock.call( - ['puppet', 'agent', '--onetime', '--detailed-exitcodes'], - capture=False)], - m_subp.call_args_list) - - -URL_MOCK = mock.Mock() -URL_MOCK.contents = b'#!/bin/bash\necho "Hi Mom"' - - -@mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=(None, None)) -@mock.patch( - 'cloudinit.config.cc_puppet.url_helper.readurl', - return_value=URL_MOCK, autospec=True, -) -class TestInstallPuppetAio(HttprettyTestCase): - def test_install_with_default_arguments(self, m_readurl, m_subp): - """Install AIO with no arguments""" - cc_puppet.install_puppet_aio() - - self.assertEqual( - [mock.call([mock.ANY, '--cleanup'], capture=False)], - m_subp.call_args_list) - - def test_install_with_custom_url(self, m_readurl, m_subp): - """Install AIO from custom URL""" - cc_puppet.install_puppet_aio('http://custom.url/path/to/script.sh') - m_readurl.assert_called_with( - url='http://custom.url/path/to/script.sh', - retries=5) - - self.assertEqual( - [mock.call([mock.ANY, '--cleanup'], capture=False)], - m_subp.call_args_list) - - def test_install_with_version(self, m_readurl, m_subp): - """Install AIO with specific version""" - cc_puppet.install_puppet_aio(cc_puppet.AIO_INSTALL_URL, '7.6.0') - - self.assertEqual( - [mock.call([mock.ANY, '-v', '7.6.0', '--cleanup'], capture=False)], - m_subp.call_args_list) - - def test_install_with_collection(self, m_readurl, m_subp): - """Install AIO with specific collection""" - cc_puppet.install_puppet_aio( - cc_puppet.AIO_INSTALL_URL, None, 'puppet6-nightly') - - self.assertEqual( - [mock.call([mock.ANY, '-c', 'puppet6-nightly', '--cleanup'], - capture=False)], - m_subp.call_args_list) - - def test_install_with_no_cleanup(self, m_readurl, m_subp): - """Install AIO with no cleanup""" - cc_puppet.install_puppet_aio( - cc_puppet.AIO_INSTALL_URL, None, None, False) - - self.assertEqual( - [mock.call([mock.ANY], capture=False)], - m_subp.call_args_list) diff --git a/tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py b/tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py deleted file mode 100644 index e13b7793..00000000 --- a/tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py +++ /dev/null @@ -1,109 +0,0 @@ -from cloudinit.config import cc_refresh_rmc_and_interface as ccrmci - -from cloudinit import util - -from cloudinit.tests import helpers as t_help -from cloudinit.tests.helpers import mock - -from textwrap import dedent -import logging - -LOG = 
logging.getLogger(__name__) -MPATH = "cloudinit.config.cc_refresh_rmc_and_interface" -NET_INFO = { - 'lo': {'ipv4': [{'ip': '127.0.0.1', - 'bcast': '', 'mask': '255.0.0.0', - 'scope': 'host'}], - 'ipv6': [{'ip': '::1/128', - 'scope6': 'host'}], 'hwaddr': '', - 'up': 'True'}, - 'env2': {'ipv4': [{'ip': '8.0.0.19', - 'bcast': '8.0.0.255', 'mask': '255.255.255.0', - 'scope': 'global'}], - 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8220/64', - 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:20', - 'up': 'True'}, - 'env3': {'ipv4': [{'ip': '90.0.0.14', - 'bcast': '90.0.0.255', 'mask': '255.255.255.0', - 'scope': 'global'}], - 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8221/64', - 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:21', - 'up': 'True'}, - 'env4': {'ipv4': [{'ip': '9.114.23.7', - 'bcast': '9.114.23.255', 'mask': '255.255.255.0', - 'scope': 'global'}], - 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8222/64', - 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:22', - 'up': 'True'}, - 'env5': {'ipv4': [], - 'ipv6': [{'ip': 'fe80::9c26:c3ff:fea4:62c8/64', - 'scope6': 'link'}], 'hwaddr': '42:20:86:df:fa:4c', - 'up': 'True'}} - - -class TestRsctNodeFile(t_help.CiTestCase): - def test_disable_ipv6_interface(self): - """test parsing of iface files.""" - fname = self.tmp_path("iface-eth5") - util.write_file(fname, dedent("""\ - BOOTPROTO=static - DEVICE=eth5 - HWADDR=42:20:86:df:fa:4c - IPV6INIT=yes - IPADDR6=fe80::9c26:c3ff:fea4:62c8/64 - IPV6ADDR=fe80::9c26:c3ff:fea4:62c8/64 - NM_CONTROLLED=yes - ONBOOT=yes - STARTMODE=auto - TYPE=Ethernet - USERCTL=no - """)) - - ccrmci.disable_ipv6(fname) - self.assertEqual(dedent("""\ - BOOTPROTO=static - DEVICE=eth5 - HWADDR=42:20:86:df:fa:4c - ONBOOT=yes - STARTMODE=auto - TYPE=Ethernet - USERCTL=no - NM_CONTROLLED=no - """), util.load_file(fname)) - - @mock.patch(MPATH + '.refresh_rmc') - @mock.patch(MPATH + '.restart_network_manager') - @mock.patch(MPATH + '.disable_ipv6') - @mock.patch(MPATH + '.refresh_ipv6') - @mock.patch(MPATH + '.netinfo.netdev_info') - @mock.patch(MPATH + '.subp.which') - def test_handle(self, m_which, - m_netdev_info, m_refresh_ipv6, m_disable_ipv6, - m_restart_nm, m_refresh_rmc): - """Basic test of handle.""" - m_netdev_info.return_value = NET_INFO - m_which.return_value = '/opt/rsct/bin/rmcctrl' - ccrmci.handle( - "refresh_rmc_and_interface", None, None, None, None) - self.assertEqual(1, m_netdev_info.call_count) - m_refresh_ipv6.assert_called_with('env5') - m_disable_ipv6.assert_called_with( - '/etc/sysconfig/network-scripts/ifcfg-env5') - self.assertEqual(1, m_restart_nm.call_count) - self.assertEqual(1, m_refresh_rmc.call_count) - - @mock.patch(MPATH + '.netinfo.netdev_info') - def test_find_ipv6(self, m_netdev_info): - """find_ipv6_ifaces parses netdev_info returning those with ipv6""" - m_netdev_info.return_value = NET_INFO - found = ccrmci.find_ipv6_ifaces() - self.assertEqual(['env5'], found) - - @mock.patch(MPATH + '.subp.subp') - def test_refresh_ipv6(self, m_subp): - """refresh_ipv6 should ip down and up the interface.""" - iface = "myeth0" - ccrmci.refresh_ipv6(iface) - m_subp.assert_has_calls([ - mock.call(['ip', 'link', 'set', iface, 'down']), - mock.call(['ip', 'link', 'set', iface, 'up'])]) diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/test_handler/test_handler_resizefs.py deleted file mode 100644 index 28d55072..00000000 --- a/tests/unittests/test_handler/test_handler_resizefs.py +++ /dev/null @@ -1,398 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information.
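A note on the stacked mock.patch decorators used by test_handle above: decorators apply bottom-up, so the bottom decorator supplies the first mock argument after self, and a mismatched parameter list silently points assertions at the wrong mocks. A minimal self-contained sketch of the convention, patching stdlib targets purely for illustration:

    import unittest
    from unittest import mock

    class DecoratorOrder(unittest.TestCase):
        @mock.patch('os.getcwd')  # top decorator -> second mock argument
        @mock.patch('os.getpid')  # bottom decorator -> first mock argument
        def test_order(self, m_getpid, m_getcwd):
            # Each parameter must line up with its decorator, bottom-up.
            self.assertIsNot(m_getpid, m_getcwd)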
- -from cloudinit.config.cc_resizefs import ( - can_skip_resize, handle, maybe_get_writable_device_path, _resize_btrfs, - _resize_zfs, _resize_xfs, _resize_ext, _resize_ufs) - -from collections import namedtuple -import logging - -from cloudinit.subp import ProcessExecutionError -from cloudinit.tests.helpers import ( - CiTestCase, mock, skipUnlessJsonSchema, util, wrap_and_call) - - -LOG = logging.getLogger(__name__) - - -class TestResizefs(CiTestCase): - with_logs = True - - def setUp(self): - super(TestResizefs, self).setUp() - self.name = "resizefs" - - @mock.patch('cloudinit.subp.subp') - def test_skip_ufs_resize(self, m_subp): - fs_type = "ufs" - resize_what = "/" - devpth = "/dev/da0p2" - err = ("growfs: requested size 2.0GB is not larger than the " - "current filesystem size 2.0GB\n") - exception = ProcessExecutionError(stderr=err, exit_code=1) - m_subp.side_effect = exception - res = can_skip_resize(fs_type, resize_what, devpth) - self.assertTrue(res) - - @mock.patch('cloudinit.subp.subp') - def test_cannot_skip_ufs_resize(self, m_subp): - fs_type = "ufs" - resize_what = "/" - devpth = "/dev/da0p2" - m_subp.return_value = ( - ("stdout: super-block backups (for fsck_ffs -b #) at:\n\n"), - ("growfs: no room to allocate last cylinder group; " - "leaving 364KB unused\n") - ) - res = can_skip_resize(fs_type, resize_what, devpth) - self.assertFalse(res) - - @mock.patch('cloudinit.subp.subp') - def test_cannot_skip_ufs_growfs_exception(self, m_subp): - fs_type = "ufs" - resize_what = "/" - devpth = "/dev/da0p2" - err = "growfs: /dev/da0p2 is not clean - run fsck.\n" - exception = ProcessExecutionError(stderr=err, exit_code=1) - m_subp.side_effect = exception - with self.assertRaises(ProcessExecutionError): - can_skip_resize(fs_type, resize_what, devpth) - - def test_can_skip_resize_ext(self): - self.assertFalse(can_skip_resize('ext', '/', '/dev/sda1')) - - def test_handle_noops_on_disabled(self): - """The handle function logs when the configuration disables resize.""" - cfg = {'resize_rootfs': False} - handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[]) - self.assertIn( - 'DEBUG: Skipping module named cc_resizefs, resizing disabled\n', - self.logs.getvalue()) - - @skipUnlessJsonSchema() - def test_handle_schema_validation_logs_invalid_resize_rootfs_value(self): - """The handle reports json schema violations as a warning. - - Invalid values for resize_rootfs result in disabling the module. 
- """ - cfg = {'resize_rootfs': 'junk'} - handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[]) - logs = self.logs.getvalue() - self.assertIn( - "WARNING: Invalid config:\nresize_rootfs: 'junk' is not one of" - " [True, False, 'noblock']", - logs) - self.assertIn( - 'DEBUG: Skipping module named cc_resizefs, resizing disabled\n', - logs) - - @mock.patch('cloudinit.config.cc_resizefs.util.get_mount_info') - def test_handle_warns_on_unknown_mount_info(self, m_get_mount_info): - """handle warns when get_mount_info sees unknown filesystem for /.""" - m_get_mount_info.return_value = None - cfg = {'resize_rootfs': True} - handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[]) - logs = self.logs.getvalue() - self.assertNotIn("WARNING: Invalid config:\nresize_rootfs:", logs) - self.assertIn( - 'WARNING: Could not determine filesystem type of /\n', - logs) - self.assertEqual( - [mock.call('/', LOG)], - m_get_mount_info.call_args_list) - - def test_handle_warns_on_undiscoverable_root_path_in_commandline(self): - """handle noops when the root path is not found on the commandline.""" - cfg = {'resize_rootfs': True} - exists_mock_path = 'cloudinit.config.cc_resizefs.os.path.exists' - - def fake_mount_info(path, log): - self.assertEqual('/', path) - self.assertEqual(LOG, log) - return ('/dev/root', 'ext4', '/') - - with mock.patch(exists_mock_path) as m_exists: - m_exists.return_value = False - wrap_and_call( - 'cloudinit.config.cc_resizefs.util', - {'is_container': {'return_value': False}, - 'get_mount_info': {'side_effect': fake_mount_info}, - 'get_cmdline': {'return_value': 'BOOT_IMAGE=/vmlinuz.efi'}}, - handle, 'cc_resizefs', cfg, _cloud=None, log=LOG, - args=[]) - logs = self.logs.getvalue() - self.assertIn("WARNING: Unable to find device '/dev/root'", logs) - - def test_resize_zfs_cmd_return(self): - zpool = 'zroot' - devpth = 'gpt/system' - self.assertEqual(('zpool', 'online', '-e', zpool, devpth), - _resize_zfs(zpool, devpth)) - - def test_resize_xfs_cmd_return(self): - mount_point = '/mnt/test' - devpth = '/dev/sda1' - self.assertEqual(('xfs_growfs', mount_point), - _resize_xfs(mount_point, devpth)) - - def test_resize_ext_cmd_return(self): - mount_point = '/' - devpth = '/dev/sdb1' - self.assertEqual(('resize2fs', devpth), - _resize_ext(mount_point, devpth)) - - def test_resize_ufs_cmd_return(self): - mount_point = '/' - devpth = '/dev/sda2' - self.assertEqual(('growfs', '-y', mount_point), - _resize_ufs(mount_point, devpth)) - - @mock.patch('cloudinit.util.is_container', return_value=False) - @mock.patch('cloudinit.util.parse_mount') - @mock.patch('cloudinit.util.get_device_info_from_zpool') - @mock.patch('cloudinit.util.get_mount_info') - def test_handle_zfs_root(self, mount_info, zpool_info, parse_mount, - is_container): - devpth = 'vmzroot/ROOT/freebsd' - disk = 'gpt/system' - fs_type = 'zfs' - mount_point = '/' - - mount_info.return_value = (devpth, fs_type, mount_point) - zpool_info.return_value = disk - parse_mount.return_value = (devpth, fs_type, mount_point) - - cfg = {'resize_rootfs': True} - - with mock.patch('cloudinit.config.cc_resizefs.do_resize') as dresize: - handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[]) - ret = dresize.call_args[0][0] - - self.assertEqual(('zpool', 'online', '-e', 'vmzroot', disk), ret) - - @mock.patch('cloudinit.util.is_container', return_value=False) - @mock.patch('cloudinit.util.get_mount_info') - @mock.patch('cloudinit.util.get_device_info_from_zpool') - @mock.patch('cloudinit.util.parse_mount') - def test_handle_modern_zfsroot(self, 
parse_mount, zpool_info, mount_info, - is_container): - devpth = 'zroot/ROOT/default' - disk = 'da0p3' - fs_type = 'zfs' - mount_point = '/' - - mount_info.return_value = (devpth, fs_type, mount_point) - zpool_info.return_value = disk - parse_mount.return_value = (devpth, fs_type, mount_point) - - cfg = {'resize_rootfs': True} - - def fake_stat(devpath): - if devpath == disk: - raise OSError("not here") - FakeStat = namedtuple( - 'FakeStat', ['st_mode', 'st_size', 'st_mtime'])  # minimal stat - return FakeStat(25008, 0, 1)  # fake block device - - with mock.patch('cloudinit.config.cc_resizefs.do_resize') as dresize: - with mock.patch('cloudinit.config.cc_resizefs.os.stat') as m_stat: - m_stat.side_effect = fake_stat - handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[]) - - self.assertEqual(('zpool', 'online', '-e', 'zroot', '/dev/' + disk), - dresize.call_args[0][0]) - - -class TestRootDevFromCmdline(CiTestCase): - - def test_rootdev_from_cmdline_with_no_root(self): - """Return None from rootdev_from_cmdline when root is not present.""" - invalid_cases = [ - 'BOOT_IMAGE=/adsf asdfa werasef root adf', 'BOOT_IMAGE=/adsf', ''] - for case in invalid_cases: - self.assertIsNone(util.rootdev_from_cmdline(case)) - - def test_rootdev_from_cmdline_with_root_startswith_dev(self): - """Return the cmdline root when the path starts with /dev.""" - self.assertEqual( - '/dev/this', util.rootdev_from_cmdline('asdf root=/dev/this')) - - def test_rootdev_from_cmdline_with_root_without_dev_prefix(self): - """Add /dev prefix to cmdline root when the path lacks the prefix.""" - self.assertEqual( - '/dev/this', util.rootdev_from_cmdline('asdf root=this')) - - def test_rootdev_from_cmdline_with_root_with_label(self): - """When cmdline root contains a LABEL, our root is disk/by-label.""" - self.assertEqual( - '/dev/disk/by-label/unique', - util.rootdev_from_cmdline('asdf root=LABEL=unique')) - - def test_rootdev_from_cmdline_with_root_with_uuid(self): - """When cmdline root contains a UUID, our root is disk/by-uuid.""" - self.assertEqual( - '/dev/disk/by-uuid/adsfdsaf-adsf', - util.rootdev_from_cmdline('asdf root=UUID=adsfdsaf-adsf')) - - -class TestMaybeGetDevicePathAsWritableBlock(CiTestCase): - - with_logs = True - - def test_maybe_get_writable_device_path_none_on_overlayroot(self): - """When devpath is overlayroot (on MAAS), is_dev_writable is False.""" - info = 'does not matter' - devpath = wrap_and_call( - 'cloudinit.config.cc_resizefs.util', - {'is_container': {'return_value': False}}, - maybe_get_writable_device_path, 'overlayroot', info, LOG) - self.assertIsNone(devpath) - self.assertIn( - "Not attempting to resize devpath 'overlayroot'", - self.logs.getvalue()) - - def test_maybe_get_writable_device_path_warns_missing_cmdline_root(self): - """When root does not exist and isn't in the cmdline, log a warning.""" - info = 'does not matter' - - def fake_mount_info(path, log): - self.assertEqual('/', path) - self.assertEqual(LOG, log) - return ('/dev/root', 'ext4', '/') - - exists_mock_path = 'cloudinit.config.cc_resizefs.os.path.exists' - with mock.patch(exists_mock_path) as m_exists: - m_exists.return_value = False - devpath = wrap_and_call( - 'cloudinit.config.cc_resizefs.util', - {'is_container': {'return_value': False}, - 'get_mount_info': {'side_effect': fake_mount_info}, - 'get_cmdline': {'return_value': 'BOOT_IMAGE=/vmlinuz.efi'}}, - maybe_get_writable_device_path, '/dev/root', info, LOG) - self.assertIsNone(devpath) - logs = self.logs.getvalue() - self.assertIn("WARNING: Unable to find device 
'/dev/root'", logs) - - def test_maybe_get_writable_device_path_does_not_exist(self): - """When devpath does not exist, a warning is logged.""" - info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none' - devpath = wrap_and_call( - 'cloudinit.config.cc_resizefs.util', - {'is_container': {'return_value': False}}, - maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG) - self.assertIsNone(devpath) - self.assertIn( - "WARNING: Device '/dev/I/dont/exist' did not exist." - ' cannot resize: %s' % info, - self.logs.getvalue()) - - def test_maybe_get_writable_device_path_does_not_exist_in_container(self): - """When devpath does not exist in a container, log a debug message.""" - info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none' - devpath = wrap_and_call( - 'cloudinit.config.cc_resizefs.util', - {'is_container': {'return_value': True}}, - maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG) - self.assertIsNone(devpath) - self.assertIn( - "DEBUG: Device '/dev/I/dont/exist' did not exist in container." - ' cannot resize: %s' % info, - self.logs.getvalue()) - - def test_maybe_get_writable_device_path_raises_oserror(self): - """When unexpected OSError is raises by os.stat it is reraised.""" - info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none' - with self.assertRaises(OSError) as context_manager: - wrap_and_call( - 'cloudinit.config.cc_resizefs', - {'util.is_container': {'return_value': True}, - 'os.stat': {'side_effect': OSError('Something unexpected')}}, - maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG) - self.assertEqual( - 'Something unexpected', str(context_manager.exception)) - - def test_maybe_get_writable_device_path_non_block(self): - """When device is not a block device, emit warning return False.""" - fake_devpath = self.tmp_path('dev/readwrite') - util.write_file(fake_devpath, '', mode=0o600) # read-write - info = 'dev=/dev/root mnt_point=/ path={0}'.format(fake_devpath) - - devpath = wrap_and_call( - 'cloudinit.config.cc_resizefs.util', - {'is_container': {'return_value': False}}, - maybe_get_writable_device_path, fake_devpath, info, LOG) - self.assertIsNone(devpath) - self.assertIn( - "WARNING: device '{0}' not a block device. cannot resize".format( - fake_devpath), - self.logs.getvalue()) - - def test_maybe_get_writable_device_path_non_block_on_container(self): - """When device is non-block device in container, emit debug log.""" - fake_devpath = self.tmp_path('dev/readwrite') - util.write_file(fake_devpath, '', mode=0o600) # read-write - info = 'dev=/dev/root mnt_point=/ path={0}'.format(fake_devpath) - - devpath = wrap_and_call( - 'cloudinit.config.cc_resizefs.util', - {'is_container': {'return_value': True}}, - maybe_get_writable_device_path, fake_devpath, info, LOG) - self.assertIsNone(devpath) - self.assertIn( - "DEBUG: device '{0}' not a block device in container." - ' cannot resize'.format(fake_devpath), - self.logs.getvalue()) - - def test_maybe_get_writable_device_path_returns_cmdline_root(self): - """When root device is UUID in kernel commandline, update devpath.""" - # XXX Long-term we want to use FilesystemMocking test to avoid - # touching os.stat. - FakeStat = namedtuple( - 'FakeStat', ['st_mode', 'st_size', 'st_mtime']) # minimal def. 
- info = 'dev=/dev/root mnt_point=/ path=/does/not/matter' - devpath = wrap_and_call( - 'cloudinit.config.cc_resizefs', - {'util.get_cmdline': {'return_value': 'asdf root=UUID=my-uuid'}, - 'util.is_container': False, - 'os.path.exists': False,  # /dev/root doesn't exist - 'os.stat': { - 'return_value': FakeStat(25008, 0, 1)}  # block device - }, - maybe_get_writable_device_path, '/dev/root', info, LOG) - self.assertEqual('/dev/disk/by-uuid/my-uuid', devpath) - self.assertIn( - "DEBUG: Converted /dev/root to '/dev/disk/by-uuid/my-uuid'" - " per kernel cmdline", - self.logs.getvalue()) - - @mock.patch('cloudinit.util.mount_is_read_write') - @mock.patch('cloudinit.config.cc_resizefs.os.path.isdir') - def test_resize_btrfs_mount_is_ro(self, m_is_dir, m_is_rw): - """Do not resize / directly if it is read-only. (LP: #1734787).""" - m_is_rw.return_value = False - m_is_dir.return_value = True - self.assertEqual( - ('btrfs', 'filesystem', 'resize', 'max', '//.snapshots'), - _resize_btrfs("/", "/dev/sda1")) - - @mock.patch('cloudinit.util.mount_is_read_write') - @mock.patch('cloudinit.config.cc_resizefs.os.path.isdir') - def test_resize_btrfs_mount_is_rw(self, m_is_dir, m_is_rw): - """Resize / directly when it is mounted read-write. (LP: #1734787).""" - m_is_rw.return_value = True - m_is_dir.return_value = True - self.assertEqual( - ('btrfs', 'filesystem', 'resize', 'max', '/'), - _resize_btrfs("/", "/dev/sda1")) - - @mock.patch('cloudinit.util.is_container', return_value=True) - @mock.patch('cloudinit.util.is_FreeBSD') - def test_maybe_get_writable_device_path_zfs_freebsd(self, freebsd, - m_is_container): - freebsd.return_value = True - info = 'dev=gpt/system mnt_point=/ path=/' - devpth = maybe_get_writable_device_path('gpt/system', info, LOG) - self.assertEqual('gpt/system', devpth) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_resolv_conf.py b/tests/unittests/test_handler/test_handler_resolv_conf.py deleted file mode 100644 index 96139001..00000000 --- a/tests/unittests/test_handler/test_handler_resolv_conf.py +++ /dev/null @@ -1,105 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information.
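The resizefs tests above lean on the wrap_and_call helper from cloudinit.tests.helpers: each key names an attribute under the given prefix, a dict value appears to be passed through as mock.patch keyword arguments ('return_value', 'side_effect'), and, judging from the 'util.is_container': False usage above, a bare value is shorthand for a return value. A hedged sketch of that calling convention as these tests use it, not an authoritative API description:

    # Sketch only: patch two helpers for the duration of a single call.
    devpath = wrap_and_call(
        'cloudinit.config.cc_resizefs.util',       # prefix for the keys below
        {'is_container': {'return_value': False},  # dict -> mock.patch kwargs
         'get_cmdline': 'root=/dev/sda1'},         # bare value -> return_value
        maybe_get_writable_device_path, '/dev/sda1', 'fake info', LOG)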
- -from cloudinit.config import cc_resolv_conf - -from cloudinit import cloud -from cloudinit import distros -from cloudinit import helpers -from cloudinit import util -from copy import deepcopy - -from cloudinit.tests import helpers as t_help - -import logging -import os -import shutil -import tempfile -from unittest import mock - -LOG = logging.getLogger(__name__) - - -class TestResolvConf(t_help.FilesystemMockingTestCase): - with_logs = True - cfg = {'manage_resolv_conf': True, 'resolv_conf': {}} - - def setUp(self): - super(TestResolvConf, self).setUp() - self.tmp = tempfile.mkdtemp() - util.ensure_dir(os.path.join(self.tmp, 'data')) - self.addCleanup(shutil.rmtree, self.tmp) - - def _fetch_distro(self, kind, conf=None): - cls = distros.fetch(kind) - paths = helpers.Paths({'cloud_dir': self.tmp}) - conf = {} if conf is None else conf - return cls(kind, conf, paths) - - def call_resolv_conf_handler(self, distro_name, conf, cc=None): - if not cc: - ds = None - distro = self._fetch_distro(distro_name, conf) - paths = helpers.Paths({'cloud_dir': self.tmp}) - cc = cloud.Cloud(ds, paths, {}, distro, None) - cc_resolv_conf.handle('cc_resolv_conf', conf, cc, LOG, []) - - @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") - def test_resolv_conf_systemd_resolved(self, m_render_to_file): - self.call_resolv_conf_handler('photon', self.cfg) - - assert [ - mock.call(mock.ANY, '/etc/systemd/resolved.conf', mock.ANY) - ] == m_render_to_file.call_args_list - - @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") - def test_resolv_conf_no_param(self, m_render_to_file): - tmp = deepcopy(self.cfg) - self.logs.truncate(0) - tmp.pop('resolv_conf') - self.call_resolv_conf_handler('photon', tmp) - - self.assertIn('manage_resolv_conf True but no parameters provided', - self.logs.getvalue()) - assert [ - mock.call(mock.ANY, '/etc/systemd/resolved.conf', mock.ANY) - ] not in m_render_to_file.call_args_list - - @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") - def test_resolv_conf_manage_resolv_conf_false(self, m_render_to_file): - tmp = deepcopy(self.cfg) - self.logs.truncate(0) - tmp['manage_resolv_conf'] = False - self.call_resolv_conf_handler('photon', tmp) - self.assertIn("'manage_resolv_conf' present but set to False", - self.logs.getvalue()) - assert [ - mock.call(mock.ANY, '/etc/systemd/resolved.conf', mock.ANY) - ] not in m_render_to_file.call_args_list - - @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") - def test_resolv_conf_etc_resolv_conf(self, m_render_to_file): - self.call_resolv_conf_handler('rhel', self.cfg) - - assert [ - mock.call(mock.ANY, '/etc/resolv.conf', mock.ANY) - ] == m_render_to_file.call_args_list - - @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") - def test_resolv_conf_invalid_resolve_conf_fn(self, m_render_to_file): - ds = None - distro = self._fetch_distro('rhel', self.cfg) - paths = helpers.Paths({'cloud_dir': self.tmp}) - cc = cloud.Cloud(ds, paths, {}, distro, None) - cc.distro.resolve_conf_fn = 'bla' - - self.logs.truncate(0) - self.call_resolv_conf_handler('rhel', self.cfg, cc) - - self.assertIn('No template found, not rendering resolve configs', - self.logs.getvalue()) - - assert [ - mock.call(mock.ANY, '/etc/resolv.conf', mock.ANY) - ] not in m_render_to_file.call_args_list - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_rsyslog.py b/tests/unittests/test_handler/test_handler_rsyslog.py deleted file mode 100644 index 
8c8e2838..00000000 --- a/tests/unittests/test_handler/test_handler_rsyslog.py +++ /dev/null @@ -1,178 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import os -import shutil -import tempfile - -from cloudinit.config.cc_rsyslog import ( - apply_rsyslog_changes, DEF_DIR, DEF_FILENAME, DEF_RELOAD, load_config, - parse_remotes_line, remotes_to_rsyslog_cfg) -from cloudinit import util - -from cloudinit.tests import helpers as t_help - - -class TestLoadConfig(t_help.TestCase): - def setUp(self): - super(TestLoadConfig, self).setUp() - self.basecfg = { - 'config_filename': DEF_FILENAME, - 'config_dir': DEF_DIR, - 'service_reload_command': DEF_RELOAD, - 'configs': [], - 'remotes': {}, - } - - def test_legacy_full(self): - found = load_config({ - 'rsyslog': ['*.* @192.168.1.1'], - 'rsyslog_dir': "mydir", - 'rsyslog_filename': "myfilename"}) - self.basecfg.update({ - 'configs': ['*.* @192.168.1.1'], - 'config_dir': "mydir", - 'config_filename': 'myfilename', - 'service_reload_command': 'auto'} - ) - - self.assertEqual(found, self.basecfg) - - def test_legacy_defaults(self): - found = load_config({ - 'rsyslog': ['*.* @192.168.1.1']}) - self.basecfg.update({ - 'configs': ['*.* @192.168.1.1']}) - self.assertEqual(found, self.basecfg) - - def test_new_defaults(self): - self.assertEqual(load_config({}), self.basecfg) - - def test_new_configs(self): - cfgs = ['*.* myhost', '*.* my2host'] - self.basecfg.update({'configs': cfgs}) - self.assertEqual( - load_config({'rsyslog': {'configs': cfgs}}), - self.basecfg) - - -class TestApplyChanges(t_help.TestCase): - def setUp(self): - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - - def test_simple(self): - cfgline = "*.* foohost" - changed = apply_rsyslog_changes( - configs=[cfgline], def_fname="foo.cfg", cfg_dir=self.tmp) - - fname = os.path.join(self.tmp, "foo.cfg") - self.assertEqual([fname], changed) - self.assertEqual( - util.load_file(fname), cfgline + "\n") - - def test_multiple_files(self): - configs = [ - '*.* foohost', - {'content': 'abc', 'filename': 'my.cfg'}, - {'content': 'filefoo-content', - 'filename': os.path.join(self.tmp, 'mydir/mycfg')}, - ] - - changed = apply_rsyslog_changes( - configs=configs, def_fname="default.cfg", cfg_dir=self.tmp) - - expected = [ - (os.path.join(self.tmp, "default.cfg"), - "*.* foohost\n"), - (os.path.join(self.tmp, "my.cfg"), "abc\n"), - (os.path.join(self.tmp, "mydir/mycfg"), "filefoo-content\n"), - ] - self.assertEqual([f[0] for f in expected], changed) - actual = [] - for fname, _content in expected: - actual.append((fname, util.load_file(fname))) - self.assertEqual(expected, actual) - - def test_repeat_def(self): - configs = ['*.* foohost', "*.warn otherhost"] - - changed = apply_rsyslog_changes( - configs=configs, def_fname="default.cfg", cfg_dir=self.tmp) - - fname = os.path.join(self.tmp, "default.cfg") - self.assertEqual([fname], changed) - - expected_content = '\n'.join(configs) + '\n' - found_content = util.load_file(fname) - self.assertEqual(expected_content, found_content) - - def test_multiline_content(self): - configs = ['line1', 'line2\nline3\n'] - - apply_rsyslog_changes( - configs=configs, def_fname="default.cfg", cfg_dir=self.tmp) - - fname = os.path.join(self.tmp, "default.cfg") - expected_content = '\n'.join(configs) - found_content = util.load_file(fname) - self.assertEqual(expected_content, found_content) - - -class TestParseRemotesLine(t_help.TestCase): - def 
test_valid_port(self): - r = parse_remotes_line("foo:9") - self.assertEqual(9, r.port) - - def test_invalid_port(self): - with self.assertRaises(ValueError): - parse_remotes_line("*.* foo:abc") - - def test_valid_ipv6(self): - r = parse_remotes_line("*.* [::1]") - self.assertEqual("*.* @[::1]", str(r)) - - def test_valid_ipv6_with_port(self): - r = parse_remotes_line("*.* [::1]:100") - self.assertEqual(r.port, 100) - self.assertEqual(r.addr, "::1") - self.assertEqual("*.* @[::1]:100", str(r)) - - def test_invalid_multiple_colon(self): - with self.assertRaises(ValueError): - parse_remotes_line("*.* ::1:100") - - def test_name_in_string(self): - r = parse_remotes_line("syslog.host", name="foobar") - self.assertEqual("*.* @syslog.host # foobar", str(r)) - - -class TestRemotesToSyslog(t_help.TestCase): - def test_simple(self): - # str rendered line must appear in remotes_to_rsyslog_cfg return - mycfg = "*.* myhost" - myline = str(parse_remotes_line(mycfg, name="myname")) - r = remotes_to_rsyslog_cfg({'myname': mycfg}) - lines = r.splitlines() - self.assertEqual(1, len(lines)) - self.assertIn(myline, lines) - - def test_header_footer(self): - header = "#foo head" - footer = "#foo foot" - r = remotes_to_rsyslog_cfg( - {'myname': "*.* myhost"}, header=header, footer=footer) - lines = r.splitlines() - self.assertEqual(header, lines[0]) - self.assertEqual(footer, lines[-1]) - - def test_with_empty_or_null(self): - mycfg = "*.* myhost" - myline = str(parse_remotes_line(mycfg, name="myname")) - r = remotes_to_rsyslog_cfg( - {'myname': mycfg, 'removed': None, 'removed2': ""}) - lines = r.splitlines() - self.assertEqual(1, len(lines)) - self.assertIn(myline, lines) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_runcmd.py b/tests/unittests/test_handler/test_handler_runcmd.py deleted file mode 100644 index 672e8093..00000000 --- a/tests/unittests/test_handler/test_handler_runcmd.py +++ /dev/null @@ -1,129 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information.
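test_header_footer above compares with assertEqual because assertTrue(a, b) treats its second argument as a failure message, so it passes for any truthy first argument and never compares the two values. A minimal illustration of the pitfall:

    import unittest

    class AssertTruePitfall(unittest.TestCase):
        def test_always_passes(self):
            # Buggy pattern: the second argument is only the failure message,
            # so this 'comparison' succeeds for any non-empty string.
            self.assertTrue('#foo head', 'a completely different line')

        def test_actually_compares(self):
            with self.assertRaises(AssertionError):
                self.assertEqual('#foo head', 'a completely different line')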
-import logging -import os -import stat -from unittest.mock import patch - -from cloudinit.config.cc_runcmd import handle, schema -from cloudinit import (helpers, subp, util) -from cloudinit.tests.helpers import ( - CiTestCase, FilesystemMockingTestCase, SchemaTestCaseMixin, - skipUnlessJsonSchema) - -from tests.unittests.util import get_cloud - -LOG = logging.getLogger(__name__) - - -class TestRuncmd(FilesystemMockingTestCase): - - with_logs = True - - def setUp(self): - super(TestRuncmd, self).setUp() - self.subp = subp.subp - self.new_root = self.tmp_dir() - self.patchUtils(self.new_root) - self.paths = helpers.Paths({'scripts': self.new_root}) - - def test_handler_skip_if_no_runcmd(self): - """When the provided config doesn't contain runcmd, skip it.""" - cfg = {} - mycloud = get_cloud(paths=self.paths) - handle('notimportant', cfg, mycloud, LOG, None) - self.assertIn( - "Skipping module named notimportant, no 'runcmd' key", - self.logs.getvalue()) - - @patch('cloudinit.util.shellify') - def test_runcmd_shellify_fails(self, m_shellify): - """When shellify fails, the raised exception propagates.""" - m_shellify.side_effect = TypeError("patched shellify") - valid_config = {'runcmd': ['echo 42']} - cc = get_cloud(paths=self.paths) - with self.assertRaises(TypeError) as cm: - with self.allow_subp(['/bin/sh']): - handle('cc_runcmd', valid_config, cc, LOG, None) - self.assertIn("Failed to shellify", str(cm.exception)) - - def test_handler_invalid_command_set(self): - """Commands which can't be converted to shell will raise errors.""" - invalid_config = {'runcmd': 1} - cc = get_cloud(paths=self.paths) - with self.assertRaises(TypeError) as cm: - handle('cc_runcmd', invalid_config, cc, LOG, []) - self.assertIn( - 'Failed to shellify 1 into file' - ' /var/lib/cloud/instances/iid-datasource-none/scripts/runcmd', - str(cm.exception)) - - @skipUnlessJsonSchema() - def test_handler_schema_validation_warns_non_array_type(self): - """Schema validation warns of non-array type for runcmd key. - - Schema validation is not strict, so runcmd attempts to shellify the - invalid content. - """ - invalid_config = {'runcmd': 1} - cc = get_cloud(paths=self.paths) - with self.assertRaises(TypeError) as cm: - handle('cc_runcmd', invalid_config, cc, LOG, []) - self.assertIn( - 'Invalid config:\nruncmd: 1 is not of type \'array\'', - self.logs.getvalue()) - self.assertIn('Failed to shellify', str(cm.exception)) - - @skipUnlessJsonSchema() - def test_handler_schema_validation_warns_non_array_item_type(self): - """Schema validation warns of non-array or string runcmd items. - - Schema validation is not strict, so runcmd attempts to shellify the - invalid content.
- """ - invalid_config = { - 'runcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]} - cc = get_cloud(paths=self.paths) - with self.assertRaises(TypeError) as cm: - handle('cc_runcmd', invalid_config, cc, LOG, []) - expected_warnings = [ - 'runcmd.1: 20 is not valid under any of the given schemas', - 'runcmd.3: {\'a\': \'n\'} is not valid under any of the given' - ' schema' - ] - logs = self.logs.getvalue() - for warning in expected_warnings: - self.assertIn(warning, logs) - self.assertIn('Failed to shellify', str(cm.exception)) - - def test_handler_write_valid_runcmd_schema_to_file(self): - """Valid runcmd schema is written to a runcmd shell script.""" - valid_config = {'runcmd': [['ls', '/']]} - cc = get_cloud(paths=self.paths) - handle('cc_runcmd', valid_config, cc, LOG, []) - runcmd_file = os.path.join( - self.new_root, - 'var/lib/cloud/instances/iid-datasource-none/scripts/runcmd') - self.assertEqual("#!/bin/sh\n'ls' '/'\n", util.load_file(runcmd_file)) - file_stat = os.stat(runcmd_file) - self.assertEqual(0o700, stat.S_IMODE(file_stat.st_mode)) - - -@skipUnlessJsonSchema() -class TestSchema(CiTestCase, SchemaTestCaseMixin): - """Directly test schema rather than through handle.""" - - schema = schema - - def test_duplicates_are_fine_array_array(self): - """Duplicated commands array/array entries are allowed.""" - self.assertSchemaValid( - [["echo", "bye"], ["echo", "bye"]], - "command entries can be duplicate.") - - def test_duplicates_are_fine_array_string(self): - """Duplicated commands array/string entries are allowed.""" - self.assertSchemaValid( - ["echo bye", "echo bye"], - "command entries can be duplicate.") - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py deleted file mode 100644 index 2ab153d2..00000000 --- a/tests/unittests/test_handler/test_handler_seed_random.py +++ /dev/null @@ -1,205 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. -# -# Author: Juerg Haefliger -# -# Based on test_handler_set_hostname.py -# -# This file is part of cloud-init. See LICENSE file for license information. 
-import gzip -import logging -import tempfile -from io import BytesIO - -from cloudinit import subp -from cloudinit import util -from cloudinit.config import cc_seed_random -from cloudinit.tests import helpers as t_help - -from tests.unittests.util import get_cloud - -LOG = logging.getLogger(__name__) - - -class TestRandomSeed(t_help.TestCase): - def setUp(self): - super(TestRandomSeed, self).setUp() - self._seed_file = tempfile.mktemp() - self.unapply = [] - - # by default 'which' has nothing in its path - self.apply_patches([(subp, 'which', self._which)]) - self.apply_patches([(subp, 'subp', self._subp)]) - self.subp_called = [] - self.whichdata = {} - - def tearDown(self): - apply_patches([i for i in reversed(self.unapply)]) - util.del_file(self._seed_file) - - def apply_patches(self, patches): - ret = apply_patches(patches) - self.unapply += ret - - def _which(self, program): - return self.whichdata.get(program) - - def _subp(self, *args, **kwargs): - # supports subp calling with cmd as args or kwargs - if 'args' not in kwargs: - kwargs['args'] = args[0] - self.subp_called.append(kwargs) - return - - def _compress(self, text): - contents = BytesIO() - gz_fh = gzip.GzipFile(mode='wb', fileobj=contents) - gz_fh.write(text) - gz_fh.close() - return contents.getvalue() - - def test_append_random(self): - cfg = { - 'random_seed': { - 'file': self._seed_file, - 'data': 'tiny-tim-was-here', - } - } - cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) - contents = util.load_file(self._seed_file) - self.assertEqual("tiny-tim-was-here", contents) - - def test_append_random_unknown_encoding(self): - data = self._compress(b"tiny-toe") - cfg = { - 'random_seed': { - 'file': self._seed_file, - 'data': data, - 'encoding': 'special_encoding', - } - } - self.assertRaises(IOError, cc_seed_random.handle, 'test', cfg, - get_cloud('ubuntu'), LOG, []) - - def test_append_random_gzip(self): - data = self._compress(b"tiny-toe") - cfg = { - 'random_seed': { - 'file': self._seed_file, - 'data': data, - 'encoding': 'gzip', - } - } - cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) - contents = util.load_file(self._seed_file) - self.assertEqual("tiny-toe", contents) - - def test_append_random_gz(self): - data = self._compress(b"big-toe") - cfg = { - 'random_seed': { - 'file': self._seed_file, - 'data': data, - 'encoding': 'gz', - } - } - cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) - contents = util.load_file(self._seed_file) - self.assertEqual("big-toe", contents) - - def test_append_random_base64(self): - data = util.b64e('bubbles') - cfg = { - 'random_seed': { - 'file': self._seed_file, - 'data': data, - 'encoding': 'base64', - } - } - cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) - contents = util.load_file(self._seed_file) - self.assertEqual("bubbles", contents) - - def test_append_random_b64(self): - data = util.b64e('kit-kat') - cfg = { - 'random_seed': { - 'file': self._seed_file, - 'data': data, - 'encoding': 'b64', - } - } - cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) - contents = util.load_file(self._seed_file) - self.assertEqual("kit-kat", contents) - - def test_append_random_metadata(self): - cfg = { - 'random_seed': { - 'file': self._seed_file, - 'data': 'tiny-tim-was-here', - } - } - c = get_cloud('ubuntu', metadata={'random_seed': '-so-was-josh'}) - cc_seed_random.handle('test', cfg, c, LOG, []) - contents = util.load_file(self._seed_file) - self.assertEqual('tiny-tim-was-here-so-was-josh', contents) - - def 
test_seed_command_provided_and_available(self): - c = get_cloud('ubuntu') - self.whichdata = {'pollinate': '/usr/bin/pollinate'} - cfg = {'random_seed': {'command': ['pollinate', '-q']}} - cc_seed_random.handle('test', cfg, c, LOG, []) - - subp_args = [f['args'] for f in self.subp_called] - self.assertIn(['pollinate', '-q'], subp_args) - - def test_seed_command_not_provided(self): - c = get_cloud('ubuntu') - self.whichdata = {} - cc_seed_random.handle('test', {}, c, LOG, []) - - # subp should not have been called as which would say not available - self.assertFalse(self.subp_called) - - def test_unavailable_seed_command_and_required_raises_error(self): - c = get_cloud('ubuntu') - self.whichdata = {} - cfg = {'random_seed': {'command': ['THIS_NO_COMMAND'], - 'command_required': True}} - self.assertRaises(ValueError, cc_seed_random.handle, - 'test', cfg, c, LOG, []) - - def test_seed_command_and_required(self): - c = get_cloud('ubuntu') - self.whichdata = {'foo': 'foo'} - cfg = {'random_seed': {'command_required': True, 'command': ['foo']}} - cc_seed_random.handle('test', cfg, c, LOG, []) - - self.assertIn(['foo'], [f['args'] for f in self.subp_called]) - - def test_file_in_environment_for_command(self): - c = get_cloud('ubuntu') - self.whichdata = {'foo': 'foo'} - cfg = {'random_seed': {'command_required': True, 'command': ['foo'], - 'file': self._seed_file}} - cc_seed_random.handle('test', cfg, c, LOG, []) - - # this just insists that the first time subp was called, - # RANDOM_SEED_FILE was in the environment set up correctly - subp_env = [f['env'] for f in self.subp_called] - self.assertEqual(subp_env[0].get('RANDOM_SEED_FILE'), self._seed_file) - - -def apply_patches(patches): - ret = [] - for (ref, name, replace) in patches: - if replace is None: - continue - orig = getattr(ref, name) - setattr(ref, name, replace) - ret.append((ref, name, orig)) - return ret - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py deleted file mode 100644 index 1a524c7d..00000000 --- a/tests/unittests/test_handler/test_handler_set_hostname.py +++ /dev/null @@ -1,207 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information.
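The seed_random tests above monkey-patch subp through the module-level apply_patches helper, which swaps attributes in place and returns (ref, name, original) records so tearDown can restore them. A short usage sketch under those same semantics:

    # Sketch: swap subp.which for a stub, then restore the original.
    undo = apply_patches([(subp, 'which', lambda program: None)])
    try:
        assert subp.which('pollinate') is None  # the stub is in place
    finally:
        apply_patches([i for i in reversed(undo)])  # restores the original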
- -from cloudinit.config import cc_set_hostname - -from cloudinit import cloud -from cloudinit import distros -from cloudinit import helpers -from cloudinit import util - -from cloudinit.tests import helpers as t_help - -from configobj import ConfigObj -import logging -import os -import shutil -import tempfile -from io import BytesIO -from unittest import mock - -LOG = logging.getLogger(__name__) - - -class TestHostname(t_help.FilesystemMockingTestCase): - - with_logs = True - - def setUp(self): - super(TestHostname, self).setUp() - self.tmp = tempfile.mkdtemp() - util.ensure_dir(os.path.join(self.tmp, 'data')) - self.addCleanup(shutil.rmtree, self.tmp) - - def _fetch_distro(self, kind, conf=None): - cls = distros.fetch(kind) - paths = helpers.Paths({'cloud_dir': self.tmp}) - conf = {} if conf is None else conf - return cls(kind, conf, paths) - - def test_debian_write_hostname_prefer_fqdn(self): - cfg = { - 'hostname': 'blah', - 'prefer_fqdn_over_hostname': True, - 'fqdn': 'blah.yahoo.com', - } - distro = self._fetch_distro('debian', cfg) - paths = helpers.Paths({'cloud_dir': self.tmp}) - ds = None - cc = cloud.Cloud(ds, paths, {}, distro, None) - self.patchUtils(self.tmp) - cc_set_hostname.handle('cc_set_hostname', - cfg, cc, LOG, []) - contents = util.load_file("/etc/hostname") - self.assertEqual('blah.yahoo.com', contents.strip()) - - @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False) - def test_rhel_write_hostname_prefer_hostname(self, m_uses_systemd): - cfg = { - 'hostname': 'blah', - 'prefer_fqdn_over_hostname': False, - 'fqdn': 'blah.yahoo.com', - } - distro = self._fetch_distro('rhel', cfg) - paths = helpers.Paths({'cloud_dir': self.tmp}) - ds = None - cc = cloud.Cloud(ds, paths, {}, distro, None) - self.patchUtils(self.tmp) - cc_set_hostname.handle('cc_set_hostname', - cfg, cc, LOG, []) - contents = util.load_file("/etc/sysconfig/network", decode=False) - n_cfg = ConfigObj(BytesIO(contents)) - self.assertEqual( - {'HOSTNAME': 'blah'}, - dict(n_cfg)) - - @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False) - def test_write_hostname_rhel(self, m_uses_systemd): - cfg = { - 'hostname': 'blah', - 'fqdn': 'blah.blah.blah.yahoo.com' - } - distro = self._fetch_distro('rhel') - paths = helpers.Paths({'cloud_dir': self.tmp}) - ds = None - cc = cloud.Cloud(ds, paths, {}, distro, None) - self.patchUtils(self.tmp) - cc_set_hostname.handle('cc_set_hostname', - cfg, cc, LOG, []) - contents = util.load_file("/etc/sysconfig/network", decode=False) - n_cfg = ConfigObj(BytesIO(contents)) - self.assertEqual( - {'HOSTNAME': 'blah.blah.blah.yahoo.com'}, - dict(n_cfg)) - - def test_write_hostname_debian(self): - cfg = { - 'hostname': 'blah', - 'fqdn': 'blah.blah.blah.yahoo.com', - } - distro = self._fetch_distro('debian') - paths = helpers.Paths({'cloud_dir': self.tmp}) - ds = None - cc = cloud.Cloud(ds, paths, {}, distro, None) - self.patchUtils(self.tmp) - cc_set_hostname.handle('cc_set_hostname', - cfg, cc, LOG, []) - contents = util.load_file("/etc/hostname") - self.assertEqual('blah', contents.strip()) - - @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False) - def test_write_hostname_sles(self, m_uses_systemd): - cfg = { - 'hostname': 'blah.blah.blah.suse.com', - } - distro = self._fetch_distro('sles') - paths = helpers.Paths({'cloud_dir': self.tmp}) - ds = None - cc = cloud.Cloud(ds, paths, {}, distro, None) - self.patchUtils(self.tmp) - cc_set_hostname.handle('cc_set_hostname', cfg, cc, LOG, []) - contents = 
util.load_file(distro.hostname_conf_fn) - self.assertEqual('blah', contents.strip()) - - @mock.patch('cloudinit.distros.photon.subp.subp') - def test_photon_hostname(self, m_subp): - cfg1 = { - 'hostname': 'photon', - 'prefer_fqdn_over_hostname': True, - 'fqdn': 'test1.vmware.com', - } - cfg2 = { - 'hostname': 'photon', - 'prefer_fqdn_over_hostname': False, - 'fqdn': 'test2.vmware.com', - } - - ds = None - m_subp.return_value = (None, None) - distro = self._fetch_distro('photon', cfg1) - paths = helpers.Paths({'cloud_dir': self.tmp}) - cc = cloud.Cloud(ds, paths, {}, distro, None) - for c in [cfg1, cfg2]: - cc_set_hostname.handle('cc_set_hostname', c, cc, LOG, []) - if c['prefer_fqdn_over_hostname']: - assert [ - mock.call(['hostnamectl', 'set-hostname', c['fqdn']], - capture=True) - ] in m_subp.call_args_list - assert [ - mock.call(['hostnamectl', 'set-hostname', c['hostname']], - capture=True) - ] not in m_subp.call_args_list - else: - assert [ - mock.call(['hostnamectl', 'set-hostname', c['hostname']], - capture=True) - ] in m_subp.call_args_list - assert [ - mock.call(['hostnamectl', 'set-hostname', c['fqdn']], - capture=True) - ] not in m_subp.call_args_list - - def test_multiple_calls_skips_unchanged_hostname(self): - """Only new hostname or fqdn values will generate a hostname call.""" - distro = self._fetch_distro('debian') - paths = helpers.Paths({'cloud_dir': self.tmp}) - ds = None - cc = cloud.Cloud(ds, paths, {}, distro, None) - self.patchUtils(self.tmp) - cc_set_hostname.handle( - 'cc_set_hostname', {'hostname': 'hostname1.me.com'}, cc, LOG, []) - contents = util.load_file("/etc/hostname") - self.assertEqual('hostname1', contents.strip()) - cc_set_hostname.handle( - 'cc_set_hostname', {'hostname': 'hostname1.me.com'}, cc, LOG, []) - self.assertIn( - 'DEBUG: No hostname changes. Skipping set-hostname\n', - self.logs.getvalue()) - cc_set_hostname.handle( - 'cc_set_hostname', {'hostname': 'hostname2.me.com'}, cc, LOG, []) - contents = util.load_file("/etc/hostname") - self.assertEqual('hostname2', contents.strip()) - self.assertIn( - 'Non-persistently setting the system hostname to hostname2', - self.logs.getvalue()) - - def test_error_on_distro_set_hostname_errors(self): - """Raise SetHostnameError on exceptions from distro.set_hostname.""" - distro = self._fetch_distro('debian') - - def set_hostname_error(hostname, fqdn): - raise Exception("OOPS on: %s" % fqdn) - - distro.set_hostname = set_hostname_error - paths = helpers.Paths({'cloud_dir': self.tmp}) - ds = None - cc = cloud.Cloud(ds, paths, {}, distro, None) - self.patchUtils(self.tmp) - with self.assertRaises(cc_set_hostname.SetHostnameError) as ctx_mgr: - cc_set_hostname.handle( - 'somename', {'hostname': 'hostname1.me.com'}, cc, LOG, []) - self.assertEqual( - 'Failed to set the hostname to hostname1.me.com (hostname1):' - ' OOPS on: hostname1.me.com', - str(ctx_mgr.exception)) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_spacewalk.py b/tests/unittests/test_handler/test_handler_spacewalk.py deleted file mode 100644 index 26f7648f..00000000 --- a/tests/unittests/test_handler/test_handler_spacewalk.py +++ /dev/null @@ -1,42 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information.
- -from cloudinit.config import cc_spacewalk -from cloudinit import subp - -from cloudinit.tests import helpers - -import logging -from unittest import mock - -LOG = logging.getLogger(__name__) - - -class TestSpacewalk(helpers.TestCase): - space_cfg = { - 'spacewalk': { - 'server': 'localhost', - 'profile_name': 'test', - } - } - - @mock.patch("cloudinit.config.cc_spacewalk.subp.subp") - def test_not_is_registered(self, mock_subp): - mock_subp.side_effect = subp.ProcessExecutionError(exit_code=1) - self.assertFalse(cc_spacewalk.is_registered()) - - @mock.patch("cloudinit.config.cc_spacewalk.subp.subp") - def test_is_registered(self, mock_subp): - mock_subp.side_effect = None - self.assertTrue(cc_spacewalk.is_registered()) - - @mock.patch("cloudinit.config.cc_spacewalk.subp.subp") - def test_do_register(self, mock_subp): - cc_spacewalk.do_register(**self.space_cfg['spacewalk']) - mock_subp.assert_called_with([ - 'rhnreg_ks', - '--serverUrl', 'https://localhost/XMLRPC', - '--profilename', 'test', - '--sslCACert', cc_spacewalk.def_ca_cert_path, - ], capture=False) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_timezone.py b/tests/unittests/test_handler/test_handler_timezone.py deleted file mode 100644 index 77cdb0c2..00000000 --- a/tests/unittests/test_handler/test_handler_timezone.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. -# -# Author: Juerg Haefliger -# -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit.config import cc_timezone - -from cloudinit import util - - -import logging -import shutil -import tempfile -from configobj import ConfigObj -from io import BytesIO - -from cloudinit.tests import helpers as t_help - -from tests.unittests.util import get_cloud - -LOG = logging.getLogger(__name__) - - -class TestTimezone(t_help.FilesystemMockingTestCase): - def setUp(self): - super(TestTimezone, self).setUp() - self.new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.new_root) - self.patchUtils(self.new_root) - self.patchOS(self.new_root) - - def test_set_timezone_sles(self): - - cfg = { - 'timezone': 'Tatooine/Bestine', - } - cc = get_cloud('sles') - - # Create a dummy timezone file - dummy_contents = '0123456789abcdefgh' - util.write_file('/usr/share/zoneinfo/%s' % cfg['timezone'], - dummy_contents) - - cc_timezone.handle('cc_timezone', cfg, cc, LOG, []) - - contents = util.load_file('/etc/sysconfig/clock', decode=False) - n_cfg = ConfigObj(BytesIO(contents)) - self.assertEqual({'TIMEZONE': cfg['timezone']}, dict(n_cfg)) - - contents = util.load_file('/etc/localtime') - self.assertEqual(dummy_contents, contents.strip()) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_write_files.py b/tests/unittests/test_handler/test_handler_write_files.py deleted file mode 100644 index 0af92805..00000000 --- a/tests/unittests/test_handler/test_handler_write_files.py +++ /dev/null @@ -1,246 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
- -import base64 -import copy -import gzip -import io -import shutil -import tempfile - -from cloudinit.config.cc_write_files import ( - handle, decode_perms, write_files) -from cloudinit import log as logging -from cloudinit import util - -from cloudinit.tests.helpers import ( - CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema) - -LOG = logging.getLogger(__name__) - -YAML_TEXT = """ -write_files: - - encoding: gzip - content: !!binary | - H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA= - path: /usr/bin/hello - permissions: '0755' - - content: !!binary | - Zm9vYmFyCg== - path: /wark - permissions: '0755' - - content: | - hi mom line 1 - hi mom line 2 - path: /tmp/message -""" - -YAML_CONTENT_EXPECTED = { - '/usr/bin/hello': "#!/bin/sh\necho hello world\n", - '/wark': "foobar\n", - '/tmp/message': "hi mom line 1\nhi mom line 2\n", -} - -VALID_SCHEMA = { - 'write_files': [ - {'append': False, 'content': 'a', 'encoding': 'gzip', 'owner': 'jeff', - 'path': '/some', 'permissions': '0777'} - ] -} - -INVALID_SCHEMA = { # Dropped required path key - 'write_files': [ - {'append': False, 'content': 'a', 'encoding': 'gzip', 'owner': 'jeff', - 'permissions': '0777'} - ] -} - - -@skipUnlessJsonSchema() -@mock.patch('cloudinit.config.cc_write_files.write_files') -class TestWriteFilesSchema(CiTestCase): - - with_logs = True - - def test_schema_validation_warns_missing_path(self, m_write_files): - """The only required file item property is 'path'.""" - cc = self.tmp_cloud('ubuntu') - valid_config = {'write_files': [{'path': '/some/path'}]} - handle('cc_write_file', valid_config, cc, LOG, []) - self.assertNotIn('Invalid config:', self.logs.getvalue()) - handle('cc_write_file', INVALID_SCHEMA, cc, LOG, []) - self.assertIn('Invalid config:', self.logs.getvalue()) - self.assertIn("'path' is a required property", self.logs.getvalue()) - - def test_schema_validation_warns_non_string_type_for_files( - self, m_write_files): - """Schema validation warns of non-string values for each file item.""" - cc = self.tmp_cloud('ubuntu') - for key in VALID_SCHEMA['write_files'][0].keys(): - if key == 'append': - key_type = 'boolean' - else: - key_type = 'string' - invalid_config = copy.deepcopy(VALID_SCHEMA) - invalid_config['write_files'][0][key] = 1 - handle('cc_write_file', invalid_config, cc, LOG, []) - self.assertIn( - mock.call('cc_write_file', invalid_config['write_files']), - m_write_files.call_args_list) - self.assertIn( - 'write_files.0.%s: 1 is not of type \'%s\'' % (key, key_type), - self.logs.getvalue()) - self.assertIn('Invalid config:', self.logs.getvalue()) - - def test_schema_validation_warns_on_additional_undefined_propertes( - self, m_write_files): - """Schema validation warns on additional undefined file properties.""" - cc = self.tmp_cloud('ubuntu') - invalid_config = copy.deepcopy(VALID_SCHEMA) - invalid_config['write_files'][0]['bogus'] = 'value' - handle('cc_write_file', invalid_config, cc, LOG, []) - self.assertIn( - "Invalid config:\nwrite_files.0: Additional properties" - " are not allowed ('bogus' was unexpected)", - self.logs.getvalue()) - - -class TestWriteFiles(FilesystemMockingTestCase): - - with_logs = True - - def setUp(self): - super(TestWriteFiles, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - - @skipUnlessJsonSchema() - def test_handler_schema_validation_warns_non_array_type(self): - """Schema validation warns of non-array value.""" - invalid_config = {'write_files': 1} - cc = self.tmp_cloud('ubuntu') - with 
self.assertRaises(TypeError): - handle('cc_write_file', invalid_config, cc, LOG, []) - self.assertIn( - 'Invalid config:\nwrite_files: 1 is not of type \'array\'', - self.logs.getvalue()) - - def test_simple(self): - self.patchUtils(self.tmp) - expected = "hello world\n" - filename = "/tmp/my.file" - write_files( - "test_simple", [{"content": expected, "path": filename}]) - self.assertEqual(util.load_file(filename), expected) - - def test_append(self): - self.patchUtils(self.tmp) - existing = "hello " - added = "world\n" - expected = existing + added - filename = "/tmp/append.file" - util.write_file(filename, existing) - write_files( - "test_append", - [{"content": added, "path": filename, "append": "true"}]) - self.assertEqual(util.load_file(filename), expected) - - def test_yaml_binary(self): - self.patchUtils(self.tmp) - data = util.load_yaml(YAML_TEXT) - write_files("testname", data['write_files']) - for path, content in YAML_CONTENT_EXPECTED.items(): - self.assertEqual(util.load_file(path), content) - - def test_all_decodings(self): - self.patchUtils(self.tmp) - - # build a 'files' array that has a dictionary of encodings - # for 'gz', 'gzip', 'gz+base64' ... - data = b"foobzr" - utf8_valid = b"foobzr" - utf8_invalid = b'ab\xaadef' - files = [] - expected = [] - - gz_aliases = ('gz', 'gzip') - gz_b64_aliases = ('gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64') - b64_aliases = ('base64', 'b64') - - datum = (("utf8", utf8_valid), ("no-utf8", utf8_invalid)) - for name, data in datum: - gz = (_gzip_bytes(data), gz_aliases) - gz_b64 = (base64.b64encode(_gzip_bytes(data)), gz_b64_aliases) - b64 = (base64.b64encode(data), b64_aliases) - for content, aliases in (gz, gz_b64, b64): - for enc in aliases: - cur = {'content': content, - 'path': '/tmp/file-%s-%s' % (name, enc), - 'encoding': enc} - files.append(cur) - expected.append((cur['path'], data)) - - write_files("test_decoding", files) - - for path, content in expected: - self.assertEqual(util.load_file(path, decode=False), content) - - # make sure we actually wrote *some* files. 
- flen_expected = ( - len(gz_aliases + gz_b64_aliases + b64_aliases) * len(datum)) - self.assertEqual(len(expected), flen_expected) - - def test_deferred(self): - self.patchUtils(self.tmp) - file_path = '/tmp/deferred.file' - config = { - 'write_files': [ - {'path': file_path, 'defer': True} - ] - } - cc = self.tmp_cloud('ubuntu') - handle('cc_write_file', config, cc, LOG, []) - with self.assertRaises(FileNotFoundError): - util.load_file(file_path) - - -class TestDecodePerms(CiTestCase): - - with_logs = True - - def test_none_returns_default(self): - """If None is passed as perms, then default should be returned.""" - default = object() - found = decode_perms(None, default) - self.assertEqual(default, found) - - def test_integer(self): - """A valid integer should return itself.""" - found = decode_perms(0o755, None) - self.assertEqual(0o755, found) - - def test_valid_octal_string(self): - """A string should be read as octal.""" - found = decode_perms("644", None) - self.assertEqual(0o644, found) - - def test_invalid_octal_string_returns_default_and_warns(self): - """A string with invalid octal should warn and return default.""" - found = decode_perms("999", None) - self.assertIsNone(found) - self.assertIn("WARNING: Undecodable", self.logs.getvalue()) - - -def _gzip_bytes(data): - buf = io.BytesIO() - fp = None - try: - fp = gzip.GzipFile(fileobj=buf, mode="wb") - fp.write(data) - fp.close() - return buf.getvalue() - finally: - if fp: - fp.close() - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_write_files_deferred.py b/tests/unittests/test_handler/test_handler_write_files_deferred.py deleted file mode 100644 index 57b6934a..00000000 --- a/tests/unittests/test_handler/test_handler_write_files_deferred.py +++ /dev/null @@ -1,77 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
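The encodings matrix in `test_all_decodings` above builds each payload by composing gzip and base64, exactly as `_gzip_bytes` does. A standalone sketch of producing and verifying a 'gz+b64' payload; the decode side here uses the stdlib `gzip.decompress`, not cloud-init's own decoder:

    import base64
    import gzip
    import io

    def gzip_bytes(data: bytes) -> bytes:
        buf = io.BytesIO()
        with gzip.GzipFile(fileobj=buf, mode="wb") as fp:
            fp.write(data)
        return buf.getvalue()

    payload = base64.b64encode(gzip_bytes(b"foobzr"))  # 'gz+b64' content
    assert gzip.decompress(base64.b64decode(payload)) == b"foobzr"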
- -import tempfile -import shutil - -from cloudinit.config.cc_write_files_deferred import (handle) -from .test_handler_write_files import (VALID_SCHEMA) -from cloudinit import log as logging -from cloudinit import util - -from cloudinit.tests.helpers import ( - CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema) - -LOG = logging.getLogger(__name__) - - -@skipUnlessJsonSchema() -@mock.patch('cloudinit.config.cc_write_files_deferred.write_files') -class TestWriteFilesDeferredSchema(CiTestCase): - - with_logs = True - - def test_schema_validation_warns_invalid_value(self, - m_write_files_deferred): - """If 'defer' is defined, it must be of type 'bool'.""" - - valid_config = { - 'write_files': [ - {**VALID_SCHEMA.get('write_files')[0], 'defer': True} - ] - } - - invalid_config = { - 'write_files': [ - {**VALID_SCHEMA.get('write_files')[0], 'defer': str('no')} - ] - } - - cc = self.tmp_cloud('ubuntu') - handle('cc_write_files_deferred', valid_config, cc, LOG, []) - self.assertNotIn('Invalid config:', self.logs.getvalue()) - handle('cc_write_files_deferred', invalid_config, cc, LOG, []) - self.assertIn('Invalid config:', self.logs.getvalue()) - self.assertIn("defer: 'no' is not of type 'boolean'", - self.logs.getvalue()) - - -class TestWriteFilesDeferred(FilesystemMockingTestCase): - - with_logs = True - - def setUp(self): - super(TestWriteFilesDeferred, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - - def test_filtering_deferred_files(self): - self.patchUtils(self.tmp) - expected = "hello world\n" - config = { - 'write_files': [ - { - 'path': '/tmp/deferred.file', - 'defer': True, - 'content': expected - }, - {'path': '/tmp/not_deferred.file'} - ] - } - cc = self.tmp_cloud('ubuntu') - handle('cc_write_files_deferred', config, cc, LOG, []) - self.assertEqual(util.load_file('/tmp/deferred.file'), expected) - with self.assertRaises(FileNotFoundError): - util.load_file('/tmp/not_deferred.file') - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/test_handler/test_handler_yum_add_repo.py deleted file mode 100644 index 7c61bbf9..00000000 --- a/tests/unittests/test_handler/test_handler_yum_add_repo.py +++ /dev/null @@ -1,111 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
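The deferred-schema tests above derive both the valid and the invalid config from one base entry with dict unpacking, so only the overridden `defer` key differs between the two cases. A minimal sketch of that construction (the `base` dict is illustrative, not the schema fixture itself):

    base = {'path': '/some', 'content': 'a', 'permissions': '0777'}

    valid_config = {'write_files': [{**base, 'defer': True}]}
    invalid_config = {'write_files': [{**base, 'defer': 'no'}]}  # wrong type

    assert valid_config['write_files'][0]['defer'] is True
    assert invalid_config['write_files'][0]['defer'] == 'no'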
- -import configparser -import logging -import shutil -import tempfile - -from cloudinit import util -from cloudinit.config import cc_yum_add_repo -from cloudinit.tests import helpers - -LOG = logging.getLogger(__name__) - - -class TestConfig(helpers.FilesystemMockingTestCase): - def setUp(self): - super(TestConfig, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - - def test_bad_config(self): - cfg = { - 'yum_repos': { - 'epel-testing': { - 'name': 'Extra Packages for Enterprise Linux 5 - Testing', - # Missing this should cause the repo not to be written - # 'baseurl': 'http://blah.org/pub/epel/testing/5/$barch', - 'enabled': False, - 'gpgcheck': True, - 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL', - 'failovermethod': 'priority', - }, - }, - } - self.patchUtils(self.tmp) - cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, []) - self.assertRaises(IOError, util.load_file, - "/etc/yum.repos.d/epel_testing.repo") - - def test_write_config(self): - cfg = { - 'yum_repos': { - 'epel-testing': { - 'name': 'Extra Packages for Enterprise Linux 5 - Testing', - 'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch', - 'enabled': False, - 'gpgcheck': True, - 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL', - 'failovermethod': 'priority', - }, - }, - } - self.patchUtils(self.tmp) - cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, []) - contents = util.load_file("/etc/yum.repos.d/epel_testing.repo") - parser = configparser.ConfigParser() - parser.read_string(contents) - expected = { - 'epel_testing': { - 'name': 'Extra Packages for Enterprise Linux 5 - Testing', - 'failovermethod': 'priority', - 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL', - 'enabled': '0', - 'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch', - 'gpgcheck': '1', - } - } - for section in expected: - self.assertTrue(parser.has_section(section), - "Contains section {0}".format(section)) - for k, v in expected[section].items(): - self.assertEqual(parser.get(section, k), v) - - def test_write_config_array(self): - cfg = { - 'yum_repos': { - 'puppetlabs-products': { - 'name': 'Puppet Labs Products El 6 - $basearch', - 'baseurl': - 'http://yum.puppetlabs.com/el/6/products/$basearch', - 'gpgkey': [ - 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs', - 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet', - ], - 'enabled': True, - 'gpgcheck': True, - } - } - } - self.patchUtils(self.tmp) - cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, []) - contents = util.load_file("/etc/yum.repos.d/puppetlabs_products.repo") - parser = configparser.ConfigParser() - parser.read_string(contents) - expected = { - 'puppetlabs_products': { - 'name': 'Puppet Labs Products El 6 - $basearch', - 'baseurl': 'http://yum.puppetlabs.com/el/6/products/$basearch', - 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs\n' - 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet', - 'enabled': '1', - 'gpgcheck': '1', - } - } - for section in expected: - self.assertTrue(parser.has_section(section), - "Contains section {0}".format(section)) - for k, v in expected[section].items(): - self.assertEqual(parser.get(section, k), v) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_zypper_add_repo.py b/tests/unittests/test_handler/test_handler_zypper_add_repo.py deleted file mode 100644 index 0fb1de1a..00000000 --- a/tests/unittests/test_handler/test_handler_zypper_add_repo.py +++ /dev/null @@ -1,231 +0,0 @@ -# This file is part of cloud-init. 
See LICENSE file for license information. - -import configparser -import glob -import logging -import os - -from cloudinit import util -from cloudinit.config import cc_zypper_add_repo -from cloudinit.tests import helpers -from cloudinit.tests.helpers import mock - -LOG = logging.getLogger(__name__) - - -class TestConfig(helpers.FilesystemMockingTestCase): - def setUp(self): - super(TestConfig, self).setUp() - self.tmp = self.tmp_dir() - self.zypp_conf = 'etc/zypp/zypp.conf' - - def test_bad_repo_config(self): - """Config has no baseurl, no file should be written""" - cfg = { - 'repos': [ - { - 'id': 'foo', - 'name': 'suse-test', - 'enabled': '1' - }, - ] - } - self.patchUtils(self.tmp) - cc_zypper_add_repo._write_repos(cfg['repos'], '/etc/zypp/repos.d') - self.assertRaises(IOError, util.load_file, - "/etc/zypp/repos.d/foo.repo") - - def test_write_repos(self): - """Verify valid repos get written""" - cfg = self._get_base_config_repos() - root_d = self.tmp_dir() - cc_zypper_add_repo._write_repos(cfg['zypper']['repos'], root_d) - repos = glob.glob('%s/*.repo' % root_d) - expected_repos = ['testing-foo.repo', 'testing-bar.repo'] - if len(repos) != 2: - assert 'Number of repos written is "%d" expected 2' % len(repos) - for repo in repos: - repo_name = os.path.basename(repo) - if repo_name not in expected_repos: - assert 'Found repo with name "%s"; unexpected' % repo_name - # Validation that the content gets properly written is in another test - - def test_write_repo(self): - """Verify the content of a repo file""" - cfg = { - 'repos': [ - { - 'baseurl': 'http://foo', - 'name': 'test-foo', - 'id': 'testing-foo' - }, - ] - } - root_d = self.tmp_dir() - cc_zypper_add_repo._write_repos(cfg['repos'], root_d) - contents = util.load_file("%s/testing-foo.repo" % root_d) - parser = configparser.ConfigParser() - parser.read_string(contents) - expected = { - 'testing-foo': { - 'name': 'test-foo', - 'baseurl': 'http://foo', - 'enabled': '1', - 'autorefresh': '1' - } - } - for section in expected: - self.assertTrue(parser.has_section(section), - "Contains section {0}".format(section)) - for k, v in expected[section].items(): - self.assertEqual(parser.get(section, k), v) - - def test_config_write(self): - """Write valid configuration data""" - cfg = { - 'config': { - 'download.deltarpm': 'False', - 'reposdir': 'foo' - } - } - root_d = self.tmp_dir() - helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'}) - self.reRoot(root_d) - cc_zypper_add_repo._write_zypp_config(cfg['config']) - cfg_out = os.path.join(root_d, self.zypp_conf) - contents = util.load_file(cfg_out) - expected = [ - '# Zypp config', - '# Added via cloud.cfg', - 'download.deltarpm=False', - 'reposdir=foo' - ] - for item in contents.split('\n'): - if item not in expected: - self.assertIsNone(item) - - @mock.patch('cloudinit.log.logging') - def test_config_write_skip_configdir(self, mock_logging): - """Write configuration but skip writing 'configdir' setting""" - cfg = { - 'config': { - 'download.deltarpm': 'False', - 'reposdir': 'foo', - 'configdir': 'bar' - } - } - root_d = self.tmp_dir() - helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'}) - self.reRoot(root_d) - cc_zypper_add_repo._write_zypp_config(cfg['config']) - cfg_out = os.path.join(root_d, self.zypp_conf) - contents = util.load_file(cfg_out) - expected = [ - '# Zypp config', - '# Added via cloud.cfg', - 'download.deltarpm=False', - 'reposdir=foo' - ] - for item in contents.split('\n'): - if item not in expected: - self.assertIsNone(item) - # Not finding teh 
right path for mocking :( - # assert mock_logging.warning.called - - def test_empty_config_section_no_new_data(self): - """When the config section is empty no new data should be written to - zypp.conf""" - cfg = self._get_base_config_repos() - cfg['zypper']['config'] = None - root_d = self.tmp_dir() - helpers.populate_dir(root_d, {self.zypp_conf: '# No data'}) - self.reRoot(root_d) - cc_zypper_add_repo._write_zypp_config(cfg.get('config', {})) - cfg_out = os.path.join(root_d, self.zypp_conf) - contents = util.load_file(cfg_out) - self.assertEqual(contents, '# No data') - - def test_empty_config_value_no_new_data(self): - """When the config section is not empty but there are no values - no new data should be written to zypp.conf""" - cfg = self._get_base_config_repos() - cfg['zypper']['config'] = { - 'download.deltarpm': None - } - root_d = self.tmp_dir() - helpers.populate_dir(root_d, {self.zypp_conf: '# No data'}) - self.reRoot(root_d) - cc_zypper_add_repo._write_zypp_config(cfg.get('config', {})) - cfg_out = os.path.join(root_d, self.zypp_conf) - contents = util.load_file(cfg_out) - self.assertEqual(contents, '# No data') - - def test_handler_full_setup(self): - """Test that the handler ends up calling the renderers""" - cfg = self._get_base_config_repos() - cfg['zypper']['config'] = { - 'download.deltarpm': 'False', - } - root_d = self.tmp_dir() - os.makedirs('%s/etc/zypp/repos.d' % root_d) - helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'}) - self.reRoot(root_d) - cc_zypper_add_repo.handle('zypper_add_repo', cfg, None, LOG, []) - cfg_out = os.path.join(root_d, self.zypp_conf) - contents = util.load_file(cfg_out) - expected = [ - '# Zypp config', - '# Added via cloud.cfg', - 'download.deltarpm=False', - ] - for item in contents.split('\n'): - if item not in expected: - self.assertIsNone(item) - repos = glob.glob('%s/etc/zypp/repos.d/*.repo' % root_d) - expected_repos = ['testing-foo.repo', 'testing-bar.repo'] - if len(repos) != 2: - assert 'Number of repos written is "%d" expected 2' % len(repos) - for repo in repos: - repo_name = os.path.basename(repo) - if repo_name not in expected_repos: - assert 'Found repo with name "%s"; unexpected' % repo_name - - def test_no_config_section_no_new_data(self): - """When there is no config section no new data should be written to - zypp.conf""" - cfg = self._get_base_config_repos() - root_d = self.tmp_dir() - helpers.populate_dir(root_d, {self.zypp_conf: '# No data'}) - self.reRoot(root_d) - cc_zypper_add_repo._write_zypp_config(cfg.get('config', {})) - cfg_out = os.path.join(root_d, self.zypp_conf) - contents = util.load_file(cfg_out) - self.assertEqual(contents, '# No data') - - def test_no_repo_data(self): - """When there is no repo data nothing should happen""" - root_d = self.tmp_dir() - self.reRoot(root_d) - cc_zypper_add_repo._write_repos(None, root_d) - content = glob.glob('%s/*' % root_d) - self.assertEqual(len(content), 0) - - def _get_base_config_repos(self): - """Basic valid repo configuration""" - cfg = { - 'zypper': { - 'repos': [ - { - 'baseurl': 'http://foo', - 'name': 'test-foo', - 'id': 'testing-foo' - }, - { - 'baseurl': 'http://bar', - 'name': 'test-bar', - 'id': 'testing-bar' - } - ] - } - } - return cfg diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py deleted file mode 100644 index 1dae223d..00000000 --- a/tests/unittests/test_handler/test_schema.py +++ /dev/null @@ -1,515 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
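Both the yum and zypper tests above validate the generated repo files by round-tripping them through `configparser`; note that values come back as strings, since `ConfigParser` does not coerce types. A condensed sketch with made-up repo contents:

    import configparser

    contents = "[testing-foo]\nname=test-foo\nbaseurl=http://foo\nenabled=1\n"
    parser = configparser.ConfigParser()
    parser.read_string(contents)

    assert parser.has_section('testing-foo')
    assert parser.get('testing-foo', 'enabled') == '1'  # strings, not ints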
-import cloudinit -from cloudinit.config.schema import ( - CLOUD_CONFIG_HEADER, SchemaValidationError, annotated_cloudconfig_file, - get_schema_doc, get_schema, validate_cloudconfig_file, - validate_cloudconfig_schema, main) -from cloudinit.util import write_file - -from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema - -from copy import copy -import itertools -import pytest -from pathlib import Path -from textwrap import dedent -from yaml import safe_load - - -class GetSchemaTest(CiTestCase): - - def test_get_schema_coalesces_known_schema(self): - """Every cloudconfig module with schema is listed in allOf keyword.""" - schema = get_schema() - self.assertCountEqual( - [ - 'cc_apk_configure', - 'cc_apt_configure', - 'cc_bootcmd', - 'cc_locale', - 'cc_ntp', - 'cc_resizefs', - 'cc_runcmd', - 'cc_snap', - 'cc_ubuntu_advantage', - 'cc_ubuntu_drivers', - 'cc_write_files', - 'cc_write_files_deferred', - 'cc_zypper_add_repo', - 'cc_chef', - 'cc_install_hotplug', - ], - [subschema['id'] for subschema in schema['allOf']]) - self.assertEqual('cloud-config-schema', schema['id']) - self.assertEqual( - 'http://json-schema.org/draft-04/schema#', - schema['$schema']) - # FULL_SCHEMA is updated by the get_schema call - from cloudinit.config.schema import FULL_SCHEMA - self.assertCountEqual(['id', '$schema', 'allOf'], FULL_SCHEMA.keys()) - - def test_get_schema_returns_global_when_set(self): - """When FULL_SCHEMA global is already set, get_schema returns it.""" - m_schema_path = 'cloudinit.config.schema.FULL_SCHEMA' - with mock.patch(m_schema_path, {'here': 'iam'}): - self.assertEqual({'here': 'iam'}, get_schema()) - - -class SchemaValidationErrorTest(CiTestCase): - """Test validate_cloudconfig_schema""" - - def test_schema_validation_error_expects_schema_errors(self): - """SchemaValidationError is initialized from schema_errors.""" - errors = (('key.path', 'unexpected key "junk"'), - ('key2.path', '"-123" is not a valid "hostname" format')) - exception = SchemaValidationError(schema_errors=errors) - self.assertIsInstance(exception, Exception) - self.assertEqual(exception.schema_errors, errors) - self.assertEqual( - 'Cloud config schema errors: key.path: unexpected key "junk", ' - 'key2.path: "-123" is not a valid "hostname" format', - str(exception)) - self.assertTrue(isinstance(exception, ValueError)) - - -class ValidateCloudConfigSchemaTest(CiTestCase): - """Tests for validate_cloudconfig_schema.""" - - with_logs = True - - @skipUnlessJsonSchema() - def test_validateconfig_schema_non_strict_emits_warnings(self): - """When strict is False validate_cloudconfig_schema emits warnings.""" - schema = {'properties': {'p1': {'type': 'string'}}} - validate_cloudconfig_schema({'p1': -1}, schema, strict=False) - self.assertIn( - "Invalid config:\np1: -1 is not of type 'string'\n", - self.logs.getvalue()) - - @skipUnlessJsonSchema() - def test_validateconfig_schema_emits_warning_on_missing_jsonschema(self): - """Warning from validate_cloudconfig_schema when missing jsonschema.""" - schema = {'properties': {'p1': {'type': 'string'}}} - with mock.patch.dict('sys.modules', **{'jsonschema': ImportError()}): - validate_cloudconfig_schema({'p1': -1}, schema, strict=True) - self.assertIn( - 'Ignoring schema validation. 
python-jsonschema is not present', - self.logs.getvalue()) - - @skipUnlessJsonSchema() - def test_validateconfig_schema_strict_raises_errors(self): - """When strict is True validate_cloudconfig_schema raises errors.""" - schema = {'properties': {'p1': {'type': 'string'}}} - with self.assertRaises(SchemaValidationError) as context_mgr: - validate_cloudconfig_schema({'p1': -1}, schema, strict=True) - self.assertEqual( - "Cloud config schema errors: p1: -1 is not of type 'string'", - str(context_mgr.exception)) - - @skipUnlessJsonSchema() - def test_validateconfig_schema_honors_formats(self): - """With strict True, validate_cloudconfig_schema errors on format.""" - schema = { - 'properties': {'p1': {'type': 'string', 'format': 'email'}}} - with self.assertRaises(SchemaValidationError) as context_mgr: - validate_cloudconfig_schema({'p1': '-1'}, schema, strict=True) - self.assertEqual( - "Cloud config schema errors: p1: '-1' is not a 'email'", - str(context_mgr.exception)) - - -class TestCloudConfigExamples: - schema = get_schema() - params = [ - (schema["id"], example) - for schema in schema["allOf"] for example in schema["examples"]] - - @pytest.mark.parametrize("schema_id,example", params) - @skipUnlessJsonSchema() - def test_validateconfig_schema_of_example(self, schema_id, example): - """ For a given example in a config module we test if it is valid - according to the unified schema of all config modules - """ - config_load = safe_load(example) - validate_cloudconfig_schema( - config_load, self.schema, strict=True) - - -class ValidateCloudConfigFileTest(CiTestCase): - """Tests for validate_cloudconfig_file.""" - - def setUp(self): - super(ValidateCloudConfigFileTest, self).setUp() - self.config_file = self.tmp_path('cloudcfg.yaml') - - def test_validateconfig_file_error_on_absent_file(self): - """On absent config_path, validate_cloudconfig_file errors.""" - with self.assertRaises(RuntimeError) as context_mgr: - validate_cloudconfig_file('/not/here', {}) - self.assertEqual( - 'Configfile /not/here does not exist', - str(context_mgr.exception)) - - def test_validateconfig_file_error_on_invalid_header(self): - """On invalid header, validate_cloudconfig_file errors. - - A SchemaValidationError is raised when the file doesn't begin with - CLOUD_CONFIG_HEADER. - """ - write_file(self.config_file, '#junk') - with self.assertRaises(SchemaValidationError) as context_mgr: - validate_cloudconfig_file(self.config_file, {}) - self.assertEqual( - 'Cloud config schema errors: format-l1.c1: File {0} needs to begin' - ' with "{1}"'.format( - self.config_file, CLOUD_CONFIG_HEADER.decode()), - str(context_mgr.exception)) - - def test_validateconfig_file_error_on_non_yaml_scanner_error(self): - """On non-yaml scan issues, validate_cloudconfig_file errors.""" - # Generate a scanner error by providing text on a single line with - # improper indent. 
- write_file(self.config_file, '#cloud-config\nasdf:\nasdf') - with self.assertRaises(SchemaValidationError) as context_mgr: - validate_cloudconfig_file(self.config_file, {}) - self.assertIn( - 'schema errors: format-l3.c1: File {0} is not valid yaml.'.format( - self.config_file), - str(context_mgr.exception)) - - def test_validateconfig_file_error_on_non_yaml_parser_error(self): - """On non-yaml parser issues, validate_cloudconfig_file errors.""" - write_file(self.config_file, '#cloud-config\n{}}') - with self.assertRaises(SchemaValidationError) as context_mgr: - validate_cloudconfig_file(self.config_file, {}) - self.assertIn( - 'schema errors: format-l2.c3: File {0} is not valid yaml.'.format( - self.config_file), - str(context_mgr.exception)) - - @skipUnlessJsonSchema() - def test_validateconfig_file_sctrictly_validates_schema(self): - """validate_cloudconfig_file raises errors on invalid schema.""" - schema = { - 'properties': {'p1': {'type': 'string', 'format': 'string'}}} - write_file(self.config_file, '#cloud-config\np1: -1') - with self.assertRaises(SchemaValidationError) as context_mgr: - validate_cloudconfig_file(self.config_file, schema) - self.assertEqual( - "Cloud config schema errors: p1: -1 is not of type 'string'", - str(context_mgr.exception)) - - -class GetSchemaDocTest(CiTestCase): - """Tests for get_schema_doc.""" - - def setUp(self): - super(GetSchemaDocTest, self).setUp() - self.required_schema = { - 'title': 'title', 'description': 'description', 'id': 'id', - 'name': 'name', 'frequency': 'frequency', - 'distros': ['debian', 'rhel']} - - def test_get_schema_doc_returns_restructured_text(self): - """get_schema_doc returns restructured text for a cloudinit schema.""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'prop1': {'type': 'array', 'description': 'prop-description', - 'items': {'type': 'integer'}}}}) - self.assertEqual( - dedent(""" - name - ---- - **Summary:** title - - description - - **Internal name:** ``id`` - - **Module frequency:** frequency - - **Supported distros:** debian, rhel - - **Config schema**: - **prop1:** (array of integer) prop-description\n\n"""), - get_schema_doc(full_schema)) - - def test_get_schema_doc_handles_multiple_types(self): - """get_schema_doc delimits multiple property types with a '/'.""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'prop1': {'type': ['string', 'integer'], - 'description': 'prop-description'}}}) - self.assertIn( - '**prop1:** (string/integer) prop-description', - get_schema_doc(full_schema)) - - def test_get_schema_doc_handles_enum_types(self): - """get_schema_doc converts enum types to yaml and delimits with '/'.""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'prop1': {'enum': [True, False, 'stuff'], - 'description': 'prop-description'}}}) - self.assertIn( - '**prop1:** (true/false/stuff) prop-description', - get_schema_doc(full_schema)) - - def test_get_schema_doc_handles_nested_oneof_property_types(self): - """get_schema_doc describes array items oneOf declarations in type.""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'prop1': {'type': 'array', - 'items': { - 'oneOf': [{'type': 'string'}, - {'type': 'integer'}]}, - 'description': 'prop-description'}}}) - self.assertIn( - '**prop1:** (array of (string)/(integer)) prop-description', - get_schema_doc(full_schema)) - - def test_get_schema_doc_handles_string_examples(self): - """get_schema_doc properly 
indented examples as a list of strings.""" - full_schema = copy(self.required_schema) - full_schema.update( - {'examples': ['ex1:\n [don\'t, expand, "this"]', 'ex2: true'], - 'properties': { - 'prop1': {'type': 'array', 'description': 'prop-description', - 'items': {'type': 'integer'}}}}) - self.assertIn( - dedent(""" - **Config schema**: - **prop1:** (array of integer) prop-description - - **Examples**:: - - ex1: - [don't, expand, "this"] - # --- Example2 --- - ex2: true - """), - get_schema_doc(full_schema)) - - def test_get_schema_doc_properly_parse_description(self): - """get_schema_doc description properly formatted""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'p1': { - 'type': 'string', - 'description': dedent("""\ - This item - has the - following options: - - - option1 - - option2 - - option3 - - The default value is - option1""") - } - }} - ) - - self.assertIn( - dedent(""" - **Config schema**: - **p1:** (string) This item has the following options: - - - option1 - - option2 - - option3 - - The default value is option1 - """), - get_schema_doc(full_schema)) - - def test_get_schema_doc_raises_key_errors(self): - """get_schema_doc raises KeyErrors on missing keys.""" - for key in self.required_schema: - invalid_schema = copy(self.required_schema) - invalid_schema.pop(key) - with self.assertRaises(KeyError) as context_mgr: - get_schema_doc(invalid_schema) - self.assertIn(key, str(context_mgr.exception)) - - -class AnnotatedCloudconfigFileTest(CiTestCase): - maxDiff = None - - def test_annotated_cloudconfig_file_no_schema_errors(self): - """With no schema_errors, print the original content.""" - content = b'ntp:\n pools: [ntp1.pools.com]\n' - self.assertEqual( - content, - annotated_cloudconfig_file({}, content, schema_errors=[])) - - def test_annotated_cloudconfig_file_schema_annotates_and_adds_footer(self): - """With schema_errors, error lines are annotated and a footer added.""" - content = dedent("""\ - #cloud-config - # comment - ntp: - pools: [-99, 75] - """).encode() - expected = dedent("""\ - #cloud-config - # comment - ntp: # E1 - pools: [-99, 75] # E2,E3 - - # Errors: ------------- - # E1: Some type error - # E2: -99 is not a string - # E3: 75 is not a string - - """) - parsed_config = safe_load(content[13:]) - schema_errors = [ - ('ntp', 'Some type error'), ('ntp.pools.0', '-99 is not a string'), - ('ntp.pools.1', '75 is not a string')] - self.assertEqual( - expected, - annotated_cloudconfig_file(parsed_config, content, schema_errors)) - - def test_annotated_cloudconfig_file_annotates_separate_line_items(self): - """Errors are annotated for lists with items on separate lines.""" - content = dedent("""\ - #cloud-config - # comment - ntp: - pools: - - -99 - - 75 - """).encode() - expected = dedent("""\ - ntp: - pools: - - -99 # E1 - - 75 # E2 - """) - parsed_config = safe_load(content[13:]) - schema_errors = [ - ('ntp.pools.0', '-99 is not a string'), - ('ntp.pools.1', '75 is not a string')] - self.assertIn( - expected, - annotated_cloudconfig_file(parsed_config, content, schema_errors)) - - -class TestMain: - - exclusive_combinations = itertools.combinations( - ["--system", "--docs all", "--config-file something"], 2 - ) - - @pytest.mark.parametrize("params", exclusive_combinations) - def test_main_exclusive_args(self, params, capsys): - """Main exits non-zero and error on required exclusive args.""" - params = list(itertools.chain(*[a.split() for a in params])) - with mock.patch('sys.argv', ['mycmd'] + params): - with 
pytest.raises(SystemExit) as context_manager: - main() - assert 1 == context_manager.value.code - - _out, err = capsys.readouterr() - expected = ( - 'Expected one of --config-file, --system or --docs arguments\n' - ) - assert expected == err - - def test_main_missing_args(self, capsys): - """Main exits non-zero and reports an error on missing parameters.""" - with mock.patch('sys.argv', ['mycmd']): - with pytest.raises(SystemExit) as context_manager: - main() - assert 1 == context_manager.value.code - - _out, err = capsys.readouterr() - expected = ( - 'Expected one of --config-file, --system or --docs arguments\n' - ) - assert expected == err - - def test_main_absent_config_file(self, capsys): - """Main exits non-zero when config file is absent.""" - myargs = ['mycmd', '--annotate', '--config-file', 'NOT_A_FILE'] - with mock.patch('sys.argv', myargs): - with pytest.raises(SystemExit) as context_manager: - main() - assert 1 == context_manager.value.code - _out, err = capsys.readouterr() - assert 'Configfile NOT_A_FILE does not exist\n' == err - - def test_main_prints_docs(self, capsys): - """When --docs parameter is provided, main generates documentation.""" - myargs = ['mycmd', '--docs', 'all'] - with mock.patch('sys.argv', myargs): - assert 0 == main(), 'Expected 0 exit code' - out, _err = capsys.readouterr() - assert '\nNTP\n---\n' in out - assert '\nRuncmd\n------\n' in out - - def test_main_validates_config_file(self, tmpdir, capsys): - """When --config-file parameter is provided, main validates schema.""" - myyaml = tmpdir.join('my.yaml') - myargs = ['mycmd', '--config-file', myyaml.strpath] - myyaml.write(b'#cloud-config\nntp:') # shortest ntp schema - with mock.patch('sys.argv', myargs): - assert 0 == main(), 'Expected 0 exit code' - out, _err = capsys.readouterr() - assert 'Valid cloud-config: {0}\n'.format(myyaml) == out - - @mock.patch('cloudinit.config.schema.read_cfg_paths') - @mock.patch('cloudinit.config.schema.os.getuid', return_value=0) - def test_main_validates_system_userdata( - self, m_getuid, m_read_cfg_paths, capsys, paths - ): - """When --system is provided, main validates system userdata.""" - m_read_cfg_paths.return_value = paths - ud_file = paths.get_ipath_cur("userdata_raw") - write_file(ud_file, b'#cloud-config\nntp:') - myargs = ['mycmd', '--system'] - with mock.patch('sys.argv', myargs): - assert 0 == main(), 'Expected 0 exit code' - out, _err = capsys.readouterr() - assert 'Valid cloud-config: system userdata\n' == out - - @mock.patch('cloudinit.config.schema.os.getuid', return_value=1000) - def test_main_system_userdata_requires_root(self, m_getuid, capsys, paths): - """Non-root user can't use --system param""" - myargs = ['mycmd', '--system'] - with mock.patch('sys.argv', myargs): - with pytest.raises(SystemExit) as context_manager: - main() - assert 1 == context_manager.value.code - _out, err = capsys.readouterr() - expected = ( - 'Unable to read system userdata as non-root user. 
Try using sudo\n' - ) - assert expected == err - - -def _get_schema_doc_examples(): - examples_dir = Path( - cloudinit.__file__).parent.parent / 'doc' / 'examples' - assert examples_dir.is_dir() - - all_text_files = (f for f in examples_dir.glob('cloud-config*.txt') - if not f.name.startswith('cloud-config-archive')) - return all_text_files - - -class TestSchemaDocExamples: - schema = get_schema() - - @pytest.mark.parametrize("example_path", _get_schema_doc_examples()) - @skipUnlessJsonSchema() - def test_schema_doc_examples(self, example_path): - validate_cloudconfig_file(str(example_path), self.schema) - -# vi: ts=4 expandtab syntax=python diff --git a/tests/unittests/test_helpers.py b/tests/unittests/test_helpers.py index 2e4582a0..c6f9b94a 100644 --- a/tests/unittests/test_helpers.py +++ b/tests/unittests/test_helpers.py @@ -4,7 +4,7 @@ import os -from cloudinit.tests import helpers as test_helpers +from tests.unittests import helpers as test_helpers from cloudinit import sources diff --git a/tests/unittests/test_log.py b/tests/unittests/test_log.py index e069a487..3d1b9582 100644 --- a/tests/unittests/test_log.py +++ b/tests/unittests/test_log.py @@ -9,7 +9,7 @@ import time from cloudinit import log as ci_logging from cloudinit.analyze.dump import CLOUD_INIT_ASCTIME_FMT -from cloudinit.tests.helpers import CiTestCase +from tests.unittests.helpers import CiTestCase class TestCloudInitLogger(CiTestCase): diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py index 10871bcf..48ab6602 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -1,6 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit.tests import helpers +from tests.unittests import helpers from cloudinit.handlers import cloud_config from cloudinit.handlers import (CONTENT_START, CONTENT_END) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 57edc89a..b5c38c55 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -12,7 +12,7 @@ from cloudinit import subp from cloudinit import util from cloudinit import safeyaml as yaml -from cloudinit.tests.helpers import ( +from tests.unittests.helpers import ( CiTestCase, FilesystemMockingTestCase, dir2dict, mock, populate_dir) import base64 diff --git a/tests/unittests/test_net_freebsd.py b/tests/unittests/test_net_freebsd.py index e339e132..f0dde097 100644 --- a/tests/unittests/test_net_freebsd.py +++ b/tests/unittests/test_net_freebsd.py @@ -3,7 +3,7 @@ import os import cloudinit.net import cloudinit.net.network_state from cloudinit import safeyaml -from cloudinit.tests.helpers import (CiTestCase, mock, readResource, dir2dict) +from tests.unittests.helpers import (CiTestCase, mock, readResource, dir2dict) SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output") diff --git a/tests/unittests/test_netinfo.py b/tests/unittests/test_netinfo.py new file mode 100644 index 00000000..238f7b0a --- /dev/null +++ b/tests/unittests/test_netinfo.py @@ -0,0 +1,181 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
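The `TestMain` cases above lean on two pytest idioms: `pytest.raises(SystemExit)` exposes the exit code on `excinfo.value.code`, and the `capsys` fixture captures output for assertion. A minimal standalone sketch with a hypothetical `main` (this toy prints to stdout; the real command writes its error to stderr):

    import pytest

    def main():
        print('Expected one of --config-file, --system or --docs arguments')
        raise SystemExit(1)

    def test_main_missing_args(capsys):
        with pytest.raises(SystemExit) as excinfo:
            main()
        assert excinfo.value.code == 1
        out, _err = capsys.readouterr()
        assert 'Expected one of' in out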
+ +"""Tests netinfo module functions and classes.""" + +from copy import copy + +from cloudinit.netinfo import netdev_info, netdev_pformat, route_pformat +from tests.unittests.helpers import CiTestCase, mock, readResource + + +# Example ifconfig and route output +SAMPLE_OLD_IFCONFIG_OUT = readResource("netinfo/old-ifconfig-output") +SAMPLE_NEW_IFCONFIG_OUT = readResource("netinfo/new-ifconfig-output") +SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output") +SAMPLE_IPADDRSHOW_OUT = readResource("netinfo/sample-ipaddrshow-output") +SAMPLE_ROUTE_OUT_V4 = readResource("netinfo/sample-route-output-v4") +SAMPLE_ROUTE_OUT_V6 = readResource("netinfo/sample-route-output-v6") +SAMPLE_IPROUTE_OUT_V4 = readResource("netinfo/sample-iproute-output-v4") +SAMPLE_IPROUTE_OUT_V6 = readResource("netinfo/sample-iproute-output-v6") +NETDEV_FORMATTED_OUT = readResource("netinfo/netdev-formatted-output") +ROUTE_FORMATTED_OUT = readResource("netinfo/route-formatted-output") +FREEBSD_NETDEV_OUT = readResource("netinfo/freebsd-netdev-formatted-output") + + +class TestNetInfo(CiTestCase): + + maxDiff = None + with_logs = True + + @mock.patch('cloudinit.netinfo.subp.which') + @mock.patch('cloudinit.netinfo.subp.subp') + def test_netdev_old_nettools_pformat(self, m_subp, m_which): + """netdev_pformat properly rendering old nettools info.""" + m_subp.return_value = (SAMPLE_OLD_IFCONFIG_OUT, '') + m_which.side_effect = lambda x: x if x == 'ifconfig' else None + content = netdev_pformat() + self.assertEqual(NETDEV_FORMATTED_OUT, content) + + @mock.patch('cloudinit.netinfo.subp.which') + @mock.patch('cloudinit.netinfo.subp.subp') + def test_netdev_new_nettools_pformat(self, m_subp, m_which): + """netdev_pformat properly rendering netdev new nettools info.""" + m_subp.return_value = (SAMPLE_NEW_IFCONFIG_OUT, '') + m_which.side_effect = lambda x: x if x == 'ifconfig' else None + content = netdev_pformat() + self.assertEqual(NETDEV_FORMATTED_OUT, content) + + @mock.patch('cloudinit.netinfo.subp.which') + @mock.patch('cloudinit.netinfo.subp.subp') + def test_netdev_freebsd_nettools_pformat(self, m_subp, m_which): + """netdev_pformat properly rendering netdev new nettools info.""" + m_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, '') + m_which.side_effect = lambda x: x if x == 'ifconfig' else None + content = netdev_pformat() + print() + print(content) + print() + self.assertEqual(FREEBSD_NETDEV_OUT, content) + + @mock.patch('cloudinit.netinfo.subp.which') + @mock.patch('cloudinit.netinfo.subp.subp') + def test_netdev_iproute_pformat(self, m_subp, m_which): + """netdev_pformat properly rendering ip route info.""" + m_subp.return_value = (SAMPLE_IPADDRSHOW_OUT, '') + m_which.side_effect = lambda x: x if x == 'ip' else None + content = netdev_pformat() + new_output = copy(NETDEV_FORMATTED_OUT) + # ip route show describes global scopes on ipv4 addresses + # whereas ifconfig does not. Add proper global/host scope to output. + new_output = new_output.replace('| . | 50:7b', '| global | 50:7b') + new_output = new_output.replace( + '255.0.0.0 | . 
|', '255.0.0.0 | host |')
+        self.assertEqual(new_output, content)
+
+    @mock.patch('cloudinit.netinfo.subp.which')
+    @mock.patch('cloudinit.netinfo.subp.subp')
+    def test_netdev_warn_on_missing_commands(self, m_subp, m_which):
+        """netdev_pformat warns when missing both ip and 'netstat'."""
+        m_which.return_value = None  # Neither ip nor netstat found
+        content = netdev_pformat()
+        self.assertEqual('\n', content)
+        self.assertEqual(
+            "WARNING: Could not print networks: missing 'ip' and 'ifconfig'"
+            " commands\n",
+            self.logs.getvalue())
+        m_subp.assert_not_called()
+
+    @mock.patch('cloudinit.netinfo.subp.which')
+    @mock.patch('cloudinit.netinfo.subp.subp')
+    def test_netdev_info_nettools_down(self, m_subp, m_which):
+        """test netdev_info using nettools and down interfaces."""
+        m_subp.return_value = (
+            readResource("netinfo/new-ifconfig-output-down"), "")
+        m_which.side_effect = lambda x: x if x == 'ifconfig' else None
+        self.assertEqual(
+            {'eth0': {'ipv4': [], 'ipv6': [],
+                      'hwaddr': '00:16:3e:de:51:a6', 'up': False},
+             'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0'}],
+                    'ipv6': [{'ip': '::1/128', 'scope6': 'host'}],
+                    'hwaddr': '.', 'up': True}},
+            netdev_info("."))
+
+    @mock.patch('cloudinit.netinfo.subp.which')
+    @mock.patch('cloudinit.netinfo.subp.subp')
+    def test_netdev_info_iproute_down(self, m_subp, m_which):
+        """Test netdev_info with ip and down interfaces."""
+        m_subp.return_value = (
+            readResource("netinfo/sample-ipaddrshow-output-down"), "")
+        m_which.side_effect = lambda x: x if x == 'ip' else None
+        self.assertEqual(
+            {'lo': {'ipv4': [{'ip': '127.0.0.1', 'bcast': '.',
+                              'mask': '255.0.0.0', 'scope': 'host'}],
+                    'ipv6': [{'ip': '::1/128', 'scope6': 'host'}],
+                    'hwaddr': '.', 'up': True},
+             'eth0': {'ipv4': [], 'ipv6': [],
+                      'hwaddr': '00:16:3e:de:51:a6', 'up': False}},
+            netdev_info("."))
+
+    @mock.patch('cloudinit.netinfo.netdev_info')
+    def test_netdev_pformat_with_down(self, m_netdev_info):
+        """test netdev_pformat when netdev_info returns 'down' interfaces."""
+        m_netdev_info.return_value = (
+            {'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0',
+                              'scope': 'host'}],
+                    'ipv6': [{'ip': '::1/128', 'scope6': 'host'}],
+                    'hwaddr': '.', 'up': True},
+             'eth0': {'ipv4': [], 'ipv6': [],
+                      'hwaddr': '00:16:3e:de:51:a6', 'up': False}})
+        self.assertEqual(
+            readResource("netinfo/netdev-formatted-output-down"),
+            netdev_pformat())
+
+    @mock.patch('cloudinit.netinfo.subp.which')
+    @mock.patch('cloudinit.netinfo.subp.subp')
+    def test_route_nettools_pformat(self, m_subp, m_which):
+        """route_pformat properly rendering nettools route info."""
+
+        def subp_netstat_route_selector(*args, **kwargs):
+            if args[0] == ['netstat', '--route', '--numeric', '--extend']:
+                return (SAMPLE_ROUTE_OUT_V4, '')
+            if args[0] == ['netstat', '-A', 'inet6', '--route', '--numeric']:
+                return (SAMPLE_ROUTE_OUT_V6, '')
+            raise Exception('Unexpected subp call %s' % args[0])
+
+        m_subp.side_effect = subp_netstat_route_selector
+        m_which.side_effect = lambda x: x if x == 'netstat' else None
+        content = route_pformat()
+        self.assertEqual(ROUTE_FORMATTED_OUT, content)
+
+    @mock.patch('cloudinit.netinfo.subp.which')
+    @mock.patch('cloudinit.netinfo.subp.subp')
+    def test_route_iproute_pformat(self, m_subp, m_which):
+        """route_pformat properly rendering ip route info."""
+
+        def subp_iproute_selector(*args, **kwargs):
+            if ['ip', '-o', 'route', 'list'] == args[0]:
+                return (SAMPLE_IPROUTE_OUT_V4, '')
+            v6cmd = ['ip', '--oneline', '-6', 'route', 'list', 'table', 'all']
+            if v6cmd == args[0]:
+                return (SAMPLE_IPROUTE_OUT_V6, '')
+            raise Exception('Unexpected subp call %s' % args[0])
+
+        m_subp.side_effect = subp_iproute_selector
+        m_which.side_effect = lambda x: x if x == 'ip' else None
+        content = route_pformat()
+        self.assertEqual(ROUTE_FORMATTED_OUT, content)
+
+    @mock.patch('cloudinit.netinfo.subp.which')
+    @mock.patch('cloudinit.netinfo.subp.subp')
+    def test_route_warn_on_missing_commands(self, m_subp, m_which):
+        """route_pformat warns when missing both ip and 'netstat'."""
+        m_which.return_value = None  # Neither ip nor netstat found
+        content = route_pformat()
+        self.assertEqual('\n', content)
+        self.assertEqual(
+            "WARNING: Could not print routes: missing 'ip' and 'netstat'"
+            " commands\n",
+            self.logs.getvalue())
+        m_subp.assert_not_called()
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_pathprefix2dict.py b/tests/unittests/test_pathprefix2dict.py
index abbb29b8..4e737ad7 100644
--- a/tests/unittests/test_pathprefix2dict.py
+++ b/tests/unittests/test_pathprefix2dict.py
@@ -2,7 +2,7 @@
 
 from cloudinit import util
 
-from cloudinit.tests.helpers import TestCase, populate_dir
+from tests.unittests.helpers import TestCase, populate_dir
 
 import shutil
 import tempfile
diff --git a/tests/unittests/test_persistence.py b/tests/unittests/test_persistence.py
new file mode 100644
index 00000000..ec1152a9
--- /dev/null
+++ b/tests/unittests/test_persistence.py
@@ -0,0 +1,127 @@
+# Copyright (C) 2020 Canonical Ltd.
+#
+# Author: Daniel Watkins
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+"""
+Tests for cloudinit.persistence.
+
+Per https://docs.python.org/3/library/pickle.html, only "classes that are
+defined at the top level of a module" can be pickled. This means that all of
+our ``CloudInitPickleMixin`` subclasses for testing must be defined at
+module-level (rather than being defined inline or dynamically in the body of
+test methods, as we would do without this constraint).
+
+``TestPickleMixin.test_subclasses`` iterates over a list of all of these
+classes, and tests that they round-trip through a pickle dump/load. As the
+interface we're testing is that ``_unpickle`` is called appropriately on
+subclasses, our subclasses define their assertions in their ``_unpickle``
+implementation. (This means that the assertions will not be executed if
+``_unpickle`` is not called at all; we have
+``TestPickleMixin.test_unpickle_called`` to ensure it is called.)
+
+To avoid manually maintaining a list of classes for parametrization we use a
+simple metaclass, ``_Collector``, to gather them up.
+"""
+
+import pickle
+from unittest import mock
+
+import pytest
+
+from cloudinit.persistence import CloudInitPickleMixin
+
+
+class _Collector(type):
+    """Any class using this as a metaclass will be stored in test_classes."""
+
+    test_classes = []
+
+    def __new__(cls, *args):
+        new_cls = super().__new__(cls, *args)
+        _Collector.test_classes.append(new_cls)
+        return new_cls
+
+
+class InstanceVersionNotUsed(CloudInitPickleMixin, metaclass=_Collector):
+    """Test that the class version is used over one set in instance state."""
+
+    _ci_pkl_version = 1
+
+    def __init__(self):
+        self._ci_pkl_version = 2
+
+    def _unpickle(self, ci_pkl_version: int) -> None:
+        assert 1 == ci_pkl_version
+
+
+class MissingVersionHandled(CloudInitPickleMixin, metaclass=_Collector):
+    """Test that pickles without ``_ci_pkl_version`` are handled gracefully.
+ + This is tested by overriding ``__getstate__`` so the dumped pickle of this + class will not have ``_ci_pkl_version`` included. + """ + + def __getstate__(self): + return self.__dict__ + + def _unpickle(self, ci_pkl_version: int) -> None: + assert 0 == ci_pkl_version + + +class OverridenVersionHonored(CloudInitPickleMixin, metaclass=_Collector): + """Test that the subclass's version is used.""" + + _ci_pkl_version = 1 + + def _unpickle(self, ci_pkl_version: int) -> None: + assert 1 == ci_pkl_version + + +class StateIsRestored(CloudInitPickleMixin, metaclass=_Collector): + """Instance state should be restored before ``_unpickle`` is called.""" + + def __init__(self): + self.some_state = "some state" + + def _unpickle(self, ci_pkl_version: int) -> None: + assert "some state" == self.some_state + + +class UnpickleCanBeUnoverriden(CloudInitPickleMixin, metaclass=_Collector): + """Subclasses should not need to override ``_unpickle``.""" + + +class VersionDefaultsToZero(CloudInitPickleMixin, metaclass=_Collector): + """Test that the default version is 0.""" + + def _unpickle(self, ci_pkl_version: int) -> None: + assert 0 == ci_pkl_version + + +class VersionIsPoppedFromState(CloudInitPickleMixin, metaclass=_Collector): + """Test _ci_pkl_version is popped from state before being restored.""" + + def _unpickle(self, ci_pkl_version: int) -> None: + # `self._ci_pkl_version` returns the type's _ci_pkl_version if it isn't + # in instance state, so we need to explicitly check self.__dict__. + assert "_ci_pkl_version" not in self.__dict__ + + +class TestPickleMixin: + def test_unpickle_called(self): + """Test that self._unpickle is called on unpickle.""" + with mock.patch.object( + CloudInitPickleMixin, "_unpickle" + ) as m_unpickle: + pickle.loads(pickle.dumps(CloudInitPickleMixin())) + assert 1 == m_unpickle.call_count + + @pytest.mark.parametrize("cls", _Collector.test_classes) + def test_subclasses(self, cls): + """For each collected class, round-trip through pickle dump/load. + + Assertions are implemented in ``cls._unpickle``, and so are evoked as + part of the pickle load. 
+ """ + pickle.loads(pickle.dumps(cls())) diff --git a/tests/unittests/test_registry.py b/tests/unittests/test_registry.py index 2b625026..4c7df186 100644 --- a/tests/unittests/test_registry.py +++ b/tests/unittests/test_registry.py @@ -2,7 +2,7 @@ from cloudinit.registry import DictRegistry -from cloudinit.tests.helpers import (mock, TestCase) +from tests.unittests.helpers import (mock, TestCase) class TestDictRegistry(TestCase): diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py index b78a6939..3aaeea43 100644 --- a/tests/unittests/test_reporting.py +++ b/tests/unittests/test_reporting.py @@ -8,7 +8,7 @@ from cloudinit import reporting from cloudinit.reporting import events from cloudinit.reporting import handlers -from cloudinit.tests.helpers import TestCase +from tests.unittests.helpers import TestCase def _fake_registry(): diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py index 9324b78d..24a1dcc7 100644 --- a/tests/unittests/test_reporting_hyperv.py +++ b/tests/unittests/test_reporting_hyperv.py @@ -13,7 +13,7 @@ import re from unittest import mock from cloudinit import util -from cloudinit.tests.helpers import CiTestCase +from tests.unittests.helpers import CiTestCase from cloudinit.sources.helpers import azure diff --git a/tests/unittests/test_rh_subscription.py b/tests/unittests/test_rh_subscription.py deleted file mode 100644 index 53d3cd5a..00000000 --- a/tests/unittests/test_rh_subscription.py +++ /dev/null @@ -1,234 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""Tests for registering RHEL subscription via rh_subscription.""" - -import copy -import logging - -from cloudinit.config import cc_rh_subscription -from cloudinit import subp - -from cloudinit.tests.helpers import CiTestCase, mock - -SUBMGR = cc_rh_subscription.SubscriptionManager -SUB_MAN_CLI = 'cloudinit.config.cc_rh_subscription._sub_man_cli' - - -@mock.patch(SUB_MAN_CLI) -class GoodTests(CiTestCase): - with_logs = True - - def setUp(self): - super(GoodTests, self).setUp() - self.name = "cc_rh_subscription" - self.cloud_init = None - self.log = logging.getLogger("good_tests") - self.args = [] - self.handle = cc_rh_subscription.handle - - self.config = {'rh_subscription': - {'username': 'scooby@do.com', - 'password': 'scooby-snacks' - }} - self.config_full = {'rh_subscription': - {'username': 'scooby@do.com', - 'password': 'scooby-snacks', - 'auto-attach': True, - 'service-level': 'self-support', - 'add-pool': ['pool1', 'pool2', 'pool3'], - 'enable-repo': ['repo1', 'repo2', 'repo3'], - 'disable-repo': ['repo4', 'repo5'] - }} - - def test_already_registered(self, m_sman_cli): - ''' - Emulates a system that is already registered. 
Ensure it gets - a non-ProcessExecution error from is_registered() - ''' - self.handle(self.name, self.config, self.cloud_init, - self.log, self.args) - self.assertEqual(m_sman_cli.call_count, 1) - self.assertIn('System is already registered', self.logs.getvalue()) - - def test_simple_registration(self, m_sman_cli): - ''' - Simple registration with username and password - ''' - reg = "The system has been registered with ID:" \ - " 12345678-abde-abcde-1234-1234567890abc" - m_sman_cli.side_effect = [subp.ProcessExecutionError, (reg, 'bar')] - self.handle(self.name, self.config, self.cloud_init, - self.log, self.args) - self.assertIn(mock.call(['identity']), m_sman_cli.call_args_list) - self.assertIn(mock.call(['register', '--username=scooby@do.com', - '--password=scooby-snacks'], - logstring_val=True), - m_sman_cli.call_args_list) - self.assertIn('rh_subscription plugin completed successfully', - self.logs.getvalue()) - self.assertEqual(m_sman_cli.call_count, 2) - - @mock.patch.object(cc_rh_subscription.SubscriptionManager, "_getRepos") - def test_update_repos_disable_with_none(self, m_get_repos, m_sman_cli): - cfg = copy.deepcopy(self.config) - m_get_repos.return_value = ([], ['repo1']) - cfg['rh_subscription'].update( - {'enable-repo': ['repo1'], 'disable-repo': None}) - mysm = cc_rh_subscription.SubscriptionManager(cfg) - self.assertEqual(True, mysm.update_repos()) - m_get_repos.assert_called_with() - self.assertEqual(m_sman_cli.call_args_list, - [mock.call(['repos', '--enable=repo1'])]) - - def test_full_registration(self, m_sman_cli): - ''' - Registration with auto-attach, service-level, adding pools, - and enabling and disabling yum repos - ''' - call_lists = [] - call_lists.append(['attach', '--pool=pool1', '--pool=pool3']) - call_lists.append(['repos', '--disable=repo5', '--enable=repo2', - '--enable=repo3']) - call_lists.append(['attach', '--auto', '--servicelevel=self-support']) - reg = "The system has been registered with ID:" \ - " 12345678-abde-abcde-1234-1234567890abc" - m_sman_cli.side_effect = [ - subp.ProcessExecutionError, - (reg, 'bar'), - ('Service level set to: self-support', ''), - ('pool1\npool3\n', ''), ('pool2\n', ''), ('', ''), - ('Repo ID: repo1\nRepo ID: repo5\n', ''), - ('Repo ID: repo2\nRepo ID: repo3\nRepo ID: repo4', ''), - ('', '')] - self.handle(self.name, self.config_full, self.cloud_init, - self.log, self.args) - self.assertEqual(m_sman_cli.call_count, 9) - for call in call_lists: - self.assertIn(mock.call(call), m_sman_cli.call_args_list) - self.assertIn("rh_subscription plugin completed successfully", - self.logs.getvalue()) - - -@mock.patch(SUB_MAN_CLI) -class TestBadInput(CiTestCase): - with_logs = True - name = "cc_rh_subscription" - cloud_init = None - log = logging.getLogger("bad_tests") - args = [] - SM = cc_rh_subscription.SubscriptionManager - reg = "The system has been registered with ID:" \ - " 12345678-abde-abcde-1234-1234567890abc" - - config_no_password = {'rh_subscription': - {'username': 'scooby@do.com' - }} - - config_no_key = {'rh_subscription': - {'activation-key': '1234abcde', - }} - - config_service = {'rh_subscription': - {'username': 'scooby@do.com', - 'password': 'scooby-snacks', - 'service-level': 'self-support' - }} - - config_badpool = {'rh_subscription': - {'username': 'scooby@do.com', - 'password': 'scooby-snacks', - 'add-pool': 'not_a_list' - }} - config_badrepo = {'rh_subscription': - {'username': 'scooby@do.com', - 'password': 'scooby-snacks', - 'enable-repo': 'not_a_list' - }} - config_badkey = {'rh_subscription': - 
{'activation-key': 'abcdef1234', - 'fookey': 'bar', - 'org': '123', - }} - - def setUp(self): - super(TestBadInput, self).setUp() - self.handle = cc_rh_subscription.handle - - def assert_logged_warnings(self, warnings): - logs = self.logs.getvalue() - missing = [w for w in warnings if "WARNING: " + w not in logs] - self.assertEqual([], missing, "Missing expected warnings.") - - def test_no_password(self, m_sman_cli): - '''Attempt to register without the password key/value.''' - m_sman_cli.side_effect = [subp.ProcessExecutionError, - (self.reg, 'bar')] - self.handle(self.name, self.config_no_password, self.cloud_init, - self.log, self.args) - self.assertEqual(m_sman_cli.call_count, 0) - - def test_no_org(self, m_sman_cli): - '''Attempt to register without the org key/value.''' - m_sman_cli.side_effect = [subp.ProcessExecutionError] - self.handle(self.name, self.config_no_key, self.cloud_init, - self.log, self.args) - m_sman_cli.assert_called_with(['identity']) - self.assertEqual(m_sman_cli.call_count, 1) - self.assert_logged_warnings(( - 'Unable to register system due to incomplete information.', - 'Use either activationkey and org *or* userid and password', - 'Registration failed or did not run completely', - 'rh_subscription plugin did not complete successfully')) - - def test_service_level_without_auto(self, m_sman_cli): - '''Attempt to register using service-level without auto-attach key.''' - m_sman_cli.side_effect = [subp.ProcessExecutionError, - (self.reg, 'bar')] - self.handle(self.name, self.config_service, self.cloud_init, - self.log, self.args) - self.assertEqual(m_sman_cli.call_count, 1) - self.assert_logged_warnings(( - 'The service-level key must be used in conjunction with ', - 'rh_subscription plugin did not complete successfully')) - - def test_pool_not_a_list(self, m_sman_cli): - ''' - Register with pools that are not in the format of a list - ''' - m_sman_cli.side_effect = [subp.ProcessExecutionError, - (self.reg, 'bar')] - self.handle(self.name, self.config_badpool, self.cloud_init, - self.log, self.args) - self.assertEqual(m_sman_cli.call_count, 2) - self.assert_logged_warnings(( - 'Pools must in the format of a list', - 'rh_subscription plugin did not complete successfully')) - - def test_repo_not_a_list(self, m_sman_cli): - ''' - Register with repos that are not in the format of a list - ''' - m_sman_cli.side_effect = [subp.ProcessExecutionError, - (self.reg, 'bar')] - self.handle(self.name, self.config_badrepo, self.cloud_init, - self.log, self.args) - self.assertEqual(m_sman_cli.call_count, 2) - self.assert_logged_warnings(( - 'Repo IDs must in the format of a list.', - 'Unable to add or remove repos', - 'rh_subscription plugin did not complete successfully')) - - def test_bad_key_value(self, m_sman_cli): - ''' - Attempt to register with a key that we don't know - ''' - m_sman_cli.side_effect = [subp.ProcessExecutionError, - (self.reg, 'bar')] - self.handle(self.name, self.config_badkey, self.cloud_init, - self.log, self.args) - self.assertEqual(m_sman_cli.call_count, 1) - self.assert_logged_warnings(( - 'fookey is not a valid key for rh_subscription. 
Valid keys are:', - 'rh_subscription plugin did not complete successfully')) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_runs/__init__.py b/tests/unittests/test_runs/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py deleted file mode 100644 index ff27a280..00000000 --- a/tests/unittests/test_runs/test_merge_run.py +++ /dev/null @@ -1,60 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import os -import shutil -import tempfile - -from cloudinit.tests import helpers - -from cloudinit.settings import PER_INSTANCE -from cloudinit import safeyaml -from cloudinit import stages -from cloudinit import util - - -class TestMergeRun(helpers.FilesystemMockingTestCase): - def _patchIn(self, root): - self.patchOS(root) - self.patchUtils(root) - - def test_none_ds(self): - new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, new_root) - self.replicateTestRoot('simple_ubuntu', new_root) - cfg = { - 'datasource_list': ['None'], - 'cloud_init_modules': ['write-files'], - 'system_info': {'paths': {'run_dir': new_root}} - } - ud = helpers.readResource('user_data.1.txt') - cloud_cfg = safeyaml.dumps(cfg) - util.ensure_dir(os.path.join(new_root, 'etc', 'cloud')) - util.write_file(os.path.join(new_root, 'etc', - 'cloud', 'cloud.cfg'), cloud_cfg) - self._patchIn(new_root) - - # Now start verifying whats created - initer = stages.Init() - initer.read_cfg() - initer.initialize() - initer.fetch() - initer.datasource.userdata_raw = ud - initer.instancify() - initer.update() - initer.cloudify().run('consume_data', - initer.consume_data, - args=[PER_INSTANCE], - freq=PER_INSTANCE) - mirrors = initer.distro.get_option('package_mirrors') - self.assertEqual(1, len(mirrors)) - mirror = mirrors[0] - self.assertEqual(mirror['arches'], ['i386', 'amd64', 'blah']) - mods = stages.Modules(initer) - (which_ran, failures) = mods.run_section('cloud_init_modules') - self.assertTrue(len(failures) == 0) - self.assertTrue(os.path.exists('/etc/blah.ini')) - self.assertIn('write-files', which_ran) - contents = util.load_file('/etc/blah.ini') - self.assertEqual(contents, 'blah') - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py deleted file mode 100644 index cb3aae60..00000000 --- a/tests/unittests/test_runs/test_simple_run.py +++ /dev/null @@ -1,182 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
- -import copy -import os - - -from cloudinit.settings import PER_INSTANCE -from cloudinit import safeyaml -from cloudinit import stages -from cloudinit.tests import helpers -from cloudinit import util - - -class TestSimpleRun(helpers.FilesystemMockingTestCase): - - with_logs = True - - def setUp(self): - super(TestSimpleRun, self).setUp() - self.new_root = self.tmp_dir() - self.replicateTestRoot('simple_ubuntu', self.new_root) - - # Seed cloud.cfg file for our tests - self.cfg = { - 'datasource_list': ['None'], - 'runcmd': ['ls /etc'], # test ALL_DISTROS - 'spacewalk': {}, # test non-ubuntu distros module definition - 'system_info': {'paths': {'run_dir': self.new_root}}, - 'write_files': [ - { - 'path': '/etc/blah.ini', - 'content': 'blah', - 'permissions': 0o755, - }, - ], - 'cloud_init_modules': ['write-files', 'spacewalk', 'runcmd'], - } - cloud_cfg = safeyaml.dumps(self.cfg) - util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) - util.write_file(os.path.join(self.new_root, 'etc', - 'cloud', 'cloud.cfg'), cloud_cfg) - self.patchOS(self.new_root) - self.patchUtils(self.new_root) - - def test_none_ds_populates_var_lib_cloud(self): - """Init and run_section default behavior creates appropriate dirs.""" - # Now start verifying whats created - initer = stages.Init() - initer.read_cfg() - initer.initialize() - self.assertTrue(os.path.exists("/var/lib/cloud")) - for d in ['scripts', 'seed', 'instances', 'handlers', 'sem', 'data']: - self.assertTrue(os.path.isdir(os.path.join("/var/lib/cloud", d))) - - initer.fetch() - iid = initer.instancify() - self.assertEqual(iid, 'iid-datasource-none') - initer.update() - self.assertTrue(os.path.islink("var/lib/cloud/instance")) - - def test_none_ds_runs_modules_which_do_not_define_distros(self): - """Any modules which do not define a distros attribute are run.""" - initer = stages.Init() - initer.read_cfg() - initer.initialize() - initer.fetch() - initer.instancify() - initer.update() - initer.cloudify().run('consume_data', initer.consume_data, - args=[PER_INSTANCE], freq=PER_INSTANCE) - - mods = stages.Modules(initer) - (which_ran, failures) = mods.run_section('cloud_init_modules') - self.assertTrue(len(failures) == 0) - self.assertTrue(os.path.exists('/etc/blah.ini')) - self.assertIn('write-files', which_ran) - contents = util.load_file('/etc/blah.ini') - self.assertEqual(contents, 'blah') - self.assertNotIn( - "Skipping modules ['write-files'] because they are not verified on" - " distro 'ubuntu'", - self.logs.getvalue()) - - def test_none_ds_skips_modules_which_define_unmatched_distros(self): - """Skip modules which define distros which don't match the current.""" - initer = stages.Init() - initer.read_cfg() - initer.initialize() - initer.fetch() - initer.instancify() - initer.update() - initer.cloudify().run('consume_data', initer.consume_data, - args=[PER_INSTANCE], freq=PER_INSTANCE) - - mods = stages.Modules(initer) - (which_ran, failures) = mods.run_section('cloud_init_modules') - self.assertTrue(len(failures) == 0) - self.assertIn( - "Skipping modules 'spacewalk' because they are not verified on" - " distro 'ubuntu'", - self.logs.getvalue()) - self.assertNotIn('spacewalk', which_ran) - - def test_none_ds_runs_modules_which_distros_all(self): - """Skip modules which define distros attribute as supporting 'all'. - - This is done in the module with the declaration: - distros = [ALL_DISTROS]. runcmd is an example. 
- """ - initer = stages.Init() - initer.read_cfg() - initer.initialize() - initer.fetch() - initer.instancify() - initer.update() - initer.cloudify().run('consume_data', initer.consume_data, - args=[PER_INSTANCE], freq=PER_INSTANCE) - - mods = stages.Modules(initer) - (which_ran, failures) = mods.run_section('cloud_init_modules') - self.assertTrue(len(failures) == 0) - self.assertIn('runcmd', which_ran) - self.assertNotIn( - "Skipping modules 'runcmd' because they are not verified on" - " distro 'ubuntu'", - self.logs.getvalue()) - - def test_none_ds_forces_run_via_unverified_modules(self): - """run_section forced skipped modules by using unverified_modules.""" - - # re-write cloud.cfg with unverified_modules override - cfg = copy.deepcopy(self.cfg) - cfg['unverified_modules'] = ['spacewalk'] # Would have skipped - cloud_cfg = safeyaml.dumps(cfg) - util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) - util.write_file(os.path.join(self.new_root, 'etc', - 'cloud', 'cloud.cfg'), cloud_cfg) - - initer = stages.Init() - initer.read_cfg() - initer.initialize() - initer.fetch() - initer.instancify() - initer.update() - initer.cloudify().run('consume_data', initer.consume_data, - args=[PER_INSTANCE], freq=PER_INSTANCE) - - mods = stages.Modules(initer) - (which_ran, failures) = mods.run_section('cloud_init_modules') - self.assertTrue(len(failures) == 0) - self.assertIn('spacewalk', which_ran) - self.assertIn( - "running unverified_modules: 'spacewalk'", - self.logs.getvalue()) - - def test_none_ds_run_with_no_config_modules(self): - """run_section will report no modules run when none are configured.""" - - # re-write cloud.cfg with unverified_modules override - cfg = copy.deepcopy(self.cfg) - # Represent empty configuration in /etc/cloud/cloud.cfg - cfg['cloud_init_modules'] = None - cloud_cfg = safeyaml.dumps(cfg) - util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) - util.write_file(os.path.join(self.new_root, 'etc', - 'cloud', 'cloud.cfg'), cloud_cfg) - - initer = stages.Init() - initer.read_cfg() - initer.initialize() - initer.fetch() - initer.instancify() - initer.update() - initer.cloudify().run('consume_data', initer.consume_data, - args=[PER_INSTANCE], freq=PER_INSTANCE) - - mods = stages.Modules(initer) - (which_ran, failures) = mods.run_section('cloud_init_modules') - self.assertTrue(len(failures) == 0) - self.assertEqual([], which_ran) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_simpletable.py b/tests/unittests/test_simpletable.py new file mode 100644 index 00000000..69b30f0e --- /dev/null +++ b/tests/unittests/test_simpletable.py @@ -0,0 +1,106 @@ +# Copyright (C) 2017 Amazon.com, Inc. or its affiliates +# +# Author: Andrew Jorgensen +# +# This file is part of cloud-init. See LICENSE file for license information. +"""Tests that SimpleTable works just like PrettyTable for cloud-init. + +Not all possible PrettyTable cases are tested because we're not trying to +reimplement the entire library, only the minimal parts we actually use. 
+""" + +from cloudinit.simpletable import SimpleTable +from tests.unittests.helpers import CiTestCase + +# Examples rendered by cloud-init using PrettyTable +NET_DEVICE_FIELDS = ( + 'Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address') +NET_DEVICE_ROWS = ( + ('ens3', True, '172.31.4.203', '255.255.240.0', '.', '0a:1f:07:15:98:70'), + ('ens3', True, 'fe80::81f:7ff:fe15:9870/64', '.', 'link', + '0a:1f:07:15:98:70'), + ('lo', True, '127.0.0.1', '255.0.0.0', '.', '.'), + ('lo', True, '::1/128', '.', 'host', '.'), +) +NET_DEVICE_TABLE = """\ ++--------+------+----------------------------+---------------+-------+-------------------+ +| Device | Up | Address | Mask | Scope | Hw-Address | ++--------+------+----------------------------+---------------+-------+-------------------+ +| ens3 | True | 172.31.4.203 | 255.255.240.0 | . | 0a:1f:07:15:98:70 | +| ens3 | True | fe80::81f:7ff:fe15:9870/64 | . | link | 0a:1f:07:15:98:70 | +| lo | True | 127.0.0.1 | 255.0.0.0 | . | . | +| lo | True | ::1/128 | . | host | . | ++--------+------+----------------------------+---------------+-------+-------------------+""" # noqa: E501 +ROUTE_IPV4_FIELDS = ( + 'Route', 'Destination', 'Gateway', 'Genmask', 'Interface', 'Flags') +ROUTE_IPV4_ROWS = ( + ('0', '0.0.0.0', '172.31.0.1', '0.0.0.0', 'ens3', 'UG'), + ('1', '169.254.0.0', '0.0.0.0', '255.255.0.0', 'ens3', 'U'), + ('2', '172.31.0.0', '0.0.0.0', '255.255.240.0', 'ens3', 'U'), +) +ROUTE_IPV4_TABLE = """\ ++-------+-------------+------------+---------------+-----------+-------+ +| Route | Destination | Gateway | Genmask | Interface | Flags | ++-------+-------------+------------+---------------+-----------+-------+ +| 0 | 0.0.0.0 | 172.31.0.1 | 0.0.0.0 | ens3 | UG | +| 1 | 169.254.0.0 | 0.0.0.0 | 255.255.0.0 | ens3 | U | +| 2 | 172.31.0.0 | 0.0.0.0 | 255.255.240.0 | ens3 | U | ++-------+-------------+------------+---------------+-----------+-------+""" + +AUTHORIZED_KEYS_FIELDS = ( + 'Keytype', 'Fingerprint (md5)', 'Options', 'Comment') +AUTHORIZED_KEYS_ROWS = ( + ('ssh-rsa', '24:c7:41:49:47:12:31:a0:de:6f:62:79:9b:13:06:36', '-', + 'ajorgens'), +) +AUTHORIZED_KEYS_TABLE = """\ ++---------+-------------------------------------------------+---------+----------+ +| Keytype | Fingerprint (md5) | Options | Comment | ++---------+-------------------------------------------------+---------+----------+ +| ssh-rsa | 24:c7:41:49:47:12:31:a0:de:6f:62:79:9b:13:06:36 | - | ajorgens | ++---------+-------------------------------------------------+---------+----------+""" # noqa: E501 + +# from prettytable import PrettyTable +# pt = PrettyTable(('HEADER',)) +# print(pt) +NO_ROWS_FIELDS = ('HEADER',) +NO_ROWS_TABLE = """\ ++--------+ +| HEADER | ++--------+ ++--------+""" + + +class TestSimpleTable(CiTestCase): + + def test_no_rows(self): + """An empty table is rendered as PrettyTable would have done it.""" + table = SimpleTable(NO_ROWS_FIELDS) + self.assertEqual(str(table), NO_ROWS_TABLE) + + def test_net_dev(self): + """Net device info is rendered as it was with PrettyTable.""" + table = SimpleTable(NET_DEVICE_FIELDS) + for row in NET_DEVICE_ROWS: + table.add_row(row) + self.assertEqual(str(table), NET_DEVICE_TABLE) + + def test_route_ipv4(self): + """Route IPv4 info is rendered as it was with PrettyTable.""" + table = SimpleTable(ROUTE_IPV4_FIELDS) + for row in ROUTE_IPV4_ROWS: + table.add_row(row) + self.assertEqual(str(table), ROUTE_IPV4_TABLE) + + def test_authorized_keys(self): + """SSH authorized keys are rendered as they were with PrettyTable.""" + table = 
SimpleTable(AUTHORIZED_KEYS_FIELDS) + for row in AUTHORIZED_KEYS_ROWS: + table.add_row(row) + self.assertEqual(str(table), AUTHORIZED_KEYS_TABLE) + + def test_get_string(self): + """get_string() method returns the same content as str().""" + table = SimpleTable(AUTHORIZED_KEYS_FIELDS) + for row in AUTHORIZED_KEYS_ROWS: + table.add_row(row) + self.assertEqual(table.get_string(), str(table)) diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index 08e20050..b210bd3b 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -7,7 +7,7 @@ from functools import partial from unittest.mock import patch from cloudinit import ssh_util -from cloudinit.tests import helpers as test_helpers +from tests.unittests import helpers as test_helpers from cloudinit import util # https://stackoverflow.com/questions/11351032/ diff --git a/tests/unittests/test_stages.py b/tests/unittests/test_stages.py new file mode 100644 index 00000000..a722f03f --- /dev/null +++ b/tests/unittests/test_stages.py @@ -0,0 +1,478 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Tests related to cloudinit.stages module.""" +import os +import stat + +import pytest + +from cloudinit import stages +from cloudinit import sources +from cloudinit.sources import NetworkConfigSource + +from cloudinit.event import EventScope, EventType +from cloudinit.util import write_file + +from tests.unittests.helpers import CiTestCase, mock + +TEST_INSTANCE_ID = 'i-testing' + + +class FakeDataSource(sources.DataSource): + + def __init__(self, paths=None, userdata=None, vendordata=None, + network_config=''): + super(FakeDataSource, self).__init__({}, None, paths=paths) + self.metadata = {'instance-id': TEST_INSTANCE_ID} + self.userdata_raw = userdata + self.vendordata_raw = vendordata + self._network_config = None + if network_config: # Permit for None value to setup attribute + self._network_config = network_config + + @property + def network_config(self): + return self._network_config + + def _get_data(self): + return True + + +class TestInit(CiTestCase): + with_logs = True + allowed_subp = False + + def setUp(self): + super(TestInit, self).setUp() + self.tmpdir = self.tmp_dir() + self.init = stages.Init() + # Setup fake Paths for Init to reference + self.init._cfg = {'system_info': { + 'distro': 'ubuntu', 'paths': {'cloud_dir': self.tmpdir, + 'run_dir': self.tmpdir}}} + self.init.datasource = FakeDataSource(paths=self.init.paths) + self._real_is_new_instance = self.init.is_new_instance + self.init.is_new_instance = mock.Mock(return_value=True) + + def test_wb__find_networking_config_disabled(self): + """find_networking_config returns no config when disabled.""" + disable_file = os.path.join( + self.init.paths.get_cpath('data'), 'upgraded-network') + write_file(disable_file, '') + self.assertEqual( + (None, disable_file), + self.init._find_networking_config()) + + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test_wb__find_networking_config_disabled_by_kernel( + self, m_cmdline, m_initramfs): + """find_networking_config returns when disabled by kernel cmdline.""" + m_cmdline.return_value = {'config': 'disabled'} + m_initramfs.return_value = {'config': ['fake_initrd']} + self.assertEqual( + (None, NetworkConfigSource.cmdline), + self.init._find_networking_config()) + self.assertEqual('DEBUG: network config disabled by cmdline\n', + self.logs.getvalue()) + + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') +
@mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test_wb__find_networking_config_disabled_by_initrd( + self, m_cmdline, m_initramfs): + """find_networking_config returns when disabled by initramfs.""" + m_cmdline.return_value = {} + m_initramfs.return_value = {'config': 'disabled'} + self.assertEqual( + (None, NetworkConfigSource.initramfs), + self.init._find_networking_config()) + self.assertEqual('DEBUG: network config disabled by initramfs\n', + self.logs.getvalue()) + + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test_wb__find_networking_config_disabled_by_datasrc( + self, m_cmdline, m_initramfs): + """find_networking_config returns when disabled by datasource cfg.""" + m_cmdline.return_value = {} # Kernel doesn't disable networking + m_initramfs.return_value = {} # initramfs doesn't disable networking + self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, + 'network': {}} # system config doesn't disable + + self.init.datasource = FakeDataSource( + network_config={'config': 'disabled'}) + self.assertEqual( + (None, NetworkConfigSource.ds), + self.init._find_networking_config()) + self.assertEqual('DEBUG: network config disabled by ds\n', + self.logs.getvalue()) + + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test_wb__find_networking_config_disabled_by_sysconfig( + self, m_cmdline, m_initramfs): + """find_networking_config returns when disabled by system config.""" + m_cmdline.return_value = {} # Kernel doesn't disable networking + m_initramfs.return_value = {} # initramfs doesn't disable networking + self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, + 'network': {'config': 'disabled'}} + self.assertEqual( + (None, NetworkConfigSource.system_cfg), + self.init._find_networking_config()) + self.assertEqual('DEBUG: network config disabled by system_cfg\n', + self.logs.getvalue()) + + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test__find_networking_config_uses_datasrc_order( + self, m_cmdline, m_initramfs): + """find_networking_config should check sources in DS defined order""" + # cmdline and initramfs, which would normally be preferred over other + # sources, disable networking; in this case, though, the DS moves them + # later so its own config is preferred + m_cmdline.return_value = {'config': 'disabled'} + m_initramfs.return_value = {'config': 'disabled'} + + ds_net_cfg = {'config': {'needle': True}} + self.init.datasource = FakeDataSource(network_config=ds_net_cfg) + self.init.datasource.network_config_sources = [ + NetworkConfigSource.ds, NetworkConfigSource.system_cfg, + NetworkConfigSource.cmdline, NetworkConfigSource.initramfs] + + self.assertEqual( + (ds_net_cfg, NetworkConfigSource.ds), + self.init._find_networking_config()) + + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test__find_networking_config_warns_if_datasrc_uses_invalid_src( + self, m_cmdline, m_initramfs): + """find_networking_config warns when the DS lists an invalid source""" + ds_net_cfg = {'config': {'needle': True}} + self.init.datasource = FakeDataSource(network_config=ds_net_cfg) + self.init.datasource.network_config_sources = [ + 'invalid_src', NetworkConfigSource.ds] +
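+        # 'invalid_src' is not a recognized NetworkConfigSource, so
+        # _find_networking_config should warn about it and skip it; the
+        # valid 'ds' entry below still supplies the winning config.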
+ self.assertEqual( + (ds_net_cfg, NetworkConfigSource.ds), + self.init._find_networking_config()) + self.assertIn('WARNING: data source specifies an invalid network' + ' cfg_source: invalid_src', + self.logs.getvalue()) + + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test__find_networking_config_warns_if_datasrc_uses_unavailable_src( + self, m_cmdline, m_initramfs): + """find_networking_config warns when the DS lists an unavailable source""" + ds_net_cfg = {'config': {'needle': True}} + self.init.datasource = FakeDataSource(network_config=ds_net_cfg) + self.init.datasource.network_config_sources = [ + NetworkConfigSource.fallback, NetworkConfigSource.ds] + + self.assertEqual( + (ds_net_cfg, NetworkConfigSource.ds), + self.init._find_networking_config()) + self.assertIn('WARNING: data source specifies an unavailable network' + ' cfg_source: fallback', + self.logs.getvalue()) + + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test_wb__find_networking_config_returns_kernel( + self, m_cmdline, m_initramfs): + """find_networking_config returns kernel cmdline config if present.""" + expected_cfg = {'config': ['fakekernel']} + m_cmdline.return_value = expected_cfg + m_initramfs.return_value = {'config': ['fake_initrd']} + self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, + 'network': {'config': ['fakesys_config']}} + self.init.datasource = FakeDataSource( + network_config={'config': ['fakedatasource']}) + self.assertEqual( + (expected_cfg, NetworkConfigSource.cmdline), + self.init._find_networking_config()) + + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test_wb__find_networking_config_returns_initramfs( + self, m_cmdline, m_initramfs): + """find_networking_config returns initramfs config if present.""" + expected_cfg = {'config': ['fake_initrd']} + m_cmdline.return_value = {} + m_initramfs.return_value = expected_cfg + self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, + 'network': {'config': ['fakesys_config']}} + self.init.datasource = FakeDataSource( + network_config={'config': ['fakedatasource']}) + self.assertEqual( + (expected_cfg, NetworkConfigSource.initramfs), + self.init._find_networking_config()) + + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test_wb__find_networking_config_returns_system_cfg( + self, m_cmdline, m_initramfs): + """find_networking_config returns system config when present.""" + m_cmdline.return_value = {} # No kernel network config + m_initramfs.return_value = {} # no initramfs network config + expected_cfg = {'config': ['fakesys_config']} + self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}}, + 'network': expected_cfg} + self.init.datasource = FakeDataSource( + network_config={'config': ['fakedatasource']}) + self.assertEqual( + (expected_cfg, NetworkConfigSource.system_cfg), + self.init._find_networking_config()) + + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test_wb__find_networking_config_returns_datasrc_cfg( + self, m_cmdline, m_initramfs): + """find_networking_config returns datasource net config if present.""" + m_cmdline.return_value = {} # No
kernel network config + m_initramfs.return_value = {} # no initramfs network config + # No system config for network in setUp + expected_cfg = {'config': ['fakedatasource']} + self.init.datasource = FakeDataSource(network_config=expected_cfg) + self.assertEqual( + (expected_cfg, NetworkConfigSource.ds), + self.init._find_networking_config()) + + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test_wb__find_networking_config_returns_fallback( + self, m_cmdline, m_initramfs): + """find_networking_config returns fallback config if not defined.""" + m_cmdline.return_value = {} # Kernel doesn't disable networking + m_initramfs.return_value = {} # no initramfs network config + # Neither datasource nor system_info disable or provide network + + fake_cfg = {'config': [{'type': 'physical', 'name': 'eth9'}], + 'version': 1} + + def fake_generate_fallback(): + return fake_cfg + + # Monkey patch distro which gets cached on self.init + distro = self.init.distro + distro.generate_fallback_config = fake_generate_fallback + self.assertEqual( + (fake_cfg, NetworkConfigSource.fallback), + self.init._find_networking_config()) + self.assertNotIn('network config disabled', self.logs.getvalue()) + + def test_apply_network_config_disabled(self): + """Log when network is disabled by upgraded-network.""" + disable_file = os.path.join( + self.init.paths.get_cpath('data'), 'upgraded-network') + + def fake_network_config(): + return (None, disable_file) + + self.init._find_networking_config = fake_network_config + + self.init.apply_network_config(True) + self.assertIn( + 'INFO: network config is disabled by %s' % disable_file, + self.logs.getvalue()) + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch('cloudinit.distros.ubuntu.Distro') + def test_apply_network_on_new_instance(self, m_ubuntu, m_macs): + """Call distro apply_network_config methods on is_new_instance.""" + net_cfg = { + 'version': 1, 'config': [ + {'subnets': [{'type': 'dhcp'}], 'type': 'physical', + 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} + + def fake_network_config(): + return net_cfg, NetworkConfigSource.fallback + + m_macs.return_value = {'42:42:42:42:42:42': 'eth9'} + + self.init._find_networking_config = fake_network_config + + self.init.apply_network_config(True) + self.init.distro.apply_network_config_names.assert_called_with(net_cfg) + self.init.distro.apply_network_config.assert_called_with( + net_cfg, bring_up=True) + + @mock.patch('cloudinit.distros.ubuntu.Distro') + def test_apply_network_on_same_instance_id(self, m_ubuntu): + """Only call distro.apply_network_config_names on same instance id.""" + self.init.is_new_instance = self._real_is_new_instance + old_instance_id = os.path.join( + self.init.paths.get_cpath('data'), 'instance-id') + write_file(old_instance_id, TEST_INSTANCE_ID) + net_cfg = { + 'version': 1, 'config': [ + {'subnets': [{'type': 'dhcp'}], 'type': 'physical', + 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} + + def fake_network_config(): + return net_cfg, NetworkConfigSource.fallback + + self.init._find_networking_config = fake_network_config + + self.init.apply_network_config(True) + self.init.distro.apply_network_config_names.assert_called_with(net_cfg) + self.init.distro.apply_network_config.assert_not_called() + assert ( + "No network config applied. 
Neither a new instance nor datasource " + "network update allowed" + ) in self.logs.getvalue() + + # CiTestCase doesn't work with pytest.mark.parametrize, and moving this + # functionality to a separate class is more cumbersome than it'd be worth + # at the moment, so use this as a simple setup + def _apply_network_setup(self, m_macs): + old_instance_id = os.path.join( + self.init.paths.get_cpath('data'), 'instance-id') + write_file(old_instance_id, TEST_INSTANCE_ID) + net_cfg = { + 'version': 1, 'config': [ + {'subnets': [{'type': 'dhcp'}], 'type': 'physical', + 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} + + def fake_network_config(): + return net_cfg, NetworkConfigSource.fallback + + m_macs.return_value = {'42:42:42:42:42:42': 'eth9'} + + self.init._find_networking_config = fake_network_config + self.init.datasource = FakeDataSource(paths=self.init.paths) + self.init.is_new_instance = mock.Mock(return_value=False) + return net_cfg + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch('cloudinit.distros.ubuntu.Distro') + @mock.patch.dict(sources.DataSource.default_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}}) + def test_apply_network_allowed_when_default_boot( + self, m_ubuntu, m_macs + ): + """Apply network if datasource permits BOOT event.""" + net_cfg = self._apply_network_setup(m_macs) + + self.init.apply_network_config(True) + assert mock.call( + net_cfg + ) == self.init.distro.apply_network_config_names.call_args_list[-1] + assert mock.call( + net_cfg, bring_up=True + ) == self.init.distro.apply_network_config.call_args_list[-1] + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch('cloudinit.distros.ubuntu.Distro') + @mock.patch.dict(sources.DataSource.default_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + def test_apply_network_disabled_when_no_default_boot( + self, m_ubuntu, m_macs + ): + """Don't apply network if datasource has no BOOT event.""" + self._apply_network_setup(m_macs) + self.init.apply_network_config(True) + self.init.distro.apply_network_config.assert_not_called() + assert ( + "No network config applied. Neither a new instance nor datasource " + "network update allowed" + ) in self.logs.getvalue() + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch('cloudinit.distros.ubuntu.Distro') + @mock.patch.dict(sources.DataSource.default_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + def test_apply_network_allowed_with_userdata_overrides( + self, m_ubuntu, m_macs + ): + """Apply network if userdata overrides default config""" + net_cfg = self._apply_network_setup(m_macs) + self.init._cfg = {'updates': {'network': {'when': ['boot']}}} + self.init.apply_network_config(True) + self.init.distro.apply_network_config_names.assert_called_with( + net_cfg) + self.init.distro.apply_network_config.assert_called_with( + net_cfg, bring_up=True) + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch('cloudinit.distros.ubuntu.Distro') + @mock.patch.dict(sources.DataSource.supported_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + def test_apply_network_disabled_when_unsupported( + self, m_ubuntu, m_macs + ): + """Don't apply network config if unsupported. 
+ + Shouldn't work even when specified as userdata + """ + self._apply_network_setup(m_macs) + + self.init._cfg = {'updates': {'network': {'when': ['boot']}}} + self.init.apply_network_config(True) + self.init.distro.apply_network_config.assert_not_called() + assert ( + "No network config applied. Neither a new instance nor datasource " + "network update allowed" + ) in self.logs.getvalue() + + +class TestInit_InitializeFilesystem: + """Tests for cloudinit.stages.Init._initialize_filesystem. + + TODO: Expand these tests to cover all of _initialize_filesystem's behavior. + """ + + @pytest.yield_fixture + def init(self, paths): + """A fixture which yields a stages.Init instance with paths and cfg set + + As it is replaced with a mock, consumers of this fixture can set + `init._cfg` if the default empty dict configuration is not appropriate. + """ + with mock.patch("cloudinit.stages.util.ensure_dirs"): + init = stages.Init() + init._cfg = {} + init._paths = paths + yield init + + @mock.patch("cloudinit.stages.util.ensure_file") + def test_ensure_file_not_called_if_no_log_file_configured( + self, m_ensure_file, init + ): + """If no log file is configured, we should not ensure its existence.""" + init._cfg = {} + + init._initialize_filesystem() + + assert 0 == m_ensure_file.call_count + + def test_log_files_existence_is_ensured_if_configured(self, init, tmpdir): + """If a log file is configured, we should ensure its existence.""" + log_file = tmpdir.join("cloud-init.log") + init._cfg = {"def_log_file": str(log_file)} + + init._initialize_filesystem() + + assert log_file.exists() + # Assert we create it 0o640 by default if it doesn't already exist + assert 0o640 == stat.S_IMODE(log_file.stat().mode) + + def test_existing_file_permissions_are_not_modified(self, init, tmpdir): + """If the log file already exists, we should not modify its permissions + + See https://bugs.launchpad.net/cloud-init/+bug/1900837. + """ + # Use a mode that will never be made the default so this test will + # always be valid + mode = 0o606 + log_file = tmpdir.join("cloud-init.log") + log_file.ensure() + log_file.chmod(mode) + init._cfg = {"def_log_file": str(log_file)} + + init._initialize_filesystem() + + assert mode == stat.S_IMODE(log_file.stat().mode) + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_subp.py b/tests/unittests/test_subp.py new file mode 100644 index 00000000..ec513d01 --- /dev/null +++ b/tests/unittests/test_subp.py @@ -0,0 +1,286 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Tests for cloudinit.subp utility functions""" + +import json +import os +import sys +import stat + +from unittest import mock + +from cloudinit import subp, util +from tests.unittests.helpers import CiTestCase + + +BASH = subp.which('bash') +BOGUS_COMMAND = 'this-is-not-expected-to-be-a-program-name' + + +class TestPrependBaseCommands(CiTestCase): + + with_logs = True + + def test_prepend_base_command_errors_on_neither_string_nor_list(self): + """Raise an error for each command which is not a string or list.""" + orig_commands = ['ls', 1, {'not': 'gonna work'}, ['basecmd', 'list']] + with self.assertRaises(TypeError) as context_manager: + subp.prepend_base_command( + base_command='basecmd', commands=orig_commands) + self.assertEqual( + "Invalid basecmd config. 
These commands are not a string or" + " list:\n1\n{'not': 'gonna work'}", + str(context_manager.exception)) + + def test_prepend_base_command_warns_on_non_base_string_commands(self): + """Warn on each non-base for commands of type string.""" + orig_commands = [ + 'ls', 'basecmd list', 'touch /blah', 'basecmd install x'] + fixed_commands = subp.prepend_base_command( + base_command='basecmd', commands=orig_commands) + self.assertEqual( + 'WARNING: Non-basecmd commands in basecmd config:\n' + 'ls\ntouch /blah\n', + self.logs.getvalue()) + self.assertEqual(orig_commands, fixed_commands) + + def test_prepend_base_command_prepends_on_non_base_list_commands(self): + """Prepend 'basecmd' for each non-basecmd command of type list.""" + orig_commands = [['ls'], ['basecmd', 'list'], ['basecmda', '/blah'], + ['basecmd', 'install', 'x']] + expected = [['basecmd', 'ls'], ['basecmd', 'list'], + ['basecmd', 'basecmda', '/blah'], + ['basecmd', 'install', 'x']] + fixed_commands = subp.prepend_base_command( + base_command='basecmd', commands=orig_commands) + self.assertEqual('', self.logs.getvalue()) + self.assertEqual(expected, fixed_commands) + + def test_prepend_base_command_removes_first_item_when_none(self): + """Remove the first element of a non-basecmd when it is None.""" + orig_commands = [[None, 'ls'], ['basecmd', 'list'], + [None, 'touch', '/blah'], + ['basecmd', 'install', 'x']] + expected = [['ls'], ['basecmd', 'list'], + ['touch', '/blah'], + ['basecmd', 'install', 'x']] + fixed_commands = subp.prepend_base_command( + base_command='basecmd', commands=orig_commands) + self.assertEqual('', self.logs.getvalue()) + self.assertEqual(expected, fixed_commands) + + +class TestSubp(CiTestCase): + allowed_subp = [BASH, 'cat', CiTestCase.SUBP_SHELL_TRUE, + BOGUS_COMMAND, sys.executable] + + stdin2err = [BASH, '-c', 'cat >&2'] + stdin2out = ['cat'] + utf8_invalid = b'ab\xaadef' + utf8_valid = b'start \xc3\xa9 end' + utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7' + printenv = [BASH, '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--'] + + def printf_cmd(self, *args): + # bash's printf supports \xaa. So does /usr/bin/printf + # but by using bash, we remove dependency on another program. + return([BASH, '-c', 'printf "$@"', 'printf'] + list(args)) + + def test_subp_handles_bytestrings(self): + """subp can run a bytestring command if shell is True.""" + tmp_file = self.tmp_path('test.out') + cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file) + (out, _err) = subp.subp(cmd.encode('utf-8'), shell=True) + self.assertEqual('', out) + self.assertEqual('', _err) + self.assertEqual('HI MOM\n', util.load_file(tmp_file)) + + def test_subp_handles_strings(self): + """subp can run a string command if shell is True.""" + tmp_file = self.tmp_path('test.out') + cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file) + (out, _err) = subp.subp(cmd, shell=True) + self.assertEqual('', out) + self.assertEqual('', _err) + self.assertEqual('HI MOM\n', util.load_file(tmp_file)) + + def test_subp_handles_utf8(self): + # The given bytes contain utf-8 accented characters as seen in e.g. + # the "deja dup" package in Ubuntu. 
+ cmd = self.printf_cmd(self.utf8_valid_2) + (out, _err) = subp.subp(cmd, capture=True) + self.assertEqual(out, self.utf8_valid_2.decode('utf-8')) + + def test_subp_respects_decode_false(self): + (out, err) = subp.subp(self.stdin2out, capture=True, decode=False, + data=self.utf8_valid) + self.assertTrue(isinstance(out, bytes)) + self.assertTrue(isinstance(err, bytes)) + self.assertEqual(out, self.utf8_valid) + + def test_subp_decode_ignore(self): + # this executes a string that writes invalid utf-8 to stdout + (out, _err) = subp.subp(self.printf_cmd('abc\\xaadef'), + capture=True, decode='ignore') + self.assertEqual(out, 'abcdef') + + def test_subp_decode_strict_valid_utf8(self): + (out, _err) = subp.subp(self.stdin2out, capture=True, + decode='strict', data=self.utf8_valid) + self.assertEqual(out, self.utf8_valid.decode('utf-8')) + + def test_subp_decode_invalid_utf8_replaces(self): + (out, _err) = subp.subp(self.stdin2out, capture=True, + data=self.utf8_invalid) + expected = self.utf8_invalid.decode('utf-8', 'replace') + self.assertEqual(out, expected) + + def test_subp_decode_strict_raises(self): + args = [] + kwargs = {'args': self.stdin2out, 'capture': True, + 'decode': 'strict', 'data': self.utf8_invalid} + self.assertRaises(UnicodeDecodeError, subp.subp, *args, **kwargs) + + def test_subp_capture_stderr(self): + data = b'hello world' + (out, err) = subp.subp(self.stdin2err, capture=True, + decode=False, data=data, + update_env={'LC_ALL': 'C'}) + self.assertEqual(err, data) + self.assertEqual(out, b'') + + def test_subp_reads_env(self): + with mock.patch.dict("os.environ", values={'FOO': 'BAR'}): + out, _err = subp.subp(self.printenv + ['FOO'], capture=True) + self.assertEqual('FOO=BAR', out.splitlines()[0]) + + def test_subp_env_and_update_env(self): + out, _err = subp.subp( + self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True, + env={'FOO': 'BAR'}, + update_env={'HOME': '/myhome', 'K2': 'V2'}) + self.assertEqual( + ['FOO=BAR', 'HOME=/myhome', 'K1=', 'K2=V2'], out.splitlines()) + + def test_subp_update_env(self): + extra = {'FOO': 'BAR', 'HOME': '/root', 'K1': 'V1'} + with mock.patch.dict("os.environ", values=extra): + out, _err = subp.subp( + self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True, + update_env={'HOME': '/myhome', 'K2': 'V2'}) + + self.assertEqual( + ['FOO=BAR', 'HOME=/myhome', 'K1=V1', 'K2=V2'], out.splitlines()) + + def test_subp_warn_missing_shebang(self): + """Warn on no #! in script""" + noshebang = self.tmp_path('noshebang') + util.write_file(noshebang, 'true\n') + + print("os is %s" % os) + os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC) + with self.allow_subp([noshebang]): + self.assertRaisesRegex(subp.ProcessExecutionError, + r'Missing #! 
in script\?', + subp.subp, (noshebang,)) + + def test_subp_combined_stderr_stdout(self): + """Providing combine_capture as True redirects stderr to stdout.""" + data = b'hello world' + (out, err) = subp.subp(self.stdin2err, capture=True, + combine_capture=True, decode=False, data=data) + self.assertEqual(b'', err) + self.assertEqual(data, out) + + def test_returns_none_if_no_capture(self): + (out, err) = subp.subp(self.stdin2out, data=b'', capture=False) + self.assertIsNone(err) + self.assertIsNone(out) + + def test_exception_has_out_err_are_bytes_if_decode_false(self): + """Raised exc should have stderr, stdout as bytes if no decode.""" + with self.assertRaises(subp.ProcessExecutionError) as cm: + subp.subp([BOGUS_COMMAND], decode=False) + self.assertTrue(isinstance(cm.exception.stdout, bytes)) + self.assertTrue(isinstance(cm.exception.stderr, bytes)) + + def test_exception_has_out_err_are_bytes_if_decode_true(self): + """Raised exc should have stderr, stdout as strings when decode is True.""" + with self.assertRaises(subp.ProcessExecutionError) as cm: + subp.subp([BOGUS_COMMAND], decode=True) + self.assertTrue(isinstance(cm.exception.stdout, str)) + self.assertTrue(isinstance(cm.exception.stderr, str)) + + def test_bunch_of_slashes_in_path(self): + self.assertEqual("/target/my/path/", + subp.target_path("/target/", "//my/path/")) + self.assertEqual("/target/my/path/", + subp.target_path("/target/", "///my/path/")) + + def test_c_lang_can_take_utf8_args(self): + """Independent of system LC_CTYPE, args can contain utf-8 strings. + + When python starts up, its default encoding gets set based on + the value of LC_CTYPE. If no system locale is set, the default + encoding for both python2 and python3 in some paths will end up + being ascii. + + Attempts to use setlocale or patching (or changing) os.environ + in the current environment seem to not be effective. + + This test starts up a python with LC_CTYPE set to C so that + the default encoding will be set to ascii. In such an environment + Popen(['command', 'non-ascii-arg']) would cause a UnicodeDecodeError.
+ """ + python_prog = '\n'.join([ + 'import json, sys', + 'from cloudinit.subp import subp', + 'data = sys.stdin.read()', + 'cmd = json.loads(data)', + 'subp(cmd, capture=False)', + '']) + cmd = [BASH, '-c', 'echo -n "$@"', '--', + self.utf8_valid.decode("utf-8")] + python_subp = [sys.executable, '-c', python_prog] + + out, _err = subp.subp( + python_subp, update_env={'LC_CTYPE': 'C'}, + data=json.dumps(cmd).encode("utf-8"), + decode=False) + self.assertEqual(self.utf8_valid, out) + + def test_bogus_command_logs_status_messages(self): + """status_cb gets status messages logs on bogus commands provided.""" + logs = [] + + def status_cb(log): + logs.append(log) + + with self.assertRaises(subp.ProcessExecutionError): + subp.subp([BOGUS_COMMAND], status_cb=status_cb) + + expected = [ + 'Begin run command: {cmd}\n'.format(cmd=BOGUS_COMMAND), + 'ERROR: End run command: invalid command provided\n'] + self.assertEqual(expected, logs) + + def test_command_logs_exit_codes_to_status_cb(self): + """status_cb gets status messages containing command exit code.""" + logs = [] + + def status_cb(log): + logs.append(log) + + with self.assertRaises(subp.ProcessExecutionError): + subp.subp([BASH, '-c', 'exit 2'], status_cb=status_cb) + subp.subp([BASH, '-c', 'exit 0'], status_cb=status_cb) + + expected = [ + 'Begin run command: %s -c exit 2\n' % BASH, + 'ERROR: End run command: exit(2)\n', + 'Begin run command: %s -c exit 0\n' % BASH, + 'End run command: exit(0)\n'] + self.assertEqual(expected, logs) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_temp_utils.py b/tests/unittests/test_temp_utils.py new file mode 100644 index 00000000..9d56d0d0 --- /dev/null +++ b/tests/unittests/test_temp_utils.py @@ -0,0 +1,117 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +"""Tests for cloudinit.temp_utils""" + +from cloudinit.temp_utils import mkdtemp, mkstemp, tempdir +from tests.unittests.helpers import CiTestCase, wrap_and_call +import os + + +class TestTempUtils(CiTestCase): + + def test_mkdtemp_default_non_root(self): + """mkdtemp creates a dir under /tmp for the unprivileged.""" + calls = [] + + def fake_mkdtemp(*args, **kwargs): + calls.append(kwargs) + return '/fake/return/path' + + retval = wrap_and_call( + 'cloudinit.temp_utils', + {'os.getuid': 1000, + 'tempfile.mkdtemp': {'side_effect': fake_mkdtemp}, + '_TMPDIR': {'new': None}, + 'os.path.isdir': True}, + mkdtemp) + self.assertEqual('/fake/return/path', retval) + self.assertEqual([{'dir': '/tmp'}], calls) + + def test_mkdtemp_default_non_root_needs_exe(self): + """mkdtemp creates a dir under /var/tmp/cloud-init when needs_exe.""" + calls = [] + + def fake_mkdtemp(*args, **kwargs): + calls.append(kwargs) + return '/fake/return/path' + + retval = wrap_and_call( + 'cloudinit.temp_utils', + {'os.getuid': 1000, + 'tempfile.mkdtemp': {'side_effect': fake_mkdtemp}, + '_TMPDIR': {'new': None}, + 'os.path.isdir': True}, + mkdtemp, needs_exe=True) + self.assertEqual('/fake/return/path', retval) + self.assertEqual([{'dir': '/var/tmp/cloud-init'}], calls) + + def test_mkdtemp_default_root(self): + """mkdtemp creates a dir under /run/cloud-init for the privileged.""" + calls = [] + + def fake_mkdtemp(*args, **kwargs): + calls.append(kwargs) + return '/fake/return/path' + + retval = wrap_and_call( + 'cloudinit.temp_utils', + {'os.getuid': 0, + 'tempfile.mkdtemp': {'side_effect': fake_mkdtemp}, + '_TMPDIR': {'new': None}, + 'os.path.isdir': True}, + mkdtemp) + self.assertEqual('/fake/return/path', retval) + self.assertEqual([{'dir': '/run/cloud-init/tmp'}], calls) + + def test_mkstemp_default_non_root(self): + """mkstemp creates secure tempfile under /tmp for the unprivileged.""" + calls = [] + + def fake_mkstemp(*args, **kwargs): + calls.append(kwargs) + return '/fake/return/path' + + retval = wrap_and_call( + 'cloudinit.temp_utils', + {'os.getuid': 1000, + 'tempfile.mkstemp': {'side_effect': fake_mkstemp}, + '_TMPDIR': {'new': None}, + 'os.path.isdir': True}, + mkstemp) + self.assertEqual('/fake/return/path', retval) + self.assertEqual([{'dir': '/tmp'}], calls) + + def test_mkstemp_default_root(self): + """mkstemp creates a secure tempfile in /run/cloud-init for root.""" + calls = [] + + def fake_mkstemp(*args, **kwargs): + calls.append(kwargs) + return '/fake/return/path' + + retval = wrap_and_call( + 'cloudinit.temp_utils', + {'os.getuid': 0, + 'tempfile.mkstemp': {'side_effect': fake_mkstemp}, + '_TMPDIR': {'new': None}, + 'os.path.isdir': True}, + mkstemp) + self.assertEqual('/fake/return/path', retval) + self.assertEqual([{'dir': '/run/cloud-init/tmp'}], calls) + + def test_tempdir_error_suppression(self): + """test tempdir suppresses errors during directory removal.""" + + with self.assertRaises(OSError): + with tempdir(prefix='cloud-init-dhcp-') as tdir: + os.rmdir(tdir) + # As a result, the directory is already gone, + # so shutil.rmtree should raise OSError + + with tempdir(rmtree_ignore_errors=True, + prefix='cloud-init-dhcp-') as tdir: + os.rmdir(tdir) + # Since the directory is already gone, shutil.rmtree would raise + # OSError, but we suppress that + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py index cba09830..459e017b 100644 --- a/tests/unittests/test_templating.py +++ b/tests/unittests/test_templating.py @@ -4,7 +4,7 @@ # # 
This file is part of cloud-init. See LICENSE file for license information. -from cloudinit.tests import helpers as test_helpers +from tests.unittests import helpers as test_helpers import textwrap from cloudinit import templater diff --git a/tests/unittests/test_upgrade.py b/tests/unittests/test_upgrade.py new file mode 100644 index 00000000..d7a721a2 --- /dev/null +++ b/tests/unittests/test_upgrade.py @@ -0,0 +1,52 @@ +# Copyright (C) 2020 Canonical Ltd. +# +# Author: Daniel Watkins +# +# This file is part of cloud-init. See LICENSE file for license information. + +"""Upgrade testing for cloud-init. + +This module tests cloud-init's behaviour across upgrades. Specifically, it +specifies a set of invariants that the current codebase expects to be true (as +tests in ``TestUpgrade``) and then checks that these hold true after unpickling +``obj.pkl``s from previous versions of cloud-init; those pickles are stored in +``tests/data/old_pickles/``. +""" + +import operator +import pathlib + +import pytest + +from cloudinit.stages import _pkl_load +from tests.unittests.helpers import resourceLocation + + +class TestUpgrade: + @pytest.fixture( + params=pathlib.Path(resourceLocation("old_pickles")).glob("*.pkl"), + scope="class", + ids=operator.attrgetter("name"), + ) + def previous_obj_pkl(self, request): + """Load each pickle to memory once, then run all tests against it. + + Test implementations _must not_ modify the ``previous_obj_pkl`` which + they are passed, as that will affect tests that run after them. + """ + return _pkl_load(str(request.param)) + + def test_networking_set_on_distro(self, previous_obj_pkl): + """We always expect to have ``.networking`` on ``Distro`` objects.""" + assert previous_obj_pkl.distro.networking is not None + + def test_blacklist_drivers_set_on_networking(self, previous_obj_pkl): + """We always expect Networking.blacklist_drivers to be initialised.""" + assert previous_obj_pkl.distro.networking.blacklist_drivers is None + + def test_paths_has_run_dir_attribute(self, previous_obj_pkl): + assert previous_obj_pkl.paths.run_dir is not None + + def test_vendordata_exists(self, previous_obj_pkl): + assert previous_obj_pkl.vendordata2 is None + assert previous_obj_pkl.vendordata2_raw is None diff --git a/tests/unittests/test_url_helper.py b/tests/unittests/test_url_helper.py new file mode 100644 index 00000000..501d9533 --- /dev/null +++ b/tests/unittests/test_url_helper.py @@ -0,0 +1,178 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit.url_helper import ( + NOT_FOUND, UrlError, REDACTED, oauth_headers, read_file_or_url, + retry_on_url_exc) +from tests.unittests.helpers import CiTestCase, mock, skipIf +from cloudinit import util +from cloudinit import version + +import httpretty +import logging +import requests + + +try: + import oauthlib + assert oauthlib # avoid pyflakes error F401: import unused + _missing_oauthlib_dep = False +except ImportError: + _missing_oauthlib_dep = True + + +M_PATH = 'cloudinit.url_helper.' 
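+# A minimal sketch of the read_file_or_url contract exercised below, using
+# a hypothetical local path:
+#
+#   resp = read_file_or_url('file:///tmp/example.txt')
+#   resp.contents  # raw bytes of the file or HTTP body
+#   str(resp)      # the same content decoded as utf-8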
+ + +class TestOAuthHeaders(CiTestCase): + + def test_oauth_headers_raises_not_implemented_when_oathlib_missing(self): + """oauth_headers raises NotImplementedError when oauthlib is absent.""" + with mock.patch.dict('sys.modules', {'oauthlib': None}): + with self.assertRaises(NotImplementedError) as context_manager: + oauth_headers(1, 2, 3, 4, 5) + self.assertEqual( + 'oauth support is not available', + str(context_manager.exception)) + + @skipIf(_missing_oauthlib_dep, "No python-oauthlib dependency") + @mock.patch('oauthlib.oauth1.Client') + def test_oauth_headers_calls_oathlibclient_when_available(self, m_client): + """oauth_headers calls oauth1.Client.sign with the provided url.""" + class fakeclient(object): + def sign(self, url): + # The first and 3rd item of the client.sign tuple are ignored + return ('junk', url, 'junk2') + + m_client.return_value = fakeclient() + + return_value = oauth_headers( + 'url', 'consumer_key', 'token_key', 'token_secret', + 'consumer_secret') + self.assertEqual('url', return_value) + + +class TestReadFileOrUrl(CiTestCase): + + with_logs = True + + def test_read_file_or_url_str_from_file(self): + """Test that str(result.contents) on file is text version of contents. + It should not be "b'data'", but just "'data'" """ + tmpf = self.tmp_path("myfile1") + data = b'This is my file content\n' + util.write_file(tmpf, data, omode="wb") + result = read_file_or_url("file://%s" % tmpf) + self.assertEqual(result.contents, data) + self.assertEqual(str(result), data.decode('utf-8')) + + @httpretty.activate + def test_read_file_or_url_str_from_url(self): + """Test that str(result.contents) on url is text version of contents. + It should not be "b'data'", but just "'data'" """ + url = 'http://hostname/path' + data = b'This is my url content\n' + httpretty.register_uri(httpretty.GET, url, data) + result = read_file_or_url(url) + self.assertEqual(result.contents, data) + self.assertEqual(str(result), data.decode('utf-8')) + + @httpretty.activate + def test_read_file_or_url_str_from_url_redacting_headers_from_logs(self): + """Headers are redacted from logs but unredacted in requests.""" + url = 'http://hostname/path' + headers = {'sensitive': 'sekret', 'server': 'blah'} + httpretty.register_uri(httpretty.GET, url) + # By default, httpretty will log our request along with the header, + # so if we don't change this the secret will show up in the logs + logging.getLogger('httpretty.core').setLevel(logging.CRITICAL) + + read_file_or_url(url, headers=headers, headers_redact=['sensitive']) + logs = self.logs.getvalue() + for k in headers.keys(): + self.assertEqual(headers[k], httpretty.last_request().headers[k]) + self.assertIn(REDACTED, logs) + self.assertNotIn('sekret', logs) + + @httpretty.activate + def test_read_file_or_url_str_from_url_redacts_noheaders(self): + """When no headers_redact, header values are in logs and requests.""" + url = 'http://hostname/path' + headers = {'sensitive': 'sekret', 'server': 'blah'} + httpretty.register_uri(httpretty.GET, url) + + read_file_or_url(url, headers=headers) + for k in headers.keys(): + self.assertEqual(headers[k], httpretty.last_request().headers[k]) + logs = self.logs.getvalue() + self.assertNotIn(REDACTED, logs) + self.assertIn('sekret', logs) + + @mock.patch(M_PATH + 'readurl') + def test_read_file_or_url_passes_params_to_readurl(self, m_readurl): + """read_file_or_url passes all params through to readurl.""" + url = 'http://hostname/path' + response = 'This is my url content\n' + m_readurl.return_value = response + params = 
{'url': url, 'timeout': 1, 'retries': 2, + 'headers': {'somehdr': 'val'}, + 'data': 'data', 'sec_between': 1, + 'ssl_details': {'cert_file': '/path/cert.pem'}, + 'headers_cb': 'headers_cb', 'exception_cb': 'exception_cb'} + self.assertEqual(response, read_file_or_url(**params)) + params.pop('url') # url is passed in as a positional arg + self.assertEqual([mock.call(url, **params)], m_readurl.call_args_list) + + def test_wb_read_url_defaults_honored_by_read_file_or_url_callers(self): + """Readurl param defaults used when unspecified by read_file_or_url + + Param defaults tested are as follows: + retries: 0, additional headers None beyond default, method: GET, + data: None, check_status: True and allow_redirects: True + """ + url = 'http://hostname/path' + + m_response = mock.MagicMock() + + class FakeSession(requests.Session): + @classmethod + def request(cls, **kwargs): + self.assertEqual( + {'url': url, 'allow_redirects': True, 'method': 'GET', + 'headers': { + 'User-Agent': 'Cloud-Init/%s' % ( + version.version_string())}}, + kwargs) + return m_response + + with mock.patch(M_PATH + 'requests.Session') as m_session: + error = requests.exceptions.HTTPError('broke') + m_session.side_effect = [error, FakeSession()] + # assert no retries and check_status == True + with self.assertRaises(UrlError) as context_manager: + response = read_file_or_url(url) + self.assertEqual('broke', str(context_manager.exception)) + # assert default headers, method, url and allow_redirects True + # Success on 2nd call with FakeSession + response = read_file_or_url(url) + self.assertEqual(m_response, response._response) + + +class TestRetryOnUrlExc(CiTestCase): + + def test_do_not_retry_non_urlerror(self): + """When exception is not UrlError return False.""" + myerror = IOError('something unexpected') + self.assertFalse(retry_on_url_exc(msg='', exc=myerror)) + + def test_perform_retries_on_not_found(self): + """When exception is UrlError with a 404 status code return True.""" + myerror = UrlError(cause=RuntimeError( + 'something was not found'), code=NOT_FOUND) + self.assertTrue(retry_on_url_exc(msg='', exc=myerror)) + + def test_perform_retries_on_timeout(self): + """When exception is a requests.Timeout return True.""" + myerror = UrlError(cause=requests.Timeout('something timed out')) + self.assertTrue(retry_on_url_exc(msg='', exc=myerror)) + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index bc30c90b..1290cbc6 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -1,23 +1,1311 @@ # This file is part of cloud-init. See LICENSE file for license information.
-import io
+"""Tests for cloudinit.util"""
+
+import base64
 import logging
+import json
+import platform
+import pytest
+
+import io
 import os
 import re
 import shutil
 import stat
 import tempfile
-import pytest
 import yaml
 
 from unittest import mock
 
 from cloudinit import subp
 from cloudinit import importer, util
-from cloudinit.tests import helpers
+from tests.unittests import helpers
+
+
+from tests.unittests.helpers import CiTestCase
+from textwrap import dedent
+
+LOG = logging.getLogger(__name__)
+
+MOUNT_INFO = [
+    '68 0 8:3 / / ro,relatime shared:1 - btrfs /dev/sda1 ro,attr2,inode64',
+    '153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2',
+]
+
+OS_RELEASE_SLES = dedent(
+    """\
+    NAME="SLES"
+    VERSION="12-SP3"
+    VERSION_ID="12.3"
+    PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3"
+    ID="sles"
+    ANSI_COLOR="0;32"
+    CPE_NAME="cpe:/o:suse:sles:12:sp3"
+"""
+)
+
+OS_RELEASE_OPENSUSE = dedent(
+    """\
+    NAME="openSUSE Leap"
+    VERSION="42.3"
+    ID=opensuse
+    ID_LIKE="suse"
+    VERSION_ID="42.3"
+    PRETTY_NAME="openSUSE Leap 42.3"
+    ANSI_COLOR="0;32"
+    CPE_NAME="cpe:/o:opensuse:leap:42.3"
+    BUG_REPORT_URL="https://bugs.opensuse.org"
+    HOME_URL="https://www.opensuse.org/"
+"""
+)
+
+OS_RELEASE_OPENSUSE_L15 = dedent(
+    """\
+    NAME="openSUSE Leap"
+    VERSION="15.0"
+    ID="opensuse-leap"
+    ID_LIKE="suse opensuse"
+    VERSION_ID="15.0"
+    PRETTY_NAME="openSUSE Leap 15.0"
+    ANSI_COLOR="0;32"
+    CPE_NAME="cpe:/o:opensuse:leap:15.0"
+    BUG_REPORT_URL="https://bugs.opensuse.org"
+    HOME_URL="https://www.opensuse.org/"
+"""
+)
+
+OS_RELEASE_OPENSUSE_TW = dedent(
+    """\
+    NAME="openSUSE Tumbleweed"
+    ID="opensuse-tumbleweed"
+    ID_LIKE="opensuse suse"
+    VERSION_ID="20180920"
+    PRETTY_NAME="openSUSE Tumbleweed"
+    ANSI_COLOR="0;32"
+    CPE_NAME="cpe:/o:opensuse:tumbleweed:20180920"
+    BUG_REPORT_URL="https://bugs.opensuse.org"
+    HOME_URL="https://www.opensuse.org/"
+"""
+)
+
+OS_RELEASE_CENTOS = dedent(
+    """\
+    NAME="CentOS Linux"
+    VERSION="7 (Core)"
+    ID="centos"
+    ID_LIKE="rhel fedora"
+    VERSION_ID="7"
+    PRETTY_NAME="CentOS Linux 7 (Core)"
+    ANSI_COLOR="0;31"
+    CPE_NAME="cpe:/o:centos:centos:7"
+    HOME_URL="https://www.centos.org/"
+    BUG_REPORT_URL="https://bugs.centos.org/"
+
+    CENTOS_MANTISBT_PROJECT="CentOS-7"
+    CENTOS_MANTISBT_PROJECT_VERSION="7"
+    REDHAT_SUPPORT_PRODUCT="centos"
+    REDHAT_SUPPORT_PRODUCT_VERSION="7"
+"""
+)
+
+OS_RELEASE_REDHAT_7 = dedent(
+    """\
+    NAME="Red Hat Enterprise Linux Server"
+    VERSION="7.5 (Maipo)"
+    ID="rhel"
+    ID_LIKE="fedora"
+    VARIANT="Server"
+    VARIANT_ID="server"
+    VERSION_ID="7.5"
+    PRETTY_NAME="Red Hat"
+    ANSI_COLOR="0;31"
+    CPE_NAME="cpe:/o:redhat:enterprise_linux:7.5:GA:server"
+    HOME_URL="https://www.redhat.com/"
+    BUG_REPORT_URL="https://bugzilla.redhat.com/"
+
+    REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7"
+    REDHAT_BUGZILLA_PRODUCT_VERSION=7.5
+    REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux"
+    REDHAT_SUPPORT_PRODUCT_VERSION="7.5"
+"""
+)
+
+OS_RELEASE_ALMALINUX_8 = dedent(
+    """\
+    NAME="AlmaLinux"
+    VERSION="8.3 (Purple Manul)"
+    ID="almalinux"
+    ID_LIKE="rhel centos fedora"
+    VERSION_ID="8.3"
+    PLATFORM_ID="platform:el8"
+    PRETTY_NAME="AlmaLinux 8.3 (Purple Manul)"
+    ANSI_COLOR="0;34"
+    CPE_NAME="cpe:/o:almalinux:almalinux:8.3:GA"
+    HOME_URL="https://almalinux.org/"
+    BUG_REPORT_URL="https://bugs.almalinux.org/"
+
+    ALMALINUX_MANTISBT_PROJECT="AlmaLinux-8"
+    ALMALINUX_MANTISBT_PROJECT_VERSION="8.3"
+"""
+)
+
+OS_RELEASE_EUROLINUX_7 = dedent(
+    """\
+    VERSION="7.9 (Minsk)"
+    ID="eurolinux"
+    ID_LIKE="rhel scientific centos fedora"
+    VERSION_ID="7.9"
+    PRETTY_NAME="EuroLinux 7.9 (Minsk)"
+    ANSI_COLOR="0;31"
+    CPE_NAME="cpe:/o:eurolinux:eurolinux:7.9:GA"
+    HOME_URL="http://www.euro-linux.com/"
+    BUG_REPORT_URL="mailto:support@euro-linux.com"
+    REDHAT_BUGZILLA_PRODUCT="EuroLinux 7"
+    REDHAT_BUGZILLA_PRODUCT_VERSION=7.9
+    REDHAT_SUPPORT_PRODUCT="EuroLinux"
+    REDHAT_SUPPORT_PRODUCT_VERSION="7.9"
+"""
+)
+
+OS_RELEASE_EUROLINUX_8 = dedent(
+    """\
+    NAME="EuroLinux"
+    VERSION="8.4 (Vaduz)"
+    ID="eurolinux"
+    ID_LIKE="rhel fedora centos"
+    VERSION_ID="8.4"
+    PLATFORM_ID="platform:el8"
+    PRETTY_NAME="EuroLinux 8.4 (Vaduz)"
+    ANSI_COLOR="0;34"
+    CPE_NAME="cpe:/o:eurolinux:eurolinux:8"
+    HOME_URL="https://www.euro-linux.com/"
+    BUG_REPORT_URL="https://github.com/EuroLinux/eurolinux-distro-bugs-and-rfc/"
+    REDHAT_SUPPORT_PRODUCT="EuroLinux"
+    REDHAT_SUPPORT_PRODUCT_VERSION="8"
+"""
+)
+
+OS_RELEASE_ROCKY_8 = dedent(
+    """\
+    NAME="Rocky Linux"
+    VERSION="8.3 (Green Obsidian)"
+    ID="rocky"
+    ID_LIKE="rhel fedora"
+    VERSION_ID="8.3"
+    PLATFORM_ID="platform:el8"
+    PRETTY_NAME="Rocky Linux 8.3 (Green Obsidian)"
+    ANSI_COLOR="0;31"
+    CPE_NAME="cpe:/o:rocky:rocky:8"
+    HOME_URL="https://rockylinux.org/"
+    BUG_REPORT_URL="https://bugs.rockylinux.org/"
+    ROCKY_SUPPORT_PRODUCT="Rocky Linux"
+    ROCKY_SUPPORT_PRODUCT_VERSION="8"
+"""
+)
+
+OS_RELEASE_VIRTUOZZO_8 = dedent(
+    """\
+    NAME="Virtuozzo Linux"
+    VERSION="8"
+    ID="virtuozzo"
+    ID_LIKE="rhel fedora"
+    VERSION_ID="8"
+    PLATFORM_ID="platform:el8"
+    PRETTY_NAME="Virtuozzo Linux"
+    ANSI_COLOR="0;31"
+    CPE_NAME="cpe:/o:virtuozzoproject:vzlinux:8"
+    HOME_URL="https://www.vzlinux.org"
+    BUG_REPORT_URL="https://bugs.openvz.org"
+"""
+)
+
+OS_RELEASE_CLOUDLINUX_8 = dedent(
+    """\
+    NAME="CloudLinux"
+    VERSION="8.4 (Valery Rozhdestvensky)"
+    ID="cloudlinux"
+    ID_LIKE="rhel fedora centos"
+    VERSION_ID="8.4"
+    PLATFORM_ID="platform:el8"
+    PRETTY_NAME="CloudLinux 8.4 (Valery Rozhdestvensky)"
+    ANSI_COLOR="0;31"
+    CPE_NAME="cpe:/o:cloudlinux:cloudlinux:8.4:GA:server"
+    HOME_URL="https://www.cloudlinux.com/"
+    BUG_REPORT_URL="https://www.cloudlinux.com/support"
+"""
+)
+
+OS_RELEASE_OPENEULER_20 = dedent(
+    """\
+    NAME="openEuler"
+    VERSION="20.03 (LTS-SP2)"
+    ID="openEuler"
+    VERSION_ID="20.03"
+    PRETTY_NAME="openEuler 20.03 (LTS-SP2)"
+    ANSI_COLOR="0;31"
+"""
+)
+
+REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)"
+REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)"
+REDHAT_RELEASE_REDHAT_6 = (
+    "Red Hat Enterprise Linux Server release 6.10 (Santiago)"
+)
+REDHAT_RELEASE_REDHAT_7 = "Red Hat Enterprise Linux Server release 7.5 (Maipo)"
+REDHAT_RELEASE_ALMALINUX_8 = "AlmaLinux release 8.3 (Purple Manul)"
+REDHAT_RELEASE_EUROLINUX_7 = "EuroLinux release 7.9 (Minsk)"
+REDHAT_RELEASE_EUROLINUX_8 = "EuroLinux release 8.4 (Vaduz)"
+REDHAT_RELEASE_ROCKY_8 = "Rocky Linux release 8.3 (Green Obsidian)"
+REDHAT_RELEASE_VIRTUOZZO_8 = "Virtuozzo Linux release 8"
+REDHAT_RELEASE_CLOUDLINUX_8 = "CloudLinux release 8.4 (Valery Rozhdestvensky)"
+OS_RELEASE_DEBIAN = dedent(
+    """\
+    PRETTY_NAME="Debian GNU/Linux 9 (stretch)"
+    NAME="Debian GNU/Linux"
+    VERSION_ID="9"
+    VERSION="9 (stretch)"
+    ID=debian
+    HOME_URL="https://www.debian.org/"
+    SUPPORT_URL="https://www.debian.org/support"
+    BUG_REPORT_URL="https://bugs.debian.org/"
+"""
+)
+
+OS_RELEASE_UBUNTU = dedent(
+    """\
+    NAME="Ubuntu"\n
+    # comment test
+    VERSION="16.04.3 LTS (Xenial Xerus)"\n
+    ID=ubuntu\n
+    ID_LIKE=debian\n
+    PRETTY_NAME="Ubuntu 16.04.3 LTS"\n
+    VERSION_ID="16.04"\n
+    HOME_URL="http://www.ubuntu.com/"\n
+    SUPPORT_URL="http://help.ubuntu.com/"\n
+    BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"\n
+    VERSION_CODENAME=xenial\n
+    UBUNTU_CODENAME=xenial\n
+"""
+)
+
+OS_RELEASE_PHOTON = """\
+        NAME="VMware Photon OS"
+        VERSION="4.0"
+        ID=photon
+        VERSION_ID=4.0
+        PRETTY_NAME="VMware Photon OS/Linux"
+        ANSI_COLOR="1;34"
+        HOME_URL="https://vmware.github.io/photon/"
+        BUG_REPORT_URL="https://github.com/vmware/photon/issues"
+"""
+
+
+class FakeCloud(object):
+    def __init__(self, hostname, fqdn):
+        self.hostname = hostname
+        self.fqdn = fqdn
+        self.calls = []
+
+    def get_hostname(self, fqdn=None, metadata_only=None):
+        myargs = {}
+        if fqdn is not None:
+            myargs['fqdn'] = fqdn
+        if metadata_only is not None:
+            myargs['metadata_only'] = metadata_only
+        self.calls.append(myargs)
+        if fqdn:
+            return self.fqdn
+        return self.hostname
+
+
+class TestUtil(CiTestCase):
+    def test_parse_mount_info_no_opts_no_arg(self):
+        result = util.parse_mount_info('/home', MOUNT_INFO, LOG)
+        self.assertEqual(('/dev/sda2', 'xfs', '/home'), result)
+
+    def test_parse_mount_info_no_opts_arg(self):
+        result = util.parse_mount_info('/home', MOUNT_INFO, LOG, False)
+        self.assertEqual(('/dev/sda2', 'xfs', '/home'), result)
+
+    def test_parse_mount_info_with_opts(self):
+        result = util.parse_mount_info('/', MOUNT_INFO, LOG, True)
+        self.assertEqual(('/dev/sda1', 'btrfs', '/', 'ro,relatime'), result)
+
+    @mock.patch('cloudinit.util.get_mount_info')
+    def test_mount_is_rw(self, m_mount_info):
+        m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'rw,relatime')
+        is_rw = util.mount_is_read_write('/')
+        self.assertEqual(is_rw, True)
+
+    @mock.patch('cloudinit.util.get_mount_info')
+    def test_mount_is_ro(self, m_mount_info):
+        m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'ro,relatime')
+        is_rw = util.mount_is_read_write('/')
+        self.assertEqual(is_rw, False)
+
+
+class TestUptime(CiTestCase):
+    @mock.patch('cloudinit.util.boottime')
+    @mock.patch('cloudinit.util.os.path.exists')
+    @mock.patch('cloudinit.util.time.time')
+    def test_uptime_non_linux_path(self, m_time, m_exists, m_boottime):
+        boottime = 1000.0
+        uptime = 10.0
+        m_boottime.return_value = boottime
+        m_time.return_value = boottime + uptime
+        m_exists.return_value = False
+        result = util.uptime()
+        self.assertEqual(str(uptime), result)
+
+
+class TestShellify(CiTestCase):
+    def test_input_dict_raises_type_error(self):
+        self.assertRaisesRegex(
+            TypeError,
+            'Input.*was.*dict.*xpected',
+            util.shellify,
+            {'mykey': 'myval'},
+        )
+
+    def test_input_str_raises_type_error(self):
+        self.assertRaisesRegex(
+            TypeError, 'Input.*was.*str.*xpected', util.shellify, "foobar"
+        )
 
-class FakeSelinux(object):
+    def test_value_with_int_raises_type_error(self):
+        self.assertRaisesRegex(
+            TypeError, 'shellify.*int', util.shellify, ["foo", 1]
+        )
+
+    def test_supports_strings_and_lists(self):
+        self.assertEqual(
+            '\n'.join(
+                [
+                    "#!/bin/sh",
+                    "echo hi mom",
+                    "'echo' 'hi dad'",
+                    "'echo' 'hi' 'sis'",
+                    "",
+                ]
+            ),
+            util.shellify(
+                ["echo hi mom", ["echo", "hi dad"], ('echo', 'hi', 'sis')]
+            ),
+        )
+
+    def test_supports_comments(self):
+        self.assertEqual(
+            '\n'.join(["#!/bin/sh", "echo start", "echo end", ""]),
+            util.shellify(["echo start", None, "echo end"]),
+        )
+
+
+class TestGetHostnameFqdn(CiTestCase):
+    def test_get_hostname_fqdn_from_only_cfg_fqdn(self):
+        """When cfg only has the fqdn key, derive hostname and fqdn from it."""
+        hostname, fqdn = util.get_hostname_fqdn(
+            cfg={'fqdn': 'myhost.domain.com'}, cloud=None
+        )
+        self.assertEqual('myhost', hostname)
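+        # the fqdn itself should be passed through unchanged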
+        self.assertEqual('myhost.domain.com', fqdn)
+
+    def test_get_hostname_fqdn_from_cfg_fqdn_and_hostname(self):
+        """When cfg has both fqdn and hostname keys, return them."""
+        hostname, fqdn = util.get_hostname_fqdn(
+            cfg={'fqdn': 'myhost.domain.com', 'hostname': 'other'}, cloud=None
+        )
+        self.assertEqual('other', hostname)
+        self.assertEqual('myhost.domain.com', fqdn)
+
+    def test_get_hostname_fqdn_from_cfg_hostname_with_domain(self):
+        """When cfg has only hostname key which represents a fqdn, use that."""
+        hostname, fqdn = util.get_hostname_fqdn(
+            cfg={'hostname': 'myhost.domain.com'}, cloud=None
+        )
+        self.assertEqual('myhost', hostname)
+        self.assertEqual('myhost.domain.com', fqdn)
+
+    def test_get_hostname_fqdn_from_cfg_hostname_without_domain(self):
+        """When cfg has a hostname without a '.' query cloud.get_hostname."""
+        mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com')
+        hostname, fqdn = util.get_hostname_fqdn(
+            cfg={'hostname': 'myhost'}, cloud=mycloud
+        )
+        self.assertEqual('myhost', hostname)
+        self.assertEqual('cloudhost.mycloud.com', fqdn)
+        self.assertEqual(
+            [{'fqdn': True, 'metadata_only': False}], mycloud.calls
+        )
+
+    def test_get_hostname_fqdn_from_without_fqdn_or_hostname(self):
+        """When cfg has neither hostname nor fqdn cloud.get_hostname."""
+        mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com')
+        hostname, fqdn = util.get_hostname_fqdn(cfg={}, cloud=mycloud)
+        self.assertEqual('cloudhost', hostname)
+        self.assertEqual('cloudhost.mycloud.com', fqdn)
+        self.assertEqual(
+            [{'fqdn': True, 'metadata_only': False}, {'metadata_only': False}],
+            mycloud.calls,
+        )
+
+    def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self):
+        """Calls to cloud.get_hostname pass the metadata_only parameter."""
+        mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com')
+        _hn, _fqdn = util.get_hostname_fqdn(
+            cfg={}, cloud=mycloud, metadata_only=True
+        )
+        self.assertEqual(
+            [{'fqdn': True, 'metadata_only': True}, {'metadata_only': True}],
+            mycloud.calls,
+        )
+
+
+class TestBlkid(CiTestCase):
+    ids = {
+        "id01": "1111-1111",
+        "id02": "22222222-2222",
+        "id03": "33333333-3333",
+        "id04": "44444444-4444",
+        "id05": "55555555-5555-5555-5555-555555555555",
+        "id06": "66666666-6666-6666-6666-666666666666",
+        "id07": "52894610484658920398",
+        "id08": "86753098675309867530",
+        "id09": "99999999-9999-9999-9999-999999999999",
+    }
+
+    blkid_out = dedent(
+        """\
+        /dev/loop0: TYPE="squashfs"
+        /dev/loop1: TYPE="squashfs"
+        /dev/loop2: TYPE="squashfs"
+        /dev/loop3: TYPE="squashfs"
+        /dev/sda1: UUID="{id01}" TYPE="vfat" PARTUUID="{id02}"
+        /dev/sda2: UUID="{id03}" TYPE="ext4" PARTUUID="{id04}"
+        /dev/sda3: UUID="{id05}" TYPE="ext4" PARTUUID="{id06}"
+        /dev/sda4: LABEL="default" UUID="{id07}" UUID_SUB="{id08}" """
+        """TYPE="zfs_member" PARTUUID="{id09}"
+        /dev/loop4: TYPE="squashfs"
+        """
+    )
+
+    maxDiff = None
+
+    def _get_expected(self):
+        return {
+            "/dev/loop0": {"DEVNAME": "/dev/loop0", "TYPE": "squashfs"},
+            "/dev/loop1": {"DEVNAME": "/dev/loop1", "TYPE": "squashfs"},
+            "/dev/loop2": {"DEVNAME": "/dev/loop2", "TYPE": "squashfs"},
+            "/dev/loop3": {"DEVNAME": "/dev/loop3", "TYPE": "squashfs"},
+            "/dev/loop4": {"DEVNAME": "/dev/loop4", "TYPE": "squashfs"},
+            "/dev/sda1": {
+                "DEVNAME": "/dev/sda1",
+                "TYPE": "vfat",
+                "UUID": self.ids["id01"],
+                "PARTUUID": self.ids["id02"],
+            },
+            "/dev/sda2": {
+                "DEVNAME": "/dev/sda2",
+                "TYPE": "ext4",
+                "UUID": self.ids["id03"],
+                "PARTUUID": self.ids["id04"],
+            },
+            "/dev/sda3": {
+                "DEVNAME": "/dev/sda3",
+                "TYPE": "ext4",
+                "UUID": self.ids["id05"],
+                "PARTUUID": self.ids["id06"],
+            },
+            "/dev/sda4": {
+                "DEVNAME": "/dev/sda4",
+                "TYPE": "zfs_member",
+                "LABEL": "default",
+                "UUID": self.ids["id07"],
+                "UUID_SUB": self.ids["id08"],
+                "PARTUUID": self.ids["id09"],
+            },
+        }
+
+    @mock.patch("cloudinit.subp.subp")
+    def test_functional_blkid(self, m_subp):
+        m_subp.return_value = (self.blkid_out.format(**self.ids), "")
+        self.assertEqual(self._get_expected(), util.blkid())
+        m_subp.assert_called_with(
+            ["blkid", "-o", "full"], capture=True, decode="replace"
+        )
+
+    @mock.patch("cloudinit.subp.subp")
+    def test_blkid_no_cache_uses_no_cache(self, m_subp):
+        """blkid should turn off cache if disable_cache is true."""
+        m_subp.return_value = (self.blkid_out.format(**self.ids), "")
+        self.assertEqual(self._get_expected(), util.blkid(disable_cache=True))
+        m_subp.assert_called_with(
+            ["blkid", "-o", "full", "-c", "/dev/null"],
+            capture=True,
+            decode="replace",
+        )
+
+
+@mock.patch('cloudinit.subp.subp')
+class TestUdevadmSettle(CiTestCase):
+    def test_with_no_params(self, m_subp):
+        """called with no parameters."""
+        util.udevadm_settle()
+        m_subp.assert_called_once_with(['udevadm', 'settle'])
+
+    def test_with_exists_and_not_exists(self, m_subp):
+        """with exists=file where file does not exist should invoke subp."""
+        mydev = self.tmp_path("mydev")
+        util.udevadm_settle(exists=mydev)
+        m_subp.assert_called_once_with(
+            ['udevadm', 'settle', '--exit-if-exists=%s' % mydev]
+        )
+
+    def test_with_exists_and_file_exists(self, m_subp):
+        """with exists=file where file does exist should not invoke subp."""
+        mydev = self.tmp_path("mydev")
+        util.write_file(mydev, "foo\n")
+        util.udevadm_settle(exists=mydev)
+        self.assertIsNone(m_subp.call_args)
+
+    def test_with_timeout_int(self, m_subp):
+        """timeout can be an integer."""
+        timeout = 9
+        util.udevadm_settle(timeout=timeout)
+        m_subp.assert_called_once_with(
+            ['udevadm', 'settle', '--timeout=%s' % timeout]
+        )
+
+    def test_with_timeout_string(self, m_subp):
+        """timeout can be a string."""
+        timeout = "555"
+        util.udevadm_settle(timeout=timeout)
+        m_subp.assert_called_once_with(
+            ['udevadm', 'settle', '--timeout=%s' % timeout]
+        )
+
+    def test_with_exists_and_timeout(self, m_subp):
+        """test call with both exists and timeout."""
+        mydev = self.tmp_path("mydev")
+        timeout = "3"
+        util.udevadm_settle(exists=mydev, timeout=timeout)
+        m_subp.assert_called_once_with(
+            [
+                'udevadm',
+                'settle',
+                '--exit-if-exists=%s' % mydev,
+                '--timeout=%s' % timeout,
+            ]
+        )
+
+    def test_subp_exception_raises_to_caller(self, m_subp):
+        m_subp.side_effect = subp.ProcessExecutionError("BOOM")
+        self.assertRaises(subp.ProcessExecutionError, util.udevadm_settle)
+
+
+@mock.patch('os.path.exists')
+class TestGetLinuxDistro(CiTestCase):
+    def setUp(self):
+        # python2 has no lru_cache, and therefore, no cache_clear()
+        if hasattr(util.get_linux_distro, "cache_clear"):
+            util.get_linux_distro.cache_clear()
+
+    @classmethod
+    def os_release_exists(self, path):
+        """Side effect function"""
+        if path == '/etc/os-release':
+            return 1
+
+    @classmethod
+    def redhat_release_exists(self, path):
+        """Side effect function"""
+        if path == '/etc/redhat-release':
+            return 1
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists):
+        """Verify we get the correct name if the os-release file has
+        the distro name in quotes"""
+        m_os_release.return_value = OS_RELEASE_SLES
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('sles', '12.3', platform.machine()), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_distro_bare_name(self, m_os_release, m_path_exists):
+        """Verify we get the correct name if the os-release file does not
+        have the distro name in quotes"""
+        m_os_release.return_value = OS_RELEASE_UBUNTU
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('ubuntu', '16.04', 'xenial'), dist)
+
+    @mock.patch('platform.system')
+    @mock.patch('platform.release')
+    @mock.patch('cloudinit.util._parse_redhat_release')
+    def test_get_linux_freebsd(
+        self,
+        m_parse_redhat_release,
+        m_platform_release,
+        m_platform_system,
+        m_path_exists,
+    ):
+        """Verify we get the correct name and release name on FreeBSD."""
+        m_path_exists.return_value = False
+        m_platform_release.return_value = '12.0-RELEASE-p10'
+        m_platform_system.return_value = 'FreeBSD'
+        m_parse_redhat_release.return_value = {}
+        util.is_BSD.cache_clear()
+        dist = util.get_linux_distro()
+        self.assertEqual(('freebsd', '12.0-RELEASE-p10', ''), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_centos6(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and release name on CentOS 6."""
+        m_os_release.return_value = REDHAT_RELEASE_CENTOS_6
+        m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('centos', '6.10', 'Final'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_centos7_redhat_release(self, m_os_release, m_exists):
+        """Verify the correct release info on CentOS 7 without os-release."""
+        m_os_release.return_value = REDHAT_RELEASE_CENTOS_7
+        m_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('centos', '7.5.1804', 'Core'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_redhat7_osrelease(self, m_os_release, m_path_exists):
+        """Verify redhat 7 read from os-release."""
+        m_os_release.return_value = OS_RELEASE_REDHAT_7
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('redhat', '7.5', 'Maipo'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_redhat7_rhrelease(self, m_os_release, m_path_exists):
+        """Verify redhat 7 read from redhat-release."""
+        m_os_release.return_value = REDHAT_RELEASE_REDHAT_7
+        m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('redhat', '7.5', 'Maipo'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_redhat6_rhrelease(self, m_os_release, m_path_exists):
+        """Verify redhat 6 read from redhat-release."""
+        m_os_release.return_value = REDHAT_RELEASE_REDHAT_6
+        m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('redhat', '6.10', 'Santiago'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_copr_centos(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and release name on COPR CentOS."""
+        m_os_release.return_value = OS_RELEASE_CENTOS
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('centos', '7', 'Core'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_almalinux8_rhrelease(self, m_os_release, m_path_exists):
+        """Verify almalinux 8 read from redhat-release."""
+        m_os_release.return_value = REDHAT_RELEASE_ALMALINUX_8
+        m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_almalinux8_osrelease(self, m_os_release, m_path_exists):
+        """Verify almalinux 8 read from os-release."""
+        m_os_release.return_value = OS_RELEASE_ALMALINUX_8
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_eurolinux7_rhrelease(self, m_os_release, m_path_exists):
+        """Verify eurolinux 7 read from redhat-release."""
+        m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_7
+        m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('eurolinux', '7.9', 'Minsk'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_eurolinux7_osrelease(self, m_os_release, m_path_exists):
+        """Verify eurolinux 7 read from os-release."""
+        m_os_release.return_value = OS_RELEASE_EUROLINUX_7
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('eurolinux', '7.9', 'Minsk'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_eurolinux8_rhrelease(self, m_os_release, m_path_exists):
+        """Verify eurolinux 8 read from redhat-release."""
+        m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_8
+        m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_eurolinux8_osrelease(self, m_os_release, m_path_exists):
+        """Verify eurolinux 8 read from os-release."""
+        m_os_release.return_value = OS_RELEASE_EUROLINUX_8
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists):
+        """Verify rocky linux 8 read from redhat-release."""
+        m_os_release.return_value = REDHAT_RELEASE_ROCKY_8
+        m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('rocky', '8.3', 'Green Obsidian'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_rocky8_osrelease(self, m_os_release, m_path_exists):
+        """Verify rocky linux 8 read from os-release."""
+        m_os_release.return_value = OS_RELEASE_ROCKY_8
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('rocky', '8.3', 'Green Obsidian'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_virtuozzo8_rhrelease(self, m_os_release, m_path_exists):
+        """Verify virtuozzo linux 8 read from redhat-release."""
+        m_os_release.return_value = REDHAT_RELEASE_VIRTUOZZO_8
+        m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('virtuozzo', '8', 'Virtuozzo Linux'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_virtuozzo8_osrelease(self, m_os_release, m_path_exists):
+        """Verify virtuozzo linux 8 read from os-release."""
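+        # the expected flavor comes from PRETTY_NAME; there is no codename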
+        m_os_release.return_value = OS_RELEASE_VIRTUOZZO_8
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('virtuozzo', '8', 'Virtuozzo Linux'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_cloud8_rhrelease(self, m_os_release, m_path_exists):
+        """Verify cloudlinux 8 read from redhat-release."""
+        m_os_release.return_value = REDHAT_RELEASE_CLOUDLINUX_8
+        m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('cloudlinux', '8.4', 'Valery Rozhdestvensky'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_cloud8_osrelease(self, m_os_release, m_path_exists):
+        """Verify cloudlinux 8 read from os-release."""
+        m_os_release.return_value = OS_RELEASE_CLOUDLINUX_8
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('cloudlinux', '8.4', 'Valery Rozhdestvensky'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_debian(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and release name on Debian."""
+        m_os_release.return_value = OS_RELEASE_DEBIAN
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('debian', '9', 'stretch'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_openeuler(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and release name on openEuler."""
+        m_os_release.return_value = OS_RELEASE_OPENEULER_20
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('openEuler', '20.03', 'LTS-SP2'), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_opensuse(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and machine arch on openSUSE
+        prior to openSUSE Leap 15.
+        """
+        m_os_release.return_value = OS_RELEASE_OPENSUSE
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('opensuse', '42.3', platform.machine()), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_opensuse_l15(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and machine arch on openSUSE
+        for openSUSE Leap 15.0 and later.
+        """
+        m_os_release.return_value = OS_RELEASE_OPENSUSE_L15
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('opensuse-leap', '15.0', platform.machine()), dist)
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_opensuse_tw(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and machine arch on openSUSE
+        for openSUSE Tumbleweed
+        """
+        m_os_release.return_value = OS_RELEASE_OPENSUSE_TW
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(
+            ('opensuse-tumbleweed', '20180920', platform.machine()), dist
+        )
+
+    @mock.patch('cloudinit.util.load_file')
+    def test_get_linux_photon_os_release(self, m_os_release, m_path_exists):
+        """Verify we get the correct name and machine arch on PhotonOS"""
+        m_os_release.return_value = OS_RELEASE_PHOTON
+        m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+        dist = util.get_linux_distro()
+        self.assertEqual(('photon', '4.0', 'VMware Photon OS/Linux'), dist)
+
+    @mock.patch('platform.system')
+    @mock.patch('platform.dist', create=True)
+    def test_get_linux_distro_no_data(
+        self, m_platform_dist, m_platform_system, m_path_exists
+    ):
+        """Verify we get no information if os-release does not exist"""
+        m_platform_dist.return_value = ('', '', '')
+        m_platform_system.return_value = "Linux"
+        m_path_exists.return_value = 0
+        dist = util.get_linux_distro()
+        self.assertEqual(('', '', ''), dist)
+
+    @mock.patch('platform.system')
+    @mock.patch('platform.dist', create=True)
+    def test_get_linux_distro_no_impl(
+        self, m_platform_dist, m_platform_system, m_path_exists
+    ):
+        """Verify we get an empty tuple when no information exists and
+        Exceptions are not propagated"""
+        m_platform_dist.side_effect = Exception()
+        m_platform_system.return_value = "Linux"
+        m_path_exists.return_value = 0
+        dist = util.get_linux_distro()
+        self.assertEqual(('', '', ''), dist)
+
+    @mock.patch('platform.system')
+    @mock.patch('platform.dist', create=True)
+    def test_get_linux_distro_plat_data(
+        self, m_platform_dist, m_platform_system, m_path_exists
+    ):
+        """Verify we get the correct platform information"""
+        m_platform_dist.return_value = ('foo', '1.1', 'aarch64')
+        m_platform_system.return_value = "Linux"
+        m_path_exists.return_value = 0
+        dist = util.get_linux_distro()
+        self.assertEqual(('foo', '1.1', 'aarch64'), dist)
+
+
+class TestGetVariant:
+    @pytest.mark.parametrize(
+        'info, expected_variant',
+        [
+            ({'system': 'Linux', 'dist': ('almalinux',)}, 'almalinux'),
+            ({'system': 'linux', 'dist': ('alpine',)}, 'alpine'),
+            ({'system': 'linux', 'dist': ('arch',)}, 'arch'),
+            ({'system': 'linux', 'dist': ('centos',)}, 'centos'),
+            ({'system': 'linux', 'dist': ('cloudlinux',)}, 'cloudlinux'),
+            ({'system': 'linux', 'dist': ('debian',)}, 'debian'),
+            ({'system': 'linux', 'dist': ('eurolinux',)}, 'eurolinux'),
+            ({'system': 'linux', 'dist': ('fedora',)}, 'fedora'),
+            ({'system': 'linux', 'dist': ('openEuler',)}, 'openeuler'),
+            ({'system': 'linux', 'dist': ('photon',)}, 'photon'),
+            ({'system': 'linux', 'dist': ('rhel',)}, 'rhel'),
+            ({'system': 'linux', 'dist': ('rocky',)}, 'rocky'),
+            ({'system': 'linux', 'dist': ('suse',)}, 'suse'),
+            ({'system': 'linux', 'dist': ('virtuozzo',)}, 'virtuozzo'),
+            ({'system': 'linux', 'dist': ('ubuntu',)}, 'ubuntu'),
+            ({'system': 'linux', 'dist': ('linuxmint',)}, 'ubuntu'),
+            ({'system': 'linux', 'dist': ('mint',)}, 'ubuntu'),
+            ({'system': 'linux', 'dist': ('redhat',)}, 'rhel'),
+            ({'system': 'linux', 'dist': ('opensuse',)}, 'suse'),
+            ({'system': 'linux', 'dist': ('opensuse-tumbleweed',)}, 'suse'),
+            ({'system': 'linux', 'dist': ('opensuse-leap',)}, 'suse'),
+            ({'system': 'linux', 'dist': ('sles',)}, 'suse'),
+            ({'system': 'linux', 'dist': ('sle_hpc',)}, 'suse'),
+            ({'system': 'linux', 'dist': ('my_distro',)}, 'linux'),
+            ({'system': 'Windows', 'dist': ('dontcare',)}, 'windows'),
+            ({'system': 'Darwin', 'dist': ('dontcare',)}, 'darwin'),
+            ({'system': 'Freebsd', 'dist': ('dontcare',)}, 'freebsd'),
+            ({'system': 'Netbsd', 'dist': ('dontcare',)}, 'netbsd'),
+            ({'system': 'Openbsd', 'dist': ('dontcare',)}, 'openbsd'),
+            ({'system': 'Dragonfly', 'dist': ('dontcare',)}, 'dragonfly'),
+        ],
+    )
+    def test_get_variant(self, info, expected_variant):
+        """Verify we get the correct variant name"""
+        assert util._get_variant(info) == expected_variant
+
+
+class TestJsonDumps(CiTestCase):
+    def test_is_str(self):
+        """json_dumps should return a string."""
+        self.assertTrue(isinstance(util.json_dumps({'abc': '123'}), str))
+
+    def test_utf8(self):
+        smiley = '\\ud83d\\ude03'
+        self.assertEqual(
+            {'smiley': smiley}, json.loads(util.json_dumps({'smiley': smiley}))
+        )
+
+    def test_non_utf8(self):
+        blob = b'\xba\x03Qx-#y\xea'
+        self.assertEqual(
+            {'blob': 'ci-b64:' + base64.b64encode(blob).decode('utf-8')},
+            json.loads(util.json_dumps({'blob': blob})),
+        )
+
+
+@mock.patch('os.path.exists')
+class TestIsLXD(CiTestCase):
+    def test_is_lxd_true_on_sock_device(self, m_exists):
+        """When lxd's /dev/lxd/sock exists, is_lxd returns true."""
+        m_exists.return_value = True
+        self.assertTrue(util.is_lxd())
+        m_exists.assert_called_once_with('/dev/lxd/sock')
+
+    def test_is_lxd_false_when_sock_device_absent(self, m_exists):
+        """When lxd's /dev/lxd/sock is absent, is_lxd returns false."""
+        m_exists.return_value = False
+        self.assertFalse(util.is_lxd())
+        m_exists.assert_called_once_with('/dev/lxd/sock')
+
+
+class TestReadCcFromCmdline:
+    @pytest.mark.parametrize(
+        "cmdline,expected_cfg",
+        [
+            # Return None if cmdline has no cc:end_cc content.
+            (CiTestCase.random_string(), None),
+            # Return None if YAML content is empty string.
+            ('foo cc: end_cc bar', None),
+            # Return expected dictionary without trailing end_cc marker.
+            ('foo cc: ssh_pwauth: true', {'ssh_pwauth': True}),
+            # Return expected dictionary w escaped newline and no end_cc.
+            ('foo cc: ssh_pwauth: true\\n', {'ssh_pwauth': True}),
+            # Return expected dictionary of yaml between cc: and end_cc.
+            ('foo cc: ssh_pwauth: true end_cc bar', {'ssh_pwauth': True}),
+            # Return dict with list value w escaped newline, no end_cc.
+            (
+                'cc: ssh_import_id: [smoser, kirkland]\\n',
+                {'ssh_import_id': ['smoser', 'kirkland']},
+            ),
+            # Parse urlencoded brackets in yaml content.
+            (
+                'cc: ssh_import_id: %5Bsmoser, kirkland%5D end_cc',
+                {'ssh_import_id': ['smoser', 'kirkland']},
+            ),
+            # Parse complete urlencoded yaml content.
+            (
+                'cc: ssh_import_id%3A%20%5Buser1%2C%20user2%5D end_cc',
+                {'ssh_import_id': ['user1', 'user2']},
+            ),
+            # Parse nested dictionary in yaml content.
+            (
+                'cc: ntp: {enabled: true, ntp_client: myclient} end_cc',
+                {'ntp': {'enabled': True, 'ntp_client': 'myclient'}},
+            ),
+            # Parse single mapping value in yaml content.
+            ('cc: ssh_import_id: smoser end_cc', {'ssh_import_id': 'smoser'}),
+            # Parse multiline content with multiple mapping and nested lists.
+            (
+                (
+                    'cc: ssh_import_id: [smoser, bob]\\n'
+                    'runcmd: [ [ ls, -l ], echo hi ] end_cc'
+                ),
+                {
+                    'ssh_import_id': ['smoser', 'bob'],
+                    'runcmd': [['ls', '-l'], 'echo hi'],
+                },
+            ),
+            # Parse multiline encoded content w/ mappings and nested lists.
+            (
+                (
+                    'cc: ssh_import_id: %5Bsmoser, bob%5D\\n'
+                    'runcmd: [ [ ls, -l ], echo hi ] end_cc'
+                ),
+                {
+                    'ssh_import_id': ['smoser', 'bob'],
+                    'runcmd': [['ls', '-l'], 'echo hi'],
+                },
+            ),
+            # test encoded escaped newlines work.
+            #
+            # unquote(encoded_content)
+            # 'ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ]'
+            (
+                (
+                    'cc: '
+                    + (
+                        'ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%5Cn'
+                        'runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C'
+                        '%20echo%20hi%20%5D'
+                    )
+                    + ' end_cc'
+                ),
+                {
+                    'ssh_import_id': ['smoser', 'bob'],
+                    'runcmd': [['ls', '-l'], 'echo hi'],
+                },
+            ),
+            # test encoded newlines work.
+            #
+            # unquote(encoded_content)
+            # 'ssh_import_id: [smoser, bob]\nruncmd: [ [ ls, -l ], echo hi ]'
+            (
+                (
+                    "cc: "
+                    + (
+                        'ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%0A'
+                        'runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C'
+                        '%20echo%20hi%20%5D'
+                    )
+                    + ' end_cc'
+                ),
+                {
+                    'ssh_import_id': ['smoser', 'bob'],
+                    'runcmd': [['ls', '-l'], 'echo hi'],
+                },
+            ),
+            # Parse and merge multiple yaml content sections.
+            (
+                (
+                    'cc:ssh_import_id: [smoser, bob] end_cc '
+                    'cc: runcmd: [ [ ls, -l ] ] end_cc'
+                ),
+                {'ssh_import_id': ['smoser', 'bob'], 'runcmd': [['ls', '-l']]},
+            ),
+            # Parse and merge multiple encoded yaml content sections.
+            (
+                (
+                    'cc:ssh_import_id%3A%20%5Bsmoser%5D end_cc '
+                    'cc:runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%20%5D end_cc'
+                ),
+                {'ssh_import_id': ['smoser'], 'runcmd': [['ls', '-l']]},
+            ),
+        ],
+    )
+    def test_read_conf_from_cmdline_config(self, expected_cfg, cmdline):
+        assert expected_cfg == util.read_conf_from_cmdline(cmdline=cmdline)
+
+
+class TestMountCb:
+    """Tests for ``util.mount_cb``.
+
+    These tests consider the "unit" under test to be ``util.mount_cb`` and
+    ``util.unmounter``, which is only used by ``mount_cb``.
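+
+    Mount and unmount commands are exercised through a mocked ``subp``,
+    so no real devices are touched by these tests.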
+
+    TODO: Test default mtype determination
+    TODO: Test the if/else branch that actually performs the mounting operation
+    """
+
+    @pytest.yield_fixture
+    def already_mounted_device_and_mountdict(self):
+        """Mock an already-mounted device, and yield (device, mount dict)"""
+        device = "/dev/fake0"
+        mountpoint = "/mnt/fake"
+        with mock.patch("cloudinit.util.subp.subp"):
+            with mock.patch("cloudinit.util.mounts") as m_mounts:
+                mounts = {device: {"mountpoint": mountpoint}}
+                m_mounts.return_value = mounts
+                yield device, mounts[device]
+
+    @pytest.fixture
+    def already_mounted_device(self, already_mounted_device_and_mountdict):
+        """already_mounted_device_and_mountdict, but return only the device"""
+        return already_mounted_device_and_mountdict[0]
+
+    @pytest.mark.parametrize(
+        "mtype,expected",
+        [
+            # While the filesystem is called iso9660, the mount type is cd9660
+            ("iso9660", "cd9660"),
+            # vfat is generally called "msdos" on BSD
+            ("vfat", "msdos"),
+            # judging from man pages, only FreeBSD has this alias
+            ("msdosfs", "msdos"),
+            # Test happy path
+            ("ufs", "ufs"),
+        ],
+    )
+    @mock.patch("cloudinit.util.is_Linux", autospec=True)
+    @mock.patch("cloudinit.util.is_BSD", autospec=True)
+    @mock.patch("cloudinit.util.subp.subp")
+    @mock.patch("cloudinit.temp_utils.tempdir", autospec=True)
+    def test_normalize_mtype_on_bsd(
+        self, m_tmpdir, m_subp, m_is_BSD, m_is_Linux, mtype, expected
+    ):
+        m_is_BSD.return_value = True
+        m_is_Linux.return_value = False
+        m_tmpdir.return_value.__enter__ = mock.Mock(
+            autospec=True, return_value="/tmp/fake"
+        )
+        m_tmpdir.return_value.__exit__ = mock.Mock(
+            autospec=True, return_value=True
+        )
+        callback = mock.Mock(autospec=True)
+
+        util.mount_cb('/dev/fake0', callback, mtype=mtype)
+        assert (
+            mock.call(
+                [
+                    "mount",
+                    "-o",
+                    "ro",
+                    "-t",
+                    expected,
+                    "/dev/fake0",
+                    "/tmp/fake",
+                ],
+                update_env=None,
+            )
+            in m_subp.call_args_list
+        )
+
+    @pytest.mark.parametrize("invalid_mtype", [int(0), float(0.0), dict()])
+    def test_typeerror_raised_for_invalid_mtype(self, invalid_mtype):
+        with pytest.raises(TypeError):
+            util.mount_cb(mock.Mock(), mock.Mock(), mtype=invalid_mtype)
+
+    @mock.patch("cloudinit.util.subp.subp")
+    def test_already_mounted_does_not_mount_or_umount_anything(
+        self, m_subp, already_mounted_device
+    ):
+        util.mount_cb(already_mounted_device, mock.Mock())
+
+        assert 0 == m_subp.call_count
+
+    @pytest.mark.parametrize("trailing_slash_in_mounts", ["/", ""])
+    def test_already_mounted_calls_callback(
+        self, trailing_slash_in_mounts, already_mounted_device_and_mountdict
+    ):
+        device, mount_dict = already_mounted_device_and_mountdict
+        mountpoint = mount_dict["mountpoint"]
+        mount_dict["mountpoint"] += trailing_slash_in_mounts
+
+        callback = mock.Mock()
+        util.mount_cb(device, callback)
+
+        # The mountpoint passed to callback should always have a trailing
+        # slash, regardless of the input
+        assert [mock.call(mountpoint + "/")] == callback.call_args_list
+
+    def test_already_mounted_calls_callback_with_data(
+        self, already_mounted_device
+    ):
+        callback = mock.Mock()
+        util.mount_cb(
+            already_mounted_device, callback, data=mock.sentinel.data
+        )
+
+        assert [
+            mock.call(mock.ANY, mock.sentinel.data)
+        ] == callback.call_args_list
+
+
+@mock.patch("cloudinit.util.write_file")
+class TestEnsureFile:
+    """Tests for ``cloudinit.util.ensure_file``."""
+
+    def test_parameters_passed_through(self, m_write_file):
+        """Test the parameters in the signature are passed to write_file."""
+        util.ensure_file(
+            mock.sentinel.path,
+            mode=mock.sentinel.mode,
+            preserve_mode=mock.sentinel.preserve_mode,
+        )
+
+        assert 1 == m_write_file.call_count
+        args, kwargs = m_write_file.call_args
+        assert (mock.sentinel.path,) == args
+        assert mock.sentinel.mode == kwargs["mode"]
+        assert mock.sentinel.preserve_mode == kwargs["preserve_mode"]
+
+    @pytest.mark.parametrize(
+        "kwarg,expected",
+        [
+            # Files should be world-readable by default
+            ("mode", 0o644),
+            # The previous behaviour of not preserving mode should be retained
+            ("preserve_mode", False),
+        ],
+    )
+    def test_defaults(self, m_write_file, kwarg, expected):
+        """Test that ensure_file defaults appropriately."""
+        util.ensure_file(mock.sentinel.path)
+
+        assert 1 == m_write_file.call_count
+        _args, kwargs = m_write_file.call_args
+        assert expected == kwargs[kwarg]
+
+    def test_static_parameters_are_passed(self, m_write_file):
+        """Test that the static write_file parameters are passed correctly."""
+        util.ensure_file(mock.sentinel.path)
+
+        assert 1 == m_write_file.call_count
+        _args, kwargs = m_write_file.call_args
+        assert "" == kwargs["content"]
+        assert "ab" == kwargs["omode"]
+
+
+@mock.patch("cloudinit.util.grp.getgrnam")
+@mock.patch("cloudinit.util.os.setgid")
+@mock.patch("cloudinit.util.os.umask")
+class TestRedirectOutputPreexecFn:
+    """This tests specifically the preexec_fn used in redirect_output."""
+
+    @pytest.fixture(params=["outfmt", "errfmt"])
+    def preexec_fn(self, request):
+        """A fixture to gather the preexec_fn used by redirect_output.
+
+        This enables simpler direct testing of it, and parameterises any tests
+        using it to cover both the stdout and stderr code paths.
+        """
+        test_string = "| piped output to invoke subprocess"
+        if request.param == "outfmt":
+            args = (test_string, None)
+        elif request.param == "errfmt":
+            args = (None, test_string)
+        with mock.patch("cloudinit.util.subprocess.Popen") as m_popen:
+            util.redirect_output(*args)
+
+        assert 1 == m_popen.call_count
+        _args, kwargs = m_popen.call_args
+        assert "preexec_fn" in kwargs, "preexec_fn not passed to Popen"
+        return kwargs["preexec_fn"]
+
+    def test_preexec_fn_sets_umask(
+        self, m_os_umask, _m_setgid, _m_getgrnam, preexec_fn
+    ):
+        """preexec_fn should set a mask that avoids world-readable files."""
+        preexec_fn()
+
+        assert [mock.call(0o037)] == m_os_umask.call_args_list
+
+    def test_preexec_fn_sets_group_id_if_adm_group_present(
+        self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn
+    ):
+        """We should setgid to adm if present, so files are owned by them."""
+        fake_group = mock.Mock(gr_gid=mock.sentinel.gr_gid)
+        m_getgrnam.return_value = fake_group
+
+        preexec_fn()
+
+        assert [mock.call("adm")] == m_getgrnam.call_args_list
+        assert [mock.call(mock.sentinel.gr_gid)] == m_setgid.call_args_list
+
+    def test_preexec_fn_handles_absent_adm_group_gracefully(
+        self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn
+    ):
+        """We should handle an absent adm group gracefully."""
+        m_getgrnam.side_effect = KeyError("getgrnam(): name not found: 'adm'")
+
+        preexec_fn()
+
+        assert 0 == m_setgid.call_count
+
+
+class FakeSelinux(object):
     def __init__(self, match_what):
         self.match_what = match_what
         self.restored = []
@@ -175,8 +1463,9 @@ class TestWriteFile(helpers.TestCase):
 
         fake_se = FakeSelinux(my_file)
 
-        with mock.patch.object(importer, 'import_module',
-                               return_value=fake_se) as mockobj:
+        with mock.patch.object(
+            importer, 'import_module', return_value=fake_se
+        ) as mockobj:
             with util.SeLinuxGuard(my_file) as is_on:
                 self.assertTrue(is_on)
 
@@ -261,8 +1550,9 @@ class TestKeyValStrings(helpers.TestCase):
 
 class TestGetCmdline(helpers.TestCase):
     def test_cmdline_reads_debug_env(self):
-        with mock.patch.dict("os.environ",
-                             values={'DEBUG_PROC_CMDLINE': 'abcd 123'}):
+        with mock.patch.dict(
+            "os.environ", values={'DEBUG_PROC_CMDLINE': 'abcd 123'}
+        ):
             ret = util.get_cmdline()
         self.assertEqual("abcd 123", ret)
 
@@ -279,52 +1569,68 @@ class TestLoadYaml(helpers.CiTestCase):
         '''Any unallowed types result in returning default; log the issue.'''
         # for now, anything not in the allowed list just returns the default.
         myyaml = yaml.dump({'1': "one"})
-        self.assertEqual(util.load_yaml(blob=myyaml,
-                                        default=self.mydefault,
-                                        allowed=(str,)),
-                         self.mydefault)
+        self.assertEqual(
+            util.load_yaml(
+                blob=myyaml, default=self.mydefault, allowed=(str,)
+            ),
+            self.mydefault,
+        )
         regex = re.compile(
             r'Yaml load allows \(<(class|type) \'str\'>,\) root types, but'
-            r' got dict')
-        self.assertTrue(regex.search(self.logs.getvalue()),
-                        msg='Missing expected yaml load error')
+            r' got dict'
+        )
+        self.assertTrue(
+            regex.search(self.logs.getvalue()),
+            msg='Missing expected yaml load error',
+        )
 
     def test_bogus_scan_error_returns_default(self):
         '''On Yaml scan error, load_yaml returns the default and logs issue.'''
         badyaml = "1\n 2:"
-        self.assertEqual(util.load_yaml(blob=badyaml,
-                                        default=self.mydefault),
-                         self.mydefault)
+        self.assertEqual(
+            util.load_yaml(blob=badyaml, default=self.mydefault),
+            self.mydefault,
+        )
        self.assertIn(
             'Failed loading yaml blob. Invalid format at line 2 column 3:'
             ' "mapping values are not allowed here',
-            self.logs.getvalue())
+            self.logs.getvalue(),
+        )
 
     def test_bogus_parse_error_returns_default(self):
         '''On Yaml parse error, load_yaml returns default and logs issue.'''
         badyaml = "{}}"
-        self.assertEqual(util.load_yaml(blob=badyaml,
-                                        default=self.mydefault),
-                         self.mydefault)
+        self.assertEqual(
+            util.load_yaml(blob=badyaml, default=self.mydefault),
+            self.mydefault,
+        )
         self.assertIn(
             'Failed loading yaml blob. Invalid format at line 1 column 3:'
             " \"expected '<document start>', but found '}'",
-            self.logs.getvalue())
+            self.logs.getvalue(),
+        )
 
     def test_unsafe_types(self):
         # should not load complex types
-        unsafe_yaml = yaml.dump((1, 2, 3,))
-        self.assertEqual(util.load_yaml(blob=unsafe_yaml,
-                                        default=self.mydefault),
-                         self.mydefault)
+        unsafe_yaml = yaml.dump(
+            (
+                1,
+                2,
+                3,
+            )
+        )
+        self.assertEqual(
+            util.load_yaml(blob=unsafe_yaml, default=self.mydefault),
+            self.mydefault,
+        )
 
     def test_python_unicode(self):
         # complex type of python/unicode is explicitly allowed
         myobj = {'1': "FOOBAR"}
         safe_yaml = yaml.dump(myobj)
-        self.assertEqual(util.load_yaml(blob=safe_yaml,
-                                        default=self.mydefault),
-                         myobj)
+        self.assertEqual(
+            util.load_yaml(blob=safe_yaml, default=self.mydefault), myobj
+        )
 
     def test_none_returns_default(self):
         """If yaml.load returns None, then default should be returned."""
@@ -332,13 +1638,16 @@ class TestLoadYaml(helpers.CiTestCase):
         mdef = self.mydefault
         self.assertEqual(
             [(b, self.mydefault) for b in blobs],
-            [(b, util.load_yaml(blob=b, default=mdef)) for b in blobs])
+            [(b, util.load_yaml(blob=b, default=mdef)) for b in blobs],
+        )
 
 
 class TestMountinfoParsing(helpers.ResourceUsingTestCase):
     def test_invalid_mountinfo(self):
-        line = ("20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root"
-                "rw,errors=remount-ro,data=ordered")
+        line = (
+            "20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root"
+            "rw,errors=remount-ro,data=ordered"
+        )
         elements = line.split()
         for i in range(len(elements) + 1):
             lines = [' '.join(elements[0:i])]
@@ -398,7 +1707,8 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
         m_os.path.exists.return_value = True
         # mock subp command from util.get_mount_info_fs_on_zpool
         zpool_output.return_value = (
-            helpers.readResource('zpool_status_simple.txt'), ''
+            helpers.readResource('zpool_status_simple.txt'),
+            '',
         )
         # save function return values and do asserts
         ret = util.get_device_info_from_zpool('vmzroot')
@@ -431,7 +1741,8 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
         m_os.path.exists.return_value = True
        # mock subp command from util.get_mount_info_fs_on_zpool
        zpool_output.return_value = (
-            helpers.readResource('zpool_status_simple.txt'), 'error'
+            helpers.readResource('zpool_status_simple.txt'),
+            'error',
        )
         # save function return values and do asserts
         ret = util.get_device_info_from_zpool('vmzroot')
@@ -440,7 +1751,9 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
     @mock.patch('cloudinit.subp.subp')
     def test_parse_mount_with_ext(self, mount_out):
         mount_out.return_value = (
-            helpers.readResource('mount_parse_ext.txt'), '')
+            helpers.readResource('mount_parse_ext.txt'),
+            '',
+        )
         # this one is valid and exists in mount_parse_ext.txt
         ret = util.parse_mount('/var')
         self.assertEqual(('/dev/mapper/vg00-lv_var', 'ext4', '/var'), ret)
@@ -457,7 +1770,9 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
     @mock.patch('cloudinit.subp.subp')
     def test_parse_mount_with_zfs(self, mount_out):
         mount_out.return_value = (
-            helpers.readResource('mount_parse_zfs.txt'), '')
+            helpers.readResource('mount_parse_zfs.txt'),
+            '',
+        )
         # this one is valid and exists in mount_parse_zfs.txt
         ret = util.parse_mount('/var')
         self.assertEqual(('vmzroot/ROOT/freebsd/var', 'zfs', '/var'), ret)
@@ -470,20 +1785,21 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
 
 
 class TestIsX86(helpers.CiTestCase):
-
     def test_is_x86_matches_x86_types(self):
         """is_x86 returns True if CPU architecture matches."""
         matched_arches = ['x86_64', 'i386', 'i586', 'i686']
         for arch in matched_arches:
             self.assertTrue(
-                util.is_x86(arch), 'Expected is_x86 for arch "%s"' % arch)
+                util.is_x86(arch), 'Expected is_x86 for arch "%s"' % arch
+            )
 
     def test_is_x86_unmatched_types(self):
         """is_x86 returns False on non-intel x86 architectures."""
         unmatched_arches = ['ia64', '9000/800', 'arm64v71']
         for arch in unmatched_arches:
             self.assertFalse(
-                util.is_x86(arch), 'Expected not is_x86 for arch "%s"' % arch)
+                util.is_x86(arch), 'Expected not is_x86 for arch "%s"' % arch
+            )
 
     @mock.patch('cloudinit.util.os.uname')
     def test_is_x86_calls_uname_for_architecture(self, m_uname):
@@ -493,7 +1809,6 @@ class TestIsX86(helpers.CiTestCase):
 
 
 class TestGetConfigLogfiles(helpers.CiTestCase):
-
     def test_empty_cfg_returns_empty_list(self):
         """An empty config passed to get_config_logfiles returns empty list."""
         self.assertEqual([], util.get_config_logfiles(None))
@@ -502,36 +1817,53 @@ class TestGetConfigLogfiles(helpers.CiTestCase):
     def test_default_log_file_present(self):
         """When default_log_file is set get_config_logfiles finds it."""
         self.assertEqual(
-            ['/my.log'],
-            util.get_config_logfiles({'def_log_file': '/my.log'}))
+            ['/my.log'], util.get_config_logfiles({'def_log_file': '/my.log'})
+        )
 
     def test_output_logs_parsed_when_teeing_files(self):
         """When output configuration is parsed when teeing files."""
         self.assertEqual(
             ['/himom.log', '/my.log'],
-            sorted(util.get_config_logfiles({
-                'def_log_file': '/my.log',
-                'output': {'all': '|tee -a /himom.log'}})))
+            sorted(
+                util.get_config_logfiles(
+                    {
+                        'def_log_file': '/my.log',
+                        'output': {'all': '|tee -a /himom.log'},
+                    }
+                )
+            ),
+        )
 
     def test_output_logs_parsed_when_redirecting(self):
         """When output configuration is parsed when redirecting to a file."""
         self.assertEqual(
             ['/my.log', '/test.log'],
-            sorted(util.get_config_logfiles({
-                'def_log_file': '/my.log',
-                'output': {'all': '>/test.log'}})))
+            sorted(
+                util.get_config_logfiles(
+                    {
+                        'def_log_file': '/my.log',
+                        'output': {'all': '>/test.log'},
+                    }
+                )
+            ),
+        )
 
     def test_output_logs_parsed_when_appending(self):
         """When output configuration is parsed when appending to a file."""
         self.assertEqual(
             ['/my.log', '/test.log'],
-            sorted(util.get_config_logfiles({
-                'def_log_file': '/my.log',
-                'output': {'all': '>> /test.log'}})))
+            sorted(
+                util.get_config_logfiles(
+                    {
+                        'def_log_file': '/my.log',
+                        'output': {'all': '>> /test.log'},
+                    }
+                )
+            ),
+        )
 
 
 class TestMultiLog(helpers.FilesystemMockingTestCase):
-
     def _createConsole(self, root):
         os.mkdir(os.path.join(root, 'dev'))
         open(os.path.join(root, 'dev', 'console'), 'a').close()
@@ -580,8 +1912,9 @@ class TestMultiLog(helpers.FilesystemMockingTestCase):
         log = mock.MagicMock()
         logged_string = 'something very important'
         util.multi_log(logged_string, log=log)
-        self.assertEqual([((mock.ANY, logged_string), {})],
-                         log.log.call_args_list)
+        self.assertEqual(
+            [((mock.ANY, logged_string), {})], log.log.call_args_list
+        )
 
     def test_newlines_stripped_from_log_call(self):
         log = mock.MagicMock()
@@ -602,7 +1935,6 @@ class TestMultiLog(helpers.FilesystemMockingTestCase):
 
 
 class TestMessageFromString(helpers.TestCase):
-
     def test_unicode_not_messed_up(self):
         roundtripped = util.message_from_string('\n').as_string()
         self.assertNotIn('\x00', roundtripped)
@@ -618,8 +1950,9 @@ class TestReadSeeded(helpers.TestCase):
         ud = b"userdatablob"
         vd = b"vendordatablob"
         helpers.populate_dir(
-            self.tmp, {'meta-data': "key1: val1", 'user-data': ud,
-                       'vendor-data': vd})
+            self.tmp,
+            {'meta-data': "key1: val1", 'user-data': ud, 'vendor-data': vd},
+        )
         sdir = self.tmp + os.path.sep
         (found_md, found_ud, found_vd) = util.read_seeded(sdir)
 
@@ -638,7 +1971,8 @@ class TestReadSeededWithoutVendorData(helpers.TestCase):
         ud = b"userdatablob"
         vd = None
         helpers.populate_dir(
-            self.tmp, {'meta-data': "key1: val1", 'user-data': ud})
+            self.tmp, {'meta-data': "key1: val1", 'user-data': ud}
+        )
         sdir = self.tmp + os.path.sep
         (found_md, found_ud, found_vd) = util.read_seeded(sdir)
 
@@ -649,6 +1983,7 @@ class TestReadSeededWithoutVendorData(helpers.TestCase):
 
 class TestEncode(helpers.TestCase):
     """Test the encoding functions"""
+
     def test_decode_binary_plain_text_with_hex(self):
         blob = 'BOOTABLE_FLAG=\x80init=/bin/systemd'
         text = util.decode_binary(blob)
@@ -657,12 +1992,14 @@ class TestEncode(helpers.TestCase):
 
 
 class TestProcessExecutionError(helpers.TestCase):
-
-    template = ('{description}\n'
-                'Command: {cmd}\n'
-                'Exit code: {exit_code}\n'
-                'Reason: {reason}\n'
-                'Stdout: {stdout}\n'
-                'Stderr: {stderr}')
+    template = (
+        '{description}\n'
+        'Command: {cmd}\n'
+        'Exit code: {exit_code}\n'
+        'Reason: {reason}\n'
+        'Stdout: {stdout}\n'
+        'Stderr: {stderr}'
+    )
     empty_attr = '-'
     empty_description = 'Unexpected error while running command.'
 
@@ -671,23 +2008,37 @@ class TestProcessExecutionError(helpers.TestCase):
         msg = 'abc\ndef'
         formatted = 'abc\n{0}def'.format(' ' * 4)
         self.assertEqual(error._indent_text(msg, indent_level=4), formatted)
-        self.assertEqual(error._indent_text(msg.encode(), indent_level=4),
-                         formatted.encode())
+        self.assertEqual(
+            error._indent_text(msg.encode(), indent_level=4),
+            formatted.encode(),
+        )
         self.assertIsInstance(
-            error._indent_text(msg.encode()), type(msg.encode()))
+            error._indent_text(msg.encode()), type(msg.encode())
+        )
 
     def test_pexec_error_type(self):
         self.assertIsInstance(subp.ProcessExecutionError(), IOError)
 
     def test_pexec_error_empty_msgs(self):
         error = subp.ProcessExecutionError()
-        self.assertTrue(all(attr == self.empty_attr for attr in
-                            (error.stderr, error.stdout, error.reason)))
+        self.assertTrue(
+            all(
+                attr == self.empty_attr
+                for attr in (error.stderr, error.stdout, error.reason)
+            )
+        )
         self.assertEqual(error.description, self.empty_description)
-        self.assertEqual(str(error), self.template.format(
-            description=self.empty_description, exit_code=self.empty_attr,
-            reason=self.empty_attr, stdout=self.empty_attr,
-            stderr=self.empty_attr, cmd=self.empty_attr))
+        self.assertEqual(
+            str(error),
+            self.template.format(
+                description=self.empty_description,
+                exit_code=self.empty_attr,
+                reason=self.empty_attr,
+                stdout=self.empty_attr,
+                stderr=self.empty_attr,
+                cmd=self.empty_attr,
+            ),
+        )
 
     def test_pexec_error_single_line_msgs(self):
         stdout_msg = 'out out'
@@ -695,33 +2046,46 @@ class TestProcessExecutionError(helpers.TestCase):
         cmd = 'test command'
         exit_code = 3
         error = subp.ProcessExecutionError(
-            stdout=stdout_msg, stderr=stderr_msg, exit_code=3, cmd=cmd)
-        self.assertEqual(str(error), self.template.format(
-            description=self.empty_description, stdout=stdout_msg,
-            stderr=stderr_msg, exit_code=str(exit_code),
-            reason=self.empty_attr, cmd=cmd))
+            stdout=stdout_msg, stderr=stderr_msg, exit_code=3, cmd=cmd
+        )
+        self.assertEqual(
+            str(error),
+            self.template.format(
+                description=self.empty_description,
+                stdout=stdout_msg,
+                stderr=stderr_msg,
+                exit_code=str(exit_code),
+                reason=self.empty_attr,
+                cmd=cmd,
+            ),
+        )
 
     def test_pexec_error_multi_line_msgs(self):
         # make sure bytes are handled properly when formatting
         stdout_msg = 'multi\nline\noutput message'.encode()
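+        # stderr stays a str so both bytes and str formatting paths run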
         stderr_msg = 'multi\nline\nerror message\n\n\n'
         error = subp.ProcessExecutionError(
-            stdout=stdout_msg, stderr=stderr_msg)
+            stdout=stdout_msg, stderr=stderr_msg
+        )
         self.assertEqual(
             str(error),
-            '\n'.join((
-                '{description}',
-                'Command: {empty_attr}',
-                'Exit code: {empty_attr}',
-                'Reason: {empty_attr}',
-                'Stdout: multi',
-                '        line',
-                '        output message',
-                'Stderr: multi',
-                '        line',
-                '        error message',
-            )).format(description=self.empty_description,
-                      empty_attr=self.empty_attr))
+            '\n'.join(
+                (
+                    '{description}',
+                    'Command: {empty_attr}',
+                    'Exit code: {empty_attr}',
+                    'Reason: {empty_attr}',
+                    'Stdout: multi',
+                    '        line',
+                    '        output message',
+                    'Stderr: multi',
+                    '        line',
+                    '        error message',
+                )
+            ).format(
+                description=self.empty_description, empty_attr=self.empty_attr
+            ),
+        )
 
 
 class TestSystemIsSnappy(helpers.FilesystemMockingTestCase):
@@ -758,7 +2122,8 @@ class TestSystemIsSnappy(helpers.FilesystemMockingTestCase):
             "BOOT_IMAGE=(loop)/kernel.img root=LABEL=writable "
             "snap_core=core_x1.snap snap_kernel=pc-kernel_x1.snap ro "
             "net.ifnames=0 init=/lib/systemd/systemd console=tty1 "
-            "console=ttyS0 panic=-1")
+            "console=ttyS0 panic=-1"
+        )
         m_cmdline.return_value = cmdline
         self.assertTrue(util.system_is_snappy())
         self.assertTrue(m_cmdline.call_count > 0)
@@ -777,8 +2142,7 @@ class TestSystemIsSnappy(helpers.FilesystemMockingTestCase):
         m_cmdline.return_value = 'root=/dev/sda'
         root_d = self.tmp_dir()
         content = '\n'.join(["[Foo]", "source = 'ubuntu-core'", ""])
-        helpers.populate_dir(
-            root_d, {'etc/system-image/channel.ini': content})
+        helpers.populate_dir(root_d, {'etc/system-image/channel.ini': content})
         self.reRoot(root_d)
         self.assertTrue(util.system_is_snappy())
 
@@ -788,7 +2152,8 @@ class TestSystemIsSnappy(helpers.FilesystemMockingTestCase):
         m_cmdline.return_value = 'root=/dev/sda'
         root_d = self.tmp_dir()
         helpers.populate_dir(
-            root_d, {'etc/system-image/config.d/my.file': "_unused"})
+            root_d, {'etc/system-image/config.d/my.file': "_unused"}
+        )
         self.reRoot(root_d)
         self.assertTrue(util.system_is_snappy())
 
@@ -798,18 +2163,24 @@ class TestLoadShellContent(helpers.TestCase):
         """Shell comments should be allowed in the content."""
         self.assertEqual(
             {'key1': 'val1', 'key2': 'val2', 'key3': 'val3 #tricky'},
-            util.load_shell_content('\n'.join([
-                "#top of file comment",
-                "key1=val1 #this is a comment",
-                "# second comment",
-                'key2="val2" # inlin comment'
-                '#badkey=wark',
-                'key3="val3 #tricky"',
-                ''])))
+            util.load_shell_content(
+                '\n'.join(
+                    [
+                        "#top of file comment",
+                        "key1=val1 #this is a comment",
+                        "# second comment",
+                        'key2="val2" # inlin comment#badkey=wark',
+                        'key3="val3 #tricky"',
+                        '',
+                    ]
+                )
+            ),
+        )
 
 
 class TestGetProcEnv(helpers.TestCase):
     """test get_proc_env."""
+
     null = b'\x00'
     simple1 = b'HOME=/'
     simple2 = b'PATH=/bin:/sbin'
@@ -824,14 +2195,19 @@ class TestGetProcEnv(helpers.TestCase):
     def test_non_utf8_in_environment(self, m_load_file):
         """env may have non utf-8 decodable content."""
         content = self.null.join(
-            (self.bootflag, self.simple1, self.simple2, self.mixed))
+            (self.bootflag, self.simple1, self.simple2, self.mixed)
+        )
         m_load_file.return_value = content
 
         self.assertEqual(
-            {'BOOTABLE_FLAG': self._val_decoded(self.bootflag),
-             'HOME': '/', 'PATH': '/bin:/sbin',
-             'MIXED': self._val_decoded(self.mixed)},
-            util.get_proc_env(1))
+            {
+                'BOOTABLE_FLAG': self._val_decoded(self.bootflag),
+                'HOME': '/',
+                'PATH': '/bin:/sbin',
+                'MIXED': self._val_decoded(self.mixed),
+            },
+            util.get_proc_env(1),
+        )
         self.assertEqual(1, m_load_file.call_count)
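+        # i.e. the proc environ file should be read exactly once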
@mock.patch("cloudinit.util.load_file") @@ -843,7 +2219,8 @@ class TestGetProcEnv(helpers.TestCase): self.assertEqual( dict([t.split(b'=') for t in lines]), - util.get_proc_env(1, encoding=None)) + util.get_proc_env(1, encoding=None), + ) self.assertEqual(1, m_load_file.call_count) @mock.patch("cloudinit.util.load_file") @@ -852,8 +2229,8 @@ class TestGetProcEnv(helpers.TestCase): content = self.null.join((self.simple1, self.simple2)) m_load_file.return_value = content self.assertEqual( - {'HOME': '/', 'PATH': '/bin:/sbin'}, - util.get_proc_env(1)) + {'HOME': '/', 'PATH': '/bin:/sbin'}, util.get_proc_env(1) + ) self.assertEqual(1, m_load_file.call_count) @mock.patch("cloudinit.util.load_file") @@ -871,14 +2248,15 @@ class TestGetProcEnv(helpers.TestCase): self.assertEqual(my_ppid, util.get_proc_ppid(my_pid)) -class TestKernelVersion(): +class TestKernelVersion: """test kernel version function""" params = [ ('5.6.19-300.fc32.x86_64', (5, 6)), ('4.15.0-101-generic', (4, 15)), ('3.10.0-1062.12.1.vz7.131.10', (3, 10)), - ('4.18.0-144.el8.x86_64', (4, 18))] + ('4.18.0-144.el8.x86_64', (4, 18)), + ] @mock.patch('os.uname') @pytest.mark.parametrize("uname_release,expected", params) @@ -892,29 +2270,27 @@ class TestFindDevs: def test_find_devs_with(self, m_subp): m_subp.return_value = ( '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"', - '' + '', ) devlist = util.find_devs_with() assert devlist == [ - '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"'] + '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"' + ] devlist = util.find_devs_with("LABEL_FATBOOT=A_LABEL") assert devlist == [ - '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"'] + '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"' + ] @mock.patch('cloudinit.subp.subp') def test_find_devs_with_openbsd(self, m_subp): - m_subp.return_value = ( - 'cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', '' - ) + m_subp.return_value = ('cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', '') devlist = util.find_devs_with_openbsd() assert devlist == ['/dev/cd0a', '/dev/sd1i'] @mock.patch('cloudinit.subp.subp') def test_find_devs_with_openbsd_with_criteria(self, m_subp): - m_subp.return_value = ( - 'cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', '' - ) + m_subp.return_value = ('cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', '') devlist = util.find_devs_with_openbsd(criteria="TYPE=iso9660") assert devlist == ['/dev/cd0a'] @@ -923,7 +2299,8 @@ class TestFindDevs: assert devlist == ['/dev/cd0a', '/dev/sd1i'] @pytest.mark.parametrize( - 'criteria,expected_devlist', ( + 'criteria,expected_devlist', + ( (None, ['/dev/msdosfs/EFISYS', '/dev/iso9660/config-2']), ('TYPE=iso9660', ['/dev/iso9660/config-2']), ('TYPE=vfat', ['/dev/msdosfs/EFISYS']), @@ -940,19 +2317,23 @@ class TestFindDevs: elif pattern == "/dev/iso9660/*": return iso9660 raise Exception + m_glob.side_effect = fake_glob devlist = util.find_devs_with_freebsd(criteria=criteria) assert devlist == expected_devlist @pytest.mark.parametrize( - 'criteria,expected_devlist', ( + 'criteria,expected_devlist', + ( (None, ['/dev/ld0', '/dev/dk0', '/dev/dk1', '/dev/cd0']), ('TYPE=iso9660', ['/dev/cd0']), ('TYPE=vfat', ["/dev/ld0", "/dev/dk0", "/dev/dk1"]), - ('LABEL_FATBOOT=A_LABEL', # lp: #1841466 - ['/dev/ld0', '/dev/dk0', '/dev/dk1', '/dev/cd0']), - ) + ( + 'LABEL_FATBOOT=A_LABEL', # lp: #1841466 + ['/dev/ld0', '/dev/dk0', '/dev/dk1', '/dev/cd0'], + ), + ), ) @mock.patch("cloudinit.subp.subp") def test_find_devs_with_netbsd(self, m_subp, criteria, expected_devlist): @@ -1000,21 
@@ -1000,21 +2381,24 @@ class TestFindDevs:
         assert devlist == expected_devlist

     @pytest.mark.parametrize(
-        'criteria,expected_devlist', (
+        'criteria,expected_devlist',
+        (
             (None, ['/dev/vbd0', '/dev/cd0', '/dev/acd0']),
             ('TYPE=iso9660', ['/dev/cd0', '/dev/acd0']),
             ('TYPE=vfat', ['/dev/vbd0']),
-            ('LABEL_FATBOOT=A_LABEL',  # lp: #1841466
-             ['/dev/vbd0', '/dev/cd0', '/dev/acd0']),
-        )
+            (
+                'LABEL_FATBOOT=A_LABEL',  # lp: #1841466
+                ['/dev/vbd0', '/dev/cd0', '/dev/acd0'],
+            ),
+        ),
     )
     @mock.patch("cloudinit.subp.subp")
-    def test_find_devs_with_dragonflybsd(self, m_subp, criteria,
-                                         expected_devlist):
-        m_subp.return_value = (
-            'md2 md1 cd0 vbd0 acd0 vn3 vn2 vn1 vn0 md0', ''
-        )
+    def test_find_devs_with_dragonflybsd(
+        self, m_subp, criteria, expected_devlist
+    ):
+        m_subp.return_value = ('md2 md1 cd0 vbd0 acd0 vn3 vn2 vn1 vn0 md0', '')
         devlist = util.find_devs_with_dragonflybsd(criteria=criteria)
         assert devlist == expected_devlist
+
 # vi: ts=4 expandtab
diff --git a/tests/unittests/test_version.py b/tests/unittests/test_version.py
new file mode 100644
index 00000000..ed66b09f
--- /dev/null
+++ b/tests/unittests/test_version.py
@@ -0,0 +1,31 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from unittest import mock
+
+from tests.unittests.helpers import CiTestCase
+from cloudinit import version
+
+
+class TestExportsFeatures(CiTestCase):
+    def test_has_network_config_v1(self):
+        self.assertIn('NETWORK_CONFIG_V1', version.FEATURES)
+
+    def test_has_network_config_v2(self):
+        self.assertIn('NETWORK_CONFIG_V2', version.FEATURES)
+
+
+class TestVersionString(CiTestCase):
+    @mock.patch("cloudinit.version._PACKAGED_VERSION",
+                "17.2-3-gb05b9972-0ubuntu1")
+    def test_package_version_respected(self):
+        """If _PACKAGED_VERSION is filled in, then it should be returned."""
+        self.assertEqual("17.2-3-gb05b9972-0ubuntu1", version.version_string())
+
+    @mock.patch("cloudinit.version._PACKAGED_VERSION", "@@PACKAGED_VERSION@@")
+    @mock.patch("cloudinit.version.__VERSION__", "17.2")
+    def test_package_version_skipped(self):
+        """If _PACKAGED_VERSION is not modified, then return __VERSION__."""
+        self.assertEqual("17.2", version.version_string())
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_vmware/__init__.py b/tests/unittests/test_vmware/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests/unittests/test_vmware/test_custom_script.py b/tests/unittests/test_vmware/test_custom_script.py
deleted file mode 100644
index f89f8157..00000000
--- a/tests/unittests/test_vmware/test_custom_script.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2017-2019 VMware INC.
-#
-# Author: Maitreyee Saikia
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import os
-import stat
-from cloudinit import util
-from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
-    CustomScriptConstant,
-    CustomScriptNotFound,
-    PreCustomScript,
-    PostCustomScript,
-)
-from cloudinit.tests.helpers import CiTestCase, mock
-
-
-class TestVmwareCustomScript(CiTestCase):
-    def setUp(self):
-        self.tmpDir = self.tmp_dir()
-        # Mock the tmpDir as the root dir in VM.
-        self.execDir = os.path.join(self.tmpDir, ".customization")
-        self.execScript = os.path.join(self.execDir,
-                                       ".customize.sh")
-
-    def test_prepare_custom_script(self):
-        """
-        This test is designed to verify the behavior based on the presence of
-        custom script. Mainly needed for scenario where a custom script is
-        expected, but was not properly copied. "CustomScriptNotFound" exception
-        is raised in such cases.
-        """
-        # Custom script does not exist.
-        preCust = PreCustomScript("random-vmw-test", self.tmpDir)
-        self.assertEqual("random-vmw-test", preCust.scriptname)
-        self.assertEqual(self.tmpDir, preCust.directory)
-        self.assertEqual(self.tmp_path("random-vmw-test", self.tmpDir),
-                         preCust.scriptpath)
-        with self.assertRaises(CustomScriptNotFound):
-            preCust.prepare_script()
-
-        # Custom script exists.
-        custScript = self.tmp_path("test-cust", self.tmpDir)
-        util.write_file(custScript, "test-CR-strip\r\r")
-        with mock.patch.object(CustomScriptConstant,
-                               "CUSTOM_TMP_DIR",
-                               self.execDir):
-            with mock.patch.object(CustomScriptConstant,
-                                   "CUSTOM_SCRIPT",
-                                   self.execScript):
-                postCust = PostCustomScript("test-cust",
-                                            self.tmpDir,
-                                            self.tmpDir)
-                self.assertEqual("test-cust", postCust.scriptname)
-                self.assertEqual(self.tmpDir, postCust.directory)
-                self.assertEqual(custScript, postCust.scriptpath)
-                postCust.prepare_script()
-
-                # Custom script is copied with exec privilege
-                self.assertTrue(os.path.exists(self.execScript))
-                st = os.stat(self.execScript)
-                self.assertTrue(st.st_mode & stat.S_IEXEC)
-                with open(self.execScript, "r") as f:
-                    content = f.read()
-                self.assertEqual(content, "test-CR-strip")
-                # Check if all carraige returns are stripped from script.
-                self.assertFalse("\r" in content)
-
-    def test_execute_post_cust(self):
-        """
-        This test is designed to verify the behavior after execute post
-        customization.
-        """
-        # Prepare the customize package
-        postCustRun = self.tmp_path("post-customize-guest.sh", self.tmpDir)
-        util.write_file(postCustRun, "This is the script to run post cust")
-        userScript = self.tmp_path("test-cust", self.tmpDir)
-        util.write_file(userScript, "This is the post cust script")
-
-        # Mock the cc_scripts_per_instance dir and marker file.
-        # Create another tmp dir for cc_scripts_per_instance.
-        ccScriptDir = self.tmp_dir()
-        ccScript = os.path.join(ccScriptDir, "post-customize-guest.sh")
-        markerFile = os.path.join(self.tmpDir, ".markerFile")
-        with mock.patch.object(CustomScriptConstant,
-                               "CUSTOM_TMP_DIR",
-                               self.execDir):
-            with mock.patch.object(CustomScriptConstant,
-                                   "CUSTOM_SCRIPT",
-                                   self.execScript):
-                with mock.patch.object(CustomScriptConstant,
-                                       "POST_CUSTOM_PENDING_MARKER",
-                                       markerFile):
-                    postCust = PostCustomScript("test-cust",
-                                                self.tmpDir,
-                                                ccScriptDir)
-                    postCust.execute()
-                    # Check cc_scripts_per_instance and marker file
-                    # are created.
-                    self.assertTrue(os.path.exists(ccScript))
-                    with open(ccScript, "r") as f:
-                        content = f.read()
-                    self.assertEqual(content,
-                                     "This is the script to run post cust")
-                    self.assertTrue(os.path.exists(markerFile))
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_vmware/test_guestcust_util.py b/tests/unittests/test_vmware/test_guestcust_util.py
deleted file mode 100644
index c8b59d83..00000000
--- a/tests/unittests/test_vmware/test_guestcust_util.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Copyright (C) 2019 Canonical Ltd.
-# Copyright (C) 2019 VMware INC.
-#
-# Author: Xiaofeng Wang
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit import subp
-from cloudinit.sources.helpers.vmware.imc.config import Config
-from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
-from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
-    get_tools_config,
-    set_gc_status,
-)
-from cloudinit.tests.helpers import CiTestCase, mock
-
-
-class TestGuestCustUtil(CiTestCase):
-    def test_get_tools_config_not_installed(self):
-        """
-        This test is designed to verify the behavior if vmware-toolbox-cmd
-        is not installed.
-        """
-        with mock.patch.object(subp, 'which', return_value=None):
-            self.assertEqual(
-                get_tools_config('section', 'key', 'defaultVal'), 'defaultVal')
-
-    def test_get_tools_config_internal_exception(self):
-        """
-        This test is designed to verify the behavior if internal exception
-        is raised.
-        """
-        with mock.patch.object(subp, 'which', return_value='/dummy/path'):
-            with mock.patch.object(subp, 'subp',
-                                   return_value=('key=value', b''),
-                                   side_effect=subp.ProcessExecutionError(
-                                       "subp failed", exit_code=99)):
-                # verify return value is 'defaultVal', not 'value'.
-                self.assertEqual(
-                    get_tools_config('section', 'key', 'defaultVal'),
-                    'defaultVal')
-
-    def test_get_tools_config_normal(self):
-        """
-        This test is designed to verify the value could be parsed from
-        key = value of the given [section]
-        """
-        with mock.patch.object(subp, 'which', return_value='/dummy/path'):
-            # value is not blank
-            with mock.patch.object(subp, 'subp',
-                                   return_value=('key = value ', b'')):
-                self.assertEqual(
-                    get_tools_config('section', 'key', 'defaultVal'),
-                    'value')
-            # value is blank
-            with mock.patch.object(subp, 'subp',
-                                   return_value=('key = ', b'')):
-                self.assertEqual(
-                    get_tools_config('section', 'key', 'defaultVal'),
-                    '')
-            # value contains =
-            with mock.patch.object(subp, 'subp',
-                                   return_value=('key=Bar=Wark', b'')):
-                self.assertEqual(
-                    get_tools_config('section', 'key', 'defaultVal'),
-                    'Bar=Wark')
-
-            # value contains specific characters
-            with mock.patch.object(subp, 'subp',
-                                   return_value=('[a] b.c_d=e-f', b'')):
-                self.assertEqual(
-                    get_tools_config('section', 'key', 'defaultVal'),
-                    'e-f')
-
-    def test_set_gc_status(self):
-        """
-        This test is designed to verify the behavior of set_gc_status
-        """
-        # config is None, return None
-        self.assertEqual(set_gc_status(None, 'Successful'), None)
-
-        # post gc status is NO, return None
-        cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-        conf = Config(cf)
-        self.assertEqual(set_gc_status(conf, 'Successful'), None)
-
-        # post gc status is YES, subp is called to execute command
-        cf._insertKey("MISC|POST-GC-STATUS", "YES")
-        conf = Config(cf)
-        with mock.patch.object(subp, 'subp',
-                               return_value=('ok', b'')) as mockobj:
-            self.assertEqual(
-                set_gc_status(conf, 'Successful'), ('ok', b''))
-            mockobj.assert_called_once_with(
-                ['vmware-rpctool', 'info-set guestinfo.gc.status Successful'],
-                rcs=[0])
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py
deleted file mode 100644
index 430cc69f..00000000
--- a/tests/unittests/test_vmware_config_file.py
+++ /dev/null
@@ -1,545 +0,0 @@
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2016 VMware INC.
-#
-# Author: Sankar Tanguturi
-#         Pengpeng Sun
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import logging
-import os
-import sys
-import tempfile
-import textwrap
-
-from cloudinit.sources.DataSourceOVF import get_network_config_from_conf
-from cloudinit.sources.DataSourceOVF import read_vmware_imc
-from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProtoEnum
-from cloudinit.sources.helpers.vmware.imc.config import Config
-from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
-from cloudinit.sources.helpers.vmware.imc.config_nic import gen_subnet
-from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator
-from cloudinit.tests.helpers import CiTestCase
-
-logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
-logger = logging.getLogger(__name__)
-
-
-class TestVmwareConfigFile(CiTestCase):
-
-    def test_utility_methods(self):
-        """Tests basic utility methods of ConfigFile class"""
-        cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
-        cf.clear()
-
-        self.assertEqual(0, len(cf), "clear size")
-
-        cf._insertKey("  PASSWORD|-PASS ", "  foo  ")
-        cf._insertKey("BAR", "   ")
-
-        self.assertEqual(2, len(cf), "insert size")
-        self.assertEqual('foo', cf["PASSWORD|-PASS"], "password")
-        self.assertTrue("PASSWORD|-PASS" in cf, "hasPassword")
-        self.assertFalse(cf.should_keep_current_value("PASSWORD|-PASS"),
-                         "keepPassword")
-        self.assertFalse(cf.should_remove_current_value("PASSWORD|-PASS"),
-                         "removePassword")
-        self.assertFalse("FOO" in cf, "hasFoo")
-        self.assertTrue(cf.should_keep_current_value("FOO"), "keepFoo")
-        self.assertFalse(cf.should_remove_current_value("FOO"), "removeFoo")
-        self.assertTrue("BAR" in cf, "hasBar")
-        self.assertFalse(cf.should_keep_current_value("BAR"), "keepBar")
-        self.assertTrue(cf.should_remove_current_value("BAR"), "removeBar")
-
-    def test_datasource_instance_id(self):
-        """Tests instance id for the DatasourceOVF"""
-        cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
-        instance_id_prefix = 'iid-vmware-'
-
-        conf = Config(cf)
-
-        (md1, _, _) = read_vmware_imc(conf)
-        self.assertIn(instance_id_prefix, md1["instance-id"])
-        self.assertEqual(md1["instance-id"], 'iid-vmware-imc')
-
-        (md2, _, _) = read_vmware_imc(conf)
-        self.assertIn(instance_id_prefix, md2["instance-id"])
-        self.assertEqual(md2["instance-id"], 'iid-vmware-imc')
-
-        self.assertEqual(md2["instance-id"], md1["instance-id"])
-
-    def test_configfile_static_2nics(self):
-        """Tests Config class for a configuration with two static NICs."""
-        cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg")
-
-        conf = Config(cf)
-
-        self.assertEqual('myhost1', conf.host_name, "hostName")
-        self.assertEqual('Africa/Abidjan', conf.timezone, "tz")
-        self.assertTrue(conf.utc, "utc")
-
-        self.assertEqual(['10.20.145.1', '10.20.145.2'],
-                         conf.name_servers,
-                         "dns")
-        self.assertEqual(['eng.vmware.com', 'proxy.vmware.com'],
-                         conf.dns_suffixes,
-                         "suffixes")
-
-        nics = conf.nics
-        ipv40 = nics[0].staticIpv4
-
-        self.assertEqual(2, len(nics), "nics")
-        self.assertEqual('NIC1', nics[0].name, "nic0")
-        self.assertEqual('00:50:56:a6:8c:08', nics[0].mac, "mac0")
-        self.assertEqual(BootProtoEnum.STATIC, nics[0].bootProto, "bootproto0")
-        self.assertEqual('10.20.87.154', ipv40[0].ip, "ipv4Addr0")
-        self.assertEqual('255.255.252.0', ipv40[0].netmask, "ipv4Mask0")
-        self.assertEqual(2, len(ipv40[0].gateways), "ipv4Gw0")
-        self.assertEqual('10.20.87.253', ipv40[0].gateways[0], "ipv4Gw0_0")
-        self.assertEqual('10.20.87.105', ipv40[0].gateways[1], "ipv4Gw0_1")
-
-        self.assertEqual(1, len(nics[0].staticIpv6), "ipv6Cnt0")
-        self.assertEqual('fc00:10:20:87::154',
-                         nics[0].staticIpv6[0].ip,
-                         "ipv6Addr0")
-
-        self.assertEqual('NIC2', nics[1].name, "nic1")
-        self.assertTrue(not nics[1].staticIpv6, "ipv61 dhcp")
-
-    def test_config_file_dhcp_2nics(self):
-        """Tests Config class for a configuration with two DHCP NICs."""
-        cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
-        conf = Config(cf)
-        nics = conf.nics
-        self.assertEqual(2, len(nics), "nics")
-        self.assertEqual('NIC1', nics[0].name, "nic0")
-        self.assertEqual('00:50:56:a6:8c:08', nics[0].mac, "mac0")
-        self.assertEqual(BootProtoEnum.DHCP, nics[0].bootProto, "bootproto0")
-
-    def test_config_password(self):
-        cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
-        cf._insertKey("PASSWORD|-PASS", "test-password")
-        cf._insertKey("PASSWORD|RESET", "no")
-
-        conf = Config(cf)
-        self.assertEqual('test-password', conf.admin_password, "password")
-        self.assertFalse(conf.reset_password, "do not reset password")
-
-    def test_config_reset_passwd(self):
-        cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
-        cf._insertKey("PASSWORD|-PASS", "test-password")
-        cf._insertKey("PASSWORD|RESET", "random")
-
-        conf = Config(cf)
-        with self.assertRaises(ValueError):
-            pw = conf.reset_password
-            self.assertIsNone(pw)
-
-        cf.clear()
-        cf._insertKey("PASSWORD|RESET", "yes")
-        self.assertEqual(1, len(cf), "insert size")
-
-        conf = Config(cf)
-        self.assertTrue(conf.reset_password, "reset password")
-
-    def test_get_config_nameservers(self):
-        """Tests DNS and nameserver settings in a configuration."""
-        cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg")
-
-        config = Config(cf)
-
-        network_config = get_network_config_from_conf(config, False)
-
-        self.assertEqual(1, network_config.get('version'))
-
-        config_types = network_config.get('config')
-        name_servers = None
-        dns_suffixes = None
-
-        for type in config_types:
-            if type.get('type') == 'nameserver':
-                name_servers = type.get('address')
-                dns_suffixes = type.get('search')
-                break
-
-        self.assertEqual(['10.20.145.1', '10.20.145.2'],
-                         name_servers,
-                         "dns")
-        self.assertEqual(['eng.vmware.com', 'proxy.vmware.com'],
-                         dns_suffixes,
-                         "suffixes")
-
-    def test_gen_subnet(self):
-        """Tests if gen_subnet properly calculates network subnet from
-           IPv4 address and netmask"""
-        ip_subnet_list = [['10.20.87.253', '255.255.252.0', '10.20.84.0'],
-                          ['10.20.92.105', '255.255.252.0', '10.20.92.0'],
-                          ['192.168.0.10', '255.255.0.0', '192.168.0.0']]
-        for entry in ip_subnet_list:
-            self.assertEqual(entry[2], gen_subnet(entry[0], entry[1]),
-                             "Subnet for a specified ip and netmask")
-
-    def test_get_config_dns_suffixes(self):
-        """Tests if get_network_config_from_conf properly
-           generates nameservers and dns settings from a
-           specified configuration"""
-        cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
-        config = Config(cf)
-
-        network_config = get_network_config_from_conf(config, False)
-
-        self.assertEqual(1, network_config.get('version'))
-
-        config_types = network_config.get('config')
-        name_servers = None
-        dns_suffixes = None
-
-        for type in config_types:
-            if type.get('type') == 'nameserver':
-                name_servers = type.get('address')
-                dns_suffixes = type.get('search')
-                break
-
-        self.assertEqual([],
-                         name_servers,
-                         "dns")
-        self.assertEqual(['eng.vmware.com'],
-                         dns_suffixes,
-                         "suffixes")
-
-    def test_get_nics_list_dhcp(self):
-        """Tests if NicConfigurator properly calculates network subnets
-           for a configuration with a list of DHCP NICs"""
-        cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-
-        config = Config(cf)
-
-        nicConfigurator = NicConfigurator(config.nics, False)
-        nics_cfg_list = nicConfigurator.generate()
-
-        self.assertEqual(2, len(nics_cfg_list), "number of config elements")
-
-        nic1 = {'name': 'NIC1'}
-        nic2 = {'name': 'NIC2'}
-        for cfg in nics_cfg_list:
-            if cfg.get('name') == nic1.get('name'):
-                nic1.update(cfg)
-            elif cfg.get('name') == nic2.get('name'):
-                nic2.update(cfg)
-
-        self.assertEqual('physical', nic1.get('type'), 'type of NIC1')
-        self.assertEqual('NIC1', nic1.get('name'), 'name of NIC1')
-        self.assertEqual('00:50:56:a6:8c:08', nic1.get('mac_address'),
-                         'mac address of NIC1')
-        subnets = nic1.get('subnets')
-        self.assertEqual(1, len(subnets), 'number of subnets for NIC1')
-        subnet = subnets[0]
-        self.assertEqual('dhcp', subnet.get('type'), 'DHCP type for NIC1')
-        self.assertEqual('auto', subnet.get('control'), 'NIC1 Control type')
-
-        self.assertEqual('physical', nic2.get('type'), 'type of NIC2')
-        self.assertEqual('NIC2', nic2.get('name'), 'name of NIC2')
-        self.assertEqual('00:50:56:a6:5a:de', nic2.get('mac_address'),
-                         'mac address of NIC2')
-        subnets = nic2.get('subnets')
-        self.assertEqual(1, len(subnets), 'number of subnets for NIC2')
-        subnet = subnets[0]
-        self.assertEqual('dhcp', subnet.get('type'), 'DHCP type for NIC2')
-        self.assertEqual('auto', subnet.get('control'), 'NIC2 Control type')
-
-    def test_get_nics_list_static(self):
-        """Tests if NicConfigurator properly calculates network subnets
-           for a configuration with 2 static NICs"""
-        cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg")
-
-        config = Config(cf)
-
-        nicConfigurator = NicConfigurator(config.nics, False)
-        nics_cfg_list = nicConfigurator.generate()
-
-        self.assertEqual(2, len(nics_cfg_list), "number of elements")
-
-        nic1 = {'name': 'NIC1'}
-        nic2 = {'name': 'NIC2'}
-        route_list = []
-        for cfg in nics_cfg_list:
-            cfg_type = cfg.get('type')
-            if cfg_type == 'physical':
-                if cfg.get('name') == nic1.get('name'):
-                    nic1.update(cfg)
-                elif cfg.get('name') == nic2.get('name'):
-                    nic2.update(cfg)
-
-        self.assertEqual('physical', nic1.get('type'), 'type of NIC1')
-        self.assertEqual('NIC1', nic1.get('name'), 'name of NIC1')
-        self.assertEqual('00:50:56:a6:8c:08', nic1.get('mac_address'),
-                         'mac address of NIC1')
-
-        subnets = nic1.get('subnets')
-        self.assertEqual(2, len(subnets), 'Number of subnets')
-
-        static_subnet = []
-        static6_subnet = []
-
-        for subnet in subnets:
-            subnet_type = subnet.get('type')
-            if subnet_type == 'static':
-                static_subnet.append(subnet)
-            elif subnet_type == 'static6':
-                static6_subnet.append(subnet)
-            else:
-                self.assertEqual(True, False, 'Unknown type')
-            if 'route' in subnet:
-                for route in subnet.get('routes'):
-                    route_list.append(route)
-
-        self.assertEqual(1, len(static_subnet), 'Number of static subnet')
-        self.assertEqual(1, len(static6_subnet), 'Number of static6 subnet')
-
-        subnet = static_subnet[0]
-        self.assertEqual('10.20.87.154', subnet.get('address'),
-                         'IPv4 address of static subnet')
-        self.assertEqual('255.255.252.0', subnet.get('netmask'),
-                         'NetMask of static subnet')
-        self.assertEqual('auto', subnet.get('control'),
-                         'control for static subnet')
-
-        subnet = static6_subnet[0]
-        self.assertEqual('fc00:10:20:87::154', subnet.get('address'),
-                         'IPv6 address of static subnet')
-        self.assertEqual('64', subnet.get('netmask'),
-                         'NetMask of static6 subnet')
-
-        route_set = set(['10.20.87.253', '10.20.87.105', '192.168.0.10'])
-        for route in route_list:
-            self.assertEqual(10000, route.get('metric'), 'metric of route')
-            gateway = route.get('gateway')
-            if gateway in route_set:
-                route_set.discard(gateway)
-            else:
-                self.assertEqual(True, False, 'invalid gateway %s' % (gateway))
-
-        self.assertEqual('physical', nic2.get('type'), 'type of NIC2')
-        self.assertEqual('NIC2', nic2.get('name'), 'name of NIC2')
-        self.assertEqual('00:50:56:a6:ef:7d', nic2.get('mac_address'),
-                         'mac address of NIC2')
-
-        subnets = nic2.get('subnets')
-        self.assertEqual(1, len(subnets), 'Number of subnets for NIC2')
-
-        subnet = subnets[0]
-        self.assertEqual('static', subnet.get('type'), 'Subnet type')
-        self.assertEqual('192.168.6.102', subnet.get('address'),
-                         'Subnet address')
-        self.assertEqual('255.255.0.0', subnet.get('netmask'),
-                         'Subnet netmask')
-
-    def test_custom_script(self):
-        cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-        conf = Config(cf)
-        self.assertIsNone(conf.custom_script_name)
-        cf._insertKey("CUSTOM-SCRIPT|SCRIPT-NAME", "test-script")
-        conf = Config(cf)
-        self.assertEqual("test-script", conf.custom_script_name)
-
-    def test_post_gc_status(self):
-        cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-        conf = Config(cf)
-        self.assertFalse(conf.post_gc_status)
-        cf._insertKey("MISC|POST-GC-STATUS", "YES")
-        conf = Config(cf)
-        self.assertTrue(conf.post_gc_status)
-
-    def test_no_default_run_post_script(self):
-        cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-        conf = Config(cf)
-        self.assertFalse(conf.default_run_post_script)
-        cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "NO")
-        conf = Config(cf)
-        self.assertFalse(conf.default_run_post_script)
-
-    def test_yes_default_run_post_script(self):
-        cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-        cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "yes")
-        conf = Config(cf)
-        self.assertTrue(conf.default_run_post_script)
-
-
-class TestVmwareNetConfig(CiTestCase):
-    """Test conversion of vmware config to cloud-init config."""
-
-    maxDiff = None
-
-    def _get_NicConfigurator(self, text):
-        fp = None
-        try:
-            with tempfile.NamedTemporaryFile(mode="w", dir=self.tmp_dir(),
-                                             delete=False) as fp:
-                fp.write(text)
-                fp.close()
-            cfg = Config(ConfigFile(fp.name))
-            return NicConfigurator(cfg.nics, use_system_devices=False)
-        finally:
-            if fp:
-                os.unlink(fp.name)
-
-    def test_non_primary_nic_without_gateway(self):
-        """A non primary nic set is not required to have a gateway."""
-        config = textwrap.dedent("""\
-            [NETWORK]
-            NETWORKING = yes
-            BOOTPROTO = dhcp
-            HOSTNAME = myhost1
-            DOMAINNAME = eng.vmware.com
-
-            [NIC-CONFIG]
-            NICS = NIC1
-
-            [NIC1]
-            MACADDR = 00:50:56:a6:8c:08
-            ONBOOT = yes
-            IPv4_MODE = BACKWARDS_COMPATIBLE
-            BOOTPROTO = static
-            IPADDR = 10.20.87.154
-            NETMASK = 255.255.252.0
-            """)
-        nc = self._get_NicConfigurator(config)
-        self.assertEqual(
-            [{'type': 'physical', 'name': 'NIC1',
-              'mac_address': '00:50:56:a6:8c:08',
-              'subnets': [
-                  {'control': 'auto', 'type': 'static',
-                   'address': '10.20.87.154', 'netmask': '255.255.252.0'}]}],
-            nc.generate())
-
-    def test_non_primary_nic_with_gateway(self):
-        """A non primary nic set can have a gateway."""
-        config = textwrap.dedent("""\
-            [NETWORK]
-            NETWORKING = yes
-            BOOTPROTO = dhcp
-            HOSTNAME = myhost1
-            DOMAINNAME = eng.vmware.com
-
-            [NIC-CONFIG]
-            NICS = NIC1
-
-            [NIC1]
-            MACADDR = 00:50:56:a6:8c:08
-            ONBOOT = yes
-            IPv4_MODE = BACKWARDS_COMPATIBLE
-            BOOTPROTO = static
-            IPADDR = 10.20.87.154
-            NETMASK = 255.255.252.0
-            GATEWAY = 10.20.87.253
-            """)
-        nc = self._get_NicConfigurator(config)
-        self.assertEqual(
-            [{'type': 'physical', 'name': 'NIC1',
-              'mac_address': '00:50:56:a6:8c:08',
-              'subnets': [
-                  {'control': 'auto', 'type': 'static',
-                   'address': '10.20.87.154', 'netmask': '255.255.252.0',
-                   'routes':
-                       [{'type': 'route', 'destination': '10.20.84.0/22',
-                         'gateway': '10.20.87.253', 'metric': 10000}]}]}],
-            nc.generate())
-
-    def test_cust_non_primary_nic_with_gateway_(self):
-        """A customer non primary nic set can have a gateway."""
-        config = textwrap.dedent("""\
-            [NETWORK]
-            NETWORKING = yes
-            BOOTPROTO = dhcp
-            HOSTNAME = static-debug-vm
-            DOMAINNAME = cluster.local
-
-            [NIC-CONFIG]
-            NICS = NIC1
-
-            [NIC1]
-            MACADDR = 00:50:56:ac:d1:8a
-            ONBOOT = yes
-            IPv4_MODE = BACKWARDS_COMPATIBLE
-            BOOTPROTO = static
-            IPADDR = 100.115.223.75
-            NETMASK = 255.255.255.0
-            GATEWAY = 100.115.223.254
-
-
-            [DNS]
-            DNSFROMDHCP=no
-
-            NAMESERVER|1 = 8.8.8.8
-
-            [DATETIME]
-            UTC = yes
-            """)
-        nc = self._get_NicConfigurator(config)
-        self.assertEqual(
-            [{'type': 'physical', 'name': 'NIC1',
-              'mac_address': '00:50:56:ac:d1:8a',
-              'subnets': [
-                  {'control': 'auto', 'type': 'static',
-                   'address': '100.115.223.75', 'netmask': '255.255.255.0',
-                   'routes':
-                       [{'type': 'route', 'destination': '100.115.223.0/24',
-                         'gateway': '100.115.223.254', 'metric': 10000}]}]}],
-            nc.generate())
-
-    def test_a_primary_nic_with_gateway(self):
-        """A primary nic set can have a gateway."""
-        config = textwrap.dedent("""\
-            [NETWORK]
-            NETWORKING = yes
-            BOOTPROTO = dhcp
-            HOSTNAME = myhost1
-            DOMAINNAME = eng.vmware.com
-
-            [NIC-CONFIG]
-            NICS = NIC1
-
-            [NIC1]
-            MACADDR = 00:50:56:a6:8c:08
-            ONBOOT = yes
-            IPv4_MODE = BACKWARDS_COMPATIBLE
-            BOOTPROTO = static
-            IPADDR = 10.20.87.154
-            NETMASK = 255.255.252.0
-            PRIMARY = true
-            GATEWAY = 10.20.87.253
-            """)
-        nc = self._get_NicConfigurator(config)
-        self.assertEqual(
-            [{'type': 'physical', 'name': 'NIC1',
-              'mac_address': '00:50:56:a6:8c:08',
-              'subnets': [
-                  {'control': 'auto', 'type': 'static',
-                   'address': '10.20.87.154', 'netmask': '255.255.252.0',
-                   'gateway': '10.20.87.253'}]}],
-            nc.generate())
-
-    def test_meta_data(self):
-        cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-        conf = Config(cf)
-        self.assertIsNone(conf.meta_data_name)
-        cf._insertKey("CLOUDINIT|METADATA", "test-metadata")
-        conf = Config(cf)
-        self.assertEqual("test-metadata", conf.meta_data_name)
-
-    def test_user_data(self):
-        cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
-        conf = Config(cf)
-        self.assertIsNone(conf.user_data_name)
-        cf._insertKey("CLOUDINIT|USERDATA", "test-userdata")
-        conf = Config(cf)
-        self.assertEqual("test-userdata", conf.user_data_name)
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/util.py b/tests/unittests/util.py
index 383f5f5c..2204c28f 100644
--- a/tests/unittests/util.py
+++ b/tests/unittests/util.py
@@ -15,7 +15,7 @@ def get_cloud(distro=None, paths=None, sys_cfg=None, metadata=None):
     """
     paths = paths or helpers.Paths({})
     sys_cfg = sys_cfg or {}
-    cls = distros.fetch(distro) if distro else TestingDistro
+    cls = distros.fetch(distro) if distro else MockDistro
     mydist = cls(distro, sys_cfg, paths)
     myds = DataSourceTesting(sys_cfg, mydist, paths)
     if metadata:
@@ -49,14 +49,14 @@ class DataSourceTesting(DataSourceNone):
         return 'testing'


-class TestingDistro(distros.Distro):
-    # TestingDistro is here to test base Distro class implementations
+class MockDistro(distros.Distro):
+    # MockDistro is here to test base Distro class implementations
     def __init__(self, name="testingdistro", cfg=None, paths=None):
         if not cfg:
             cfg = {}
         if not paths:
             paths = {}
-        super(TestingDistro, self).__init__(name, cfg, paths)
+        super(MockDistro, self).__init__(name, cfg, paths)

     def install_packages(self, pkglist):
         pass
diff --git a/tox.ini b/tox.ini
index 874d3f20..ff888266 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,7 +3,7 @@ envlist = py3, xenial-dev, flake8, pylint
 recreate = True

 [testenv]
-commands = {envpython} -m pytest {posargs:tests/unittests cloudinit}
+commands = {envpython} -m pytest {posargs:tests/unittests}
 setenv =
     LC_ALL = en_US.utf-8
 passenv=
@@ -37,7 +37,7 @@ deps =
 commands = {envpython} -m pytest \
             --durations 10 \
             {posargs:--cov=cloudinit --cov-branch \
-            tests/unittests cloudinit}
+            tests/unittests}

 [testenv:py27]
 basepython = python2.7
@@ -86,7 +86,7 @@ deps =
 # [testenv:xenial-dev]. See the comment there for details.
 commands =
     python ./tools/pipremove jsonschema
-    python -m pytest {posargs:tests/unittests cloudinit}
+    python -m pytest {posargs:tests/unittests}
 basepython = python3
 deps =
     # Refer to the comment in [xenial-shared-deps] for details
@@ -104,7 +104,7 @@ deps =
 # changes here are reflected in [testenv:xenial].
 commands =
     python ./tools/pipremove jsonschema
-    python -m pytest {posargs:tests/unittests cloudinit}
+    python -m pytest {posargs:tests/unittests}
 basepython = {[testenv:xenial]basepython}
 deps =
     # Refer to the comment in [xenial-shared-deps] for details
@@ -163,7 +163,7 @@ setenv =

 [pytest]
 # TODO: s/--strict/--strict-markers/ once xenial support is dropped
-testpaths = cloudinit tests/unittests
+testpaths = tests/unittests
 addopts = --strict
 log_format = %(asctime)s %(levelname)-9s %(name)s:%(filename)s:%(lineno)d %(message)s
 log_date_format = %Y-%m-%d %H:%M:%S
-- cgit v1.2.3
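The file moves in this patch all follow one visible convention: the new tests/unittests/test_version.py above covers cloudinit/version.py, and the testpaths change in tox.ini points pytest at tests/unittests only. A minimal sketch of that source-to-test mapping, assuming only the convention visible in the diffs (the helper name expected_test_path is illustrative and not part of the tree):

    from pathlib import Path

    def expected_test_path(source_file: str) -> Path:
        # e.g. cloudinit/version.py -> tests/unittests/test_version.py
        rel = Path(source_file).relative_to("cloudinit")
        return Path("tests/unittests") / rel.parent / ("test_" + rel.name)

    print(expected_test_path("cloudinit/version.py"))
    # tests/unittests/test_version.py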
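The helpers import moves with the tests as well: the deleted VMware modules above import from cloudinit.tests.helpers, while the relocated tests/unittests/test_version.py imports from tests.unittests.helpers. A new-layout test module would therefore look roughly like this sketch (the file name test_example.py and its assertion are hypothetical; util.decode_binary is the real helper exercised by TestEncode above):

    # tests/unittests/test_example.py (hypothetical)
    from tests.unittests.helpers import CiTestCase

    from cloudinit import util


    class TestExample(CiTestCase):
        def test_decode_binary_bytes(self):
            # utf-8 bytes decode to text; str input passes through unchanged
            self.assertEqual('hello', util.decode_binary(b'hello'))

With testpaths narrowed to tests/unittests, a plain "python -m pytest" or "tox" run from the repository root discovers such a file without the old in-tree cloudinit test paths.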